diff --git a/.github/workflows/tags_stable.yml b/.github/workflows/tags_stable.yml
new file mode 100644
index 00000000000..423fc64c4fc
--- /dev/null
+++ b/.github/workflows/tags_stable.yml
@@ -0,0 +1,38 @@
+name: TagsStableWorkflow
+# - Gets artifacts from S3
+# - Sends them to JFrog Artifactory
+# - Adds them to the release assets
+
+on:  # yamllint disable-line rule:truthy
+  push:
+    tags:
+      - 'v*-stable'
+      - 'v*-lts'
+
+
+jobs:
+  UpdateVersions:
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Get tag name
+        run: echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+        with:
+          ref: master
+      - name: Generate versions
+        run: |
+          git fetch --tags
+          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v3
+        with:
+          commit-message: Update version_date.tsv after ${{ env.GITHUB_TAG }}
+          branch: auto/${{ env.GITHUB_TAG }}
+          delete-branch: true
+          title: Update version_date.tsv after ${{ env.GITHUB_TAG }}
+          body: |
+            Update version_date.tsv after ${{ env.GITHUB_TAG }}
+
+            Changelog category (leave one):
+            - Not for changelog (changelog entry is not required)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3c846cdd51e..08ccfef3324 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -67,7 +67,7 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURC
 message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
 endif ()
-include (cmake/find/ccache.cmake)
+include (cmake/ccache.cmake)
 # Take care to add prlimit in command line before ccache, or else ccache thinks that
 # prlimit is compiler, and clang++ is its input file, and refuses to work with
diff --git a/cmake/find/ccache.cmake b/cmake/ccache.cmake
similarity index 100%
rename from cmake/find/ccache.cmake
rename to cmake/ccache.cmake
diff --git a/cmake/find/cxx.cmake b/cmake/cxx.cmake
similarity index 100%
rename from cmake/find/cxx.cmake
rename to cmake/cxx.cmake
diff --git a/cmake/darwin/default_libs.cmake b/cmake/darwin/default_libs.cmake
index ca4beaea8b6..1f92663a4b9 100644
--- a/cmake/darwin/default_libs.cmake
+++ b/cmake/darwin/default_libs.cmake
@@ -22,7 +22,7 @@ set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
-include (cmake/find/cxx.cmake)
+include (cmake/cxx.cmake)
 target_link_libraries(global-group INTERFACE
 $
diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake
index f7a333df6e6..65d5f0511d9 100644
--- a/cmake/freebsd/default_libs.cmake
+++ b/cmake/freebsd/default_libs.cmake
@@ -22,8 +22,8 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
-include (cmake/find/unwind.cmake)
-include (cmake/find/cxx.cmake)
+include (cmake/unwind.cmake)
+include (cmake/cxx.cmake)
 target_link_libraries(global-group INTERFACE
 $
diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake
index 98951822015..21bead7020c 100644
--- a/cmake/linux/default_libs.cmake
+++ b/cmake/linux/default_libs.cmake
@@ -42,8 +42,8 @@ if (NOT OS_ANDROID)
 add_subdirectory(base/harmful)
 endif ()
-include (cmake/find/unwind.cmake)
-include (cmake/find/cxx.cmake)
+include (cmake/unwind.cmake)
+include (cmake/cxx.cmake)
 target_link_libraries(global-group INTERFACE
 -Wl,--start-group
diff --git a/cmake/find/unwind.cmake b/cmake/unwind.cmake
similarity index 100%
rename from
cmake/find/unwind.cmake
rename to cmake/unwind.cmake
diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt
index 54bfead6da7..2e4059efc17 100644
--- a/contrib/arrow-cmake/CMakeLists.txt
+++ b/contrib/arrow-cmake/CMakeLists.txt
@@ -29,12 +29,6 @@ if (OS_FREEBSD)
 message (FATAL_ERROR "Using internal parquet library on FreeBSD is not supported")
 endif()
-if(USE_STATIC_LIBRARIES)
-    set(FLATBUFFERS_LIBRARY flatbuffers)
-else()
-    set(FLATBUFFERS_LIBRARY flatbuffers_shared)
-endif()
-
 set (CMAKE_CXX_STANDARD 17)
 set(ARROW_VERSION "6.0.1")
@@ -95,9 +89,16 @@ set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
 add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
-message(STATUS "FLATBUFFERS_LIBRARY: ${FLATBUFFERS_LIBRARY}")
+add_library(_flatbuffers INTERFACE)
+if(USE_STATIC_LIBRARIES)
+    target_link_libraries(_flatbuffers INTERFACE flatbuffers)
+else()
+    target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
+endif()
+target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
 # === hdfs
+# NOTE: cannot use ch_contrib::hdfs since its INCLUDE_DIRECTORIES does not include the trailing "hdfs/"
 set(HDFS_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/")
 # arrow-cmake cmake file calling orc cmake subroutine which detects certain compiler features.
@@ -123,8 +124,6 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A
 # ARROW_ORC + adapters/orc/CMakefiles
 set(ORC_SRCS
-    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
-    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
     "${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
     "${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
     "${ORC_SOURCE_SRC_DIR}/Reader.cc"
@@ -151,6 +150,22 @@ set(ORC_SRCS
     "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
 )
+add_library(_orc ${ORC_SRCS})
+target_link_libraries(_orc PRIVATE
+    ch_contrib::protobuf
+    ch_contrib::lz4
+    ch_contrib::snappy
+    ch_contrib::zlib
+    ch_contrib::zstd)
+target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR})
+target_include_directories(_orc SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR})
+target_include_directories(_orc SYSTEM PRIVATE
+    ${ORC_SOURCE_SRC_DIR}
+    ${ORC_SOURCE_WRAP_DIR}
+    ${ORC_BUILD_SRC_DIR}
+    ${ORC_ADDITION_SOURCE_DIR}
+    ${ARROW_SRC_DIR})
+
 # === arrow
@@ -336,7 +351,8 @@ set(ARROW_SRCS
     "${LIBRARY_DIR}/ipc/reader.cc"
     "${LIBRARY_DIR}/ipc/writer.cc"
-    ${ORC_SRCS}
+    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter.cc"
+    "${ARROW_SRC_DIR}/arrow/adapters/orc/adapter_util.cc"
 )
 add_definitions(-DARROW_WITH_LZ4)
@@ -356,30 +372,27 @@ endif ()
 add_library(_arrow ${ARROW_SRCS})
-# Arrow dependencies
-add_dependencies(_arrow ${FLATBUFFERS_LIBRARY})
+target_link_libraries(_arrow PRIVATE
+    boost::filesystem
-target_link_libraries(_arrow PRIVATE ${FLATBUFFERS_LIBRARY} boost::filesystem)
+    _flatbuffers
+
+    ch_contrib::double_conversion
+
+    ch_contrib::lz4
+    ch_contrib::snappy
+    ch_contrib::zlib
+    ch_contrib::zstd
+)
+target_link_libraries(_arrow PUBLIC _orc)
 add_dependencies(_arrow protoc)
 target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ARROW_SRC_DIR})
 target_include_directories(_arrow SYSTEM BEFORE PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/cpp/src")
-target_link_libraries(_arrow PRIVATE ch_contrib::double_conversion)
-target_link_libraries(_arrow PRIVATE ch_contrib::protobuf)
-target_link_libraries(_arrow PRIVATE ch_contrib::lz4)
-target_link_libraries(_arrow PRIVATE ch_contrib::snappy)
-target_link_libraries(_arrow PRIVATE ch_contrib::zlib)
-target_link_libraries(_arrow PRIVATE ch_contrib::zstd) -target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR}) -target_include_directories(_arrow SYSTEM BEFORE PUBLIC ${ORC_BUILD_INCLUDE_DIR}) -target_include_directories(_arrow SYSTEM PRIVATE ${ORC_SOURCE_SRC_DIR}) -target_include_directories(_arrow SYSTEM PRIVATE ${ORC_SOURCE_WRAP_DIR}) -target_include_directories(_arrow SYSTEM PRIVATE ${ORC_BUILD_SRC_DIR}) -target_include_directories(_arrow SYSTEM PRIVATE ${ORC_ADDITION_SOURCE_DIR}) target_include_directories(_arrow SYSTEM PRIVATE ${ARROW_SRC_DIR}) -target_include_directories(_arrow SYSTEM PRIVATE ${FLATBUFFERS_INCLUDE_DIR}) target_include_directories(_arrow SYSTEM PRIVATE ${HDFS_INCLUDE_DIR}) # === parquet diff --git a/contrib/rocksdb-cmake/CMakeLists.txt b/contrib/rocksdb-cmake/CMakeLists.txt index 902d29a9630..c35009ba10a 100644 --- a/contrib/rocksdb-cmake/CMakeLists.txt +++ b/contrib/rocksdb-cmake/CMakeLists.txt @@ -72,11 +72,6 @@ else() if(WITH_ZSTD) add_definitions(-DZSTD) - include_directories(${ZSTD_INCLUDE_DIR}) - include_directories("${ZSTD_INCLUDE_DIR}/common") - include_directories("${ZSTD_INCLUDE_DIR}/dictBuilder") - include_directories("${ZSTD_INCLUDE_DIR}/deprecated") - list(APPEND THIRDPARTY_LIBS ch_contrib::zstd) endif() endif() diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index d6d9f189e89..bd0fc494826 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -12,7 +12,11 @@ dpkg -i package_folder/clickhouse-common-static_*.deb dpkg -i package_folder/clickhouse-common-static-dbg_*.deb dpkg -i package_folder/clickhouse-server_*.deb dpkg -i package_folder/clickhouse-client_*.deb -dpkg -i package_folder/clickhouse-test_*.deb +if [[ -n "$TEST_CASES_FROM_DEB" ]] && [[ "$TEST_CASES_FROM_DEB" -eq 1 ]]; then + dpkg -i package_folder/clickhouse-test_*.deb +else + ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test +fi # install test configs /usr/share/clickhouse-test/config/install.sh diff --git a/docs/_includes/cmake_in_clickhouse_header.md b/docs/_includes/cmake_in_clickhouse_header.md index f950cdcc6db..02019f13964 100644 --- a/docs/_includes/cmake_in_clickhouse_header.md +++ b/docs/_includes/cmake_in_clickhouse_header.md @@ -22,7 +22,7 @@ cmake .. \ 1. ClickHouse's source CMake files (located in the root directory and in `/src`). 2. Arch-dependent CMake files (located in `/cmake/*os_name*`). -3. Libraries finders (search for contrib libraries, located in `/cmake/find`). +3. Libraries finders (search for contrib libraries, located in `/contrib/*/CMakeLists.txt`). 3. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`) ## List of CMake flags diff --git a/docs/_includes/install/deb.sh b/docs/_includes/install/deb.sh index 7dcca601d33..21106e9fc47 100644 --- a/docs/_includes/install/deb.sh +++ b/docs/_includes/install/deb.sh @@ -8,4 +8,4 @@ sudo apt-get update sudo apt-get install -y clickhouse-server clickhouse-client sudo service clickhouse-server start -clickhouse-client +clickhouse-client # or "clickhouse-client --password" if you set up a password. 
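The arrow-cmake change above replaces the `FLATBUFFERS_LIBRARY` variable with a `_flatbuffers` INTERFACE target, so the link dependency and its include path travel together as usage requirements. A minimal sketch of that pattern, with hypothetical library and variable names:

```cmake
# Hide the static/shared choice behind one INTERFACE target so that consumers
# link a single stable name instead of dereferencing a variable.
add_library(_mylib INTERFACE)
if (USE_STATIC_LIBRARIES)
    target_link_libraries(_mylib INTERFACE mylib)
else()
    target_link_libraries(_mylib INTERFACE mylib_shared)
endif()
# Usage requirements (here, the headers) travel with the target automatically.
target_include_directories(_mylib INTERFACE ${MYLIB_INCLUDE_DIR})

# A consumer needs neither add_dependencies() nor manual include directories:
target_link_libraries(consumer PRIVATE _mylib)
```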
diff --git a/docs/_includes/install/rpm.sh b/docs/_includes/install/rpm.sh index de4a07420f7..e3fd1232047 100644 --- a/docs/_includes/install/rpm.sh +++ b/docs/_includes/install/rpm.sh @@ -4,4 +4,4 @@ sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/clickhouse.re sudo yum install clickhouse-server clickhouse-client sudo /etc/init.d/clickhouse-server start -clickhouse-client +clickhouse-client # or "clickhouse-client --password" if you set up a password. diff --git a/docs/ru/development/build-osx.md b/docs/ru/development/build-osx.md index a1192b509df..48d92501f06 100644 --- a/docs/ru/development/build-osx.md +++ b/docs/ru/development/build-osx.md @@ -2,8 +2,13 @@ toc_priority: 65 toc_title: Сборка на Mac OS X --- + # Как собрать ClickHouse на Mac OS X {#how-to-build-clickhouse-on-mac-os-x} +!!! info "Вам не нужно собирать ClickHouse самостоятельно" + Вы можете установить предварительно собранный ClickHouse, как описано в [Быстром старте](https://clickhouse.com/#quick-start). + Следуйте инструкциям по установке для `macOS (Intel)` или `macOS (Apple Silicon)`. + Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах. ## Установка Homebrew {#install-homebrew} diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index cf41e2b78c2..3d32ba30a21 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -90,7 +90,10 @@ def concatenate(lang, docs_path, single_page_file, nav): line) # If failed to replace the relative link, print to log - if '../' in line: + # But with some exceptions: + # - "../src/" -- for cmake-in-clickhouse.md (link to sources) + # - "../usr/share" -- changelog entry that has "../usr/share/zoneinfo" + if '../' in line and (not '../usr/share' in line) and (not '../src/' in line): logging.info('Failed to resolve relative link:') logging.info(path) logging.info(line) diff --git a/docs/zh/faq/integration/index.md b/docs/zh/faq/integration/index.md deleted file mode 120000 index 8323d6218a3..00000000000 --- a/docs/zh/faq/integration/index.md +++ /dev/null @@ -1 +0,0 @@ -../../../en/faq/integration/index.md \ No newline at end of file diff --git a/docs/zh/faq/integration/index.md b/docs/zh/faq/integration/index.md new file mode 100644 index 00000000000..2bfd728ec8c --- /dev/null +++ b/docs/zh/faq/integration/index.md @@ -0,0 +1,21 @@ +--- +title: 关于集成ClickHouse和其他系统的问题 +toc_hidden_folder: true +toc_priority: 4 +toc_title: Integration +--- + +# 关于集成ClickHouse和其他系统的问题 {#question-about-integrating-clickhouse-and-other-systems} + +问题: + +- [如何从 ClickHouse 导出数据到一个文件?](../../faq/integration/file-export.md) +- [如何导入JSON到ClickHouse?](../../faq/integration/json-import.md) +- [如果我用ODBC链接Oracle数据库出现编码问题该怎么办?](../../faq/integration/oracle-odbc.md) + + + +!!! info "没看到你要找的东西吗?" 
+ 查看[其他faq类别](../../faq/index.md)或浏览左边栏中的主要文档文章。 + +{## [原文](https://clickhouse.com/docs/en/faq/integration/) ##} \ No newline at end of file diff --git a/docs/zh/sql-reference/ansi.md b/docs/zh/sql-reference/ansi.md index 0e7fa1d06c3..5aad2cf52a8 100644 --- a/docs/zh/sql-reference/ansi.md +++ b/docs/zh/sql-reference/ansi.md @@ -1,180 +1,189 @@ --- -machine_translated: true -machine_translated_rev: ad252bbb4f7e2899c448eb42ecc39ff195c8faa1 toc_priority: 40 toc_title: "ANSI\u517C\u5BB9\u6027" --- -# Ansi Sql兼容性的ClickHouse SQL方言 {#ansi-sql-compatibility-of-clickhouse-sql-dialect} +# ClickHouse SQL方言 与ANSI SQL的兼容性{#ansi-sql-compatibility-of-clickhouse-sql-dialect} !!! note "注" - 本文依赖于表38, “Feature taxonomy and definition for mandatory features”, Annex F of ISO/IEC CD 9075-2:2013. + 本文参考Annex G所著的[ISO/IEC CD 9075-2:2011](https://www.iso.org/obp/ui/#iso:std:iso-iec:9075:-2:ed-4:v1:en:sec:8)标准. ## 行为差异 {#differences-in-behaviour} -下表列出了查询功能在ClickHouse中有效但不符合ANSI SQL标准的情况。 +下表列出了ClickHouse能够使用,但与ANSI SQL规定有差异的查询特性。 -| Feature ID | 功能名称 | 差异 | -|------------|--------------------|---------------------------------------------------------------------| -| E011 | 数值(Numeric)数据类型 | 带小数点的数值文字被解释为近似值 (`Float64`)而不是精确值 (`Decimal`) | -| E051-05 | SELECT字段可以重命名 | 字段不仅仅在SELECT结果中可被重命名 | -| E141-01 | 非空约束 | 表中每一列默认为`NOT NULL` | -| E011-04 | 算术运算符 | ClickHouse不会检查算法,并根据自定义规则更改结果数据类型,而是会溢出 | +| 功能ID | 功能名称 | 差异 | +| ------- | --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| E011 | 数值型数据类型 | 带小数点的数字被视为近似值 (`Float64`)而不是精确值 (`Decimal`) | +| E051-05 | SELECT 的列可以重命名 | 字段重命名的作用范围不限于进行重命名的SELECT子查询(参考[表达式别名](https://clickhouse.com/docs/zh/sql-reference/syntax/#notes-on-usage)) | +| E141-01 | NOT NULL(非空)约束 | ClickHouse表中每一列默认为`NOT NULL` | +| E011-04 | 算术运算符 | ClickHouse在运算时会进行溢出,而不是四舍五入。此外会根据自定义规则修改结果数据类型(参考[溢出检查](https://clickhouse.com/docs/zh/sql-reference/data-types/decimal/#yi-chu-jian-cha)) | -## 功能匹配 {#feature-status} +## 功能状态 {#feature-status} -| Feature ID | 功能名称 | 匹配 | 评论 | -|------------|----------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **E011** | **数字数据类型** | **部分**{.text-warning} | | -| E011-01 | 整型和小型数据类型 | 是 {.text-success} | | -| E011-02 | 真实、双精度和浮点数据类型数据类型 | 部分 {.text-warning} | `FLOAT()`, `REAL` 和 `DOUBLE PRECISION` 不支持 | -| E011-03 | 十进制和数值数据类型 | 部分 {.text-warning} | 只有 `DECIMAL(p,s)` 支持,而不是 `NUMERIC` | -| E011-04 | 算术运算符 | 是 {.text-success} | | -| E011-05 | 数字比较 | 是 {.text-success} | | -| E011-06 | 数字数据类型之间的隐式转换 | 否。 {.text-danger} | ANSI SQL允许在数值类型之间进行任意隐式转换,而ClickHouse依赖于具有多个重载的函数而不是隐式转换 | -| **E021** | **字符串类型** | **部分**{.text-warning} | | -| E021-01 | 字符数据类型 | 否。 {.text-danger} | | -| E021-02 | 字符变化数据类型 | 否。 {.text-danger} | `String` 行为类似,但括号中没有长度限制 | -| E021-03 | 字符文字 | 部分 {.text-warning} | 不自动连接连续文字和字符集支持 | -| E021-04 | 字符长度函数 | 部分 {.text-warning} | 非也。 `USING` 条款 | -| E021-05 | OCTET_LENGTH函数 | 非也。 {.text-danger} | `LENGTH` 表现类似 | -| E021-06 | SUBSTRING | 部分 {.text-warning} | 不支持 `SIMILAR` 和 `ESCAPE` 条款,否 `SUBSTRING_REGEX` 备选案文 | -| E021-07 | 字符串联 | 部分 {.text-warning} | 非也。 `COLLATE` 条款 | -| E021-08 | 上下功能 | 是 {.text-success} | | -| E021-09 | 修剪功能 | 是 {.text-success} | | -| E021-10 | 固定长度和可变长度字符串类型之间的隐式转换 | 否。 
{.text-danger} | ANSI SQL允许在字符串类型之间进行任意隐式转换,而ClickHouse依赖于具有多个重载的函数而不是隐式转换 | -| E021-11 | 职位功能 | 部分 {.text-warning} | 不支持 `IN` 和 `USING` 条款,否 `POSITION_REGEX` 备选案文 | -| E021-12 | 字符比较 | 是 {.text-success} | | -| **E031** | **标识符** | **部分**{.text-warning} | | -| E031-01 | 分隔标识符 | 部分 {.text-warning} | Unicode文字支持有限 | -| E031-02 | 小写标识符 | 是 {.text-success} | | -| E031-03 | 尾部下划线 | 是 {.text-success} | | -| **E051** | **基本查询规范** | **部分**{.text-warning} | | -| E051-01 | SELECT DISTINCT | 是 {.text-success} | | -| E051-02 | GROUP BY子句 | 是 {.text-success} | | -| E051-04 | 分组依据可以包含不在列 ``中出现的列 | 是 {.text-success} | | +| E051-05 | SELECT 的列可以重命名 | 是 {.text-success} | | +| E051-06 | HAVING 从句 | 是 {.text-success} | | +| E051-07 | SELECT 选择的列中允许出现\* | 是 {.text-success} | | +| E051-08 | FROM 从句中的关联名称 | 是 {.text-success} | | +| E051-09 | 重命名 FROM 从句中的列 | 否 {.text-danger} | | +| **E061** | **基本谓词和搜索条件** | **部分**{.text-warning} | | +| E061-01 | 比较谓词 | 是 {.text-success} | | +| E061-02 | BETWEEN 谓词 | 部分 {.text-warning} | 不支持 `SYMMETRIC` 和 `ASYMMETRIC` 从句 | +| E061-03 | IN 谓词后可接值列表 | 是 {.text-success} | | +| E061-04 | LIKE 谓词 | 是 {.text-success} | | +| E061-05 | LIKE 谓词后接 ESCAPE 从句 | 否 {.text-danger} | | +| E061-06 | NULL 谓词 | 是 {.text-success} | | +| E061-07 | 量化比较谓词(ALL、SOME、ANY) | 否 {.text-danger} | | +| E061-08 | EXISTS 谓词 | 否 {.text-danger} | | +| E061-09 | 比较谓词中的子查询 | 是 {.text-success} | | +| E061-11 | IN 谓词中的子查询 | 是 {.text-success} | | +| E061-12 | 量化比较谓词(BETWEEN、IN、LIKE)中的子查询 | 否 {.text-danger} | | +| E061-13 | 相关子查询 | 否 {.text-danger} | | +| E061-14 | 搜索条件 | 是 {.text-success} | | +| **E071** | **基本查询表达式** | **部分**{.text-warning} | | +| E071-01 | UNION DISTINCT 表运算符 | 是 {.text-success} | | +| E071-02 | UNION ALL 表运算符 | 是 {.text-success} | | +| E071-03 | EXCEPT DISTINCT 表运算符 | 否 {.text-danger} | | +| E071-05 | 通过表运算符组合的列不必具有完全相同的数据类型 | 是 {.text-success} | | +| E071-06 | 子查询中的表运算符 | 是 {.text-success} | | +| **E081** | **基本权限** | **是**{.text-success} | | +| E081-01 | 表级别的SELECT(查询)权限 | 是 {.text-success} | | +| E081-02 | DELETE(删除)权限 | 是 {.text-success} | | +| E081-03 | 表级别的INSERT(插入)权限 | 是 {.text-success} | | +| E081-04 | 表级别的UPDATE(更新)权限 | 是 {.text-success} | | +| E081-05 | 列级别的UPDATE(更新)权限 | 是 {.text-success} | | +| E081-06 | 表级别的REFERENCES(引用)权限 | 是 {.text-success} | | +| E081-07 | 列级别的REFERENCES(引用)权限 | 是 {.text-success} | | +| E081-08 | WITH GRANT OPTION | 是 {.text-success} | | +| E081-09 | USAGE(使用)权限 | 是 {.text-success} | | +| E081-10 | EXECUTE(执行)权限 | 是 {.text-success} | | +| **E091** | **集合函数** | **是**{.text-success} | | +| E091-01 | AVG | 是 {.text-success} | | +| E091-02 | COUNT | 是 {.text-success} | | +| E091-03 | MAX | 是 {.text-success} | | +| E091-04 | MIN | 是 {.text-success} | | +| E091-05 | SUM | 是 {.text-success} | | +| E091-06 | ALL修饰词 | 否。 {.text-danger} | | +| E091-07 | DISTINCT修饰词 | 是 {.text-success} | 并非所有聚合函数都支持该修饰词 | +| **E101** | **基本数据操作** | **部分**{.text-warning} | | +| E101-01 | INSERT(插入)语句 | 是 {.text-success} | 注:ClickHouse中的主键并不隐含`UNIQUE` 约束 | +| E101-03 | 可指定范围的UPDATE(更新)语句 | 部分 {.text-warning} | `ALTER UPDATE` 语句用来批量更新数据 | +| E101-04 | 可指定范围的DELETE(删除)语句 | 部分 {.text-warning} | `ALTER DELETE` 语句用来批量删除数据 | +| **E111** | **返回一行的SELECT语句** | **否**{.text-danger} | | +| **E121** | **基本游标支持** | **否**{.text-danger} | | +| E121-01 | DECLARE CURSOR | 否 {.text-danger} | | +| E121-02 | ORDER BY 涉及的列不需要出现在SELECT的列中 | 是 {.text-success} | | +| E121-03 | ORDER BY 从句中的表达式 | 是 {.text-success} | | +| E121-04 | OPEN 语句 | 否 {.text-danger} | | +| E121-06 | 受游标位置控制的 UPDATE 语句 | 否 {.text-danger} | | 
+| E121-07 | 受游标位置控制的 DELETE 语句 | 否 {.text-danger} | | +| E121-08 | CLOSE 语句 | 否 {.text-danger} | | +| E121-10 | FETCH 语句中包含隐式NEXT | 否 {.text-danger} | | +| E121-17 | WITH HOLD 游标 | 否 {.text-danger} | | +| **E131** | **空值支持** | **是**{.text-success} | 有部分限制 | +| **E141** | **基本完整性约束** | **部分**{.text-warning} | | +| E141-01 | NOT NULL(非空)约束 | 是 {.text-success} | 注: 默认情况下ClickHouse表中的列隐含`NOT NULL`约束 | +| E141-02 | NOT NULL(非空)列的UNIQUE(唯一)约束 | 否 {.text-danger} | | +| E141-03 | 主键约束 | 部分 {.text-warning} | | +| E141-04 | 对于引用删除和引用更新操作,基本的FOREIGN KEY(外键)约束默认不进行任何操作(NO ACTION) | 否 {.text-danger} | | +| E141-06 | CHECK(检查)约束 | 是 {.text-success} | | +| E141-07 | 列默认值 | 是 {.text-success} | | +| E141-08 | 在主键上推断非空 | 是 {.text-success} | | +| E141-10 | 可以按任何顺序指定外键中的名称 | 否 {.text-danger} | | +| **E151** | **事务支持** | **否**{.text-danger} | | +| E151-01 | COMMIT(提交)语句 | 否 {.text-danger} | | +| E151-02 | ROLLBACK(回滚)语句 | 否 {.text-danger} | | +| **E152** | **基本的SET TRANSACTION(设置事务隔离级别)语句** | **否**{.text-danger} | | +| E152-01 | SET TRANSACTION语句:ISOLATION LEVEL SERIALIZABLE(隔离级别为串行化)从句 | 否 {.text-danger} | | +| E152-02 | SET TRANSACTION语句:READ ONLY(只读)和READ WRITE(读写)从句 | 否 {.text-danger} | | +| **E153** | **具有子查询的可更新查询** | **是**{.text-success} | | +| **E161** | **使用“--”符号作为SQL注释** | **是**{.text-success} | | +| **E171** | **SQLSTATE支持** | **否**{.text-danger} | | +| **E182** | **主机语言绑定** | **否**{.text-danger} | | +| **F031** | **基本架构操作** | **部分**{.text-warning} | | +| F031-01 | 使用 CREATE TABLE 语句创建持久表 | 部分 {.text-warning} | 不支持 `SYSTEM VERSIONING`, `ON COMMIT`, `GLOBAL`, `LOCAL`, `PRESERVE`, `DELETE`, `REF IS`, `WITH OPTIONS`, `UNDER`, `LIKE`, `PERIOD FOR` 从句,不支持用户解析的数据类型 | +| F031-02 | CREATE VIEW(创建视图)语句 | 部分 {.text-warning} | 不支持 `RECURSIVE`, `CHECK`, `UNDER`, `WITH OPTIONS` 从句,不支持用户解析的数据类型 | +| F031-03 | GRANT(授权)语句 | 是 {.text-success} | | +| F031-04 | ALTER TABLE语句:ADD COLUMN从句 | 是 {.text-success} | 不支持 `GENERATED` 从句和以系统时间做参数 | +| F031-13 | DROP TABLE语句:RESTRICT从句 | 否 {.text-danger} | | +| F031-16 | DROP VIEW语句:RESTRICT子句 | 否 {.text-danger} | | +| F031-19 | REVOKE语句:RESTRICT子句 | 否 {.text-danger} | | +| **F041** | **基本连接关系** | **部分**{.text-warning} | | +| F041-01 | Inner join(但不一定是INNER关键字) | 是 {.text-success} | | +| F041-02 | INNER 关键字 | 是 {.text-success} | | +| F041-03 | LEFT OUTER JOIN | 是 {.text-success} | | +| F041-04 | RIGHT OUTER JOIN | 是 {.text-success} | | +| F041-05 | 外连接可嵌套 | 是 {.text-success} | | +| F041-07 | 左外部连接或右外连接中的内部表也可用于内部联接 | 是 {.text-success} | | +| F041-08 | 支持所有比较运算符(而不仅仅是=) | 否 {.text-danger} | | +| **F051** | **基本日期和时间** | **部分**{.text-warning} | | +| F051-01 | DATE(日期)数据类型(并支持用于表达日期的字面量) | 是 {.text-success} | | +| F051-02 | TIME(时间)数据类型(并支持用于表达时间的字面量),小数秒精度至少为0 | 否 {.text-danger} | | +| F051-03 | 时间戳数据类型(并支持用于表达时间戳的字面量),小数秒精度至少为0和6 | 是 {.text-danger} | | +| F051-04 | 日期、时间和时间戳数据类型的比较谓词 | 是 {.text-success} | | +| F051-05 | Datetime 类型和字符串形式表达的时间之间的显式转换 | 是 {.text-success} | | +| F051-06 | CURRENT_DATE | 否 {.text-danger} | 使用`today()`替代 | +| F051-07 | LOCALTIME | 否 {.text-danger} | 使用`now()`替代 | +| F051-08 | LOCALTIMESTAMP | 否 {.text-danger} | | +| **F081** | **视图的UNION和EXCEPT操作** | **部分**{.text-warning} | | +| **F131** | **分组操作** | **部分**{.text-warning} | | +| F131-01 | 在具有分组视图的查询中支持 WHERE、GROUP BY 和 HAVING 子句 | 是 {.text-success} | | +| F131-02 | 在分组视图中支持多张表 | 是 {.text-success} | | +| F131-03 | 分组视图的查询中支持集合函数 | 是 {.text-success} | | +| F131-04 | 带有 `GROUP BY` 和 `HAVING` 从句,以及分组视图的子查询 | 是 {.text-success} | | +| F131-05 | 带有 `GROUP BY` 和 `HAVING` 从句,以及分组视图的仅返回1条记录的SELECT查询 | 否 
{.text-danger} | | +| **F181** | **多模块支持** | **否**{.text-danger} | | +| **F201** | **CAST 函数** | **是**{.text-success} | | +| **F221** | **显式默认值** | **否**{.text-danger} | | +| **F261** | **CASE 表达式** | **是**{.text-success} | | +| F261-01 | 简单 CASE 表达式 | 是 {.text-success} | | +| F261-02 | 搜索型 CASE 表达式 | 是 {.text-success} | | +| F261-03 | NULLIF | 是 {.text-success} | | +| F261-04 | COALESCE | 是 {.text-success} | | +| **F311** | **架构定义语句** | **部分**{.text-warning} | | +| F311-01 | CREATE SCHEMA | 部分 {.text-warning} | 见`CREATE DATABASE` | +| F311-02 | 用于创建持久表的 CREATE TABLE | 是 {.text-success} | | +| F311-03 | CREATE VIEW | 是 {.text-success} | | +| F311-04 | CREATE VIEW: WITH CHECK OPTION | 否 {.text-danger} | | +| F311-05 | GRANT 语句 | 是 {.text-success} | | +| **F471** | **标量子查询** | **是**{.text-success} | | +| **F481** | **扩展 NULL 谓词** | **是**{.text-success} | | +| **F812** | **基本标志位** | **否**{.text-danger} | +| **S011** | **用于不重复数据的数据类型** | **否**{.text-danger} | +| **T321** | **基本的SQL调用例程** | **否**{.text-danger} | | +| T321-01 | 没有重载的用户定义函数 | 否{.text-danger} | | +| T321-02 | 没有重载的用户定义存储过程 | 否{.text-danger} | | +| T321-03 | 功能调用 | 否 {.text-danger} | | +| T321-04 | CALL 语句 | 否 {.text-danger} | | +| T321-05 | RETURN 语句 | 否 {.text-danger} | | +| **T631** | **IN 谓词后接一个列表** | **是**{.text-success} | | diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index a5e4517824d..1a5c7d3e492 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -286,7 +286,7 @@ bool Client::executeMultiQuery(const String & all_queries_text) // , where the inline data is delimited by semicolon and not by a // newline. auto * insert_ast = parsed_query->as(); - if (insert_ast && insert_ast->data) + if (insert_ast && isSyncInsertWithData(*insert_ast, global_context)) { this_query_end = insert_ast->end; adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth); diff --git a/src/Access/CachedAccessChecking.cpp b/src/Access/CachedAccessChecking.cpp new file mode 100644 index 00000000000..aa8ef6073d3 --- /dev/null +++ b/src/Access/CachedAccessChecking.cpp @@ -0,0 +1,44 @@ +#include +#include + + +namespace DB +{ +CachedAccessChecking::CachedAccessChecking(const std::shared_ptr & access_, AccessFlags access_flags_) + : CachedAccessChecking(access_, AccessRightsElement{access_flags_}) +{ +} + +CachedAccessChecking::CachedAccessChecking(const std::shared_ptr & access_, const AccessRightsElement & element_) + : access(access_), element(element_) +{ +} + +CachedAccessChecking::~CachedAccessChecking() = default; + +bool CachedAccessChecking::checkAccess(bool throw_if_denied) +{ + if (checked) + return result; + if (throw_if_denied) + { + try + { + access->checkAccess(element); + result = true; + } + catch (...) + { + result = false; + throw; + } + } + else + { + result = access->isGranted(element); + } + checked = true; + return result; +} + +} diff --git a/src/Access/CachedAccessChecking.h b/src/Access/CachedAccessChecking.h new file mode 100644 index 00000000000..e87c28dd823 --- /dev/null +++ b/src/Access/CachedAccessChecking.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ContextAccess; + +/// Checks if the current user has a specified access type granted, +/// and if it's checked another time later, it will just return the first result. 
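+/// The cached result makes repeating the same check for many entities cheap,
+/// as InterpreterShowGrantsQuery does for the SHOW_USERS and SHOW_ROLES access types.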
+class CachedAccessChecking
+{
+public:
+    CachedAccessChecking(const std::shared_ptr & access_, AccessFlags access_flags_);
+    CachedAccessChecking(const std::shared_ptr & access_, const AccessRightsElement & element_);
+    ~CachedAccessChecking();
+
+    bool checkAccess(bool throw_if_denied = true);
+
+private:
+    const std::shared_ptr access;
+    const AccessRightsElement element;
+    bool checked = false;
+    bool result = false;
+};
+
+}
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 57d4bf29491..bb359b427c7 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -475,11 +475,6 @@ if (TARGET ch_contrib::sqlite)
 dbms_target_link_libraries(PUBLIC ch_contrib::sqlite)
 endif()
-if (USE_CASSANDRA)
-    dbms_target_link_libraries(PUBLIC ${CASSANDRA_LIBRARY})
-    dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
-endif()
-
 if (TARGET ch_contrib::msgpack)
 target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::msgpack)
 endif()
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index 27deace416d..0bc2a161feb 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -573,6 +573,18 @@ void ClientBase::updateSuggest(const ASTCreateQuery & ast_create)
 suggest->addWords(std::move(new_words));
 }
+bool ClientBase::isSyncInsertWithData(const ASTInsertQuery & insert_query, const ContextPtr & context)
+{
+    if (!insert_query.data)
+        return false;
+
+    auto settings = context->getSettings();
+    if (insert_query.settings_ast)
+        settings.applyChanges(insert_query.settings_ast->as()->changes);
+
+    return !settings.async_insert;
+}
+
 void ClientBase::processTextAsSingleQuery(const String & full_query)
 {
 /// Some parts of a query (result output and formatting) are executed
@@ -597,10 +609,12 @@ void ClientBase::processTextAsSingleQuery(const String & full_query)
 updateSuggest(*create);
 }
-    // An INSERT query may have the data that follow query text. Remove the
-    /// Send part of query without data, because data will be sent separately.
-    auto * insert = parsed_query->as();
-    if (insert && insert->data)
+    /// An INSERT query may have the data that follows query text.
+    /// Send part of the query without data, because data will be sent separately.
+    /// But for asynchronous inserts we don't extract data, because in that case
+    /// it must be processed on the server side (to coalesce data from multiple inserts).
+    const auto * insert = parsed_query->as();
+    if (insert && isSyncInsertWithData(*insert, global_context))
 query_to_execute = full_query.substr(0, insert->data - full_query.data());
 else
 query_to_execute = full_query;
@@ -1303,8 +1317,10 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
 if (insert && insert->select)
 insert->tryFindInputFunction(input_function);
+    bool is_async_insert = global_context->getSettings().async_insert && insert && insert->hasInlinedData();
+
 /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately.
-    if (insert && (!insert->select || input_function) && !insert->watch)
+    if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert)
 {
 if (input_function && insert->format.empty())
 throw Exception("FORMAT must be specified for function input()", ErrorCodes::INVALID_USAGE_OF_INPUT);
@@ -1434,17 +1450,17 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
 // row input formats (e.g. TSV) can't tell when the input stops,
 // unlike VALUES.
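+    // For a synchronous INSERT the query text will be cut at the start of the inline data,
+    // while for an asynchronous INSERT the inline data is kept in the query text and sent
+    // with it (see isSyncInsertWithData() and query_to_execute_end below).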
auto * insert_ast = parsed_query->as(); + const char * query_to_execute_end = this_query_end; + if (insert_ast && insert_ast->data) { this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end); insert_ast->end = this_query_end; - query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), insert_ast->data - this_query_begin); - } - else - { - query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin); + query_to_execute_end = isSyncInsertWithData(*insert_ast, global_context) ? insert_ast->data : this_query_end; } + query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), query_to_execute_end - this_query_begin); + // Try to include the trailing comment with test hints. It is just // a guess for now, because we don't yet know where the query ends // if it is an INSERT query with inline data. We will do it again diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 89e0770182b..5d753cfd702 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -139,6 +139,8 @@ private: void updateSuggest(const ASTCreateQuery & ast_create); protected: + static bool isSyncInsertWithData(const ASTInsertQuery & insert_query, const ContextPtr & context); + bool is_interactive = false; /// Use either interactive line editing interface or batch mode. bool is_multiquery = false; bool delayed_interactive = false; diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index 896168253cf..a741f1f1bfc 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -80,6 +80,7 @@ M(SyncDrainedConnections, "Number of connections drained synchronously.") \ M(ActiveSyncDrainedConnections, "Number of active connections drained synchronously.") \ M(AsynchronousReadWait, "Number of threads waiting for asynchronous read.") \ + M(PendingAsyncInsert, "Number of asynchronous inserts that are waiting for flush.") \ namespace CurrentMetrics { diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ea6c782ebb4..aa507b1ce59 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -8,6 +8,7 @@ M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. 
Does not count subqueries.") \ M(SelectQuery, "Same as Query, but only for SELECT queries.") \ M(InsertQuery, "Same as Query, but only for INSERT queries.") \ + M(AsyncInsertQuery, "Same as InsertQuery, but only for asynchronous INSERT queries.") \ M(FailedQuery, "Number of failed queries.") \ M(FailedSelectQuery, "Same as FailedQuery, but only for SELECT queries.") \ M(FailedInsertQuery, "Same as FailedQuery, but only for INSERT queries.") \ diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp index b9a8bc923f7..00e2326b0b4 100644 --- a/src/Common/ProgressIndication.cpp +++ b/src/Common/ProgressIndication.cpp @@ -243,7 +243,7 @@ void ProgressIndication::writeProgress() if (width_of_progress_bar > 0) { - size_t bar_width = UnicodeBar::getWidth(current_count, 0, max_count, width_of_progress_bar); + double bar_width = UnicodeBar::getWidth(current_count, 0, max_count, width_of_progress_bar); std::string bar = UnicodeBar::render(bar_width); /// Render profiling_msg at left on top of the progress bar. diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 54307b25efa..48dd637a943 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -170,6 +170,7 @@ class IColumn; M(Bool, force_index_by_date, false, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \ M(Bool, force_primary_key, false, "Throw an exception if there is primary key in a table, and it is not used.", 0) \ M(Bool, use_skip_indexes, true, "Use data skipping indexes during query execution.", 0) \ + M(Bool, use_skip_indexes_if_final, false, "If query has FINAL, then skipping data based on indexes may produce incorrect result, hence disabled by default.", 0) \ M(String, force_data_skipping_indices, "", "Comma separated list of strings or literals with the name of the data skipping indices that should be used during query execution, otherwise an exception will be thrown.", 0) \ \ M(Float, max_streams_to_max_threads_ratio, 1, "Allows you to use more sources than the number of threads - to more evenly distribute work across threads. 
It is assumed that this is a temporary solution, since it will be possible in the future to make the number of sources equal to the number of threads, but for each source to dynamically select available work for itself.", 0) \ diff --git a/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp b/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp index cd98d8d4575..17d9f321b56 100644 --- a/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp +++ b/src/Interpreters/Access/InterpreterShowGrantsQuery.cpp @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include #include #include @@ -135,15 +137,25 @@ QueryPipeline InterpreterShowGrantsQuery::executeImpl() std::vector InterpreterShowGrantsQuery::getEntities() const { - const auto & show_query = query_ptr->as(); + const auto & access = getContext()->getAccess(); const auto & access_control = getContext()->getAccessControl(); + + const auto & show_query = query_ptr->as(); auto ids = RolesOrUsersSet{*show_query.for_roles, access_control, getContext()->getUserID()}.getMatchingIDs(access_control); + CachedAccessChecking show_users(access, AccessType::SHOW_USERS); + CachedAccessChecking show_roles(access, AccessType::SHOW_ROLES); + bool throw_if_access_denied = !show_query.for_roles->all; + std::vector entities; for (const auto & id : ids) { auto entity = access_control.tryRead(id); - if (entity) + if (!entity) + continue; + if ((id == access->getUserID() /* Any user can see his own grants */) + || (entity->isTypeOf() && show_users.checkAccess(throw_if_access_denied)) + || (entity->isTypeOf() && show_roles.checkAccess(throw_if_access_denied))) entities.push_back(entity); } diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index 0e2605fa2e2..c3ec9fa42f1 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -24,6 +24,16 @@ #include +namespace CurrentMetrics +{ + extern const Metric PendingAsyncInsert; +} + +namespace ProfileEvents +{ + extern const Event AsyncInsertQuery; +} + namespace DB { @@ -223,6 +233,9 @@ void AsynchronousInsertQueue::pushImpl(InsertData::EntryPtr entry, QueueIterator if (data->size > max_data_size) scheduleDataProcessingJob(it->first, std::move(data), getContext()); + + CurrentMetrics::add(CurrentMetrics::PendingAsyncInsert); + ProfileEvents::increment(ProfileEvents::AsyncInsertQuery); } void AsynchronousInsertQueue::waitForProcessingQuery(const String & query_id, const Milliseconds & timeout) @@ -437,6 +450,8 @@ try for (const auto & entry : data->entries) if (!entry->isFinished()) entry->finish(); + + CurrentMetrics::sub(CurrentMetrics::PendingAsyncInsert, data->entries.size()); } catch (const Exception & e) { diff --git a/src/Interpreters/OpenTelemetrySpanLog.cpp b/src/Interpreters/OpenTelemetrySpanLog.cpp index 10de6ba0e7b..40f31e4976c 100644 --- a/src/Interpreters/OpenTelemetrySpanLog.cpp +++ b/src/Interpreters/OpenTelemetrySpanLog.cpp @@ -150,24 +150,6 @@ OpenTelemetrySpanHolder::~OpenTelemetrySpanHolder() } } - -template -static T readHex(const char * data) -{ - T x{}; - - const char * end = data + sizeof(T) * 2; - while (data < end) - { - x *= 16; - x += unhex(*data); - ++data; - } - - return x; -} - - bool OpenTelemetryTraceContext::parseTraceparentHeader(const std::string & traceparent, std::string & error) { @@ -185,7 +167,7 @@ bool OpenTelemetryTraceContext::parseTraceparentHeader(const std::string & trace const char * data = traceparent.data(); - uint8_t version = readHex(data); + 
uint8_t version = unhex2(data); data += 2; if (version != 0) @@ -201,7 +183,8 @@ bool OpenTelemetryTraceContext::parseTraceparentHeader(const std::string & trace } ++data; - UInt128 trace_id_128 = readHex(data); + UInt64 trace_id_higher_64 = unhexUInt(data); + UInt64 trace_id_lower_64 = unhexUInt(data + 16); data += 32; if (*data != '-') @@ -211,7 +194,7 @@ bool OpenTelemetryTraceContext::parseTraceparentHeader(const std::string & trace } ++data; - UInt64 span_id_64 = readHex(data); + UInt64 span_id_64 = unhexUInt(data); data += 16; if (*data != '-') @@ -221,8 +204,11 @@ bool OpenTelemetryTraceContext::parseTraceparentHeader(const std::string & trace } ++data; - this->trace_flags = readHex(data); - this->trace_id = trace_id_128; + this->trace_flags = unhex2(data); + + // store the 128-bit trace id in big-endian order + this->trace_id.toUnderType().items[0] = trace_id_higher_64; + this->trace_id.toUnderType().items[1] = trace_id_lower_64; this->span_id = span_id_64; return true; } @@ -232,11 +218,14 @@ std::string OpenTelemetryTraceContext::composeTraceparentHeader() const { // This span is a parent for its children, so we specify this span_id as a // parent id. - return fmt::format("00-{:032x}-{:016x}-{:02x}", __uint128_t(trace_id.toUnderType()), - span_id, - // This cast is needed because fmt is being weird and complaining that - // "mixing character types is not allowed". - static_cast(trace_flags)); + return fmt::format("00-{:016x}{:016x}-{:016x}-{:02x}", + // Output the trace id in network byte order + trace_id.toUnderType().items[0], + trace_id.toUnderType().items[1], + span_id, + // This cast is needed because fmt is being weird and complaining that + // "mixing character types is not allowed". + static_cast(trace_flags)); } diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 37b2992d657..3c91f00ebf4 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -17,6 +17,11 @@ #include +namespace CurrentMetrics +{ + extern const Metric Query; +} + namespace DB { @@ -313,6 +318,7 @@ QueryStatus::QueryStatus( , client_info(client_info_) , priority_handle(std::move(priority_handle_)) , query_kind(query_kind_) + , num_queries_increment(CurrentMetrics::Query) { auto settings = getContext()->getSettings(); limits.max_execution_time = settings.max_execution_time; diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index 545e5b07345..2ba0b0814ee 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -121,6 +121,10 @@ protected: IAST::QueryKind query_kind; + /// This field is unused in this class, but it + /// increments/decrements metric in constructor/destructor. 
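+    /// (an RAII pattern: while a QueryStatus object is alive, CurrentMetrics::Query
+    /// stays incremented, so the metric reflects the number of currently executing queries).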
+ CurrentMetrics::Increment num_queries_increment; + public: QueryStatus( diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index c8c36f6c595..21505610e09 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -912,6 +912,11 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( parts_before_pk = parts.size(); auto reader_settings = getMergeTreeReaderSettings(context); + + bool use_skip_indexes = context->getSettings().use_skip_indexes; + if (select.final() && !context->getSettings().use_skip_indexes_if_final) + use_skip_indexes = false; + result.parts_with_ranges = MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipIndexes( std::move(parts), metadata_snapshot, @@ -922,7 +927,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( log, num_streams, result.index_stats, - context->getSettings().use_skip_indexes); + use_skip_indexes); } catch (...) { diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 49595a9c658..668017f8ef8 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1392,6 +1392,12 @@ void TCPHandler::receiveQuery() if (is_interserver_mode) { ClientInfo original_session_client_info = session->getClientInfo(); + + /// Cleanup fields that should not be reused from previous query. + original_session_client_info.current_user.clear(); + original_session_client_info.current_query_id.clear(); + original_session_client_info.current_address = {}; + session = std::make_unique(server.context(), ClientInfo::Interface::TCP_INTERSERVER); session->getClientInfo() = original_session_client_info; } diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp index 797caff8f69..499a8fbbaa6 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp @@ -3,11 +3,19 @@ #include #include +#include #include + namespace DB { +namespace ErrorCodes +{ + extern const int ABORTED; +} + + template void MergeTreeBackgroundExecutor::wait() { @@ -86,12 +94,18 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) ALLOW_ALLOCATIONS_IN_SCOPE; need_execute_again = item->task->executeStep(); } + catch (const Exception & e) + { + if (e.code() == ErrorCodes::ABORTED) /// Cancelled merging parts is not an error - log as info. + LOG_INFO(log, fmt::runtime(getCurrentExceptionMessage(false))); + else + tryLogCurrentException(__PRETTY_FUNCTION__); + } catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); } - if (need_execute_again) { std::lock_guard guard(mutex); @@ -118,7 +132,6 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) return; } - { std::lock_guard guard(mutex); erase_from_active(); @@ -132,12 +145,18 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) /// But it is rather safe, because we have try...catch block here, and another one in ThreadPool. item->task->onCompleted(); } + catch (const Exception & e) + { + if (e.code() == ErrorCodes::ABORTED) /// Cancelled merging parts is not an error - log as info. + LOG_INFO(log, fmt::runtime(getCurrentExceptionMessage(false))); + else + tryLogCurrentException(__PRETTY_FUNCTION__); + } catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); } - /// We have to call reset() under a lock, otherwise a race is possible. 
/// Imagine, that task is finally completed (last execution returned false), /// we removed the task from both queues, but still have pointer. diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h index f4635812e08..5cfa7b6ed7c 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h @@ -159,7 +159,6 @@ template class MergeTreeBackgroundExecutor final : public shared_ptr_helper> { public: - MergeTreeBackgroundExecutor( String name_, size_t threads_count_, @@ -194,7 +193,6 @@ public: void wait(); private: - String name; size_t threads_count{0}; size_t max_tasks_count{0}; @@ -210,6 +208,7 @@ private: std::condition_variable has_tasks; std::atomic_bool shutdown{false}; ThreadPool pool; + Poco::Logger * log = &Poco::Logger::get("MergeTreeBackgroundExecutor"); }; extern template class MergeTreeBackgroundExecutor; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 373a97aa915..dfd0e1a43a8 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1199,15 +1199,31 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( return false; } - if (merge_strategy_picker.shouldMergeOnSingleReplica(entry)) + bool should_execute_on_single_replica = merge_strategy_picker.shouldMergeOnSingleReplica(entry); + if (!should_execute_on_single_replica) { + /// Separate check. If we use only s3, check remote_fs_execute_merges_on_single_replica_time_threshold as well. + auto disks = storage.getDisks(); + bool only_s3_storage = true; + for (const auto & disk : disks) + if (disk->getType() != DB::DiskType::S3) + only_s3_storage = false; + + if (!disks.empty() && only_s3_storage) + should_execute_on_single_replica = merge_strategy_picker.shouldMergeOnSingleReplicaShared(entry); + } + + if (should_execute_on_single_replica) + { + auto replica_to_execute_merge = merge_strategy_picker.pickReplicaToExecuteMerge(entry); if (replica_to_execute_merge && !merge_strategy_picker.isMergeFinishedByReplica(replica_to_execute_merge.value(), entry)) { - String reason = "Not executing merge for the part " + entry.new_part_name - + ", waiting for " + replica_to_execute_merge.value() + " to execute merge."; - out_postpone_reason = reason; + out_postpone_reason = fmt::format( + "Not executing merge for the part {}, waiting for {} to execute merge.", + entry.new_part_name, replica_to_execute_merge.value()); + LOG_DEBUG(log, fmt::runtime(out_postpone_reason)); return false; } } diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 7220b86a482..7328db26926 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -44,7 +44,7 @@ def get_image_name(check_name): else: raise Exception(f"Cannot deduce image name based on check name {check_name}") -def get_run_command(builds_path, result_path, server_log_path, kill_timeout, additional_envs, image, flaky_check, tests_to_run): +def get_run_command(builds_path, repo_tests_path, result_path, server_log_path, kill_timeout, additional_envs, image, flaky_check, tests_to_run): additional_options = ['--hung-check'] additional_options.append('--print-time') @@ -63,6 +63,7 @@ def get_run_command(builds_path, result_path, server_log_path, kill_timeout, add env_str = ' '.join(envs) return f"docker run --volume={builds_path}:/package_folder " \ + 
f"--volume={repo_tests_path}:/usr/share/clickhouse-test " \ f"--volume={result_path}:/test_output --volume={server_log_path}:/var/log/clickhouse-server " \ f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}" @@ -167,6 +168,8 @@ if __name__ == "__main__": image_name = get_image_name(check_name) docker_image = get_image_with_version(reports_path, image_name) + repo_tests_path = os.path.join(repo_path, "tests") + packages_path = os.path.join(temp_path, "packages") if not os.path.exists(packages_path): os.makedirs(packages_path) @@ -184,7 +187,7 @@ if __name__ == "__main__": run_log_path = os.path.join(result_path, "runlog.log") additional_envs = get_additional_envs(check_name, run_by_hash_num, run_by_hash_total) - run_command = get_run_command(packages_path, result_path, server_log_path, kill_timeout, additional_envs, docker_image, flaky_check, tests_to_run) + run_command = get_run_command(packages_path, repo_tests_path, result_path, server_log_path, kill_timeout, additional_envs, docker_image, flaky_check, tests_to_run) logging.info("Going to run func tests: %s", run_command) with TeePopen(run_command, run_log_path) as process: diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 73d338ba870..2601163d790 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -87,6 +87,17 @@ def get_query_user_info(node, query_pattern): type = 'QueryFinish' """.format(query_pattern)).strip().split('\t') +# @return -- [user, initial_user] +def get_query_user_info_by_id(node, query_id): + node.query("SYSTEM FLUSH LOGS") + return node.query(""" + SELECT user, initial_user + FROM system.query_log + WHERE + query_id = '{}' AND + type = 'QueryFinish' + """.format(query_id)).strip().split('\t') + # @return -- settings def get_query_setting_on_shard(node, query_pattern, setting): node.query("SYSTEM FLUSH LOGS") @@ -183,6 +194,7 @@ def test_secure_insert_buffer_async(): # previous connection that was instantiated with "ro" user (using # interserver secret) assert not n1.contains_in_log('{' + query_id + '} Connection (n2:9000): Connecting.') + assert get_query_user_info_by_id(n1, query_id) == ['default', 'default'] # And before the bug was fixed this query will fail with the following error: # @@ -191,6 +203,18 @@ def test_secure_insert_buffer_async(): n1.query('OPTIMIZE TABLE dist_secure_buffer') n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure_from_buffer') + # Check user from which the INSERT on the remote node will be executed + # + # Incorrect example: + # + # {2c55669f-71ad-48fe-98fa-7b475b80718e} executeQuery: (from 172.16.1.1:44636, user: ro) INSERT INTO default.data_from_buffer (key) VALUES + # + # Correct example: + # + # {2c55669f-71ad-48fe-98fa-7b475b80718e} executeQuery: (from 0.0.0.0:0, user: ) INSERT INTO default.data_from_buffer (key) VALUES + # + assert n2.contains_in_log('executeQuery: (from 0.0.0.0:0, user: ) INSERT INTO default.data_from_buffer (key) VALUES') + assert int(n1.query('SELECT count() FROM dist_secure_from_buffer')) == 2 n1.query('TRUNCATE TABLE data_from_buffer ON CLUSTER secure') diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py index b905e4df219..89e07fecb0a 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -250,6 +250,15 @@ def 
test_introspection(): assert instance.query("SHOW GRANTS", user='A') == TSV(["GRANT SELECT ON test.table TO A"]) assert instance.query("SHOW GRANTS", user='B') == TSV(["GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) + assert instance.query("SHOW GRANTS FOR ALL", user='A') == TSV(["GRANT SELECT ON test.table TO A"]) + assert instance.query("SHOW GRANTS FOR ALL", user='B') == TSV(["GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) + assert instance.query("SHOW GRANTS FOR ALL") == TSV(["GRANT SELECT ON test.table TO A", + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + "GRANT ALL ON *.* TO default WITH GRANT OPTION"]) + + expected_error = "necessary to have grant SHOW USERS" + assert expected_error in instance.query_and_get_error("SHOW GRANTS FOR B", user='A') + expected_access1 = "CREATE USER A\n" \ "CREATE USER B\n" \ "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index fbbf74bbba2..95d99449837 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -20,7 +20,7 @@ select attribute['db.statement'] as query, attribute['clickhouse.tracestate'] as tracestate, 1 as sorted_by_start_time from system.opentelemetry_span_log - where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + where trace_id = UUIDNumToString(toFixedString(unhex('$trace_id'), 16)) and operation_name = 'query' order by start_time_us ; @@ -31,7 +31,7 @@ select attribute['db.statement'] as query, attribute['clickhouse.tracestate'] as tracestate, 1 as sorted_by_finish_time from system.opentelemetry_span_log - where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + where trace_id = UUIDNumToString(toFixedString(unhex('$trace_id'), 16)) and operation_name = 'query' order by finish_time_us ; @@ -43,7 +43,7 @@ select count(*) "'"'"total spans"'"'", uniqExactIf(parent_span_id, parent_span_id != 0) "'"'"unique non-zero parent spans"'"'" from system.opentelemetry_span_log - where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + where trace_id = UUIDNumToString(toFixedString(unhex('$trace_id'), 16)) and operation_name = 'query' ; @@ -56,7 +56,7 @@ select count(*) "'"'"initial query spans with proper parent"'"'" mapValues(attribute) as attribute_value) o join system.query_log on query_id = o.attribute_value where - trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + trace_id = UUIDNumToString(toFixedString(unhex('$trace_id'), 16)) and current_database = currentDatabase() and operation_name = 'query' and parent_span_id = reinterpretAsUInt64(unhex('73')) @@ -71,7 +71,7 @@ select uniqExact(value) "'"'"unique non-empty tracestate values"'"'" from system.opentelemetry_span_log array join mapKeys(attribute) as name, mapValues(attribute) as value where - trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + trace_id = UUIDNumToString(toFixedString(unhex('$trace_id'), 16)) and operation_name = 'query' and name = 'clickhouse.tracestate' and length(value) > 0 diff --git a/tests/queries/0_stateless/02190_current_metrics_query.reference b/tests/queries/0_stateless/02190_current_metrics_query.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02190_current_metrics_query.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02190_current_metrics_query.sql 
b/tests/queries/0_stateless/02190_current_metrics_query.sql new file mode 100644 index 00000000000..e8b22e92a99 --- /dev/null +++ b/tests/queries/0_stateless/02190_current_metrics_query.sql @@ -0,0 +1,2 @@ +-- This query itself is also accounted in metric. +SELECT value > 0 FROM system.metrics WHERE metric = 'Query'; diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_1.reference b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.reference new file mode 100644 index 00000000000..333b3b69fb6 --- /dev/null +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.reference @@ -0,0 +1,5 @@ +1 aaa +2 bbb +3 ccc +4 ddd +4 4 diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql new file mode 100644 index 00000000000..795a27883e6 --- /dev/null +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_1.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t_async_insert_02193_1; + +CREATE TABLE t_async_insert_02193_1 (id UInt32, s String) ENGINE = Memory; + +INSERT INTO t_async_insert_02193_1 FORMAT CSV SETTINGS async_insert = 1 +1,aaa +; + +INSERT INTO t_async_insert_02193_1 FORMAT Values SETTINGS async_insert = 1 (2, 'bbb'); + +SET async_insert = 1; + +INSERT INTO t_async_insert_02193_1 VALUES (3, 'ccc'); +INSERT INTO t_async_insert_02193_1 FORMAT JSONEachRow {"id": 4, "s": "ddd"}; + +SELECT * FROM t_async_insert_02193_1 ORDER BY id; + +SYSTEM FLUSH LOGS; + +SELECT count(), sum(ProfileEvents['AsyncInsertQuery']) FROM system.query_log +WHERE + event_date >= yesterday() AND + type = 'QueryFinish' AND + current_database = currentDatabase() AND + query ILIKE 'INSERT INTO t_async_insert_02193_1%'; + +DROP TABLE IF EXISTS t_async_insert_02193_1; diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_2.reference b/tests/queries/0_stateless/02193_async_insert_tcp_client_2.reference new file mode 100644 index 00000000000..398d1450c6c --- /dev/null +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_2.reference @@ -0,0 +1,4 @@ +1 aaa +2 bbb +3 ccc +4 ddd diff --git a/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh b/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh new file mode 100755 index 00000000000..e99d0fa69b8 --- /dev/null +++ b/tests/queries/0_stateless/02193_async_insert_tcp_client_2.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_async_insert_02193_2" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_async_insert_02193_2 (id UInt32, s String) ENGINE = Memory" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 FORMAT CSV SETTINGS async_insert = 1 1,aaa" +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 FORMAT Values SETTINGS async_insert = 1 (2, 'bbb')" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_async_insert_02193_2 VALUES (3, 'ccc')" +${CLICKHOUSE_CLIENT} -q 'INSERT INTO t_async_insert_02193_2 FORMAT JSONEachRow {"id": 4, "s": "ddd"}' + +${CLICKHOUSE_CLIENT} -q "SELECT * FROM t_async_insert_02193_2 ORDER BY id" +${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE t_async_insert_02193_2" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_async_insert_02193_2" diff --git a/tests/queries/0_stateless/02200_use_skip_indexes.reference b/tests/queries/0_stateless/02200_use_skip_indexes.reference new file mode 100644 index 00000000000..f11db7af711 --- /dev/null +++ b/tests/queries/0_stateless/02200_use_skip_indexes.reference @@ -0,0 +1,4 @@ +-- { echoOn } +SELECT * FROM data_02200 WHERE value = 1 SETTINGS use_skip_indexes=1, max_rows_to_read=1; +1 1 +SELECT * FROM data_02200 WHERE value = 1 SETTINGS use_skip_indexes=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } diff --git a/tests/queries/0_stateless/02200_use_skip_indexes.sql b/tests/queries/0_stateless/02200_use_skip_indexes.sql new file mode 100644 index 00000000000..64003285abb --- /dev/null +++ b/tests/queries/0_stateless/02200_use_skip_indexes.sql @@ -0,0 +1,14 @@ +CREATE TABLE data_02200 ( + key Int, + value Int, + INDEX idx value TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key +PARTITION BY key; + +INSERT INTO data_02200 SELECT number, number FROM numbers(10); + +-- { echoOn } +SELECT * FROM data_02200 WHERE value = 1 SETTINGS use_skip_indexes=1, max_rows_to_read=1; +SELECT * FROM data_02200 WHERE value = 1 SETTINGS use_skip_indexes=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } diff --git a/tests/queries/0_stateless/02201_use_skip_indexes_if_final.reference b/tests/queries/0_stateless/02201_use_skip_indexes_if_final.reference new file mode 100644 index 00000000000..423a9fa46d6 --- /dev/null +++ b/tests/queries/0_stateless/02201_use_skip_indexes_if_final.reference @@ -0,0 +1,6 @@ +-- { echoOn } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=1, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1, max_rows_to_read=1; +1 1 diff --git a/tests/queries/0_stateless/02201_use_skip_indexes_if_final.sql b/tests/queries/0_stateless/02201_use_skip_indexes_if_final.sql new file mode 100644 index 00000000000..2afc4941c9e --- /dev/null +++ b/tests/queries/0_stateless/02201_use_skip_indexes_if_final.sql @@ -0,0 +1,16 @@ +CREATE TABLE data_02201 ( + key Int, + value Int, + INDEX idx value TYPE minmax GRANULARITY 1 +) +Engine=AggregatingMergeTree() +ORDER BY key +PARTITION BY key; + +INSERT INTO data_02201 SELECT number, number FROM numbers(10); + +-- { echoOn } +SELECT * FROM data_02201 FINAL 
WHERE value = 1 SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=1, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1, max_rows_to_read=1; diff --git a/tests/queries/0_stateless/02202_use_skip_indexes_if_final.reference b/tests/queries/0_stateless/02202_use_skip_indexes_if_final.reference new file mode 100644 index 00000000000..7d543cfcaf6 --- /dev/null +++ b/tests/queries/0_stateless/02202_use_skip_indexes_if_final.reference @@ -0,0 +1,6 @@ +-- { echoOn } +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=0; +0 1 +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1; +0 1 +1 1 diff --git a/tests/queries/0_stateless/02202_use_skip_indexes_if_final.sql b/tests/queries/0_stateless/02202_use_skip_indexes_if_final.sql new file mode 100644 index 00000000000..cce785eb17d --- /dev/null +++ b/tests/queries/0_stateless/02202_use_skip_indexes_if_final.sql @@ -0,0 +1,19 @@ +-- This test shows the difference in results with and without use_skip_indexes_if_final + +CREATE TABLE data_02201 ( + key Int, + value_max SimpleAggregateFunction(max, Int), + INDEX idx value_max TYPE minmax GRANULARITY 1 +) +Engine=AggregatingMergeTree() +ORDER BY key +PARTITION BY key; + +SYSTEM STOP MERGES data_02201; + +INSERT INTO data_02201 SELECT number, number FROM numbers(10); +INSERT INTO data_02201 SELECT number, number+1 FROM numbers(10); + +-- { echoOn } +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=0; +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1; diff --git a/tests/queries/0_stateless/02203_shebang b/tests/queries/0_stateless/02203_shebang new file mode 100755 index 00000000000..07686d1aab4 --- /dev/null +++ b/tests/queries/0_stateless/02203_shebang @@ -0,0 +1,3 @@ +#!/usr/bin/clickhouse-local --queries-file + +SELECT 1; diff --git a/tests/queries/0_stateless/02203_shebang.reference b/tests/queries/0_stateless/02203_shebang.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02203_shebang.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02203_shebang.sh b/tests/queries/0_stateless/02203_shebang.sh new file mode 100755 index 00000000000..6c46a15d4f5 --- /dev/null +++ b/tests/queries/0_stateless/02203_shebang.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +sed -i.bak "s!/usr/bin/clickhouse-local!$(command -v ${CLICKHOUSE_LOCAL})!" 
"${CUR_DIR}/02203_shebang" +"${CUR_DIR}/02203_shebang" diff --git a/tests/queries/0_stateless/02204_fractional_progress_bar.reference b/tests/queries/0_stateless/02204_fractional_progress_bar.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02204_fractional_progress_bar.sh b/tests/queries/0_stateless/02204_fractional_progress_bar.sh new file mode 100755 index 00000000000..6018ee8c96c --- /dev/null +++ b/tests/queries/0_stateless/02204_fractional_progress_bar.sh @@ -0,0 +1,20 @@ +#!/usr/bin/expect -f +# Tags: no-fasttest + +log_user 0 +set timeout 60 +match_max 100000 + +spawn clickhouse-local --progress --query "SELECT sum(number % 100000000 = 12345678 ? sleep(0.1) : 1) FROM numbers(1000000000)" + +expect { + "▏" { exit 0 } + "▎" { exit 0 } + "▍" { exit 0 } + "▌" { exit 0 } + "▋" { exit 0 } + "▋" { exit 0 } + "▊" { exit 0 } + "▉" { exit 0 } + timeout { exit 1 } +} diff --git a/utils/list-versions/list-versions.sh b/utils/list-versions/list-versions.sh index 01364c78f83..8d4f286124e 100755 --- a/utils/list-versions/list-versions.sh +++ b/utils/list-versions/list-versions.sh @@ -1,3 +1,5 @@ #!/bin/bash -git tag --list | grep -P 'v.+-(stable|lts)' | sort -V | xargs git show --format='%ai' | awk '/^v/ { version = $1 } /^[0-9]+/ { if (version) { date = $1 } } { if (version && date) { print version "\t" date; version = ""; date = ""; } }' | tac +# refname:strip=2: default tag name when format is not set +# creatordate is always defined for all tags +git tag --list 'v*-lts' 'v*-stable' --format='%(refname:short) %(creatordate:short)' | sort -rV diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 539aa6f1b19..cff412d9fd8 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,56 +1,87 @@ -v21.9.4.35-stable 2021-09-22 -v21.9.3.30-stable 2021-09-16 +v22.1.3.7-stable 2022-01-23 +v22.1.2.2-stable 2022-01-19 +v21.12.4.1-stable 2022-01-23 +v21.12.3.32-stable 2021-12-27 +v21.12.2.17-stable 2021-12-16 +v21.11.11.1-stable 2022-01-23 +v21.11.10.1-stable 2022-01-12 +v21.11.9.1-stable 2021-12-27 +v21.11.8.4-stable 2021-12-22 +v21.11.7.9-stable 2021-12-15 +v21.11.6.7-stable 2021-12-10 +v21.11.5.33-stable 2021-12-02 +v21.11.4.14-stable 2021-11-17 +v21.11.3.6-stable 2021-11-11 +v21.11.2.2-stable 2021-11-09 +v21.10.6.2-stable 2022-01-24 +v21.10.5.3-stable 2021-12-10 +v21.10.4.26-stable 2021-12-02 +v21.10.3.9-stable 2021-11-17 +v21.10.2.15-stable 2021-10-18 +v21.9.6.24-stable 2021-12-02 +v21.9.5.16-stable 2021-10-19 +v21.9.4.35-stable 2021-09-24 +v21.9.3.30-stable 2021-09-17 v21.9.2.17-stable 2021-09-09 -v21.8.8.29-lts 2021-09-28 -v21.8.7.22-lts 2021-09-22 -v21.8.6.15-lts 2021-09-16 +v21.8.14.5-lts 2022-01-26 +v21.8.13.6-lts 2021-12-27 +v21.8.12.29-lts 2021-12-02 +v21.8.11.4-lts 2021-11-17 +v21.8.10.19-lts 2021-10-21 +v21.8.9.13-lts 2021-10-19 +v21.8.8.29-lts 2021-09-29 +v21.8.7.22-lts 2021-09-24 +v21.8.6.15-lts 2021-09-17 v21.8.5.7-lts 2021-09-02 -v21.8.4.51-lts 2021-08-17 +v21.8.4.51-lts 2021-08-18 v21.8.3.44-lts 2021-08-12 -v21.7.11.3-stable 2021-09-23 -v21.7.10.4-stable 2021-09-16 +v21.7.11.3-stable 2021-09-24 +v21.7.10.4-stable 2021-09-18 v21.7.9.7-stable 2021-09-02 -v21.7.8.58-stable 2021-08-17 -v21.7.7.47-stable 2021-08-09 +v21.7.8.58-stable 2021-08-18 +v21.7.7.47-stable 2021-08-10 v21.7.6.39-stable 2021-08-06 -v21.7.5.29-stable 2021-07-28 -v21.7.4.18-stable 2021-07-17 -v21.7.3.14-stable 2021-07-13 +v21.7.5.29-stable 2021-07-29 +v21.7.4.18-stable 2021-07-19 
+v21.7.3.14-stable 2021-07-14 v21.7.2.7-stable 2021-07-09 -v21.6.9.7-stable 2021-09-02 -v21.6.8.62-stable 2021-07-13 -v21.6.7.57-stable 2021-07-09 -v21.6.6.51-stable 2021-07-02 +v21.6.9.7-stable 2021-09-03 +v21.6.8.62-stable 2021-07-16 +v21.6.7.57-stable 2021-07-10 +v21.6.6.51-stable 2021-07-04 v21.6.5.37-stable 2021-06-19 v21.6.4.26-stable 2021-06-11 -v21.6.3.14-stable 2021-06-04 +v21.6.3.14-stable 2021-06-05 v21.5.9.4-stable 2021-07-10 -v21.5.8.21-stable 2021-07-02 -v21.5.7.9-stable 2021-06-22 -v21.5.6.6-stable 2021-05-29 +v21.5.8.21-stable 2021-07-04 +v21.5.7.9-stable 2021-06-23 +v21.5.6.6-stable 2021-05-30 v21.5.5.12-stable 2021-05-20 -v21.4.7.3-stable 2021-05-19 -v21.4.6.55-stable 2021-04-30 +v21.4.7.3-stable 2021-05-20 +v21.4.6.55-stable 2021-05-01 v21.4.5.46-stable 2021-04-24 v21.4.4.30-stable 2021-04-16 v21.4.3.21-stable 2021-04-12 -v21.3.17.2-lts 2021-09-16 -v21.3.16.5-lts 2021-09-03 +v21.3.20.1-lts 2022-01-26 +v21.3.19.1-lts 2021-12-10 +v21.3.18.4-lts 2021-10-21 +v21.3.17.2-lts 2021-09-17 +v21.3.16.5-lts 2021-09-04 v21.3.15.4-stable 2021-07-10 -v21.3.14.1-lts 2021-07-01 -v21.3.13.9-lts 2021-06-22 -v21.3.12.2-lts 2021-05-25 -v21.3.11.5-lts 2021-05-14 -v21.3.10.1-lts 2021-05-09 -v21.3.9.83-lts 2021-04-28 +v21.3.14.1-lts 2021-07-04 +v21.3.13.9-lts 2021-06-23 +v21.3.12.2-lts 2021-05-26 +v21.3.11.5-lts 2021-05-16 +v21.3.10.1-lts 2021-05-10 +v21.3.9.83-lts 2021-05-01 v21.3.8.76-lts 2021-04-24 -v21.3.7.62-stable 2021-04-16 +v21.3.7.62-stable 2021-04-17 v21.3.6.55-lts 2021-04-12 v21.3.5.42-lts 2021-04-07 v21.3.4.25-lts 2021-03-28 v21.3.3.14-lts 2021-03-19 v21.3.2.5-lts 2021-03-12 -v21.2.10.48-stable 2021-04-16 +v21.2.10.48-stable 2021-04-17 v21.2.9.41-stable 2021-04-12 v21.2.8.31-stable 2021-04-07 v21.2.7.11-stable 2021-03-28 @@ -93,8 +124,8 @@ v20.9.5.5-stable 2020-11-13 v20.9.4.76-stable 2020-10-29 v20.9.3.45-stable 2020-10-09 v20.9.2.20-stable 2020-09-22 -v20.8.19.4-stable 2021-07-10 -v20.8.18.32-lts 2021-04-16 +v20.8.19.4-stable 2021-07-11 +v20.8.18.32-lts 2021-04-17 v20.8.17.25-lts 2021-04-08 v20.8.16.20-lts 2021-04-06 v20.8.15.11-lts 2021-04-01 @@ -239,97 +270,104 @@ v19.9.2.4-stable 2019-06-24 v19.8.3.8-stable 2019-06-11 v19.7.5.29-stable 2019-07-05 v19.7.5.27-stable 2019-06-09 -v19.7.3.9-stable 2019-05-05 +v19.7.3.9-stable 2019-05-27 v19.6.3.18-stable 2019-06-15 -v19.6.2.11-stable 2019-04-30 -v19.5.4.22-stable 2019-04-30 -v19.5.3.8-stable 2019-04-17 +v19.6.2.11-stable 2019-05-14 +v19.5.4.22-stable 2019-05-13 +v19.5.3.8-stable 2019-04-18 v19.5.2.6-stable 2019-04-15 -v19.4.5.35-stable 2019-05-05 -v19.4.4.33-stable 2019-04-16 -v19.4.3.11-stable 2019-04-01 -v19.4.2.7-stable 2019-03-29 +v19.4.5.35-stable 2019-05-13 +v19.4.4.33-stable 2019-04-17 +v19.4.3.11-stable 2019-04-02 +v19.4.2.7-stable 2019-03-30 v19.4.1.3-stable 2019-03-19 v19.4.0.49-stable 2019-03-09 -v19.3.9.12-stable 2019-04-01 -v19.3.8.6-stable 2019-03-04 -v19.3.7-stable 2019-03-11 +v19.3.9.12-stable 2019-04-02 +v19.3.8.6-stable 2019-03-19 +v19.3.7-stable 2019-03-12 v19.3.6-stable 2019-03-02 -v19.3.5-stable 2019-02-20 -v19.3.4-stable 2019-02-15 +v19.3.5-stable 2019-02-21 +v19.3.4-stable 2019-02-16 v19.3.3-stable 2019-02-13 -v19.1.16.79-stable 2019-03-22 -v19.1.15.73-stable 2019-03-04 +v19.1.16.79-stable 2019-04-02 +v19.1.15.73-stable 2019-03-19 v19.1.14-stable 2019-03-14 v19.1.13-stable 2019-03-12 v19.1.10-stable 2019-03-03 -v19.1.9-stable 2019-02-20 -v19.1.8-stable 2019-02-15 -v19.1.7-stable 2019-02-13 +v19.1.9-stable 2019-02-21 +v19.1.8-stable 2019-02-16 +v19.1.7-stable 2019-02-15 v19.1.6-stable 2019-01-24 -v19.1.5-stable 
2019-01-22 -v18.16.1-stable 2018-12-20 -v18.16.0-stable 2018-12-14 +v19.1.5-stable 2019-01-23 +v18.16.1-stable 2018-12-21 +v18.16.0-stable 2018-12-15 v18.14.19-stable 2018-12-19 v18.14.18-stable 2018-12-04 -v18.14.17-stable 2018-11-29 +v18.14.17-stable 2018-11-30 v18.14.15-stable 2018-11-21 -v18.14.14-stable 2018-11-20 -v18.14.13-stable 2018-11-07 -v18.14.12-stable 2018-11-01 -v18.14.11-stable 2018-10-26 -v18.14.10-stable 2018-10-23 +v18.14.14-stable 2018-11-21 +v18.14.13-stable 2018-11-08 +v18.14.12-stable 2018-11-02 +v18.14.11-stable 2018-10-29 +v18.14.10-stable 2018-10-24 v18.14.9-stable 2018-10-16 v18.14.8-stable 2018-10-13 v18.12.17-stable 2018-09-16 v18.12.14-stable 2018-09-13 -v18.12.13-stable 2018-09-10 -v1.1.54390-stable 2018-07-06 -v1.1.54388-stable 2018-06-27 +v18.12.13-stable 2018-09-11 +v18.10.3-stable 2018-08-13 +v18.6.0-stable 2018-08-01 +v18.5.1-stable 2018-07-31 +v18.4.0-stable 2018-07-28 +v18.1.0-stable 2018-07-20 +v1.1.54394-stable 2018-07-12 +v1.1.54390-stable 2018-07-09 +v1.1.54388-stable 2018-06-28 v1.1.54385-stable 2018-06-01 -v1.1.54383-stable 2018-05-18 -v1.1.54381-stable 2018-04-26 -v1.1.54380-stable 2018-04-20 -v1.1.54378-stable 2018-04-13 +v1.1.54383-stable 2018-05-22 +v1.1.54381-stable 2018-05-14 +v1.1.54380-stable 2018-04-21 +v1.1.54378-stable 2018-04-16 v1.1.54370-stable 2018-03-16 -v1.1.54362-stable 2018-03-10 +v1.1.54362-stable 2018-03-11 v1.1.54358-stable 2018-03-08 -v1.1.54343-stable 2018-01-24 +v1.1.54343-stable 2018-02-08 v1.1.54342-stable 2018-01-22 -v1.1.54337-stable 2018-01-17 -v1.1.54336-stable 2018-01-16 -v1.1.54335-stable 2018-01-16 -v1.1.54327-stable 2017-12-20 -v1.1.54318-stable 2017-11-30 +v1.1.54337-stable 2018-01-18 +v1.1.54336-stable 2018-01-17 +v1.1.54335-stable 2018-01-17 +v1.1.54327-stable 2017-12-21 +v1.1.54318-stable 2017-12-01 v1.1.54310-stable 2017-11-01 -v1.1.54304-stable 2017-10-19 -v1.1.54292-stable 2017-09-20 -v1.1.54289-stable 2017-09-13 -v1.1.54284-stable 2017-08-29 +v1.1.54304-stable 2017-10-24 +v1.1.54292-stable 2017-09-27 +v1.1.54289-stable 2017-09-14 +v1.1.54284-stable 2017-08-30 v1.1.54282-stable 2017-08-23 v1.1.54276-stable 2017-08-16 -v1.1.54245-stable 2017-07-04 -v1.1.54242-stable 2017-06-26 -v1.1.54236-stable 2017-05-23 -v1.1.54231-stable 2017-04-29 -v1.1.54198-stable 2017-03-28 -v1.1.54190-stable 2017-03-21 +v1.1.54245-stable 2017-07-05 +v1.1.54242-stable 2017-06-27 +v1.1.54236-stable 2017-05-25 +v1.1.54231-stable 2017-05-15 +v1.1.54198-stable 2017-03-29 +v1.1.54190-stable 2017-03-22 v1.1.54188-stable 2017-03-17 -v1.1.54181-stable 2017-03-09 -v1.1.54165-stable 2017-02-14 -v1.1.54159-stable 2017-02-03 -v1.1.54144-stable 2017-01-31 +v1.1.54181-stable 2017-03-11 +v1.1.54165-stable 2017-02-15 +v1.1.54159-stable 2017-02-07 +v1.1.54144-stable 2017-02-01 v1.1.54135-stable 2017-01-20 -v1.1.54134-stable 2017-01-14 -v1.1.54133-stable 2017-01-12 -v1.1.54127-stable 2016-12-22 -v1.1.54112-stable 2016-12-08 -v1.1.54083-stable 2016-11-28 +v1.1.54134-stable 2017-01-16 +v1.1.54133-stable 2017-01-13 +v1.1.54127-stable 2016-12-23 +v1.1.54112-stable 2016-12-13 +v1.1.54083-stable 2016-12-01 v1.1.54080-stable 2016-11-25 -v1.1.54074-stable 2016-11-23 -v1.1.54046-stable 2016-10-27 -v1.1.54030-stable 2016-10-24 -v1.1.54023-stable 2016-10-07 -v1.1.54022-stable 2016-09-26 -v1.1.54020-stable 2016-09-19 -v1.1.54019-stable 2016-09-07 +v1.1.54074-stable 2016-11-24 +v1.1.54046-stable 2016-11-08 +v1.1.54030-stable 2016-10-25 +v1.1.54023-stable 2016-10-10 +v1.1.54022-stable 2016-09-28 +v1.1.54020-stable 2016-09-21 +v1.1.54019-stable 2016-09-14 
+v1.1.54011-stable 2016-08-18 diff --git a/website/images/photos/artur-filatenkov.jpg b/website/images/photos/artur-filatenkov.jpg new file mode 100644 index 00000000000..4fdc7758268 Binary files /dev/null and b/website/images/photos/artur-filatenkov.jpg differ diff --git a/website/images/photos/bastian-spanneberg.jpg b/website/images/photos/bastian-spanneberg.jpg new file mode 100644 index 00000000000..66212cb5abd Binary files /dev/null and b/website/images/photos/bastian-spanneberg.jpg differ diff --git a/website/images/photos/manas-alekar.jpg b/website/images/photos/manas-alekar.jpg new file mode 100644 index 00000000000..307a860455f Binary files /dev/null and b/website/images/photos/manas-alekar.jpg differ diff --git a/website/images/photos/marcelo-rodriguez.jpg b/website/images/photos/marcelo-rodriguez.jpg new file mode 100644 index 00000000000..a25be5309da Binary files /dev/null and b/website/images/photos/marcelo-rodriguez.jpg differ diff --git a/website/images/photos/martin-choluj.jpg b/website/images/photos/martin-choluj.jpg new file mode 100644 index 00000000000..200685ab2fe Binary files /dev/null and b/website/images/photos/martin-choluj.jpg differ diff --git a/website/images/photos/mikhail-fursov.jpg b/website/images/photos/mikhail-fursov.jpg new file mode 100644 index 00000000000..82969fed20e Binary files /dev/null and b/website/images/photos/mikhail-fursov.jpg differ diff --git a/website/templates/company/team.html b/website/templates/company/team.html index c4e18e36c96..c2e6bfe496d 100644 --- a/website/templates/company/team.html +++ b/website/templates/company/team.html @@ -9,6 +9,19 @@
[extraction note: the surrounding team.html card markup (div/img/h3/p tags) in the hunks below was lost; only the hunk headers and the template strings are recoverable]
+    {{ _('Manas Alekar') }}
+    {{ _('Senior Cloud SWE') }}
@@ -43,7 +56,7 @@ {{ _('Ivan Blinkov') }}
-    {{ _('VP, Product') }}
+    {{ _('VP, Technical Program Management') }}
@@ -99,6 +112,19 @@ {{ _('Software Engineer') }}
+    {{ _('Martin Choluj') }}
+    {{ _('Vice President, Security') }}
@@ -125,6 +151,19 @@ {{ _('Core SWE') }}
+    {{ _('Artur Filatenkov') }}
+    {{ _('Associate Core Software Engineer') }}
@@ -138,6 +177,19 @@ {{ _('Senior Director, Business Technology') }}
+    {{ _('Mikhail Fursov') }}
+    {{ _('Principal UX/UI Engineer') }}
@@ -437,6 +489,19 @@ {{ _('Director, Global Learning') }}
+    {{ _('Marcelo Rodriguez') }}
+    {{ _('Sr Support Engineer') }}
@@ -476,6 +541,19 @@ {{ _('Site Reliability Engineer') }}
+    {{ _('Bastian Spanneberg') }}
+    {{ _('Site Reliability Engineer') }}
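Reviewer note on the 01455_opentelemetry_distributed.sh hunks above: the WHERE clauses now build the trace UUID with UUIDNumToString(toFixedString(unhex('$trace_id'), 16)) instead of reinterpretAsUUID(reverse(unhex('$trace_id'))). A minimal sketch to check that the two forms agree, assuming any ClickHouse server or clickhouse-local is at hand; the hex literal and the trace_id_hex alias are arbitrary illustrations, not values from the test:

WITH '0102030405060708090a0b0c0d0e0f10' AS trace_id_hex
SELECT
    UUIDNumToString(toFixedString(unhex(trace_id_hex), 16)) AS new_form,
    toString(reinterpretAsUUID(reverse(unhex(trace_id_hex)))) AS old_form,
    new_form = old_form AS equivalent; -- both should render the same UUID text, i.e. equivalent = 1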