Merge branch 'master' into use-uniq-ptr-for-actions-dag

This commit is contained in:
Nikolai Kochetov 2024-07-17 10:51:23 +00:00
commit 078aa7a1bd
111 changed files with 1238 additions and 293 deletions

View File

@ -27,6 +27,8 @@ Checks: [
'-bugprone-not-null-terminated-result',
'-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
'-bugprone-unchecked-optional-access',
'-bugprone-crtp-constructor-accessibility',
'-bugprone-suspicious-stringview-data-usage',
'-cert-dcl16-c',
'-cert-dcl37-c',
@ -36,6 +38,7 @@ Checks: [
'-cert-msc51-cpp',
'-cert-oop54-cpp',
'-cert-oop57-cpp',
'-cert-err33-c', # Misreports on clang-19: it warns about all functions containing 'remove' in the name, not only about the standard library.
'-clang-analyzer-optin.performance.Padding',
@ -99,6 +102,7 @@ Checks: [
'-modernize-use-emplace',
'-modernize-use-nodiscard',
'-modernize-use-trailing-return-type',
'-modernize-use-designated-initializers',
'-performance-enum-size',
'-performance-inefficient-string-concatenation',

View File

@ -62,7 +62,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:

View File

@ -94,7 +94,7 @@ jobs:
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Generate ChangeLog
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
uses: peter-evans/create-pull-request@v6
with:

View File

@ -58,7 +58,7 @@ jobs:
# BuildDockers:
# needs: [RunConfig]
# if: ${{ !failure() && !cancelled() }}
# uses: ./.github/workflows/reusable_docker.yml
# uses: ./.github/workflows/docker_test_images.yml
# with:
# data: ${{ needs.RunConfig.outputs.data }}
# StyleCheck:

View File

@ -51,7 +51,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:

View File

@ -40,7 +40,7 @@ jobs:
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true

View File

@ -72,7 +72,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:

View File

@ -57,7 +57,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:

View File

@ -102,6 +102,8 @@ jobs:
--job-name '${{inputs.test_name}}' \
--run \
--run-command '''${{inputs.run_command}}'''
# shellcheck disable=SC2319
echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
- name: Post run
if: ${{ !cancelled() }}
run: |

View File

@ -3,8 +3,9 @@
#include <base/defines.h>
#include <fstream>
#include <sstream>
#include <string>
namespace fs = std::filesystem;
bool cgroupsV2Enabled()
{
@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
{
/// This file exists iff the host has cgroups v2 enabled.
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
if (!std::filesystem::exists(controllers_file))
if (!fs::exists(controllers_file))
return false;
return true;
}
catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
{
return false; /// not logging the exception as most callers fall back to cgroups v1
}
@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
std::string cgroup = cgroupV2OfProcess();
auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
fs::path cgroup_dir = cgroupV2PathOfProcess();
if (cgroup_dir.empty())
return false;
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
if (!controllers_file.is_open())
return false;
@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
#endif
}
std::string cgroupV2OfProcess()
fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
chassert(cgroupsV2Enabled());
@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
/// A simpler way to get the membership is:
std::ifstream cgroup_name_file("/proc/self/cgroup");
if (!cgroup_name_file.is_open())
return "";
return {};
/// With cgroups v2, there will be a *single* line with prefix "0::/"
/// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
std::string cgroup;
std::getline(cgroup_name_file, cgroup);
static const std::string v2_prefix = "0::/";
if (!cgroup.starts_with(v2_prefix))
return "";
return {};
cgroup = cgroup.substr(v2_prefix.length());
return cgroup;
/// Note: The 'root' cgroup can have an empty cgroup name; this is valid
return default_cgroups_mount / cgroup;
#else
return "";
return {};
#endif
}

View File

@ -1,7 +1,6 @@
#pragma once
#include <filesystem>
#include <string>
#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();
/// Which cgroup does the process belong to?
/// Returns an empty string if the cgroup cannot be determined.
/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
std::string cgroupV2OfProcess();
std::filesystem::path cgroupV2PathOfProcess();

View File

@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
if (!cgroupsV2MemoryControllerEnabled())
return {};
std::string cgroup = cgroupV2OfProcess();
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.

2
contrib/libunwind vendored

@ -1 +1 @@
Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2

View File

@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
"${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
"${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
"${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
if (APPLE)
set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
endif ()
set(LIBUNWIND_C_SOURCES
"${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"
@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")
target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
# NOTE: sizeof(unw_context_t)/sizeof(unw_cursor_t) depend on this macro, so it should always be set
target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)

View File

@ -33,13 +33,9 @@ RUN apt-get update \
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
COPY * /
ENV FUZZER_ARGS="-max_total_time=60"
SHELL ["/bin/bash", "-c"]
CMD set -o pipefail \
&& timeout -s 9 1h /run_libfuzzer.py 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer

View File

@ -25,7 +25,7 @@ source /utils.lib
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
./setup_minio.sh stateful
./mc admin trace clickminio > /test_output/rubbish.log &
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

View File

@ -12,7 +12,7 @@ MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
@ -54,7 +54,7 @@ source /utils.lib
/usr/share/clickhouse-test/config/install.sh
./setup_minio.sh stateless
./mc admin trace clickminio > /test_output/rubbish.log &
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!
./setup_hdfs_minicluster.sh

View File

@ -10,7 +10,7 @@ cd hadoop-3.3.1
export JAVA_HOME=/usr
mkdir -p target/test/data
chown clickhouse ./target/test/data
sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 &
sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 &
while ! nc -z localhost 12222; do
sleep 1

View File

@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.
## Install Clang-16
## Install Clang-18
Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
```

View File

@ -6,7 +6,20 @@ sidebar_label: MySQL Interface
# MySQL Interface
ClickHouse supports the MySQL wire protocol. This allow tools that are MySQL-compatible to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).
ClickHouse supports the MySQL wire protocol. This allows certain clients that do not have native ClickHouse connectors to leverage the MySQL protocol instead, and it has been validated with the following BI tools:
- [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)
- [Tableau Online](../integrations/tableau-online)
- [QuickSight](../integrations/quicksight)
If you are trying other untested clients or integrations, keep in mind that there could be the following limitations:
- SSL implementation might not be fully compatible; there could be potential [TLS SNI](https://www.cloudflare.com/learning/ssl/what-is-sni/) issues.
- A particular tool might require dialect features (e.g., MySQL-specific functions or settings) that are not implemented yet.
If a native driver is available (e.g., [DBeaver](../integrations/dbeaver)), it is always preferable to use it instead of the MySQL interface. Additionally, while most MySQL language clients should work fine, the MySQL interface is not guaranteed to be a drop-in replacement for a codebase with existing MySQL queries.
If your use case involves a particular tool that does not have a native ClickHouse driver, you would like to use it via the MySQL interface, and you have found certain incompatibilities, please [create an issue](https://github.com/ClickHouse/ClickHouse/issues) in the ClickHouse repository.
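As a rough sketch of these points (an editorial illustration, not part of this documentation change; the default self-managed MySQL port 9004 is an assumption), a MySQL-compatible client connected to that port still executes ClickHouse SQL:
```sql
-- Hypothetical queries issued through a MySQL-compatible client.
-- ClickHouse SQL and ClickHouse functions work as usual:
SELECT version() AS clickhouse_version, currentDatabase() AS db;

-- MySQL-specific dialect features (functions, session variables, etc.) may be
-- missing on the server side, which is the main source of incompatibilities.
```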
## Enabling the MySQL Interface On ClickHouse Cloud

View File

@ -96,3 +96,22 @@ Result:
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```
## Reading nested subcolumns from Array
If nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type with the same subcolumn names. The type of a subcolumn will be `Array` of the type of the original subcolumn.
**Example**
```sql
CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);
SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) from t_arr;
```
```text
┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
│ [1,2] │ Array(UInt32) │ ['Hello','World'] │ Array(String) │
│ [3,4,5] │ Array(UInt32) │ ['This','is','subcolumn'] │ Array(String) │
└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
```

View File

@ -567,12 +567,13 @@ While no standard or recommendation exists for the epoch of Snowflake IDs, imple
**Syntax**
``` sql
generateSnowflakeID([expr])
generateSnowflakeID([expr, [machine_id]])
```
**Arguments**
- `expr` — An arbitrary [expression](../../sql-reference/syntax.md#syntax-expressions) used to bypass [common subexpression elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) if the function is called multiple times in a query. The value of the expression has no effect on the returned Snowflake ID. Optional.
- `machine_id` — A machine ID, the lowest 10 bits are used. [Int64](../data-types/int-uint.md). Optional.
**Returned value**
@ -608,6 +609,16 @@ SELECT generateSnowflakeID(1), generateSnowflakeID(2);
└────────────────────────┴────────────────────────┘
```
**Example with expression and a machine ID**
```
SELECT generateSnowflakeID('expr', 1);
┌─generateSnowflakeID('expr', 1)─┐
│ 7201148511606784002 │
└────────────────────────────────┘
```
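As an editorial illustration of the bit layout (a sketch using ordinary bit functions, not an API provided by this change), the parts of a generated ID can be separated as follows:
```sql
-- Split a Snowflake ID into its documented fields; the aliases are illustrative names.
WITH generateSnowflakeID('expr', 1) AS id
SELECT
    id,
    bitShiftRight(id, 22)               AS unix_ts_ms,      -- top 41 (+1 zero) bits: timestamp in ms
    bitAnd(bitShiftRight(id, 12), 1023) AS machine_id,      -- next 10 bits (here: 1)
    bitAnd(id, 4095)                    AS machine_seq_num;  -- lowest 12 bits: counter
```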
## snowflakeToDateTime
:::warning

View File

@ -82,14 +82,14 @@ FROM LEFT_RIGHT
SELECT
left,
right,
if(left < right, 'left is smaller than right', 'right is greater or equal than left') AS is_smaller
if(left < right, 'left is smaller than right', 'right is smaller or equal than left') AS is_smaller
FROM LEFT_RIGHT
WHERE isNotNull(left) AND isNotNull(right)
┌─left─┬─right─┬─is_smaller──────────────────────────┐
│ 1 │ 3 │ left is smaller than right │
│ 2 │ 2 │ right is greater or equal than left │
│ 3 │ 1 │ right is greater or equal than left │
│ 2 │ 2 │ right is smaller or equal than left │
│ 3 │ 1 │ right is smaller or equal than left │
└──────┴───────┴─────────────────────────────────────┘
```

View File

@ -4,6 +4,9 @@ if (USE_CLANG_TIDY)
set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
endif ()
set(MAX_LINKER_MEMORY 3500)
include(../cmake/limit_jobs.cmake)
include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)
# The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.),

View File

@ -186,6 +186,8 @@ void Client::parseConnectionsCredentials(Poco::Util::AbstractConfiguration & con
history_file = home_path + "/" + history_file.substr(1);
config.setString("history_file", history_file);
}
if (config.has(prefix + ".accept-invalid-certificate"))
config.setBool("accept-invalid-certificate", config.getBool(prefix + ".accept-invalid-certificate"));
}
if (!connection_name.empty() && !connection_found)
@ -277,6 +279,12 @@ void Client::initialize(Poco::Util::Application & self)
else if (config().has("connection"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "--connection was specified, but config does not exist");
if (config().has("accept-invalid-certificate"))
{
config().setString("openSSL.client.invalidCertificateHandler.name", "AcceptCertificateHandler");
config().setString("openSSL.client.verificationMode", "none");
}
/** getenv is thread-safe in Linux glibc and in all sane libc implementations.
* But the standard does not guarantee that subsequent calls will not rewrite the value by returned pointer.
*
@ -731,7 +739,7 @@ bool Client::processWithFuzzing(const String & full_query)
}
if (auto *q = orig_ast->as<ASTSetQuery>())
{
if (auto *setDialect = q->changes.tryGet("dialect"); setDialect && setDialect->safeGet<String>() == "kusto")
if (auto *set_dialect = q->changes.tryGet("dialect"); set_dialect && set_dialect->safeGet<String>() == "kusto")
return true;
}

View File

@ -1,5 +1,6 @@
<!-- Config set into /etc/clickhouse-client/. It's used if no other configs are found. -->
<config>
<!-- Shorthand for self-signed combination in openSSL section below: <accept-invalid-certificate>1</accept-invalid-certificate> -->
<openSSL>
<client> <!-- Used for connection to server's secure tcp port -->
<loadDefaultCAFile>true</loadDefaultCAFile>
@ -72,6 +73,7 @@
Default: "hostname" will be used. -->
<name>default</name>
<!-- For self-signed server certificate when connecting to secure tcp: <accept-invalid-certificate>1</accept-invalid-certificate> -->
<!-- Host that will be used for connection. -->
<hostname>127.0.0.1</hostname>
<port>9000</port>

View File

@ -12,6 +12,7 @@
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeTuple.h>
namespace DB
{
@ -616,6 +617,7 @@ private:
bool is_any_nullable = false;
Tuple args;
args.reserve(equals_functions.size());
DataTypes tuple_element_types;
/// first we create tuple from RHS of equals functions
for (const auto & equals : equals_functions)
{
@ -628,16 +630,18 @@ private:
if (const auto * rhs_literal = equals_arguments[1]->as<ConstantNode>())
{
args.push_back(rhs_literal->getValue());
tuple_element_types.push_back(rhs_literal->getResultType());
}
else
{
const auto * lhs_literal = equals_arguments[0]->as<ConstantNode>();
assert(lhs_literal);
args.push_back(lhs_literal->getValue());
tuple_element_types.push_back(lhs_literal->getResultType());
}
}
auto rhs_node = std::make_shared<ConstantNode>(std::move(args));
auto rhs_node = std::make_shared<ConstantNode>(std::move(args), std::make_shared<DataTypeTuple>(std::move(tuple_element_types)));
auto in_function = std::make_shared<FunctionNode>("in");

View File

@ -1,6 +1,7 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/NestedUtils.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
@ -680,9 +681,33 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage(
bool match_full_identifier = false;
const auto & identifier_full_name = identifier_without_column_qualifier.getFullName();
auto it = table_expression_data.column_name_to_column_node.find(identifier_full_name);
bool can_resolve_directly_from_storage = it != table_expression_data.column_name_to_column_node.end();
if (can_resolve_directly_from_storage && table_expression_data.subcolumn_names.contains(identifier_full_name))
ColumnNodePtr result_column_node;
bool can_resolve_directly_from_storage = false;
bool is_subcolumn = false;
if (auto it = table_expression_data.column_name_to_column_node.find(identifier_full_name); it != table_expression_data.column_name_to_column_node.end())
{
can_resolve_directly_from_storage = true;
is_subcolumn = table_expression_data.subcolumn_names.contains(identifier_full_name);
result_column_node = it->second;
}
/// Check if it's a dynamic subcolumn
else
{
auto [column_name, dynamic_subcolumn_name] = Nested::splitName(identifier_full_name);
auto jt = table_expression_data.column_name_to_column_node.find(column_name);
if (jt != table_expression_data.column_name_to_column_node.end() && jt->second->getColumnType()->hasDynamicSubcolumns())
{
if (auto dynamic_subcolumn_type = jt->second->getColumnType()->tryGetSubcolumnType(dynamic_subcolumn_name))
{
result_column_node = std::make_shared<ColumnNode>(NameAndTypePair{identifier_full_name, dynamic_subcolumn_type}, jt->second->getColumnSource());
can_resolve_directly_from_storage = true;
is_subcolumn = true;
}
}
}
if (can_resolve_directly_from_storage && is_subcolumn)
{
/** In the case when we have an ARRAY JOIN, we should not resolve subcolumns directly from storage.
* For example, consider the following SQL query:
@ -698,11 +723,11 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage(
if (can_resolve_directly_from_storage)
{
match_full_identifier = true;
result_expression = it->second;
result_expression = result_column_node;
}
else
{
it = table_expression_data.column_name_to_column_node.find(identifier_without_column_qualifier.at(0));
auto it = table_expression_data.column_name_to_column_node.find(identifier_without_column_qualifier.at(0));
if (it != table_expression_data.column_name_to_column_node.end())
result_expression = it->second;
}

View File

@ -3416,14 +3416,14 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
function_base = function->build(argument_columns);
/// Do not constant fold get scalar functions
bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";
// bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
// function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";
/** If function is suitable for constant folding try to convert it to constant.
* Example: SELECT plus(1, 1);
* Result: SELECT 2;
*/
if (function_base->isSuitableForConstantFolding() && !disable_constant_folding)
if (function_base->isSuitableForConstantFolding()) // && !disable_constant_folding)
{
auto result_type = function_base->getResultType();
auto executable_function = function_base->prepare(argument_columns);
@ -3832,6 +3832,10 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(
node->convertToNullable();
break;
}
/// Check parent scopes until we find the current query scope.
if (scope_ptr->scope_node->getNodeType() == QueryTreeNodeType::QUERY)
break;
}
}

View File

@ -55,9 +55,9 @@ namespace
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
settings.auth_settings.region,
context->getRemoteHostFilter(),
static_cast<unsigned>(global_settings.s3_max_redirects),
static_cast<unsigned>(global_settings.s3_retry_attempts),
global_settings.enable_s3_requests_logging,
static_cast<unsigned>(local_settings.s3_max_redirects),
static_cast<unsigned>(local_settings.backup_restore_s3_retry_attempts),
local_settings.enable_s3_requests_logging,
/* for_disk_s3 = */ false,
request_settings.get_request_throttler,
request_settings.put_request_throttler,

View File

@ -80,6 +80,7 @@
#include <Common/config_version.h>
#include "config.h"
namespace fs = std::filesystem;
using namespace std::literals;
@ -2565,12 +2566,12 @@ void ClientBase::runInteractive()
word_break_characters,
highlight_callback);
#else
(void)word_break_characters;
LineReader lr(
history_file,
getClientConfiguration().has("multiline"),
query_extenders,
query_delimiters,
word_break_characters);
query_delimiters);
#endif
static const std::initializer_list<std::pair<String, String>> backslash_aliases =

View File

@ -103,7 +103,7 @@ public:
Entry get(const ConnectionTimeouts & timeouts, /// NOLINT
const Settings & settings,
bool force_connected = true) override;
bool force_connected) override;
std::string getDescription() const
{

View File

@ -46,8 +46,7 @@ public:
Patterns delimiters,
std::istream & input_stream_ = std::cin,
std::ostream & output_stream_ = std::cout,
int in_fd_ = STDIN_FILENO
);
int in_fd_ = STDIN_FILENO);
virtual ~LineReader() = default;

View File

@ -362,6 +362,9 @@ ReplxxLineReader::ReplxxLineReader(
rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
/// We don't want the default, "suspend" behavior, it confuses people.
rx.bind_key_internal(replxx::Replxx::KEY::control('Z'), "insert_character");
auto commit_action = [this](char32_t code)
{
/// If we allow multiline and there is already something in the input, start a newline.

View File

@ -49,6 +49,7 @@ void logAboutProgress(LoggerPtr log, size_t processed, size_t total, AtomicStopw
AsyncLoader::Pool::Pool(const AsyncLoader::PoolInitializer & init)
: name(init.name)
, priority(init.priority)
, max_threads(init.max_threads > 0 ? init.max_threads : getNumberOfPhysicalCPUCores())
, thread_pool(std::make_unique<ThreadPool>(
init.metric_threads,
init.metric_active_threads,
@ -56,17 +57,16 @@ AsyncLoader::Pool::Pool(const AsyncLoader::PoolInitializer & init)
/* max_threads = */ std::numeric_limits<size_t>::max(), // Unlimited number of threads, we do worker management ourselves
/* max_free_threads = */ 0, // We do not require free threads
/* queue_size = */0)) // Unlimited queue to avoid blocking during worker spawning
, max_threads(init.max_threads > 0 ? init.max_threads : getNumberOfPhysicalCPUCores())
{}
AsyncLoader::Pool::Pool(Pool&& o) noexcept
: name(o.name)
, priority(o.priority)
, thread_pool(std::move(o.thread_pool))
, ready_queue(std::move(o.ready_queue))
, max_threads(o.max_threads)
, workers(o.workers)
, suspended_workers(o.suspended_workers.load()) // All these constructors are needed because std::atomic is neither copy-constructible, nor move-constructible. We never move pools after init, so it is safe.
, thread_pool(std::move(o.thread_pool))
{}
void cancelOnDependencyFailure(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel)

View File

@ -365,11 +365,11 @@ private:
{
const String name;
const Priority priority;
std::unique_ptr<ThreadPool> thread_pool; // NOTE: we avoid using a `ThreadPool` queue to be able to move jobs between pools.
std::map<UInt64, LoadJobPtr> ready_queue; // FIFO queue of jobs to be executed in this pool. Map is used for faster erasing. Key is `ready_seqno`
size_t max_threads; // Max number of workers to be spawn
size_t workers = 0; // Number of currently executing workers
std::atomic<size_t> suspended_workers{0}; // Number of workers that are blocked by `wait()` call on a job executing in the same pool (for deadlock resolution)
std::unique_ptr<ThreadPool> thread_pool; // NOTE: we avoid using a `ThreadPool` queue to be able to move jobs between pools.
explicit Pool(const PoolInitializer & init);
Pool(Pool&& o) noexcept;

View File

@ -25,6 +25,7 @@
#endif
using namespace DB;
namespace fs = std::filesystem;
namespace DB
{
@ -69,7 +70,7 @@ uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & ke
struct CgroupsV1Reader : ICgroupsReader
{
explicit CgroupsV1Reader(const std::filesystem::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
@ -85,7 +86,7 @@ private:
struct CgroupsV2Reader : ICgroupsReader
{
explicit CgroupsV2Reader(const std::filesystem::path & stat_file_dir)
explicit CgroupsV2Reader(const fs::path & stat_file_dir)
: current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
{
}
@ -129,8 +130,9 @@ std::optional<std::string> getCgroupsV2Path()
if (!cgroupsV2MemoryControllerEnabled())
return {};
String cgroup = cgroupV2OfProcess();
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
fs::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Return the bottom-most nested current memory file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
@ -138,7 +140,7 @@ std::optional<std::string> getCgroupsV2Path()
{
const auto current_path = current_cgroup / "memory.current";
const auto stat_path = current_cgroup / "memory.stat";
if (std::filesystem::exists(current_path) && std::filesystem::exists(stat_path))
if (fs::exists(current_path) && fs::exists(stat_path))
return {current_cgroup};
current_cgroup = current_cgroup.parent_path();
}
@ -148,7 +150,7 @@ std::optional<std::string> getCgroupsV2Path()
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
if (!std::filesystem::exists(path))
if (!fs::exists(path))
return {};
return {default_cgroups_mount / "memory"};
}

View File

@ -37,12 +37,12 @@ uint32_t getCGroupLimitedCPUCores(unsigned default_cpu_count)
/// cgroupsv2
if (cgroupsV2Enabled())
{
/// First, we identify the cgroup the process belongs
std::string cgroup = cgroupV2OfProcess();
if (cgroup.empty())
/// First, we identify the path of the cgroup the process belongs
std::filesystem::path cgroup_path = cgroupV2PathOfProcess();
if (cgroup_path.empty())
return default_cpu_count;
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
auto current_cgroup = cgroup_path;
// Looking for cpu.max in directories from the current cgroup to the top level
// It does not stop on the first time since the child could have a greater value than parent
@ -62,7 +62,7 @@ uint32_t getCGroupLimitedCPUCores(unsigned default_cpu_count)
}
current_cgroup = current_cgroup.parent_path();
}
current_cgroup = default_cgroups_mount / cgroup;
current_cgroup = cgroup_path;
// Looking for cpuset.cpus.effective in directories from the current cgroup to the top level
while (current_cgroup != default_cgroups_mount.parent_path())
{

View File

@ -80,7 +80,7 @@ inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align)
throw std::bad_alloc{};
}
inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept
inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept
{
#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
@ -99,7 +99,7 @@ inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept
return malloc(size);
}
inline ALWAYS_INLINE void * newNoExept(std::size_t size, std::align_val_t align) noexcept
inline ALWAYS_INLINE void * newNoExcept(std::size_t size, std::align_val_t align) noexcept
{
#if USE_GWP_ASAN
if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))

View File

@ -87,7 +87,7 @@ void * operator new(std::size_t size, const std::nothrow_t &) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace);
void * ptr = Memory::newNoExept(size);
void * ptr = Memory::newNoExcept(size);
trace.onAlloc(ptr, actual_size);
return ptr;
}
@ -96,7 +96,7 @@ void * operator new[](std::size_t size, const std::nothrow_t &) noexcept
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace);
void * ptr = Memory::newNoExept(size);
void * ptr = Memory::newNoExcept(size);
trace.onAlloc(ptr, actual_size);
return ptr;
}
@ -105,7 +105,7 @@ void * operator new(std::size_t size, std::align_val_t align, const std::nothrow
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace, align);
void * ptr = Memory::newNoExept(size, align);
void * ptr = Memory::newNoExcept(size, align);
trace.onAlloc(ptr, actual_size);
return ptr;
}
@ -114,7 +114,7 @@ void * operator new[](std::size_t size, std::align_val_t align, const std::nothr
{
AllocationTrace trace;
std::size_t actual_size = Memory::trackMemory(size, trace, align);
void * ptr = Memory::newNoExept(size, align);
void * ptr = Memory::newNoExcept(size, align);
trace.onAlloc(ptr, actual_size);
return ptr;
}

View File

@ -502,6 +502,7 @@ class IColumn;
M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, "Maximum size of batch for multi request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_s3_retry_attempts, 1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore.", 0) \
M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
\
M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \

View File

@ -77,6 +77,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
{"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
{"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."},
{"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."},
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}
}},

View File

@ -230,6 +230,17 @@ public:
virtual bool isDeterministicInScopeOfQuery() const { return true; }
/** These are special flags for functions which return a constant value for the server,
* but the result could be different for different servers in distributed query.
*
* These functions can't support constant folding on the initiator, but can on the follower.
* We can't apply some optimizations as well (e.g. can't remove constant result from GROUP BY key).
* So, it is convenient to have a special flag for them.
*
* Examples are: "__getScalar" and every function from serverConstants.cpp
*/
virtual bool isServerConstant() const { return false; }
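A short SQL illustration of why this flag is needed (an editorial sketch; the cluster name `default` is an assumption): the value is constant on each server but differs between replicas, so it must not be folded on the initiator or removed from a GROUP BY key.
```sql
-- hostName() is a server constant: constant per server, different per replica.
SELECT hostName() AS host, count()
FROM clusterAllReplicas('default', system.one)
GROUP BY host;
```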
/** Lets you know if the function is monotonic in a range of values.
* This is used to work with the index in a sorted chunk of data.
* And allows to use the index not only when it is written, for example `date >= const`, but also, for example, `toMonth(date) >= 11`.
@ -488,6 +499,7 @@ public:
virtual bool isInjective(const ColumnsWithTypeAndName & /*sample_columns*/) const { return false; }
virtual bool isDeterministic() const { return true; }
virtual bool isDeterministicInScopeOfQuery() const { return true; }
virtual bool isServerConstant() const { return false; }
virtual bool isStateful() const { return false; }
using ShortCircuitSettings = IFunctionBase::ShortCircuitSettings;

View File

@ -86,6 +86,8 @@ public:
bool isDeterministicInScopeOfQuery() const override { return function->isDeterministicInScopeOfQuery(); }
bool isServerConstant() const override { return function->isServerConstant(); }
bool isShortCircuit(ShortCircuitSettings & settings, size_t number_of_arguments) const override { return function->isShortCircuit(settings, number_of_arguments); }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & args) const override { return function->isSuitableForShortCircuitArgumentsExecution(args); }

View File

@ -4,10 +4,10 @@
#include <Functions/FunctionHelpers.h>
#include <Core/ServerUUID.h>
#include <Common/Logger.h>
#include <Common/ErrorCodes.h>
#include <Common/logger_useful.h>
#include "base/types.h"
namespace DB
{
@ -96,10 +96,11 @@ struct SnowflakeIdRange
/// 1. calculate Snowflake ID by current timestamp (`now`)
/// 2. `begin = max(available, now)`
/// 3. Calculate `end = begin + input_rows_count` handling `machine_seq_num` overflow
SnowflakeIdRange getRangeOfAvailableIds(const SnowflakeId & available, size_t input_rows_count)
SnowflakeIdRange getRangeOfAvailableIds(const SnowflakeId & available, uint64_t machine_id, size_t input_rows_count)
{
/// 1. `now`
SnowflakeId begin = {.timestamp = getTimestamp(), .machine_id = getMachineId(), .machine_seq_num = 0};
SnowflakeId begin = {.timestamp = getTimestamp(), .machine_id = machine_id, .machine_seq_num = 0};
/// 2. `begin`
if (begin.timestamp <= available.timestamp)
@ -128,13 +129,13 @@ struct Data
/// Guarantee counter monotonicity within one timestamp across all threads generating Snowflake IDs simultaneously.
static inline std::atomic<uint64_t> lowest_available_snowflake_id = 0;
SnowflakeId reserveRange(size_t input_rows_count)
SnowflakeId reserveRange(uint64_t machine_id, size_t input_rows_count)
{
uint64_t available_snowflake_id = lowest_available_snowflake_id.load();
SnowflakeIdRange range;
do
{
range = getRangeOfAvailableIds(toSnowflakeId(available_snowflake_id), input_rows_count);
range = getRangeOfAvailableIds(toSnowflakeId(available_snowflake_id), machine_id, input_rows_count);
}
while (!lowest_available_snowflake_id.compare_exchange_weak(available_snowflake_id, fromSnowflakeId(range.end)));
/// CAS failed --> another thread updated `lowest_available_snowflake_id` and we re-try
@ -165,24 +166,32 @@ public:
{
FunctionArgumentDescriptors mandatory_args;
FunctionArgumentDescriptors optional_args{
{"expr", nullptr, nullptr, "Arbitrary expression"}
{"expr", nullptr, nullptr, "Arbitrary expression"},
{"machine_id", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeUInt), static_cast<FunctionArgumentDescriptor::ColumnValidator>(&isColumnConst), "const UInt*"}
};
validateFunctionArguments(*this, arguments, mandatory_args, optional_args);
return std::make_shared<DataTypeUInt64>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & /*arguments*/, const DataTypePtr &, size_t input_rows_count) const override
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
auto col_res = ColumnVector<UInt64>::create();
typename ColumnVector<UInt64>::Container & vec_to = col_res->getData();
if (input_rows_count != 0)
if (input_rows_count > 0)
{
vec_to.resize(input_rows_count);
uint64_t machine_id = getMachineId();
if (arguments.size() == 2)
{
machine_id = arguments[1].column->getUInt(0);
machine_id &= (1ull << machine_id_bits_count) - 1;
}
Data data;
SnowflakeId snowflake_id = data.reserveRange(input_rows_count); /// returns begin of available snowflake ids range
SnowflakeId snowflake_id = data.reserveRange(machine_id, input_rows_count);
for (UInt64 & to_row : vec_to)
{
@ -208,10 +217,13 @@ public:
REGISTER_FUNCTION(GenerateSnowflakeID)
{
FunctionDocumentation::Description description = R"(Generates a Snowflake ID. The generated Snowflake ID contains the current Unix timestamp in milliseconds (41 + 1 top zero bits), followed by a machine id (10 bits), and a counter (12 bits) to distinguish IDs within a millisecond. For any given timestamp (unix_ts_ms), the counter starts at 0 and is incremented by 1 for each new Snowflake ID until the timestamp changes. In case the counter overflows, the timestamp field is incremented by 1 and the counter is reset to 0. Function generateSnowflakeID guarantees that the counter field within a timestamp increments monotonically across all function invocations in concurrently running threads and queries.)";
FunctionDocumentation::Syntax syntax = "generateSnowflakeID([expression])";
FunctionDocumentation::Arguments arguments = {{"expression", "The expression is used to bypass common subexpression elimination if the function is called multiple times in a query but otherwise ignored. Optional."}};
FunctionDocumentation::Syntax syntax = "generateSnowflakeID([expression, [machine_id]])";
FunctionDocumentation::Arguments arguments = {
{"expression", "The expression is used to bypass common subexpression elimination if the function is called multiple times in a query but otherwise ignored. Optional."},
{"machine_id", "A machine ID, the lowest 10 bits are used. Optional."}
};
FunctionDocumentation::ReturnedValue returned_value = "A value of type UInt64";
FunctionDocumentation::Examples examples = {{"single", "SELECT generateSnowflakeID()", "7201148511606784000"}, {"multiple", "SELECT generateSnowflakeID(1), generateSnowflakeID(2)", ""}};
FunctionDocumentation::Examples examples = {{"no_arguments", "SELECT generateSnowflakeID()", "7201148511606784000"}, {"with_machine_id", "SELECT generateSnowflakeID(1)", "7201148511606784001"}, {"with_expression_and_machine_id", "SELECT generateSnowflakeID('some_expression', 1)", "7201148511606784002"}};
FunctionDocumentation::Categories categories = {"Snowflake ID"};
factory.registerFunction<FunctionGenerateSnowflakeID>({description, syntax, arguments, returned_value, examples, categories});

View File

@ -53,6 +53,8 @@ public:
/// getMacro may return different values on different shards/replicas, so it's not constant for distributed query
bool isSuitableForConstantFolding() const override { return !is_distributed; }
bool isServerConstant() const override { return true; }
size_t getNumberOfArguments() const override
{
return 1;

View File

@ -49,6 +49,8 @@ public:
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool isServerConstant() const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() != 1 || !isString(arguments[0].type) || !arguments[0].column || !isColumnConst(*arguments[0].column))
@ -105,6 +107,8 @@ public:
bool isDeterministic() const override { return false; }
bool isServerConstant() const override { return true; }
bool isSuitableForConstantFolding() const override { return !is_distributed; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

View File

@ -21,117 +21,125 @@ namespace DB
namespace
{
template<typename Derived, typename T, typename ColumnT>
class FunctionServerConstantBase : public FunctionConstantBase<Derived, T, ColumnT>
{
public:
using FunctionConstantBase<Derived, T, ColumnT>::FunctionConstantBase;
bool isServerConstant() const override { return true; }
};
#if defined(__ELF__) && !defined(OS_FREEBSD)
/// buildId() - returns the compiler build id of the running binary.
class FunctionBuildId : public FunctionConstantBase<FunctionBuildId, String, DataTypeString>
class FunctionBuildId : public FunctionServerConstantBase<FunctionBuildId, String, DataTypeString>
{
public:
static constexpr auto name = "buildId";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionBuildId>(context); }
explicit FunctionBuildId(ContextPtr context) : FunctionConstantBase(SymbolIndex::instance().getBuildIDHex(), context->isDistributed()) {}
explicit FunctionBuildId(ContextPtr context) : FunctionServerConstantBase(SymbolIndex::instance().getBuildIDHex(), context->isDistributed()) {}
};
#endif
/// Get the host name. It is constant on single server, but is not constant in distributed queries.
class FunctionHostName : public FunctionConstantBase<FunctionHostName, String, DataTypeString>
class FunctionHostName : public FunctionServerConstantBase<FunctionHostName, String, DataTypeString>
{
public:
static constexpr auto name = "hostName";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionHostName>(context); }
explicit FunctionHostName(ContextPtr context) : FunctionConstantBase(DNSResolver::instance().getHostName(), context->isDistributed()) {}
explicit FunctionHostName(ContextPtr context) : FunctionServerConstantBase(DNSResolver::instance().getHostName(), context->isDistributed()) {}
};
class FunctionServerUUID : public FunctionConstantBase<FunctionServerUUID, UUID, DataTypeUUID>
class FunctionServerUUID : public FunctionServerConstantBase<FunctionServerUUID, UUID, DataTypeUUID>
{
public:
static constexpr auto name = "serverUUID";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionServerUUID>(context); }
explicit FunctionServerUUID(ContextPtr context) : FunctionConstantBase(ServerUUID::get(), context->isDistributed()) {}
explicit FunctionServerUUID(ContextPtr context) : FunctionServerConstantBase(ServerUUID::get(), context->isDistributed()) {}
};
class FunctionTCPPort : public FunctionConstantBase<FunctionTCPPort, UInt16, DataTypeUInt16>
class FunctionTCPPort : public FunctionServerConstantBase<FunctionTCPPort, UInt16, DataTypeUInt16>
{
public:
static constexpr auto name = "tcpPort";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionTCPPort>(context); }
explicit FunctionTCPPort(ContextPtr context) : FunctionConstantBase(context->getTCPPort(), context->isDistributed()) {}
explicit FunctionTCPPort(ContextPtr context) : FunctionServerConstantBase(context->getTCPPort(), context->isDistributed()) {}
};
/// Returns timezone for current session.
class FunctionTimezone : public FunctionConstantBase<FunctionTimezone, String, DataTypeString>
class FunctionTimezone : public FunctionServerConstantBase<FunctionTimezone, String, DataTypeString>
{
public:
static constexpr auto name = "timezone";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionTimezone>(context); }
explicit FunctionTimezone(ContextPtr context) : FunctionConstantBase(DateLUT::instance().getTimeZone(), context->isDistributed()) {}
explicit FunctionTimezone(ContextPtr context) : FunctionServerConstantBase(DateLUT::instance().getTimeZone(), context->isDistributed()) {}
};
/// Returns the server time zone (timezone in which server runs).
class FunctionServerTimezone : public FunctionConstantBase<FunctionServerTimezone, String, DataTypeString>
class FunctionServerTimezone : public FunctionServerConstantBase<FunctionServerTimezone, String, DataTypeString>
{
public:
static constexpr auto name = "serverTimezone";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionServerTimezone>(context); }
explicit FunctionServerTimezone(ContextPtr context) : FunctionConstantBase(DateLUT::serverTimezoneInstance().getTimeZone(), context->isDistributed()) {}
explicit FunctionServerTimezone(ContextPtr context) : FunctionServerConstantBase(DateLUT::serverTimezoneInstance().getTimeZone(), context->isDistributed()) {}
};
/// Returns server uptime in seconds.
class FunctionUptime : public FunctionConstantBase<FunctionUptime, UInt32, DataTypeUInt32>
class FunctionUptime : public FunctionServerConstantBase<FunctionUptime, UInt32, DataTypeUInt32>
{
public:
static constexpr auto name = "uptime";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionUptime>(context); }
explicit FunctionUptime(ContextPtr context) : FunctionConstantBase(context->getUptimeSeconds(), context->isDistributed()) {}
explicit FunctionUptime(ContextPtr context) : FunctionServerConstantBase(context->getUptimeSeconds(), context->isDistributed()) {}
};
/// version() - returns the current version as a string.
class FunctionVersion : public FunctionConstantBase<FunctionVersion, String, DataTypeString>
class FunctionVersion : public FunctionServerConstantBase<FunctionVersion, String, DataTypeString>
{
public:
static constexpr auto name = "version";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionVersion>(context); }
explicit FunctionVersion(ContextPtr context) : FunctionConstantBase(VERSION_STRING, context->isDistributed()) {}
explicit FunctionVersion(ContextPtr context) : FunctionServerConstantBase(VERSION_STRING, context->isDistributed()) {}
};
/// revision() - returns the current revision.
class FunctionRevision : public FunctionConstantBase<FunctionRevision, UInt32, DataTypeUInt32>
class FunctionRevision : public FunctionServerConstantBase<FunctionRevision, UInt32, DataTypeUInt32>
{
public:
static constexpr auto name = "revision";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionRevision>(context); }
explicit FunctionRevision(ContextPtr context) : FunctionConstantBase(ClickHouseRevision::getVersionRevision(), context->isDistributed()) {}
explicit FunctionRevision(ContextPtr context) : FunctionServerConstantBase(ClickHouseRevision::getVersionRevision(), context->isDistributed()) {}
};
class FunctionZooKeeperSessionUptime : public FunctionConstantBase<FunctionZooKeeperSessionUptime, UInt32, DataTypeUInt32>
class FunctionZooKeeperSessionUptime : public FunctionServerConstantBase<FunctionZooKeeperSessionUptime, UInt32, DataTypeUInt32>
{
public:
static constexpr auto name = "zookeeperSessionUptime";
explicit FunctionZooKeeperSessionUptime(ContextPtr context)
: FunctionConstantBase(context->getZooKeeperSessionUptime(), context->isDistributed())
: FunctionServerConstantBase(context->getZooKeeperSessionUptime(), context->isDistributed())
{
}
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionZooKeeperSessionUptime>(context); }
};
class FunctionGetOSKernelVersion : public FunctionConstantBase<FunctionGetOSKernelVersion, String, DataTypeString>
class FunctionGetOSKernelVersion : public FunctionServerConstantBase<FunctionGetOSKernelVersion, String, DataTypeString>
{
public:
static constexpr auto name = "getOSKernelVersion";
explicit FunctionGetOSKernelVersion(ContextPtr context) : FunctionConstantBase(Poco::Environment::osName() + " " + Poco::Environment::osVersion(), context->isDistributed()) {}
explicit FunctionGetOSKernelVersion(ContextPtr context) : FunctionServerConstantBase(Poco::Environment::osName() + " " + Poco::Environment::osVersion(), context->isDistributed()) {}
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionGetOSKernelVersion>(context); }
};
class FunctionDisplayName : public FunctionConstantBase<FunctionDisplayName, String, DataTypeString>
class FunctionDisplayName : public FunctionServerConstantBase<FunctionDisplayName, String, DataTypeString>
{
public:
static constexpr auto name = "displayName";
explicit FunctionDisplayName(ContextPtr context) : FunctionConstantBase(context->getConfigRef().getString("display_name", getFQDNOrHostName()), context->isDistributed()) {}
explicit FunctionDisplayName(ContextPtr context) : FunctionServerConstantBase(context->getConfigRef().getString("display_name", getFQDNOrHostName()), context->isDistributed()) {}
static FunctionPtr create(ContextPtr context) {return std::make_shared<FunctionDisplayName>(context); }
};
}

View File

@ -162,7 +162,7 @@ public:
class RetryStrategy : public Aws::Client::RetryStrategy
{
public:
explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 90000);
explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 5000);
/// NOLINTNEXTLINE(google-runtime-int)
bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override;

View File

@ -51,7 +51,6 @@
#include <Interpreters/SessionTracker.h>
#include <Core/ServerSettings.h>
#include <Interpreters/PreparedSets.h>
#include <Core/Settings.h>
#include <Core/SettingsQuirks.h>
#include <Access/AccessControl.h>
#include <Access/ContextAccess.h>

View File

@ -125,7 +125,7 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
if (isCrossOrComma(kind))
{
data->type = Type::CROSS;
sample_block_with_columns_to_add = right_sample_block;
sample_block_with_columns_to_add = materializeBlock(right_sample_block);
}
else if (table_join->getClauses().empty())
{

View File

@ -40,7 +40,7 @@ ColumnPtr tryGetSubcolumnFromBlock(const Block & block, const DataTypePtr & requ
auto subcolumn_name = requested_subcolumn.getSubcolumnName();
/// If requested subcolumn is dynamic, we should first perform cast and then
/// extract the subcolumn, because the data of dynamic subcolumn can change after cast.
if (elem->type->hasDynamicSubcolumns() && !elem->type->equals(*requested_column_type))
if ((elem->type->hasDynamicSubcolumns() || requested_column_type->hasDynamicSubcolumns()) && !elem->type->equals(*requested_column_type))
{
auto casted_column = castColumn({elem->column, elem->type, ""}, requested_column_type);
auto elem_column = requested_column_type->tryGetSubcolumn(subcolumn_name, casted_column);

View File

@ -87,14 +87,14 @@ bool canRemoveConstantFromGroupByKey(const ConstantNode & root)
else if (function_node)
{
/// Do not allow removing constants like `hostName()`
if (!function_node->getFunctionOrThrow()->isDeterministic())
if (function_node->getFunctionOrThrow()->isServerConstant())
return false;
for (const auto & child : function_node->getArguments())
nodes.push(child.get());
}
else
return false;
// else
// return false;
}
return true;

View File

@ -701,15 +701,15 @@ std::optional<NameAndTypePair> ColumnsDescription::tryGetColumn(const GetColumns
auto jt = subcolumns.get<0>().find(column_name);
if (jt != subcolumns.get<0>().end())
return *jt;
}
/// Check for dynamic subcolumns.
auto [ordinary_column_name, dynamic_subcolumn_name] = Nested::splitName(column_name);
it = columns.get<1>().find(ordinary_column_name);
if (it != columns.get<1>().end() && it->type->hasDynamicSubcolumns())
{
if (auto dynamic_subcolumn_type = it->type->tryGetSubcolumnType(dynamic_subcolumn_name))
return NameAndTypePair(ordinary_column_name, dynamic_subcolumn_name, it->type, dynamic_subcolumn_type);
/// Check for dynamic subcolumns.
auto [ordinary_column_name, dynamic_subcolumn_name] = Nested::splitName(column_name);
it = columns.get<1>().find(ordinary_column_name);
if (it != columns.get<1>().end() && it->type->hasDynamicSubcolumns())
{
if (auto dynamic_subcolumn_type = it->type->tryGetSubcolumnType(dynamic_subcolumn_name))
return NameAndTypePair(ordinary_column_name, dynamic_subcolumn_name, it->type, dynamic_subcolumn_type);
}
}
return {};

View File

@ -226,10 +226,11 @@ void EmbeddedRocksDBBulkSink::consume(Chunk chunk_)
if (chunks_to_write.empty())
return;
size_t num_chunks = chunks_to_write.size();
auto [serialized_key_column, serialized_value_column]
= storage.ttl > 0 ? serializeChunks<true>(std::move(chunks_to_write)) : serializeChunks<false>(std::move(chunks_to_write));
auto sst_file_path = getTemporarySSTFilePath();
LOG_DEBUG(getLogger("EmbeddedRocksDBBulkSink"), "Writing {} rows to SST file {}", serialized_key_column->size(), sst_file_path);
LOG_DEBUG(getLogger("EmbeddedRocksDBBulkSink"), "Writing {} rows from {} chunks to SST file {}", serialized_key_column->size(), num_chunks, sst_file_path);
if (auto status = buildSSTFile(sst_file_path, *serialized_key_column, *serialized_value_column); !status.ok())
throw Exception(ErrorCodes::ROCKSDB_ERROR, "RocksDB write error: {}", status.ToString());

View File

@ -420,7 +420,7 @@ String transformQueryForExternalDatabase(
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "No column names for query '{}' to external table '{}.{}'",
query_info.query_tree->formatASTForErrorMessage(), database, table);
auto clone_query = getASTForExternalDatabaseFromQueryTree(query_info.query_tree);
auto clone_query = getASTForExternalDatabaseFromQueryTree(query_info.query_tree, query_info.table_expression);
return transformQueryForExternalDatabaseImpl(
clone_query,

View File

@ -3,6 +3,7 @@
#include <Storages/transformQueryForExternalDatabaseAnalyzer.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Columns/ColumnConst.h>
@ -10,7 +11,7 @@
#include <Analyzer/QueryNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/ConstantValue.h>
#include <Analyzer/JoinNode.h>
#include <DataTypes/DataTypesNumber.h>
@ -20,6 +21,7 @@ namespace DB
namespace ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
extern const int LOGICAL_ERROR;
}
namespace
@ -55,7 +57,7 @@ public:
}
ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree)
ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree, const QueryTreeNodePtr & table_expression)
{
auto new_tree = query_tree->clone();
@ -63,6 +65,20 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
visitor.visit(new_tree);
const auto * query_node = new_tree->as<QueryNode>();
const auto & join_tree = query_node->getJoinTree();
bool allow_where = true;
if (const auto * join_node = join_tree->as<JoinNode>())
{
if (join_node->getStrictness() != JoinStrictness::All)
allow_where = false;
else if (join_node->getKind() == JoinKind::Left)
allow_where = join_node->getLeftTableExpression()->isEqual(*table_expression);
else if (join_node->getKind() == JoinKind::Right)
allow_where = join_node->getRightTableExpression()->isEqual(*table_expression);
else
allow_where = (join_node->getKind() == JoinKind::Inner);
}
auto query_node_ast = query_node->toAST({ .add_cast_for_constants = false, .fully_qualified_identifiers = false });
const IAST * ast = query_node_ast.get();
@ -76,7 +92,13 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
if (union_ast->list_of_selects->children.size() != 1)
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "QueryNode AST is not a single ASTSelectQuery, got {}", union_ast->list_of_selects->children.size());
return union_ast->list_of_selects->children.at(0);
ASTPtr select_query = union_ast->list_of_selects->children.at(0);
auto * select_query_typed = select_query->as<ASTSelectQuery>();
if (!select_query_typed)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ASTSelectQuery, got {}", select_query ? select_query->formatForErrorMessage() : "nullptr");
if (!allow_where)
select_query_typed->setExpression(ASTSelectQuery::Expression::WHERE, nullptr);
return select_query;
}
}
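Taken together, the new allow_where flag forwards the WHERE clause to the external database only for ALL-strictness joins where the external table is the preserved side (the left side of a LEFT JOIN, the right side of a RIGHT JOIN) or the join is a plain INNER; in every other case the filter stays on the ClickHouse side. A small Python restatement of that rule, with illustrative names that are not part of the sources:

def allow_where_pushdown(kind, strictness, external_is_left, external_is_right):
    # Mirrors the C++ decision above: True means WHERE may be sent to the external DB.
    if strictness != "ALL":               # ANY, SEMI, ANTI and similar: never push down
        return False
    if kind == "LEFT":
        return external_is_left           # external table must be the preserved (left) side
    if kind == "RIGHT":
        return external_is_right          # external table must be the preserved (right) side
    return kind == "INNER"                # FULL, CROSS and the rest keep the WHERE locally

assert allow_where_pushdown("INNER", "ALL", True, False)
assert not allow_where_pushdown("LEFT", "ALL", False, True)    # external table on the NULL-extended side
assert not allow_where_pushdown("RIGHT", "ANY", False, True)   # non-ALL strictness

The test_filter_pushdown case in the PostgreSQL integration test further down exercises exactly these kinds (INNER, LEFT, RIGHT, FULL, ANY LEFT, SEMI RIGHT).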

View File

@ -6,6 +6,6 @@
namespace DB
{
ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree);
ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree, const QueryTreeNodePtr & table_expression);
}

View File

@ -1068,6 +1068,15 @@ def main() -> int:
if build_result:
if build_result.status == SUCCESS:
previous_status = build_result.status
JobReport(
status=SUCCESS,
description="",
test_results=[],
start_time="",
duration=0.0,
additional_files=[],
job_skipped=True,
).dump()
else:
# FIXME: Consider reusing failures for build jobs.
# Just remove this if/else - that makes build job starting and failing immediately
@ -1265,12 +1274,17 @@ def main() -> int:
elif job_report.pre_report:
print(f"ERROR: Job was killed - generate evidence")
job_report.update_duration()
# Job was killed!
ret_code = os.getenv("JOB_EXIT_CODE", "")
if ret_code:
try:
job_report.exit_code = int(ret_code)
except ValueError:
pass
if Utils.is_killed_with_oom():
print("WARNING: OOM while job execution")
error = f"Out Of Memory, exit_code {job_report.exit_code}, after {job_report.duration}s"
error = f"Out Of Memory, exit_code {job_report.exit_code}, after {int(job_report.duration)}s"
else:
error = f"Unknown, exit_code {job_report.exit_code}, after {job_report.duration}s"
error = f"Unknown, exit_code {job_report.exit_code}, after {int(job_report.duration)}s"
CIBuddy().post_error(error, job_name=_get_ext_check_name(args.job_name))
if CI.is_test_job(args.job_name):
gh = GitHub(get_best_robot_token(), per_page=100)

View File

@ -26,6 +26,7 @@ class CIBuddy:
self.pr_number = pr_info.number
self.head_ref = pr_info.head_ref
self.commit_url = pr_info.commit_html_url
self.sha = pr_info.sha[:10]
@staticmethod
def _get_webhooks():
@ -69,8 +70,10 @@ class CIBuddy:
line_err = f":red_circle: *Error: {error_description}*\n\n"
line_ghr = f" *Runner:* `{instance_type}`, `{instance_id}`\n"
line_job = f" *Job:* `{job_name}`\n"
line_pr_ = f" *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>\n"
line_br_ = f" *Branch:* `{self.head_ref}`, <{self.commit_url}|commit>\n"
line_pr_ = f" *PR:* <https://github.com/{self.repo}/pull/{self.pr_number}|#{self.pr_number}>, <{self.commit_url}|{self.sha}>\n"
line_br_ = (
f" *Branch:* `{self.head_ref}`, <{self.commit_url}|{self.sha}>\n"
)
message = line_err
message += line_job
if with_instance_info:
@ -85,4 +88,4 @@ class CIBuddy:
if __name__ == "__main__":
# test
buddy = CIBuddy(dry_run=True)
buddy.post_error("Out of memory")
buddy.post_error("TEst")

View File

@ -763,22 +763,13 @@ class CiCache:
# TIMEOUT * MAX_ROUNDS_TO_WAIT must be less than 6h (GH job timeout) with room left for the rest of the RunConfig work
TIMEOUT = 3000 # 50 min
MAX_ROUNDS_TO_WAIT = 6
MAX_JOB_NUM_TO_WAIT = 3
round_cnt = 0
def _has_build_job():
for job in self.jobs_to_wait:
if CI.is_build_job(job):
return True
return False
if not is_release:
# in PRs we can wait only for builds, TIMEOUT*MAX_ROUNDS_TO_WAIT=100min is enough
MAX_ROUNDS_TO_WAIT = 2
while (
len(self.jobs_to_wait) > MAX_JOB_NUM_TO_WAIT or _has_build_job()
) and round_cnt < MAX_ROUNDS_TO_WAIT:
while round_cnt < MAX_ROUNDS_TO_WAIT:
round_cnt += 1
GHActions.print_in_group(
f"Wait pending jobs, round [{round_cnt}/{MAX_ROUNDS_TO_WAIT}]:",
@ -820,6 +811,10 @@ class CiCache:
f"Job [{job_name}_[{batch}/{num_batches}]] is not pending anymore"
)
job_config.batches.remove(batch)
if not job_config.batches:
print(f"Remove job [{job_name}] from jobs_to_do")
self.jobs_to_skip.append(job_name)
del self.jobs_to_do[job_name]
else:
print(
f"NOTE: Job [{job_name}:{batch}] finished failed - do not add to ready"
@ -830,9 +825,7 @@ class CiCache:
await_finished.add(job_name)
for job in await_finished:
self.jobs_to_skip.append(job)
del self.jobs_to_wait[job]
del self.jobs_to_do[job]
if not dry_run:
expired_sec = int(time.time()) - start_at
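For scale, the constants above imply a bounded waiting budget; a quick back-of-the-envelope check (variable names are ours, not from the script):

timeout_s = 3000        # TIMEOUT, 50 min per round
release_rounds = 6      # MAX_ROUNDS_TO_WAIT on release branches
pr_rounds = 2           # MAX_ROUNDS_TO_WAIT in PRs
print(timeout_s * release_rounds / 3600)   # 5.0 hours, safely under the 6 h GitHub job limit
print(timeout_s * pr_rounds / 60)          # 100.0 minutes, matching the inline comment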

View File

@ -49,14 +49,15 @@ class GHActions:
class Shell:
@classmethod
def run_strict(cls, command):
subprocess.run(
command + " 2>&1",
res = subprocess.run(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=True,
)
return res.stdout.strip()
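Because stdout and stderr are now captured and check=True is set, run_strict returns the trimmed stdout and raises subprocess.CalledProcessError on a non-zero exit; the release script below relies on this to resolve tags to commit hashes. A usage sketch (the tag value is hypothetical and the surrounding module's subprocess import is assumed):

try:
    sha = Shell.run_strict("git rev-parse v24.7.1.1-new")   # e.g. a 40-character hash, trailing newline stripped
    assert sha
except subprocess.CalledProcessError as e:
    print("command failed:", e.stderr)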
@classmethod
def run(cls, command):

View File

@ -58,14 +58,14 @@ def decompress_fast(archive_path: Path, result_path: Optional[Path] = None) -> N
archive_path,
result_path,
)
program_part = "--use-compress-program='zstd --threads=0'"
program_part = "--use-compress-program='zstd --threads=0 -d'"
elif PIGZ.exists():
logging.info(
"pigz found, will compress and decompress faster ('%s' -> '%s')",
archive_path,
result_path,
)
program_part = "--use-compress-program='pigz'"
program_part = "--use-compress-program='pigz -d'"
else:
program_part = "-z"
logging.info(

View File

@ -14,6 +14,7 @@ from ssh import SSHAgent
from env_helper import GITHUB_REPOSITORY, S3_BUILDS_BUCKET
from s3_helper import S3Helper
from autoscale_runners_lambda.lambda_shared.pr import Labels
from ci_utils import Shell
from version_helper import (
FILE_WITH_VERSION_PATH,
GENERATED_CONTRIBUTORS,
@ -65,6 +66,8 @@ class ReleaseInfo:
commit_sha: str
# lts or stable
codename: str
previous_release_tag: str
previous_release_sha: str
@staticmethod
def from_file(file_path: str) -> "ReleaseInfo":
@ -79,6 +82,8 @@ class ReleaseInfo:
version = None
release_branch = None
release_tag = None
previous_release_tag = None
previous_release_sha = None
codename = None
assert release_type in ("patch", "new")
if release_type == "new":
@ -101,6 +106,11 @@ class ReleaseInfo:
codename = (
VersionType.STABLE
) # dummy value (artifactory won't be updated for new release)
previous_release_tag = expected_prev_tag
previous_release_sha = Shell.run_strict(
f"git rev-parse {previous_release_tag}"
)
assert previous_release_sha
if release_type == "patch":
with checkout(commit_ref):
_, commit_sha = ShellRunner.run(f"git rev-parse {commit_ref}")
@ -118,9 +128,10 @@ class ReleaseInfo:
)
if version.patch == 1:
expected_version = copy(version)
previous_release_tag = f"v{version.major}.{version.minor}.1.1-new"
expected_version.bump()
expected_tag_prefix = (
f"v{expected_version.major}.{expected_version.minor}-"
f"v{expected_version.major}.{expected_version.minor}."
)
expected_tag_suffix = "-new"
else:
@ -128,6 +139,7 @@ class ReleaseInfo:
f"v{version.major}.{version.minor}.{version.patch-1}."
)
expected_tag_suffix = f"-{version.get_stable_release_type()}"
previous_release_tag = git.latest_tag
if git.latest_tag.startswith(
expected_tag_prefix
) and git.latest_tag.endswith(expected_tag_suffix):
@ -137,8 +149,15 @@ class ReleaseInfo:
False
), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]"
previous_release_sha = Shell.run_strict(
f"git rev-parse {previous_release_tag}"
)
assert previous_release_sha
assert (
release_branch
and previous_release_tag
and previous_release_sha
and commit_sha
and release_tag
and version
@ -150,6 +169,8 @@ class ReleaseInfo:
release_tag=release_tag,
version=version.string,
codename=codename,
previous_release_tag=previous_release_tag,
previous_release_sha=previous_release_sha,
)
with open(outfile, "w", encoding="utf-8") as f:
print(json.dumps(dataclasses.asdict(res), indent=2), file=f)
@ -618,6 +639,8 @@ sudo apt install --yes --no-install-recommends python3-dev python3-pip gh unzip
sudo apt install --yes python3-boto3
sudo apt install --yes python3-github
sudo apt install --yes python3-unidiff
sudo apt install --yes python3-tqdm # cloud changelog
sudo apt install --yes python3-thefuzz # cloud changelog
sudo apt install --yes s3fs
### INSTALL AWS CLI

View File

@ -24,6 +24,18 @@ from tee_popen import TeePopen
NO_CHANGES_MSG = "Nothing to run"
class SensitiveFormatter(logging.Formatter):
@staticmethod
def _filter(s):
return re.sub(
r"(.*)(AZURE_CONNECTION_STRING.*\')(.*)", r"\1AZURE_CONNECTION_STRING\3", s
)
def format(self, record):
original = logging.Formatter.format(self, record)
return self._filter(original)
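The formatter rewrites any log line that embeds the Azure connection string before it reaches the handlers. A quick demonstration of the regex on a made-up record (run in the same module; the secret value is fabricated):

import logging

fmt = SensitiveFormatter("%(levelname)s %(message)s")
record = logging.LogRecord(
    "test", logging.INFO, __file__, 1,
    "env: AZURE_CONNECTION_STRING='AccountKey=secret'", None, None
)
print(fmt.format(record))   # INFO env: AZURE_CONNECTION_STRING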
def get_additional_envs(
check_name: str, run_by_hash_num: int, run_by_hash_total: int
) -> List[str]:
@ -213,6 +225,9 @@ def parse_args():
def main():
logging.basicConfig(level=logging.INFO)
for handler in logging.root.handlers:
# pylint: disable=protected-access
handler.setFormatter(SensitiveFormatter(handler.formatter._fmt)) # type: ignore
stopwatch = Stopwatch()

View File

@ -74,7 +74,8 @@ def get_run_command(
f"--volume={repo_path}/tests:/usr/share/clickhouse-test "
f"--volume={result_path}:/test_output "
"--security-opt seccomp=unconfined " # required to issue io_uring sys-calls
f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}"
f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image} "
"python3 ./utils/runner.py"
)

View File

@ -260,18 +260,29 @@ def main():
failed_to_get_info = False
has_failed_statuses = False
for status in statuses:
if not CI.is_required(status.context):
if not CI.is_required(status.context) or status.context in (
CI.StatusNames.SYNC,
CI.StatusNames.PR_CHECK,
):
# CI.StatusNames.SYNC or CI.StatusNames.PR_CHECK should not be checked
continue
print(f"Check status [{status.context}], [{status.state}]")
if status.state == FAILURE:
has_failed_statuses = True
failed_cnt = Utils.get_failed_tests_number(status.description)
if failed_cnt is None:
failed_to_get_info = True
print(
f"WARNING: failed to get number of failed tests from [{status.description}]"
)
else:
if failed_cnt > max_failed_tests_per_job:
job_name_with_max_failures = status.context
max_failed_tests_per_job = failed_cnt
total_failed_tests += failed_cnt
print(
f"Failed test cases in [{status.context}] is [{failed_cnt}], total failures [{total_failed_tests}]"
)
elif status.state != SUCCESS and status.context not in (
CI.StatusNames.SYNC,
CI.StatusNames.PR_CHECK,

View File

@ -3,6 +3,7 @@
import csv
import logging
import os
import re
import subprocess
import sys
from pathlib import Path
@ -19,6 +20,18 @@ from stopwatch import Stopwatch
from tee_popen import TeePopen
class SensitiveFormatter(logging.Formatter):
@staticmethod
def _filter(s):
return re.sub(
r"(.*)(AZURE_CONNECTION_STRING.*\')(.*)", r"\1AZURE_CONNECTION_STRING\3", s
)
def format(self, record):
original = logging.Formatter.format(self, record)
return self._filter(original)
def get_additional_envs(check_name: str) -> List[str]:
result = []
azure_connection_string = get_parameter_from_ssm("azure_connection_string")
@ -117,6 +130,9 @@ def process_results(
def run_stress_test(docker_image_name: str) -> None:
logging.basicConfig(level=logging.INFO)
for handler in logging.root.handlers:
# pylint: disable=protected-access
handler.setFormatter(SensitiveFormatter(handler.formatter._fmt)) # type: ignore
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)

View File

@ -1,5 +1,5 @@
#!/usr/bin/env python3
import copy
import unittest
import random
@ -416,6 +416,30 @@ class TestCIConfig(unittest.TestCase):
"""
checks ci.py job configuration
"""
def _reset_ci_cache_to_wait_all_jobs(ci_cache):
# pretend there are pending jobs that we need to wait
ci_cache.jobs_to_wait = dict(ci_cache.jobs_to_do)
for job, config in ci_cache.jobs_to_wait.items():
assert config.batches
config.pending_batches = list(config.batches)
for batch in range(config.num_batches):
record = CiCache.Record(
record_type=CiCache.RecordType.PENDING,
job_name=job,
job_digest=ci_cache.job_digests[job],
batch=batch,
num_batches=config.num_batches,
release_branch=True,
)
for record_t_, records_ in ci_cache.records.items():
if record_t_.value == CiCache.RecordType.PENDING.value:
records_[record.to_str_key()] = record
assert not ci_cache.jobs_to_skip
assert ci_cache.jobs_to_wait
ci_cache.jobs_to_skip = []
settings = CiSettings()
settings.no_ci_cache = True
pr_info = PRInfo(github_event=_TEST_EVENT_JSON)
@ -432,26 +456,6 @@ class TestCIConfig(unittest.TestCase):
assert not ci_cache.jobs_to_skip
assert not ci_cache.jobs_to_wait
# pretend there are pending jobs that we need to wait
ci_cache.jobs_to_wait = dict(ci_cache.jobs_to_do)
for job, config in ci_cache.jobs_to_wait.items():
assert not config.pending_batches
assert config.batches
config.pending_batches = list(config.batches)
for job, config in ci_cache.jobs_to_wait.items():
for batch in range(config.num_batches):
record = CiCache.Record(
record_type=CiCache.RecordType.PENDING,
job_name=job,
job_digest=ci_cache.job_digests[job],
batch=batch,
num_batches=config.num_batches,
release_branch=True,
)
for record_t_, records_ in ci_cache.records.items():
if record_t_.value == CiCache.RecordType.PENDING.value:
records_[record.to_str_key()] = record
def _test_await_for_batch(
ci_cache: CiCache, record_type: CiCache.RecordType, batch: int
) -> None:
@ -477,32 +481,76 @@ class TestCIConfig(unittest.TestCase):
and batch < config_.num_batches
):
assert batch not in config_.pending_batches
else:
assert batch in config_.pending_batches
for _, config_ in ci_cache.jobs_to_do.items():
# jobs to do must have batches to run before/after await
# if it's an empty list after await - apparently job has not been removed after await
assert config_.batches
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 0)
# check all one-batch jobs are in jobs_to_skip
for job in all_jobs_in_wf:
config = CI.JOB_CONFIGS[job]
if config.num_batches == 1:
self.assertTrue(job in ci_cache.jobs_to_skip)
self.assertTrue(job not in ci_cache.jobs_to_do)
else:
self.assertTrue(job not in ci_cache.jobs_to_skip)
self.assertTrue(job in ci_cache.jobs_to_do)
_test_await_for_batch(ci_cache, CiCache.RecordType.FAILED, 1)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 2)
self.assertTrue(len(ci_cache.jobs_to_skip) > 0)
self.assertTrue(len(ci_cache.jobs_to_do) > 0)
_reset_ci_cache_to_wait_all_jobs(ci_cache)
_test_await_for_batch(ci_cache, CiCache.RecordType.FAILED, 0)
tested = False
for job, config in ci_cache.jobs_to_do.items():
if config.batches == [0]:
tested = True
self.assertTrue(
job not in ci_cache.jobs_to_wait,
"Job must be removed from @jobs_to_wait, because its only batch has FAILED cache record",
)
self.assertCountEqual(
list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip, all_jobs_in_wf
ci_cache.jobs_to_skip,
[],
"No jobs must be skipped, since all cache records are of type FAILED",
)
assert tested
# reset jobs_to_wait after previous test
_reset_ci_cache_to_wait_all_jobs(ci_cache)
assert not ci_cache.jobs_to_skip
# set batch 0 as SUCCESSFUL in ci cache
jobs_to_do_prev = list(ci_cache.jobs_to_do)
jobs_to_skip_prev = []
jobs_to_wait_prev = list(ci_cache.jobs_to_wait)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 0)
self.assertTrue(len(jobs_to_skip_prev) != len(ci_cache.jobs_to_skip))
self.assertTrue(len(jobs_to_wait_prev) > len(ci_cache.jobs_to_wait))
self.assertCountEqual(
list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip,
jobs_to_do_prev + jobs_to_skip_prev,
)
# set batch 1 as SUCCESSFUL in ci cache
jobs_to_do_prev = list(ci_cache.jobs_to_do)
jobs_to_skip_prev = list(ci_cache.jobs_to_skip)
jobs_to_wait_prev = list(ci_cache.jobs_to_wait)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 1)
self.assertTrue(len(jobs_to_skip_prev) != len(ci_cache.jobs_to_skip))
self.assertTrue(len(jobs_to_wait_prev) > len(ci_cache.jobs_to_wait))
self.assertCountEqual(
list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip,
jobs_to_do_prev + jobs_to_skip_prev,
)
# set batch 3, 4, 5, 6 as SUCCESSFUL in ci cache
jobs_to_do_prev = list(ci_cache.jobs_to_do)
jobs_to_skip_prev = list(ci_cache.jobs_to_skip)
jobs_to_wait_prev = list(ci_cache.jobs_to_wait)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 2)
self.assertTrue(ci_cache.jobs_to_do)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 3)
self.assertTrue(ci_cache.jobs_to_do)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 4)
self.assertTrue(ci_cache.jobs_to_do)
_test_await_for_batch(ci_cache, CiCache.RecordType.SUCCESSFUL, 5)
self.assertTrue(
not ci_cache.jobs_to_do
) # by this moment there must be no jobs left as batch 5 is currently the maximum
self.assertTrue(len(jobs_to_skip_prev) != len(ci_cache.jobs_to_skip))
self.assertTrue(len(jobs_to_wait_prev) > len(ci_cache.jobs_to_wait))
self.assertCountEqual(
list(ci_cache.jobs_to_do) + ci_cache.jobs_to_skip,
jobs_to_do_prev + jobs_to_skip_prev,
)
def test_ci_py_filters_not_affected_jobs_in_prs(self):

View File

@ -3386,13 +3386,13 @@ def parse_args():
parser.add_argument(
"--replace-replicated-with-shared",
action="store_true",
default=os.environ.get("USE_META_IN_KEEPER_FOR_MERGE_TREE", False),
default=os.environ.get("REPLACE_RMT_WITH_SMT", False),
help="Replace ReplicatedMergeTree engine with SharedMergeTree",
)
parser.add_argument(
"--replace-non-replicated-with-shared",
action="store_true",
default=False,
default=os.environ.get("REPLACE_MT_WITH_SMT", False),
help="Replace ordinary MergeTree engine with SharedMergeTree",
)

View File

@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFhTCCA22gAwIBAgIUZHeZ4ME7rL1RYGJZGPgOTHl7btMwDQYJKoZIhvcNAQEL
BQAwUjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjQwNzAx
MDUxMTI0WhcNMzQwNjI5MDUxMTI0WjBSMQswCQYDVQQGEwJSVTETMBEGA1UECAwK
U29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQsw
CQYDVQQDDAJjYTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALf623IC
1r6Bzwa4dTufRPvFWQinT4uIBv7UvAk8tu8kmLSMOOC2COALNRCSu0s1SFcO6i5G
CgmaWdoTXuw+NYH9n5rU06N991NBfCoLbf0eSoie7bRdTCKkqWWKaw+dCvKL5vWj
LEZRmdh8ZN0lxa05eIsLxErvgWqE8hOOQ7jgRqYLAagp6XkMdB+YRkXFtbVcze83
ZNxFqNjdgwYYybaqDZKGeOnRDRof7P/cyMkbGNUI7+JYA5eGmDgiOTGbaCZx5AUK
DKmkdmy5IJBWS0rYR8Ol9Sjs6NSYkmYiLFEWFNSvI88K99+Ab3DEi9JMEqMeNTJ5
1sx48e2OfMG7ywoGj3D3zf7SUxAfwW+7Ls+v7c/kB9hvspA9iVsqjAKcSPAPnJXk
zR5r+zcPwjb/dvGGGB+D7PhvreEpj0n7PRFEB07jviA1E8zkwdk1L9FefYMG2oRl
8J7miWxoyqUOtA6AYhbN36+I0iJkjxIjXLLzB/dhOzerNTzRHcJtmgSyIpcPIV42
IsJBttLOW0Mpkfw8z88xji+1VWHm+H8sOWCzbfjncScu/z5Xhb92C2T+AG+RpakB
LwyTsovf+xv1gGF/1gmmdKSoV/JZuHSczMmmDLJ9vXRnIpK0AgL16lfs6o0C2Pe2
OHJv0oViokU5mW+Mge3U9kUrh6BVITk8UjUDAgMBAAGjUzBRMB0GA1UdDgQWBBRA
x0+tgav3Q2gRt34AtKZiGfFH7zAfBgNVHSMEGDAWgBRAx0+tgav3Q2gRt34AtKZi
GfFH7zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBcV7s7au6r
bEkHaW9RbGe/7iHesOcH2uyiALGeiCF+pfN9QbfkpJJbMRDhKr+sVqJH6lS+Iys5
MpPaRHOInNs5ajiPQcIxtxAnw2HEsy6LUVvs6RUHrS7Bxlc406YQCY/sPyItAt8t
U/qhx5VNj6Mx1smyKZHFHZIZ0eYieriJkK6NB2E7Ji6EPGe7lkh3kEoVTfaSVCkP
p3d4U603kzCk8PGkYlhjWUCVMhDRwgEy6FXHmU156HivGpYurKcab7cMUfr50MyK
1Mx/NaHOYHhIIQ87t9wF2yQaMskEcyK9XOorp+HFtnTdci+6+J3u2Kmyckq81uy9
uZreH8bUKgmntOhSqtl4tWGMWxX78c7rX2cGIbSJsZzzjmUdhvnpYeuhCCrUPzY3
kIt9Q6q7DCKG+f+D4bHQ292emEfKYb9LgftyOX3pcAJ+d3c56Cs7c0RRVFAZyVnX
1O9ZvHrypmii5RARNuyVtttgDpuGBrRCa2kSNlXD9PJJitI2kfoWrCI/88Pw8H/y
bLO9/ZcCOsxBGmfrQDMUSC8rE8JLUSKV80otWwtHcxcnfpsobs64/Rz6aOLYEiS4
/0M6xyUYdOsEGUh1uyX5qxto8iOWop4S9DLy1QVx+N8/fC6gjhMb4Vg2oTBnHlGr
JcxdYP8jVe6tUgvRKAyNlccPIq20udfGTw==
-----END CERTIFICATE-----

View File

@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC3+ttyAta+gc8G
uHU7n0T7xVkIp0+LiAb+1LwJPLbvJJi0jDjgtgjgCzUQkrtLNUhXDuouRgoJmlna
E17sPjWB/Z+a1NOjffdTQXwqC239HkqInu20XUwipKllimsPnQryi+b1oyxGUZnY
fGTdJcWtOXiLC8RK74FqhPITjkO44EamCwGoKel5DHQfmEZFxbW1XM3vN2TcRajY
3YMGGMm2qg2Shnjp0Q0aH+z/3MjJGxjVCO/iWAOXhpg4Ijkxm2gmceQFCgyppHZs
uSCQVktK2EfDpfUo7OjUmJJmIixRFhTUryPPCvffgG9wxIvSTBKjHjUyedbMePHt
jnzBu8sKBo9w983+0lMQH8Fvuy7Pr+3P5AfYb7KQPYlbKowCnEjwD5yV5M0ea/s3
D8I2/3bxhhgfg+z4b63hKY9J+z0RRAdO474gNRPM5MHZNS/RXn2DBtqEZfCe5ols
aMqlDrQOgGIWzd+viNIiZI8SI1yy8wf3YTs3qzU80R3CbZoEsiKXDyFeNiLCQbbS
zltDKZH8PM/PMY4vtVVh5vh/LDlgs23453EnLv8+V4W/dgtk/gBvkaWpAS8Mk7KL
3/sb9YBhf9YJpnSkqFfyWbh0nMzJpgyyfb10ZyKStAIC9epX7OqNAtj3tjhyb9KF
YqJFOZlvjIHt1PZFK4egVSE5PFI1AwIDAQABAoICACPKkBVqevddznYqvVfHgMaQ
GmWGEwH8iQvbO+d6IcZ9Mp3xJ4GDuE9Jrt2WBgy4BWY2VxWAd6RtnCxCF3XTFlq/
CeU3oSKNt2dBdpPBXHjhAzP34xtzCQQ2T2QbHP/RF9eeouRHha36YvkMHh0to3qz
EwHN3LaNSHp9+ZAXXZJiQyw7OOvLl4l+hDq/R3B8aYCqWL/AYYU+of3qffpaKSe5
hmFqeqxWCHv8GQGfpIoHFPogvnYklDJOs188PS8q4gSUpHzceyEnhwhYrX6W4xfg
4nkr2HlJOXReaCCpZkCyEIRdvBQjuJwI4uUnv2G+feOw1T2IGWL25m3GCfwweeuZ
AWqqbj5Xnl1+VgvaNm68aHBVlfZ14rIJ/Hu1yJV/E+5m8+3xewAor0jshWceC3OS
lhrZp+EyrwxsKj17s2IBhk4EIOLjfosoDqdw3E2DrtqKrnI63MLUTaEqHWyXnF82
Mn880DR/E3Byj2kKtdIVesAdO0ARtIRqedPunpcvhp1eMZ2wrLJXAHMDBJlUZF64
ZAyO3ll79PhBn/QShGu+KQ682Ns1T3j3J14n2zqBKsU1GzSLBOZO8QXfFELsv6Xj
NGYPg2SbkRwbYHEPr6G9ehsxKaTpMAtPesdz1devKDI4nSpd2qE9ZfREH+CW0MkK
bpiL20d4blIG1DP84qodAoIBAQDEBXbn0IEeyL3j50/AUJY/K4seeDm1ltJmhoNU
HC0dwt2FOHrD1J/Cx6S19zuEVt1qMwJXELxOVzixykKPy73ETW+Qfcpo2J51THrl
fSEUpoheD37Cwjv9vzb6Qb4ZXDr4acLIB06mAUKfo0Pez2x6IybWqS/dZoD1bfh5
wiSCITXQB4n6mI7chfDDyTqgf3C5U5b2UABLDdIKQZHY5kX55Vfr0Lz+Gt2t6uUy
hUiN4C+Qrx1LoHY4x7g1SG9lKASG5zv5dksc+a6uYsS4N8TM3ZQz7BnZrCQPNiWz
K70p5pMjdVTHmQqThoww9y+rRiHMU3qUSZ0tX4nrY99/MiFNAoIBAQDwRi0GzkeA
ECWfr2R+vh5lnxXBeAvM46v+8CzwJHNxtGAhB1ojod7GxOxHiMnWDXubRiNVlhx0
OHeCq11hSQh2+a7s8xwxYbC3paYf1ZQq81YmTGd8v+/QX9b21Kq9dpx+G2C3eikZ
rHMuSifrSEwZMxdw/gKvQ+x66PTNeK8fIgAtZsE96VX3u7sCCNhbBElWWg5OwwMf
0RH5Pi/O8UXxkZs2SUKFaXUFMxjYn9r9636QI8LaLsCvcbPjrhshPtHWHqgrZzia
8gD00ty3mcydRU//xNfHruew2iRZUVVB8E0GQ80eQCsHrI0YkP876at55DKbyZLi
pGisswDdyoePAoIBAQCfOFUSDMGksKfVY8Da1FqGKydQc5Vkk4KJdr9b5Zsb7nmi
yXKmeKHgQ+vdRlmYsDofFhnTgKuD8sMBXuncBUE6Hy/8rMBpk3J6ksnMeGwTawWf
aXSsW7sA7H2KnzX7QXhA6R8m/RB8NM/Z/XKWm+Nf/geBG2Hes3ESaeYHeJPRA4rD
VCxWWe7I3x7m2R/+2CVG08fugKMMU8iFPidqh/jU994HlMC+ojHoH8FV3P9HBo98
NVFVaZw9mLRey1x4TR4IOfD0O4rdmU2zJlTl0Z7QTTchSajhrzqHBJNwyplXXIut
pJNdoUj+9a4P4+hbdZbkcfZCVt0NyumrQq96kMyNAoIBAQC8Jcemd6tHnd1IYPip
wshcvSk1GYLsCzAo7vvRhEmeNSe9VVuBYYToU1XOYk/ivSYYneCXaXeYjosPGI+D
2cO2HqiaxXhDNwNBm4YtkXp385p3XQcAifAJVLMpD7kvqYy7t0aSBiffAcoj6k++
Dxv/86EcituhUOgCCx9/Mky3bVGEO92AVJJL7oZu/mwlQYMICnb/t/dfV8DiUo7v
VsF1e5Z1+QINJFpNGwkOwIakpV6zEj3Hyip6d+fslu6Jpv570//OFQMfWffjvqa1
Ysr2MEuHtiVeKRkmxHsIMgc4HirCiB872/WWCclm/kvPJ5huV4JFuZ+6uBF7gqRg
CPMDAoIBAGs0KCL3a/eXIPWhFcOgtpcNTb3o4gK/s38+GLIVbdP7MUNEkMeWq+Pf
PBUca2p1M8CkbpRi0OAvg1gewAR/+bCdyzoipl1+3v3goYC3J5TAs5NT/siBcjrm
QcTKEHmTeVu82Kpi1JVtcfS2TWuSmT6vSB2A2H3R4GEPQQXnXeS/FnRA0I85MFzq
zxMkar5VY7Eg0Z3/bN5dNqAYSzqjgMMhRMPN1W54gjL2ev65Mzt1jYf6xz7gpJOh
6Fw2EOrM23c4OckZx2dZ7Je6ns2U82D81ffhXRwWfQxoKXvMVMccBJmHTbkVEZlE
VAq9BWwNistc+YfRka1OOeq37MgpXq8=
-----END PRIVATE KEY-----

View File

@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFLzCCAxcCFA6o2DM1UGQ1NNCQzp0n4hb3iSd9MA0GCSqGSIb3DQEBCwUAMFIx
CzAJBgNVBAYTAlJVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
cm5ldCBXaWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTI0MDcwMTA1MTEy
NloXDTM0MDYyOTA1MTEyNlowVjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUt
U3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UE
AwwGY2xpZW50MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAl9i6heiD
2inNs0lKt7XjK0QyXKyIg63uVO39ZpLY5sj1noZnbFmRo5MYC4MMIrlD3wGnqUo/
mdev2eg8qYry0CmzhLscVEha7LN+7wmdftYSGkXOl9E08KIv9Uw2bHap4KEIRvJ6
298bdsuv1l4ljWo3VDkPCczXIaUi94/o1uNu0sbPRluBP8kjFLhwV8Ph/7mxsofu
V5tDYNOZ8VozA7aHAnJqXBCFZuW6igIH3XaROY9LOe2FD+qxtpqJHp7E+ZNsXXH5
SXMQzE2/X7eKHA7zvdG4z5GLgmz3GEvq3ZBJUGUe6DCnrxqBXUeLTqyummohGE57
LuqzrH9etFj/91NaZW9uwN42pxGJSkeZaJ5BDnzO3VYxqvvDnFs/2TFbJiZ065vI
NpA0PnuAkGjztaNAm6qXNTXsWo8bdi8TDgIwKxoo5KIlWsxnxKB82tfBovWd+JLa
xLlWNKbKgNb6xc9OmNCyhYkRXhqIsBrpLy6n2yppyYEiCBzPrqwJ2p3Y88yj3dsc
xiiu0ngFSroE2pLwCgi1rQY1x9VO4XQrrq6vBvPhi530+cdXdVTnnyBhaOMX4bWX
OOjI97xJmR/YbOvpgKw9v/na82t2fgKoqIs124iZ21QOiffOIfwwg2AKj8EaWJ9/
Era8sZMsq/g56PWYwT1J85cCMvbNwGOXjusCAwEAATANBgkqhkiG9w0BAQsFAAOC
AgEAAPSps9k8HEZrfBbzH5SHASNTUc7F81h2EYhaEuQrXoan39u3syCKYMSJDt0P
AVCYbIvXmlHeaBba9YXtZVZJpCM53b+x01297x1bXzRklYxnpuD2IauuQgiaJBXV
VlCjEy7GHBAmRQwV7llkoxx+qHsA0b+oHKvTRc9FJzk/Fx4Eb496gNMvUk47f+15
WBmWoQolqwbH1Ivhca7BpTvTSeNGkJ14cbmuhwryLstHJCPep+vp2TYS9bHFZIPY
g3APnWv1qShuA7Oj68GLpRVJmImgyPMK0i8LDGcLhrDiG+HYEr89MiRWDvBHun7O
Sp5y8zDnDZSrN+EtRp8DypHeB7R0Ukmn76NP6NsigWCPMlLaHNVsvVAwWwFNae9u
OxsoW07tJo3lZjDx+tFrUb7IRyn/9PLDIwEs4SvDP+O97o0JkCszwfRhkDnjY7Qj
XCgXoCBalIadgnGshbEjFLH0ZSwAoCIC/35X0Zo7cd1+Fz4Xq1tgnV5ec6ci/EHi
qsOmGfTqZz5HJlmfyxUOYicizV6SjNZSFWY07ebYRewx6ymdDFhqrbnJIEUGJYlU
5qDY1nemoqh9QRIhm1+73hAKHtmcaa1u72gj1QBtRD5eG3ToM3GHgdbUUPvFpenx
UPfzw/wbzITSqPoYr1/XIGq9e1muTqHbFNlGVByIW0ejeZQ=
-----END CERTIFICATE-----

View File

@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCX2LqF6IPaKc2z
SUq3teMrRDJcrIiDre5U7f1mktjmyPWehmdsWZGjkxgLgwwiuUPfAaepSj+Z16/Z
6DypivLQKbOEuxxUSFrss37vCZ1+1hIaRc6X0TTwoi/1TDZsdqngoQhG8nrb3xt2
y6/WXiWNajdUOQ8JzNchpSL3j+jW427Sxs9GW4E/ySMUuHBXw+H/ubGyh+5Xm0Ng
05nxWjMDtocCcmpcEIVm5bqKAgfddpE5j0s57YUP6rG2mokensT5k2xdcflJcxDM
Tb9ft4ocDvO90bjPkYuCbPcYS+rdkElQZR7oMKevGoFdR4tOrK6aaiEYTnsu6rOs
f160WP/3U1plb27A3janEYlKR5lonkEOfM7dVjGq+8OcWz/ZMVsmJnTrm8g2kDQ+
e4CQaPO1o0Cbqpc1Nexajxt2LxMOAjArGijkoiVazGfEoHza18Gi9Z34ktrEuVY0
psqA1vrFz06Y0LKFiRFeGoiwGukvLqfbKmnJgSIIHM+urAnandjzzKPd2xzGKK7S
eAVKugTakvAKCLWtBjXH1U7hdCuurq8G8+GLnfT5x1d1VOefIGFo4xfhtZc46Mj3
vEmZH9hs6+mArD2/+drza3Z+AqioizXbiJnbVA6J984h/DCDYAqPwRpYn38Stryx
kyyr+Dno9ZjBPUnzlwIy9s3AY5eO6wIDAQABAoICAAl8R6ICPpCcEXrUb5qn2Ujy
IJQdT+D0yJqGTD6QCDvsLDstGcWMcx1him5785IxJ9YpZBbnidO0euXJo8R3Ql8S
aGHK4ZtfFyeKAYNoJ0mSIx03LdiRAEOJ7QDf2LI017yJS9IkeW5ql0fTas0za2Jm
nCBZZrNNzvv7a/BEn8NHkfMQV5Fgy9ESvP3qcHhsNHly/aeD5ZEmbZcmWeb82orS
nxEnR0hUSnI0JpibsycT90DimAOh3LDXfd3E0Iz9VsA8urMpqtpDYzZ1tShed7TP
tyiaxS1+PUNIkEc9xRv2i2FQyhVmpmZdoCXfNi84nUtWhJYE6HkrmgRwnnLQp5rr
4stSQLo9X/5ML/bo2cj1eAiH+P6bAWaZhkMYPGkhBYwwgYqZF0Jn2Ay07BPF5gqq
rDY0mjF+Nm/NOqHfQjWHSj7KYYHm6qU1s5cAnDWLfznXGWZHpoI4UqAZtpAwGWEh
f/nS2LyepFW8RE6mbRDe+PkOM2455QdM8MuQc8drGInJ57A5e7y1LxOvFaUP+QtG
fqXok95XciMfttcR8nfYkUyC5QX+JmDZRf1U/DUxTJtbKXRKVG8WaFagkViiCniw
Q4RbrS4Utu6h+8HXdI9mK2pIHlJPp202b9gPry1j3kya8/jx5cTFWCcPj4IlgY51
ZC1+vXJUezwq60gWJrRtAoIBAQDLsR9M+ZdBC5sl5fiVGsznAvfEVv/Cf7pmeXqt
q4BZT7kB8zfdfRRMzmZiQIX0zTbBn7Q7kWULcl8HgPbMEGZorc24CFuCq8k0n2ji
0aJsTJlfr7kUfTZ5c3HN1fGhDrnGoCL1eElt/Es4hZTIDnbn3i+RoCqwhCqhC0Lo
L7f6iHrVIcKyoaz+Cye9QKA89PVHMNM0f41zFJTVpSzGfWefG4vNoeU6nxqwkWVJ
Guhkt1EsJUScHueAa7hUYlELDy99tlqOPLdG7/GG6LmE6pM7R1E6DishnL1VgOll
KL6fouoz8nTve59eCDQECNzXN8lxPvx6TdufabGJTSZ3iNjPAoIBAQC+10DFlO3Y
XLHfX9LWK4rdkghx9AoeCFIVZO2G29H4zyDQn7hAMISWeVCqdAKhPQMJmAXyhQcc
PLEUdKVwifbPNO/IxsWL8SOfbI28XoZLYkQLual6JPa10jemFsinVRTPLU7RA1/T
7t/mWvyaAHmODWUPKSs1ltzh5dNwvnSNou6h72TytwlMIk5h8Z8ZtMIY2v5Ca2Mx
I7Wi8/5RebpszBbuLYy0GBqK5oqQov1YH0WK3ae4leaPWlZ49nyNYpx5Bvngm+d8
4F+HvcxdpckRPfenl9nBW+0UHVXZORepQ+g0CNJxAQTomXy5VeEnrpiylrec4mE7
YD15GKDxeHclAoIBAQC6efcr4zwDwnpRIc6HwrdfriniR7tx9rlRY1lacS3ZldDL
5jx4uy/AeRzmSifYy56beHfqKNuWtSgmmHxYqZL3tlYlaxFL+bUERgQh0sWd3Cp1
nUwVY+RhH71KMdk6F18gZi5bB7pBxL2gizqARC27bH+kFDvAyd60/LhOFJGb/SJm
yLeUDctv8Ap7ihSoeaGrgRl6cGOwvWDcjpEFTn10/iTpJpPTLSGkEBfn64hh3Cml
/qBnRSuvN+7S4gdL1qUls7IHkbJAcR5u4dQlyRLtLAdC3QHDjdkP1CrX/4s0XlOH
JyrCrCxkS6JDJ1H2fg4fbQh7d5few0ZTapwSxuelAoIBAQCloou8MTuR6zoShsem
TRp0npen18ikMlKT8QpbXxTJ33Q6vjWbuMCLHXYOyyuxKBQn0oNu71ixgFetSmSA
3a2SP/7SAvtYL43cg8ossNncOciXaKgYtFZQjsjv5Zx351dtTYS30jbs6cn41AWj
4D+gNC3/T5sKIwtJIUE3/IWKnNyDPhUeuBBgvPHM/no5RHKmQgQmP7BS6QxwB1c2
/fD5FtXcGTqi4LzAy8F+MYizYEjqcuFsSuZWi3C8AisuR2VJge2Eravn7ZHcbbYf
nZF63o7BLgG4WgHV0HQ/WdcYeTDjYLjNjbXl77mZ3ljSKS6s6jUo/C6p6yjPnrmp
ewXlAoIBAHX9aLtoj06zU5dtrMVjhYip6Z5LEtHsXCvOmnAl91uUXKwtMze6Y7xc
4ZggVWBR9lFyq9bAAuIXkRuE1jbvEwl5sLZY5Hf6GrIUR2uD7ZYFbogBTgmWCX9S
vYRpofEwquX0AUt+a4nO09wPBX1BJMViAEZSAWGfCfzVz8rXYIgcmu471wrpQHv4
/whNA90JX0v6tuOASgR2dGorU//0ejGcm0bcYSuivKPxBfmVNeZNgIJXLM0ZHql1
qwLnWXS0e0kADWhp/GAWVo0xzefgD3mDmyG2Qbz8MLaDkwPU/L073ZZNT1AHFZZE
xfVijyydtPqa/JDgqcUFBxLtiKGTbbI=
-----END PRIVATE KEY-----

View File

@ -0,0 +1,27 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIEmzCCAoMCAQAwVjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUx
ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGY2xp
ZW50MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAl9i6heiD2inNs0lK
t7XjK0QyXKyIg63uVO39ZpLY5sj1noZnbFmRo5MYC4MMIrlD3wGnqUo/mdev2eg8
qYry0CmzhLscVEha7LN+7wmdftYSGkXOl9E08KIv9Uw2bHap4KEIRvJ6298bdsuv
1l4ljWo3VDkPCczXIaUi94/o1uNu0sbPRluBP8kjFLhwV8Ph/7mxsofuV5tDYNOZ
8VozA7aHAnJqXBCFZuW6igIH3XaROY9LOe2FD+qxtpqJHp7E+ZNsXXH5SXMQzE2/
X7eKHA7zvdG4z5GLgmz3GEvq3ZBJUGUe6DCnrxqBXUeLTqyummohGE57LuqzrH9e
tFj/91NaZW9uwN42pxGJSkeZaJ5BDnzO3VYxqvvDnFs/2TFbJiZ065vINpA0PnuA
kGjztaNAm6qXNTXsWo8bdi8TDgIwKxoo5KIlWsxnxKB82tfBovWd+JLaxLlWNKbK
gNb6xc9OmNCyhYkRXhqIsBrpLy6n2yppyYEiCBzPrqwJ2p3Y88yj3dscxiiu0ngF
SroE2pLwCgi1rQY1x9VO4XQrrq6vBvPhi530+cdXdVTnnyBhaOMX4bWXOOjI97xJ
mR/YbOvpgKw9v/na82t2fgKoqIs124iZ21QOiffOIfwwg2AKj8EaWJ9/Era8sZMs
q/g56PWYwT1J85cCMvbNwGOXjusCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQBr
ZWJGghyeCUESKu9v9dgQYD4Bq5YviYY7BAjdQsn5/Ti+tqkAKHEn/L+264QJMrTU
muTKg09o7n7r+g4sHmZhT2QEtwGFwaEGvz2zRd4po29WlGQAN5a8yzXIb8b9n+C4
Z5APeAzjBV9ooqbJCURlgeDP247XTcU8yIvvX0YpLmXYRQ5Na8TTfdBdlD3Y23mD
aPe+2YGkK1Go4iNMoyTEoJ0cj+89WMA9o+BiGcptALYMzKBM26f/vz9hb0J49RDV
MkccL8TKNDJQrLFCIcwpLRw08uZZdqJ/cy01hrtkMkp+SSBQO9qSmalDrTLgs4Pp
3ml88OIsQHMkdBUjUAw5FyAJoik4iXab/sLoMmMJ/lqH0in3dT4KeEh8Xe2iAfNU
97/6lAuZOrn2EPvqBJI/gfjwPj9wTgjlvLpmcjhBvfqkuM/dd7zTmwRoM1klRMC2
07x2YNoG7cGXjAYk1h5HDYVJqdphgQLTS1nRqmiQ8QnPryqlZcI7T/GgpOOulHce
lgMp6FFlGaQMysCbdYJQk4JMQXjwlj5ms/6rZSkgwmrv6WhCY7ImDj2E/7oE5XJd
BqXHFwOQdCSgQOVH+QgmxceYolI73cMNF3Frq8/IztAOtMyBF0bpRpI1U6a3ALf4
cK2Dkd/TLg8ZwsVBjPaRrtXlntO6tB+tn3suukCKcg==
-----END CERTIFICATE REQUEST-----

View File

@ -0,0 +1,13 @@
#!/bin/bash
# 1. Generate CA's private key and self-signed certificate
openssl req -newkey rsa:4096 -x509 -days 3650 -nodes -batch -keyout ca-key.pem -out ca-cert.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=ca"
# 2. Generate self-signed certificate and private key for using as wrong server certificate (because it's not signed by CA)
openssl req -newkey rsa:4096 -x509 -days 3650 -nodes -batch -keyout self-key.pem -out self-cert.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=server"
# 3. Generate client's private key and certificate signing request (CSR)
openssl req -newkey rsa:4096 -nodes -batch -keyout client-key.pem -out client-req.pem -subj "/C=RU/ST=Some-State/O=Internet Widgits Pty Ltd/CN=client"
# 4. Use CA's private key to sign client's CSR and get back the signed certificate
openssl x509 -req -days 3650 -in client-req.pem -CA ca-cert.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem

View File

@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFjTCCA3WgAwIBAgIUFpPVJytX13EWXwaLqg/rOTdu4UgwDQYJKoZIhvcNAQEL
BQAwVjELMAkGA1UEBhMCUlUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGc2VydmVyMB4XDTI0
MDcwMTA1MTEyNVoXDTM0MDYyOTA1MTEyNVowVjELMAkGA1UEBhMCUlUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGc2VydmVyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
AgEAsQO7f1Ar0ThZjh00KeuckyJDf/oPQ+67EX2Y82W4/dl8KmHksmIKp2+CJFwg
Y7/c1ti+DjxVXr4tZynzUeDYqujTrAidUMz0zrvjXNeeyoe2hgc2X6YFtVhyEtMQ
6q8VMaqomjuQDIyxUUdt2xHZWyxHPntDrzsmCCDH5Md27CZ4Z5jsSOdgnlrCkjd9
NtuIzxTlrExe3whIlTL0DURTvAVV/tPoVpVSvHTWCdziaCWm1hG5T6ZCN2W0pBgV
k+UNNtXr1ksr/Ma0Z84ShKjpN+soFOIZ3GDDGY8Xr/74jZo2c4025TpeIPcPBE0Z
b2K24l0UNnQbgm3ryI7NRrZ7psNnhvnWZru4w5Ykr8RKBf3n1AZOt8+dXHNvJQeU
/CUA6pKpc+sIxzAtWtS2ABqdOTJTXj2GyYNsz38dI2uzecUzT7oEtHWq9sr376EO
xMv1wXsUBli8INhPricfojH2/ACnRPysFglAKTx0IpcwqYoV5pGkk0iK1DNkQMv4
Kf7cD2hLWv9+Fgbrjfu/gNCL0bSJBjaTHngACePsLEQkmGomscDySYXa+PHeB8Ln
QCWsE6MkeDa5g3gUOPV/vC6qv0ZBvQj1p3sOeyIQATDBYX+KheCO+N0ZCCdlT5Sx
thKNDusultu6/lyjGum0c/5oGgoDKTIkibgljekKupBRAykCAwEAAaNTMFEwHQYD
VR0OBBYEFEnPwBjLDPzta3nBrUEH+Gz1nBJRMB8GA1UdIwQYMBaAFEnPwBjLDPzt
a3nBrUEH+Gz1nBJRMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIB
AIW829ODEBvBp+vUwt3hbsJdBx9DzwcBBJ+Mew95YVveiosU+3p1HaWCkc4srgOi
hDNbpvfbqBIKVAZzesJbuTEeQ+Aj4M1lcFed9tvidVnmqDgHJApOI61GihT9MDjC
dH6vhcsorE8E5F58FcMJ0QDynVOuCA/gHkS3ucMxBsNKGfYNe0rOGMw23CzHdnz7
UonOKhrhIQawI3ELLQxo2LWZEmMsluznE4MB+RGdn3k/UzOY9x4bauv4hrHjYvEm
x/zp2iWhZXvvTZ9u9xaHXEzbSU3WFCcXE/l9SDPM7ip9fabUrvaTJkmM190bD9sk
VcEFOAFBVxfx2fvmBIYqvSkVbcWdAT6x8k+MVNfq99/1aIdh+DTB3XUB7FkE/fnU
tXr/1sz0guwfNI/cHvhHisjmbgMhZWeeVV1YZHOJCsK7eq9rGq5Uul8nYrTdYKlN
u0OcrBXYBB6ny7ljCTahyPPmFZh/4Mgr0xROn2HyhTCpDHjnD1M5jT783fvWqu06
cpOjRIPYNtA/UQAWUZWrTuJTD6Kju9tDI8i1NtMWIXd7w0LDXsXeHpRoDrVvOIPT
P0ivUpt4dFHnwR8Dos4NI/j9mkdGCblUxDsd/7or8gjedOoWjWfGCNlYa72aq0gf
KjKGPISC3BGTNtwRTe+E7k38r+Y6YO/Tnu9RvBdDM3vF
-----END CERTIFICATE-----

View File

@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCxA7t/UCvROFmO
HTQp65yTIkN/+g9D7rsRfZjzZbj92XwqYeSyYgqnb4IkXCBjv9zW2L4OPFVevi1n
KfNR4Niq6NOsCJ1QzPTOu+Nc157Kh7aGBzZfpgW1WHIS0xDqrxUxqqiaO5AMjLFR
R23bEdlbLEc+e0OvOyYIIMfkx3bsJnhnmOxI52CeWsKSN30224jPFOWsTF7fCEiV
MvQNRFO8BVX+0+hWlVK8dNYJ3OJoJabWEblPpkI3ZbSkGBWT5Q021evWSyv8xrRn
zhKEqOk36ygU4hncYMMZjxev/viNmjZzjTblOl4g9w8ETRlvYrbiXRQ2dBuCbevI
js1Gtnumw2eG+dZmu7jDliSvxEoF/efUBk63z51cc28lB5T8JQDqkqlz6wjHMC1a
1LYAGp05MlNePYbJg2zPfx0ja7N5xTNPugS0dar2yvfvoQ7Ey/XBexQGWLwg2E+u
Jx+iMfb8AKdE/KwWCUApPHQilzCpihXmkaSTSIrUM2RAy/gp/twPaEta/34WBuuN
+7+A0IvRtIkGNpMeeAAJ4+wsRCSYaiaxwPJJhdr48d4HwudAJawToyR4NrmDeBQ4
9X+8Lqq/RkG9CPWnew57IhABMMFhf4qF4I743RkIJ2VPlLG2Eo0O6y6W27r+XKMa
6bRz/mgaCgMpMiSJuCWN6Qq6kFEDKQIDAQABAoICAA4BaZhifipeTFeo395hPKY3
k8z7EAd5SrvQ4C/XNefVNAnLwmM1dqUAzEnsMB6JqQN8FVgzAIjagI/PtedzOYbB
nTMuC08VUwP4bGjay+mNC1gCF2nsojmoTH5IzOcvVdH0MBrj6OXVS+z5EIFrdsh2
nGcegaB1DEagXAXcnQAvmwniA5qmaUfzjmmF58EAuwuyGj9OW5icoyw1jTHE3OCH
hjeqCeav3I1PVpAOi1YKL0UeFIzExCsglPEBfCjoz+mCy4IiYydA4Y1FChoM80bw
CDUpc+UH6D02y9JVA317XMuPuhmGS2RNp+vYiHv+Uide40vbq+w0nkSOvUHXC7lo
UXf26sZWHcQP8TxewjZFcajApk03lCAb/u4skt7OQxbDOcvrcdbCsSpZPS16s3CA
OWA9fS7WJDmFx/dhzm4xNaXpz9z5MMkUfnFqgceG3H1leamL66J+eOzlquQ8LdsJ
/wTvCi1SsR3JmqT+TpqlXTJci7A+I/b0Yt6a0s2HunQ5LA1Xomffhn/00IxUeqyi
4MKh7wnoCaRlwZVORgQfKnosSkbkixDabwr9BMnMryE+NdQI3JKCvchdB0idyk1z
igTgZYpaxZg9hwTlOITGYSsprN7tiwjNQG+Kivatn1NxnxhD8lpDoUqluR7mZs1e
CwOLrB+irCULr7tcRYtxAoIBAQDFV7E1Oz8j8NkvkLztIFJ0ej7zEyNp3HUJb4bd
SRvF2AL6sqSw0xVLK8HbrlfPo1Nex8nZUfY3v2DmPS4+S3mrtk+8XoRkvNn0VdL4
FqLfygYsURI8xVp4zTsAmR+7grPJbb/u3XG7CBY3T4XGHSbMmYAu7+W3A2t8Q+0U
fptju7Gc9gEACcSfap45CzEnFbabxNq3FJJ9V92N/xVQcOzNLxPxgn6tVhPeQnXV
0WHJQdxB/8DivgOObiTCB7on9tNbSHisTYtGwn6Kz0961ewYU7Xj4epj7A35nWhI
E10FDAf3T7cQa9ubSFZwOiSe0voAbiWbN3/M+zgkZDp2+wlnAoIBAQDloTtzgQaA
//VPNhB2d7iOgAl5cUSWqY0HLJobw1ETsdbkGRTgZrFXe4XlXwHB4S+zVEMmyR3p
HY0sYoAdpmd3Ggo4efJEx9IxkD6UZ71we02szvt4MdV5mwjHOAQfEJffktVkeqEJ
gdekSmaHTeryBf3ESpefq+OH1qaj/IjivySLI4c8fPcxNg0nMmDsdMzRaDOHDDa2
nUpRBeMFkwiiPJpALPPp/4be0d9mrahPd4zBoBieZwaP6Zhwksi0NaM4WKEfx4Fd
TzH0K7UfLDMJ13wuTsNPzjPlDA+R4E/O3qVZxtHzuqc7fCksE7inYZghIRSjQHmv
mmLPaFdaIWTvAoIBAQC50BQfVyXgpYsaamI5tFFvMxvKrJ6PsvDQbZ7DBW+NHhPQ
Eh1YEajfGthyDPrJm7OIbv30DmSToTOQFtNP/KkXsZXqe+SVJezjT1xSFJ5PsV2W
pM9TQVAqprODoKpsWGkIpLowI3pO7RJeChafsBs0UuSLTX8yj5nJa/vzbDLL+ZMH
g+r1Kj1lDXLlq1AgySUkDnsSH+C+/Ni0NxbOo0WU/Vr98C1u8LnMcS+aoIg+hDDs
Q/BOjzOyDKu3tqblslbYgKh/NJSLj96uvPzNYFfpoAiIBdQ5Oa7+9eOW2gSgF+/4
QarRX31r/PTGvzh0A5s6hHwMZnmDfJgkS+tw1HbJAoIBAQDfauQBGYROdrsieqPB
3/H9E64kLoRhYk4IsOiwInT8CIfgHlcljkT1rQU1ih6XmJVkkZNUZVFowovcp2mn
b4QzaW6QTHXfXduwxXtdwSGcExwcCra41yOw0ZrU2jeIUgiySpa80GNmcKJzAsCG
3NS+7f1dwoWLoR4byEkSPF6i3v6bMdIelCeeBcz+WhHbfjjZddzMhKGnQ5pPAr9D
aS95SiV6U0JuZWO/38rQiB2+U77rlOzfEbZcxxa+UNJeI+HOrMz3spC3TJp5yxzK
W6K87J+yHogBWcs8C2QKtohv6khO9RbJYM3dHoygif+bO2Vctmbx/4VlyXxh+E8l
LmCZAoIBAFR+dd7cW0pDkOsPsGZbWwDPNRwo2lxmqf2B5w9zoGNFoc4qV5k9tJfu
kPcwol6h2P4s2olYTgYC6hjPmE17U1TyP2DFeOw5IdxzNqy7dZndmaTs4qFLnDv3
HIbkX42p7v++EU9eKAhYKJzJpuianMZ+v5azV1wNS5TDuN2TFULyIi+1UEw0ONuJ
rA97idb2PrO2sT/eBj02uiOcPhZH6CHSMxDp7QG+S1AVZBgutTQQdeAsb6pE/iW0
u3IzGNxTYjTmF+u7iNdOPiaTFpdnltDviADESMJGH7PIk7r7PXMI9ID+LbBFGJ7Q
duW6PGuZ9JMBbxbNNvYayVgPeAeUsUI=
-----END PRIVATE KEY-----

View File

@ -0,0 +1,17 @@
<clickhouse>
<!-- Native interface with TLS.
You have to configure certificate to enable this interface.
See the openSSL section below.
-->
<tcp_port_secure>9440</tcp_port_secure>
<!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL replace="replace">
<server> <!-- Used for https server AND secure tcp port -->
<certificateFile>/etc/clickhouse-server/config.d/self-cert.pem</certificateFile>
<privateKeyFile>/etc/clickhouse-server/config.d/self-key.pem</privateKeyFile>
<caConfig>/etc/clickhouse-server/config.d/ca-cert.pem</caConfig>
<verificationMode>none</verificationMode>
</server>
</openSSL>
</clickhouse>

View File

@ -0,0 +1,92 @@
import pytest
from helpers.client import Client
from helpers.cluster import ClickHouseCluster
import os.path
from os import remove
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
MAX_RETRY = 5
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
"node",
main_configs=[
"configs/ssl_config.xml",
"certs/self-key.pem",
"certs/self-cert.pem",
"certs/ca-cert.pem",
],
)
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
config_default = """<clickhouse>
</clickhouse>"""
config_accept = """<clickhouse>
<accept-invalid-certificate>1</accept-invalid-certificate>
</clickhouse>"""
config_connection_accept = """<clickhouse>
<connections_credentials>
<connection>
<name>{ip_address}</name>
<accept-invalid-certificate>1</accept-invalid-certificate>
</connection>
</connections_credentials>
</clickhouse>"""
def execute_query_native(node, query, config):
config_path = f"{SCRIPT_DIR}/configs/client.xml"
file = open(config_path, "w")
file.write(config)
file.close()
client = Client(
node.ip_address,
9440,
command=cluster.client_bin_path,
secure=True,
config=config_path,
)
try:
result = client.query(query)
remove(config_path)
return result
except:
remove(config_path)
raise
def test_default():
with pytest.raises(Exception) as err:
execute_query_native(instance, "SELECT 1", config_default)
assert "certificate verify failed" in str(err.value)
def test_accept():
assert execute_query_native(instance, "SELECT 1", config_accept) == "1\n"
def test_connection_accept():
assert (
execute_query_native(
instance,
"SELECT 1",
config_connection_accept.format(ip_address=f"{instance.ip_address}"),
)
== "1\n"
)

View File

@ -2,6 +2,7 @@
<profiles>
<default>
<s3_retry_attempts>5</s3_retry_attempts>
<backup_restore_s3_retry_attempts>5</backup_restore_s3_retry_attempts>
</default>
</profiles>
<users>

View File

@ -834,6 +834,60 @@ def test_literal_escaping(started_cluster):
cursor.execute(f"DROP TABLE escaping")
def test_filter_pushdown(started_cluster):
cursor = started_cluster.postgres_conn.cursor()
cursor.execute("CREATE SCHEMA test_filter_pushdown")
cursor.execute(
"CREATE TABLE test_filter_pushdown.test_table (id integer, value integer)"
)
cursor.execute(
"INSERT INTO test_filter_pushdown.test_table VALUES (1, 10), (1, 110), (2, 0), (3, 33), (4, 0)"
)
node1.query(
"""
CREATE TABLE test_filter_pushdown_pg_table (id UInt32, value UInt32)
ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_table', 'postgres', 'mysecretpassword', 'test_filter_pushdown');
"""
)
node1.query(
"""
CREATE TABLE test_filter_pushdown_local_table (id UInt32, value UInt32) ENGINE Memory AS SELECT * FROM test_filter_pushdown_pg_table
"""
)
node1.query(
"CREATE TABLE ch_table (id UInt32, pg_id UInt32) ENGINE MergeTree ORDER BY id"
)
node1.query("INSERT INTO ch_table VALUES (1, 1), (2, 2), (3, 1), (4, 2), (5, 999)")
def compare_results(query, **kwargs):
result1 = node1.query(
query.format(pg_table="test_filter_pushdown_pg_table", **kwargs)
)
result2 = node1.query(
query.format(pg_table="test_filter_pushdown_local_table", **kwargs)
)
assert result1 == result2
for kind in ["INNER", "LEFT", "RIGHT", "FULL", "ANY LEFT", "SEMI RIGHT"]:
for value in [0, 10]:
compare_results(
"SELECT * FROM ch_table {kind} JOIN {pg_table} as p ON ch_table.pg_id = p.id WHERE value = {value} ORDER BY ALL",
kind=kind,
value=value,
)
compare_results(
"SELECT * FROM {pg_table} as p {kind} JOIN ch_table ON ch_table.pg_id = p.id WHERE value = {value} ORDER BY ALL",
kind=kind,
value=value,
)
cursor.execute("DROP SCHEMA test_filter_pushdown CASCADE")
if __name__ == "__main__":
cluster.start()
input("Cluster created, press any key to destroy...")

View File

@ -30,7 +30,7 @@ $CLICKHOUSE_CLIENT --max_block_size 1 --min_insert_block_size_rows 1 --min_inser
for _ in {1..60}; do
$CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS"
[[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 100)) ]] && break;
[[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 10)) ]] && break;
sleep 1
done

View File

@ -73,7 +73,7 @@ kill -TERM $PID_1 && kill -TERM $PID_2 && kill -TERM $PID_3 && kill -TERM $PID_4
wait
$CLICKHOUSE_CLIENT -q "SELECT '$CLICKHOUSE_DATABASE', 'threads finished'"
wait_for_queries_to_finish
wait_for_queries_to_finish 60
$CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA alter_table0"
$CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA alter_table1"

View File

@ -1,102 +1,42 @@
flat
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM dict_flat ORDER BY key ASC;
1 First
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
SELECT key, value FROM dict_flat ORDER BY key ASC;
1 First
2 Second
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
SELECT key, value FROM dict_flat ORDER BY key ASC;
1 First
2 SecondUpdated
3 Third
flat/custom
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM dict_flat_custom ORDER BY key ASC;
1 First
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
SELECT key, value FROM dict_flat_custom ORDER BY key ASC;
1 First
2 Second
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
SELECT key, value FROM dict_flat_custom ORDER BY key ASC;
1 First
2 SecondUpdated
3 Third
hashed
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM dict_hashed ORDER BY key ASC;
1 First
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
SELECT key, value FROM dict_hashed ORDER BY key ASC;
1 First
2 Second
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
SELECT key, value FROM dict_hashed ORDER BY key ASC;
1 First
2 SecondUpdated
3 Third
hashed/custom
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM dict_hashed_custom ORDER BY key ASC;
1 First
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
SELECT key, value FROM dict_hashed_custom ORDER BY key ASC;
1 First
2 Second
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
SELECT key, value FROM dict_hashed_custom ORDER BY key ASC;
1 First
2 SecondUpdated
3 Third
complex_key_hashed
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC;
1 First
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC;
1 First
2 Second
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC;
1 First
2 SecondUpdated
3 Third
complex_key_hashed/custom
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC;
1 First
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC;
1 First
2 Second
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC;
1 First
2 SecondUpdated
3 Third

View File

@ -35,7 +35,7 @@ for layout in "${layouts[@]}"; do
echo "$layout"
fi
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT --multiquery "
TRUNCATE TABLE table_for_update_field_dictionary;
CREATE DICTIONARY $dictionary_name
@ -49,24 +49,31 @@ for layout in "${layouts[@]}"; do
LAYOUT($layout())
LIFETIME(1);
-- { echoOn }
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
SELECT key, value FROM $dictionary_name ORDER BY key ASC;
INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());"
INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
while true
do
$CLICKHOUSE_CLIENT --query "SELECT key, value FROM $dictionary_name ORDER BY key ASC" | grep -A10 -B10 'First' && break;
sleep .1;
done
SELECT key, value FROM $dictionary_name ORDER BY key ASC;
$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());"
INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
while true
do
$CLICKHOUSE_CLIENT --query "SELECT key, value FROM $dictionary_name ORDER BY key ASC" | grep -A10 -B10 'Second' && break;
sleep .1;
done
SELECT key, value FROM $dictionary_name ORDER BY key ASC;
-- { echoOff }
$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()), (3, 'Third', now())"
DROP DICTIONARY $dictionary_name;
"
while true
do
$CLICKHOUSE_CLIENT --query "SELECT key, value FROM $dictionary_name ORDER BY key ASC" | grep -A10 -B10 'SecondUpdated' && break;
sleep .1;
done
$CLICKHOUSE_CLIENT --query "DROP DICTIONARY $dictionary_name"
done
done

View File

@ -264,3 +264,44 @@ SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10;
9 \N 9
\N 0 20
\N 1 25
CREATE TABLE test
ENGINE = ReplacingMergeTree
PRIMARY KEY id
AS SELECT number AS id FROM numbers(100);
SELECT id
FROM test
GROUP BY id
WITH CUBE
HAVING id IN (
SELECT id
FROM test
)
FORMAT `NUll`
SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
SELECT id
FROM test
FINAL
GROUP BY id
WITH CUBE
HAVING id IN (
SELECT DISTINCT id
FROM test
FINAL
)
FORMAT `NUll`
SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
SELECT id
FROM test
FINAL
GROUP BY
GROUPING SETS ((id))
ORDER BY
id IN (
SELECT DISTINCT id
FROM test
FINAL
LIMIT 4
) ASC
LIMIT 256 BY id
FORMAT `NUll`
SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls=true;

View File

@ -83,3 +83,48 @@ GROUP BY
)
ORDER BY 1, tuple(val)
SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10;
CREATE TABLE test
ENGINE = ReplacingMergeTree
PRIMARY KEY id
AS SELECT number AS id FROM numbers(100);
SELECT id
FROM test
GROUP BY id
WITH CUBE
HAVING id IN (
SELECT id
FROM test
)
FORMAT `NUll`
SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
SELECT id
FROM test
FINAL
GROUP BY id
WITH CUBE
HAVING id IN (
SELECT DISTINCT id
FROM test
FINAL
)
FORMAT `NUll`
SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
SELECT id
FROM test
FINAL
GROUP BY
GROUPING SETS ((id))
ORDER BY
id IN (
SELECT DISTINCT id
FROM test
FINAL
LIMIT 4
) ASC
LIMIT 256 BY id
FORMAT `NUll`
SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls=true;

View File

@ -1,3 +0,0 @@
select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;

View File

@ -6,6 +6,9 @@ INSERT INTO 02918_parallel_replicas SELECT toString(number), number % 4 FROM num
SET prefer_localhost_replica=0;
--- if we try to query an unavailable replica, the connection will be retried
--- but a warning log message will be printed out
SET send_logs_level='error';
-- { echoOn }
SELECT y, count()
FROM cluster(test_cluster_1_shard_3_replicas_1_unavailable, currentDatabase(), 02918_parallel_replicas)
@ -26,5 +29,6 @@ GROUP BY y
ORDER BY y
SETTINGS max_parallel_replicas=3, parallel_replicas_custom_key='cityHash64(y)', parallel_replicas_custom_key_filter_type='default';
-- { echoOff }
SET send_logs_level='warning';
DROP TABLE 02918_parallel_replicas;

View File

@ -3,7 +3,7 @@
1000
1
1000
2
1
1000000
1000
0 999001

View File

@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
# Must set both max_threads and max_insert_threads to 2 to make sure there are only two sinks
${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS max_threads = 2, max_insert_threads = 2, max_block_size = 10000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, insert_deduplication_token = '', optimize_trivial_insert_select = 1;"
${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be 2 because default bulk sink size is ~1M rows / SST file
${CLICKHOUSE_CLIENT} --query "SELECT sum(value) IN (1, 2) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be not more than 2 because default bulk sink size is ~1M rows / SST file.
${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
# Testing insert with duplicated keys

View File

@ -30,3 +30,46 @@ SELECT
min(dummy)
FROM remote('127.0.0.{2,3}', system.one)
GROUP BY y;
CREATE TABLE ttt (hr DateTime, ts DateTime) ENGINE=Memory
as select '2000-01-01' d, d;
SELECT
count(),
now() AS c1
FROM remote('127.0.0.{1,2}', currentDatabase(), ttt)
GROUP BY c1 FORMAT Null;
SELECT
count(),
now() AS c1
FROM remote('127.0.0.{3,2}', currentDatabase(), ttt)
GROUP BY c1 FORMAT Null;
SELECT
count(),
now() AS c1
FROM remote('127.0.0.{1,2}', currentDatabase(), ttt)
GROUP BY c1 + 1 FORMAT Null;
SELECT
count(),
now() AS c1
FROM remote('127.0.0.{3,2}', currentDatabase(), ttt)
GROUP BY c1 + 1 FORMAT Null;
SELECT
count(),
tuple(nullIf(toDateTime(formatDateTime(hr, '%F %T', 'America/Los_Angeles'), 'America/Los_Angeles'), toDateTime(0))) as c1,
defaultValueOfArgumentType(toTimeZone(ts, 'America/Los_Angeles')) as c2,
formatDateTime(hr, '%F %T', 'America/Los_Angeles') as c3
FROM remote('127.0.0.{1,2}', currentDatabase(), ttt)
GROUP BY c1, c2, c3 FORMAT Null;
SELECT
count(),
tuple(nullIf(toDateTime(formatDateTime(hr, '%F %T', 'America/Los_Angeles'), 'America/Los_Angeles'), toDateTime(0))) as c1,
defaultValueOfArgumentType(toTimeZone(ts, 'America/Los_Angeles')) as c2,
formatDateTime(hr, '%F %T', 'America/Los_Angeles') as c3
FROM remote('127.0.0.{3,2}', currentDatabase(), ttt)
GROUP BY c1, c2, c3 FORMAT Null;

View File

@ -7,12 +7,14 @@ SETTINGS max_bytes_to_merge_at_max_space_in_pool = 80000, exclude_deleted_rows_f
INSERT INTO lwd_merge SELECT number FROM numbers(10000);
INSERT INTO lwd_merge SELECT number FROM numbers(10000, 10000);
OPTIMIZE TABLE lwd_merge;
SET optimize_throw_if_noop = 1;
OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1;
DELETE FROM lwd_merge WHERE id % 10 > 0;
OPTIMIZE TABLE lwd_merge;
OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1;
ALTER TABLE lwd_merge MODIFY SETTING exclude_deleted_rows_for_part_size_in_merge = 1;

View File

@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_analyzer=1"
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --use_variant_as_common_type=1"
function run()
{

View File

@ -1,5 +1,11 @@
Negative tests
The first bit must be zero
1
Test disabling of common subexpression elimination via first parameter
0
0
1
Test user-provided machine ID
1
Generated Snowflake IDs are unique
100

View File

@ -1,13 +1,22 @@
-- Test SQL function 'generateSnowflakeID'
SELECT bitAnd(bitShiftRight(toUInt64(generateSnowflakeID()), 63), 1) = 0; -- check first bit is zero
SELECT 'Negative tests';
SELECT generateSnowflakeID(1, 2, 3); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT generateSnowflakeID(1, 'not_an_int'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT generateSnowflakeID(1, materialize(2)); -- { serverError ILLEGAL_COLUMN }
SELECT 'The first bit must be zero';
SELECT bitAnd(bitShiftRight(generateSnowflakeID(), 63), 1) = 0;
SELECT 'Test disabling of common subexpression elimination via first parameter';
SELECT generateSnowflakeID(1) = generateSnowflakeID(2); -- disabled common subexpression elimination --> lhs != rhs
SELECT generateSnowflakeID() = generateSnowflakeID(1); -- same as ^^
SELECT generateSnowflakeID(1) = generateSnowflakeID(1); -- enabled common subexpression elimination
SELECT generateSnowflakeID(1) = generateSnowflakeID(1); -- with common subexpression elimination
SELECT generateSnowflakeID(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT 'Test user-provided machine ID';
SELECT bitAnd(bitShiftRight(generateSnowflakeID(1, 123), 12), 1024 - 1) = 123; -- the machine id is actually set in the generated snowflake ID (1024 = 2^10)
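The check above follows the Snowflake ID layout: the top bit is zero, the next 41 bits are a timestamp, bits 12..21 carry the 10-bit machine ID, and the low 12 bits are a per-machine counter (the counter naming is an assumption; only the machine ID is asserted by the test). The same bit arithmetic in Python, with a made-up sample value:

machine_id = 123
sample_id = (123456789 << 22) | (machine_id << 12) | 7    # fabricated timestamp and counter
assert (sample_id >> 63) & 1 == 0                         # first bit is zero
assert (sample_id >> 12) & (1024 - 1) == machine_id       # 10-bit machine ID, 1024 = 2^10
assert sample_id & 0xFFF == 7                             # low 12 bits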
SELECT 'Generated Snowflake IDs are unique';
SELECT count(*)
FROM
(

View File

@ -0,0 +1,17 @@
QUERY id: 0
PROJECTION COLUMNS
d.String Nullable(String)
PROJECTION
LIST id: 1, nodes: 1
COLUMN id: 2, column_name: d.String, result_type: Nullable(String), source_id: 3
JOIN TREE
TABLE id: 3, alias: __table1, table_name: default.test_dynamic
SETTINGS allow_experimental_analyzer=1
foo
\N
\N
foo
\N
\N
6
6

Some files were not shown because too many files have changed in this diff.