Merge branch 'master' into test/full_join_null_pk

Denny Crane 2023-07-13 12:38:10 -03:00 committed by GitHub
commit 6131f0d83d
29 changed files with 680 additions and 48 deletions


@ -3903,6 +3903,216 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan0:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=0
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan1:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan2:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=2
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan3:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=3
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan4:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=4
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAnalyzerAsan5:
needs: [BuilderDebAsan]
runs-on: [self-hosted, stress-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_asan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (asan, analyzer)
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
RUN_BY_HASH_NUM=5
RUN_BY_HASH_TOTAL=6
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Integration test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
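
The six analyzer jobs above differ only in RUN_BY_HASH_NUM, splitting one check across RUN_BY_HASH_TOTAL=6 shards. Below is a minimal sketch of how such hash-based sharding can work; the helper name and the choice of hash are illustrative assumptions, and the real selection logic lives in the ClickHouse CI scripts.

import zlib

def belongs_to_shard(test_name: str, shard_num: int, shard_total: int) -> bool:
    # A stable hash, so every runner agrees on the same partitioning.
    return zlib.crc32(test_name.encode()) % shard_total == shard_num

tests = ["test_row_policy/test.py::test_smoke", "test_quota/test.py::test_exceed_quota"]
my_shard = [t for t in tests if belongs_to_shard(t, shard_num=0, shard_total=6)]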


@ -97,8 +97,8 @@ docker run -d \
You may also want to mount:
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustmenets
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustmenets
* `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
* `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
* `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities


@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.4.6.25-stable (a06848b1770) FIXME as compared to v23.4.5.22-stable (0ced5d6a8da)
#### Improvement
* Backported in [#51234](https://github.com/ClickHouse/ClickHouse/issues/51234): Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@ -1404,10 +1404,9 @@ void Client::readArguments(
else if (arg == "--password" && ((arg_num + 1) >= argc || std::string_view(argv[arg_num + 1]).starts_with('-')))
{
common_arguments.emplace_back(arg);
/// No password was provided by user. Add '\n' as implicit password,
/// which encodes that client should ask user for the password.
/// '\n' is used because there is hardly a chance that a user would use '\n' as a password.
common_arguments.emplace_back("\n");
/// if the value of --password is omitted, the password will be asked before
/// connection start
common_arguments.emplace_back(ConnectionParameters::ASK_PASSWORD);
}
else
common_arguments.emplace_back(arg);


@ -30,7 +30,7 @@ bool parseKeeperPath(IParser::Pos & pos, Expected & expected, String & path)
return parseIdentifierOrStringLiteral(pos, expected, path);
String result;
while (pos->type == TokenType::BareWord || pos->type == TokenType::Slash || pos->type == TokenType::Dot)
while (pos->type != TokenType::Whitespace && pos->type != TokenType::EndOfStream)
{
result.append(pos->begin, pos->end);
++pos;
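
An illustrative Python rendering of the loosened loop above (an assumption-level sketch; the real code iterates C++ lexer tokens): the parser now consumes every token up to whitespace or end of stream instead of only BareWord, Slash and Dot tokens, so Keeper paths containing other characters parse as a single path.

def parse_keeper_path(tokens):
    # tokens: a sequence of (token_type, text) pairs produced by a lexer.
    result = ""
    for token_type, text in tokens:
        if token_type in ("Whitespace", "EndOfStream"):
            break
        result += text
    return result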


@ -46,8 +46,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
else
{
password = config.getString("password", "");
/// if the value of --password is omitted, the password will be set implicitly to "\n"
if (password == "\n")
if (password == ASK_PASSWORD)
password_prompt = true;
}
if (password_prompt)


@ -28,6 +28,10 @@ struct ConnectionParameters
ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, std::optional<UInt16> port);
static UInt16 getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
/// Ask to enter the user's password if the password option contains this value.
/// "\n" is used because there is hardly a chance that a user would use '\n' as a password.
static constexpr std::string_view ASK_PASSWORD = "\n";
};
}
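
A minimal sketch of the sentinel pattern introduced above, rendered in Python for illustration (the constant and its rationale come from the diff; the helper function is hypothetical):

ASK_PASSWORD = "\n"

def needs_password_prompt(password: str) -> bool:
    # The argument parser stores ASK_PASSWORD when the --password value is
    # omitted, which tells the client to prompt the user interactively.
    return password == ASK_PASSWORD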


@ -1,6 +1,7 @@
#include "ConnectionString.h"
#include <Common/Exception.h>
#include <Client/ConnectionParameters.h>
#include <Poco/Exception.h>
#include <Poco/URI.h>
@ -201,8 +202,8 @@ bool tryParseConnectionString(
else
{
// in case of user_info == 'user:', ':' is specified, but password is empty
// then add password argument "\n" which means: Ask user for a password.
common_arguments.push_back("\n");
// then ask user for a password.
common_arguments.emplace_back(ConnectionParameters::ASK_PASSWORD);
}
}
else


@ -188,7 +188,7 @@ public:
/// Get the result in some form. This can only be done once!
void get128(char * out)
ALWAYS_INLINE void get128(char * out)
{
finalize();
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__


@ -137,25 +137,9 @@ void SystemLogBase<LogElement>::add(const LogElement & element)
template <typename LogElement>
void SystemLogBase<LogElement>::flush(bool force)
{
uint64_t this_thread_requested_offset;
{
std::lock_guard lock(mutex);
if (is_shutdown)
return;
this_thread_requested_offset = queue_front_index + queue.size();
// Publish our flush request, taking care not to overwrite the requests
// made by other threads.
is_force_prepare_tables |= force;
requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset);
flush_event.notify_all();
}
LOG_DEBUG(log, "Requested flush up to offset {}", this_thread_requested_offset);
uint64_t this_thread_requested_offset = notifyFlushImpl(force);
if (this_thread_requested_offset == uint64_t(-1))
return;
// Use an arbitrary timeout to avoid endless waiting. 60s proved to be
// too fast for our parallel functional tests, probably because they
@ -174,6 +158,33 @@ void SystemLogBase<LogElement>::flush(bool force)
}
}
template <typename LogElement>
void SystemLogBase<LogElement>::notifyFlush(bool force) { notifyFlushImpl(force); }
template <typename LogElement>
uint64_t SystemLogBase<LogElement>::notifyFlushImpl(bool force)
{
uint64_t this_thread_requested_offset;
{
std::lock_guard lock(mutex);
if (is_shutdown)
return uint64_t(-1);
this_thread_requested_offset = queue_front_index + queue.size();
// Publish our flush request, taking care not to overwrite the requests
// made by other threads.
is_force_prepare_tables |= force;
requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset);
flush_event.notify_all();
}
LOG_DEBUG(log, "Requested flush up to offset {}", this_thread_requested_offset);
return this_thread_requested_offset;
}
#define INSTANTIATE_SYSTEM_LOG_BASE(ELEMENT) template class SystemLogBase<ELEMENT>;
SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE)


@ -87,9 +87,12 @@ public:
*/
void add(const LogElement & element);
/// Flush data in the buffer to disk
/// Flush data in the buffer to disk. Block the thread until the data is stored on disk.
void flush(bool force) override;
/// Non-blocking flush of the data in the buffer to disk.
void notifyFlush(bool force);
String getName() const override { return LogElement::name(); }
static const char * getDefaultOrderBy() { return "event_date, event_time"; }
@ -112,6 +115,10 @@ protected:
uint64_t flushed_up_to = 0;
// Logged overflow message at this queue front index
uint64_t logged_queue_full_at_index = -1;
private:
uint64_t notifyFlushImpl(bool force);
};
}
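
The refactoring above separates publishing a flush request (notifyFlushImpl) from waiting for its completion (flush). A hypothetical Python rendering of the same logic, with the background writer thread that advances flushed_up_to omitted:

import threading

class SystemLogSketch:
    def __init__(self):
        self.cv = threading.Condition()
        self.queue = []
        self.queue_front_index = 0
        self.requested_flush_up_to = 0
        self.flushed_up_to = 0
        self.is_shutdown = False

    def notify_flush_impl(self) -> int:
        # Publish the flush request, taking care not to overwrite the
        # requests made by other threads.
        with self.cv:
            if self.is_shutdown:
                return -1
            offset = self.queue_front_index + len(self.queue)
            self.requested_flush_up_to = max(self.requested_flush_up_to, offset)
            self.cv.notify_all()
        return offset

    def notify_flush(self):
        # Non-blocking: safe to call where waiting is not an option,
        # e.g. from crash-log collection.
        self.notify_flush_impl()

    def flush(self):
        # Blocking: wait until the writer has stored our data, with a
        # timeout to avoid endless waiting.
        offset = self.notify_flush_impl()
        if offset == -1:
            return
        with self.cv:
            self.cv.wait_for(lambda: self.flushed_up_to >= offset, timeout=60)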


@ -173,6 +173,9 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
/// This coarse method of synchronization is perfectly ok for fatal signals.
sleepForSeconds(1);
}
/// Wait for all log flush operations
sleepForSeconds(3);
call_default_signal_handler(sig);
}


@ -122,6 +122,14 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
auto client = getClient(config, config_prefix, context, *settings);
if (type == "s3_plain")
{
/// send_metadata changes the filenames (includes revision), while
/// s3_plain does not care about this and expects that the file name
/// will not be changed.
///
/// And besides, send_metadata does not make sense for s3_plain.
if (config.getBool(config_prefix + ".send_metadata", false))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "s3_plain does not supports send_metadata");
s3_storage = std::make_shared<S3PlainObjectStorage>(std::move(client), std::move(settings), uri.version_id, s3_capabilities, uri.bucket, uri.endpoint);
metadata_storage = std::make_shared<MetadataStorageFromPlainObjectStorage>(s3_storage, uri.key);
}


@ -149,6 +149,9 @@ Block NativeReader::read()
rows = index_block_it->num_rows;
}
if (columns == 0 && !header && rows != 0)
throw Exception(ErrorCodes::INCORRECT_DATA, "Zero columns but {} rows in Native format.", rows);
for (size_t i = 0; i < columns; ++i)
{
if (use_index)
@ -290,6 +293,9 @@ Block NativeReader::read()
res.swap(tmp_res);
}
if (res.rows() != rows)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Row count mismatch after desirialization, got: {}, expected: {}", res.rows(), rows);
return res;
}


@ -943,7 +943,16 @@ public:
{
if constexpr (std::is_same_v<DataType, DataTypeDateTime64>)
{
const auto c = DecimalUtils::split(vec[i], scale);
auto c = DecimalUtils::split(vec[i], scale);
// -1.123 splits to -1 / 0.123
if (vec[i].value < 0 && c.fractional)
{
using F = typename DataType::FieldType;
c.fractional = DecimalUtils::scaleMultiplier<F>(scale) + (c.whole ? F(-1) : F(1)) * c.fractional;
--c.whole;
}
for (auto & instruction : instructions)
instruction.perform(pos, static_cast<Int64>(c.whole), c.fractional, scale, time_zone);
}
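
A worked example of the normalization above, as a Python sketch. DecimalUtils::split keeps the sign on the fractional part only when the whole part is zero, which is why the correction term flips sign with c.whole:

def normalize_negative(whole: int, fractional: int, scale: int):
    # Mirrors the C++ branch; called only when the original value is negative.
    if fractional:
        fractional = 10 ** scale + (-1 if whole else 1) * fractional
        whole -= 1
    return whole, fractional

# -0.001 s: split gives (0, -1);   normalized to (-1, 999) => 1969-12-31 23:59:59.999
# -1.123 s: split gives (-1, 123); normalized to (-2, 877) => 1969-12-31 23:59:58.877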


@ -84,5 +84,8 @@ void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, co
CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
crash_log_owned->add(element);
/// Notify savingThreadFunction to start flushing the crash log.
/// The crash log is stored in parallel with the signal processing thread.
crash_log_owned->notifyFlush(true);
}
}


@ -115,7 +115,7 @@ CompletedPipelineExecutor::~CompletedPipelineExecutor()
}
catch (...)
{
tryLogCurrentException("PullingAsyncPipelineExecutor");
tryLogCurrentException("CompletedPipelineExecutor");
}
}


@ -0,0 +1,198 @@
test_access_for_functions/test.py::test_access_rights_for_function
test_backward_compatibility/test_normalized_count_comparison.py::test_select_aggregate_alias_column
test_concurrent_backups_s3/test.py::test_concurrent_backups
test_distributed_ddl/test.py::test_default_database[configs]
test_distributed_ddl/test.py::test_default_database[configs_secure]
test_distributed_ddl/test.py::test_on_server_fail[configs]
test_distributed_ddl/test.py::test_on_server_fail[configs_secure]
test_distributed_insert_backward_compatibility/test.py::test_distributed_in_tuple
test_distributed_inter_server_secret/test.py::test_per_user_inline_settings_secure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_per_user_inline_settings_secure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_per_user_inline_settings_secure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_per_user_protocol_settings_secure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_per_user_protocol_settings_secure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_per_user_protocol_settings_secure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_insecure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_user_insecure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_user_insecure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster[default-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster[nopass-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_from_backward[default-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_from_backward[nopass-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_from_backward[pass-foo]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_with_backward[default-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_with_backward[nopass-]
test_distributed_inter_server_secret/test.py::test_user_secure_cluster_with_backward[pass-foo]
test_distributed_load_balancing/test.py::test_distributed_replica_max_ignored_errors
test_distributed_load_balancing/test.py::test_load_balancing_default
test_distributed_load_balancing/test.py::test_load_balancing_priority_round_robin[dist_priority]
test_distributed_load_balancing/test.py::test_load_balancing_priority_round_robin[dist_priority_negative]
test_distributed_load_balancing/test.py::test_load_balancing_round_robin
test_backward_compatibility/test.py::test_backward_compatability1
test_backward_compatibility/test_aggregate_fixed_key.py::test_two_level_merge
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_avg
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact[1000]
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact[500000]
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact_variadic[1000]
test_backward_compatibility/test_aggregate_function_state.py::test_backward_compatability_for_uniq_exact_variadic[500000]
test_backward_compatibility/test_ip_types_binary_compatibility.py::test_ip_types_binary_compatibility
test_backward_compatibility/test_select_aggregate_alias_column.py::test_select_aggregate_alias_column
test_backward_compatibility/test_short_strings_aggregation.py::test_backward_compatability
test_mask_sensitive_info/test.py::test_encryption_functions
test_merge_table_over_distributed/test.py::test_global_in
test_merge_table_over_distributed/test.py::test_select_table_name_from_merge_over_distributed
test_mutations_with_merge_tree/test.py::test_mutations_with_merge_background_task
test_passing_max_partitions_to_read_remotely/test.py::test_default_database_on_cluster
test_row_policy/test.py::test_change_of_users_xml_changes_row_policies
test_row_policy/test.py::test_change_of_users_xml_changes_row_policies
test_row_policy/test.py::test_dcl_introspection
test_row_policy/test.py::test_dcl_introspection
test_row_policy/test.py::test_dcl_management
test_row_policy/test.py::test_dcl_management
test_row_policy/test.py::test_dcl_users_with_policies_from_users_xml
test_row_policy/test.py::test_dcl_users_with_policies_from_users_xml
test_row_policy/test.py::test_grant_create_row_policy
test_row_policy/test.py::test_grant_create_row_policy
test_row_policy/test.py::test_introspection
test_row_policy/test.py::test_introspection
test_row_policy/test.py::test_join
test_row_policy/test.py::test_join
test_row_policy/test.py::test_miscellaneous_engines
test_row_policy/test.py::test_miscellaneous_engines
test_row_policy/test.py::test_policy_from_users_xml_affects_only_user_assigned
test_row_policy/test.py::test_policy_from_users_xml_affects_only_user_assigned
test_row_policy/test.py::test_policy_on_distributed_table_via_role
test_row_policy/test.py::test_policy_on_distributed_table_via_role
test_row_policy/test.py::test_reload_users_xml_by_timer
test_row_policy/test.py::test_reload_users_xml_by_timer
test_row_policy/test.py::test_row_policy_filter_with_subquery
test_row_policy/test.py::test_row_policy_filter_with_subquery
test_row_policy/test.py::test_smoke
test_row_policy/test.py::test_smoke
test_row_policy/test.py::test_some_users_without_policies
test_row_policy/test.py::test_some_users_without_policies
test_row_policy/test.py::test_tags_with_db_and_table_names
test_row_policy/test.py::test_tags_with_db_and_table_names
test_row_policy/test.py::test_throwif_error_in_prewhere_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_prewhere_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_where_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_where_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_in_prewhere_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_prewhere_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_where_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_where_doesnt_expose_restricted_data
test_row_policy/test.py::test_users_xml_is_readonly
test_row_policy/test.py::test_users_xml_is_readonly
test_row_policy/test.py::test_with_prewhere
test_row_policy/test.py::test_with_prewhere
test_settings_constraints_distributed/test.py::test_select_clamps_settings
test_backward_compatibility/test_cte_distributed.py::test_cte_distributed
test_compression_codec_read/test.py::test_default_codec_read
test_dictionaries_update_and_reload/test.py::test_reload_after_fail_in_cache_dictionary
test_distributed_type_object/test.py::test_distributed_type_object
test_materialized_mysql_database/test.py::test_select_without_columns_5_7
test_materialized_mysql_database/test.py::test_select_without_columns_8_0
test_shard_level_const_function/test.py::test_remote
test_storage_postgresql/test.py::test_postgres_select_insert
test_storage_rabbitmq/test.py::test_rabbitmq_materialized_view
test_system_merges/test.py::test_mutation_simple[]
test_system_merges/test.py::test_mutation_simple[replicated]
test_backward_compatibility/test_insert_profile_events.py::test_new_client_compatible
test_backward_compatibility/test_insert_profile_events.py::test_old_client_compatible
test_backward_compatibility/test_vertical_merges_from_compact_parts.py::test_vertical_merges_from_compact_parts
test_disk_over_web_server/test.py::test_cache[node2]
test_disk_over_web_server/test.py::test_incorrect_usage
test_disk_over_web_server/test.py::test_replicated_database
test_disk_over_web_server/test.py::test_unavailable_server
test_disk_over_web_server/test.py::test_usage[node2]
test_distributed_backward_compatability/test.py::test_distributed_in_tuple
test_executable_table_function/test.py::test_executable_function_input_python
test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py::test_groupBitmapAndState_on_different_version_nodes
test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py::test_groupBitmapAndState_on_distributed_table
test_settings_profile/test.py::test_show_profiles
test_sql_user_defined_functions_on_cluster/test.py::test_sql_user_defined_functions_on_cluster
test_backward_compatibility/test_functions.py::test_aggregate_states
test_backward_compatibility/test_functions.py::test_string_functions
test_default_compression_codec/test.py::test_default_codec_for_compact_parts
test_default_compression_codec/test.py::test_default_codec_multiple
test_default_compression_codec/test.py::test_default_codec_single
test_default_compression_codec/test.py::test_default_codec_version_update
test_postgresql_protocol/test.py::test_python_client
test_quota/test.py::test_add_remove_interval
test_quota/test.py::test_add_remove_quota
test_quota/test.py::test_consumption_of_show_clusters
test_quota/test.py::test_consumption_of_show_databases
test_quota/test.py::test_consumption_of_show_privileges
test_quota/test.py::test_consumption_of_show_processlist
test_quota/test.py::test_consumption_of_show_tables
test_quota/test.py::test_dcl_introspection
test_quota/test.py::test_dcl_management
test_quota/test.py::test_exceed_quota
test_quota/test.py::test_query_inserts
test_quota/test.py::test_quota_from_users_xml
test_quota/test.py::test_reload_users_xml_by_timer
test_quota/test.py::test_simpliest_quota
test_quota/test.py::test_tracking_quota
test_quota/test.py::test_users_xml_is_readonly
test_replicated_merge_tree_compatibility/test.py::test_replicated_merge_tree_defaults_compatibility
test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_wide-Wide]
test_old_versions/test.py::test_client_is_older_than_server
test_polymorphic_parts/test.py::test_polymorphic_parts_non_adaptive
test_old_versions/test.py::test_server_is_older_than_client
test_polymorphic_parts/test.py::test_compact_parts_only
test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_compact-Compact]
test_polymorphic_parts/test.py::test_polymorphic_parts_index
test_old_versions/test.py::test_distributed_query_initiator_is_older_than_shard
test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node1-second_node1]
test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node0-second_node0]
test_ttl_replicated/test.py::test_ttl_table[DELETE]
test_ttl_replicated/test.py::test_ttl_columns
test_ttl_replicated/test.py::test_ttl_compatibility[node_left2-node_right2-2]
test_ttl_replicated/test.py::test_ttl_table[]
test_version_update/test.py::test_aggregate_function_versioning_server_upgrade
test_version_update/test.py::test_aggregate_function_versioning_fetch_data_from_old_to_new_server
test_ttl_replicated/test.py::test_ttl_double_delete_rule_returns_error
test_ttl_replicated/test.py::test_ttl_alter_delete[test_ttl_alter_delete]
test_ttl_replicated/test.py::test_ttl_alter_delete[test_ttl_alter_delete_replicated]
test_ttl_replicated/test.py::test_ttl_compatibility[node_left0-node_right0-0]
test_version_update/test.py::test_modulo_partition_key_issue_23508
test_ttl_replicated/test.py::test_ttl_many_columns
test_ttl_replicated/test.py::test_modify_column_ttl
test_ttl_replicated/test.py::test_merge_with_ttl_timeout
test_ttl_replicated/test.py::test_ttl_empty_parts
test_ttl_replicated/test.py::test_ttl_compatibility[node_left1-node_right1-1]
test_version_update/test.py::test_aggregate_function_versioning_persisting_metadata
test_version_update/test.py::test_aggregate_function_versioning_issue_16587
test_ttl_replicated/test.py::test_modify_ttl
test_mysql_database_engine/test.py::test_mysql_ddl_for_mysql_database
test_profile_events_s3/test.py::test_profile_events
test_version_update_after_mutation/test.py::test_upgrade_while_mutation
test_version_update_after_mutation/test.py::test_mutate_and_upgrade
test_system_flush_logs/test.py::test_system_logs[system.text_log-0]
test_user_defined_object_persistence/test.py::test_persistence
test_settings_profile/test.py::test_show_profiles
test_sql_user_defined_functions_on_cluster/test.py::test_sql_user_defined_functions_on_cluster
test_select_access_rights/test_main.py::test_alias_columns
test_select_access_rights/test_main.py::test_select_count
test_select_access_rights/test_main.py::test_select_join
test_replicated_merge_tree_compatibility/test.py::test_replicated_merge_tree_defaults_compatibility
test_postgresql_protocol/test.py::test_python_client
test_quota/test.py::test_add_remove_interval
test_quota/test.py::test_add_remove_quota
test_quota/test.py::test_consumption_of_show_clusters
test_quota/test.py::test_consumption_of_show_databases
test_quota/test.py::test_consumption_of_show_privileges
test_quota/test.py::test_consumption_of_show_processlist
test_quota/test.py::test_consumption_of_show_tables
test_quota/test.py::test_dcl_introspection
test_quota/test.py::test_dcl_management
test_quota/test.py::test_exceed_quota
test_quota/test.py::test_query_inserts
test_quota/test.py::test_quota_from_users_xml
test_quota/test.py::test_reload_users_xml_by_timer
test_quota/test.py::test_simpliest_quota
test_quota/test.py::test_tracking_quota
test_quota/test.py::test_users_xml_is_readonly
test_replicating_constants/test.py::test_different_versions
test_merge_tree_s3/test.py::test_heavy_insert_select_check_memory[node]


@ -336,6 +336,9 @@ CI_CONFIG = {
"Integration tests (asan)": {
"required_build": "package_asan",
},
"Integration tests (asan, analyzer)": {
"required_build": "package_asan",
},
"Integration tests (tsan)": {
"required_build": "package_tsan",
},


@ -71,7 +71,7 @@ def get_json_params_dict(
}
def get_env_for_runner(build_path, repo_path, result_path, work_path):
def get_env_for_runner(check_name, build_path, repo_path, result_path, work_path):
binary_path = os.path.join(build_path, "clickhouse")
odbc_bridge_path = os.path.join(build_path, "clickhouse-odbc-bridge")
library_bridge_path = os.path.join(build_path, "clickhouse-library-bridge")
@ -88,6 +88,9 @@ def get_env_for_runner(build_path, repo_path, result_path, work_path):
my_env["CLICKHOUSE_TESTS_JSON_PARAMS_PATH"] = os.path.join(work_path, "params.json")
my_env["CLICKHOUSE_TESTS_RUNNER_RESTART_DOCKER"] = "0"
if "analyzer" in check_name.lower():
my_env["CLICKHOUSE_USE_NEW_ANALYZER"] = "1"
return my_env
@ -225,7 +228,9 @@ def main():
else:
download_all_deb_packages(check_name, reports_path, build_path)
my_env = get_env_for_runner(build_path, repo_path, result_path, work_path)
my_env = get_env_for_runner(
check_name, build_path, repo_path, result_path, work_path
)
json_path = os.path.join(work_path, "params.json")
with open(json_path, "w", encoding="utf-8") as json_params:


@ -239,6 +239,8 @@ class ClickhouseIntegrationTestsRunner:
self.start_time = time.time()
self.soft_deadline_time = self.start_time + (TASK_TIMEOUT - MAX_TIME_IN_SANDBOX)
self.use_analyzer = os.environ.get("CLICKHOUSE_USE_NEW_ANALYZER") is not None
if "run_by_hash_total" in self.params:
self.run_by_hash_total = self.params["run_by_hash_total"]
self.run_by_hash_num = self.params["run_by_hash_num"]
@ -398,6 +400,9 @@ class ClickhouseIntegrationTestsRunner:
result.append("--tmpfs")
if self.disable_net_host:
result.append("--disable-net-host")
if self.use_analyzer:
result.append("--analyzer")
return " ".join(result)
def _get_all_tests(self, repo_path):
@ -480,7 +485,7 @@ class ClickhouseIntegrationTestsRunner:
result[test_file].append(test)
return result
def _update_counters(self, main_counters, current_counters):
def _update_counters(self, main_counters, current_counters, broken_tests):
for test in current_counters["PASSED"]:
if (
test not in main_counters["PASSED"]
@ -493,10 +498,17 @@ class ClickhouseIntegrationTestsRunner:
if test in main_counters["ERROR"]:
main_counters["ERROR"].remove(test)
is_flaky = True
if test in main_counters["BROKEN"]:
main_counters["BROKEN"].remove(test)
is_flaky = True
if is_flaky:
main_counters["FLAKY"].append(test)
else:
main_counters["PASSED"].append(test)
if test not in broken_tests:
main_counters["PASSED"].append(test)
else:
main_counters["NOT_FAILED"].append(test)
for state in ("ERROR", "FAILED"):
for test in current_counters[state]:
@ -506,8 +518,12 @@ class ClickhouseIntegrationTestsRunner:
main_counters["PASSED"].remove(test)
main_counters["FLAKY"].append(test)
continue
if test not in main_counters[state]:
main_counters[state].append(test)
if test not in broken_tests:
if test not in main_counters[state]:
main_counters[state].append(test)
else:
if test not in main_counters["BROKEN"]:
main_counters["BROKEN"].append(test)
for state in ("SKIPPED",):
for test in current_counters[state]:
@ -565,11 +581,22 @@ class ClickhouseIntegrationTestsRunner:
return res
def try_run_test_group(
self, repo_path, test_group, tests_in_group, num_tries, num_workers
self,
repo_path,
test_group,
tests_in_group,
num_tries,
num_workers,
broken_tests,
):
try:
return self.run_test_group(
repo_path, test_group, tests_in_group, num_tries, num_workers
repo_path,
test_group,
tests_in_group,
num_tries,
num_workers,
broken_tests,
)
except Exception as e:
logging.info("Failed to run {}:\n{}".format(str(test_group), str(e)))
@ -587,7 +614,13 @@ class ClickhouseIntegrationTestsRunner:
return counters, tests_times, []
def run_test_group(
self, repo_path, test_group, tests_in_group, num_tries, num_workers
self,
repo_path,
test_group,
tests_in_group,
num_tries,
num_workers,
broken_tests,
):
counters = {
"ERROR": [],
@ -595,6 +628,8 @@ class ClickhouseIntegrationTestsRunner:
"FAILED": [],
"SKIPPED": [],
"FLAKY": [],
"BROKEN": [],
"NOT_FAILED": [],
}
tests_times = defaultdict(float)
@ -700,7 +735,7 @@ class ClickhouseIntegrationTestsRunner:
)
times_lines = parse_test_times(info_path)
new_tests_times = get_test_times(times_lines)
self._update_counters(counters, new_counters)
self._update_counters(counters, new_counters, broken_tests)
for test_name, test_time in new_tests_times.items():
tests_times[test_name] = test_time
@ -773,7 +808,7 @@ class ClickhouseIntegrationTestsRunner:
final_retry += 1
logging.info("Running tests for the %s time", i)
counters, tests_times, log_paths = self.try_run_test_group(
repo_path, "bugfix" if should_fail else "flaky", tests_to_run, 1, 1
repo_path, "bugfix" if should_fail else "flaky", tests_to_run, 1, 1, []
)
logs += log_paths
if counters["FAILED"]:
@ -894,6 +929,8 @@ class ClickhouseIntegrationTestsRunner:
"FAILED": [],
"SKIPPED": [],
"FLAKY": [],
"BROKEN": [],
"NOT_FAILED": [],
}
tests_times = defaultdict(float)
tests_log_paths = defaultdict(list)
@ -905,10 +942,16 @@ class ClickhouseIntegrationTestsRunner:
logging.info("Shuffling test groups")
random.shuffle(items_to_run)
broken_tests = list()
if self.use_analyzer:
with open(f"{repo_path}/tests/analyzer_integration_broken_tests.txt") as f:
broken_tests = f.read().splitlines()
logging.info(f"Broken tests in the list: {len(broken_tests)}")
for group, tests in items_to_run:
logging.info("Running test group %s containing %s tests", group, len(tests))
group_counters, group_test_times, log_paths = self.try_run_test_group(
repo_path, group, tests, MAX_RETRY, NUM_WORKERS
repo_path, group, tests, MAX_RETRY, NUM_WORKERS, broken_tests
)
total_tests = 0
for counter, value in group_counters.items():
@ -940,7 +983,15 @@ class ClickhouseIntegrationTestsRunner:
result_state = "success"
test_result = []
for state in ("ERROR", "FAILED", "PASSED", "SKIPPED", "FLAKY"):
for state in (
"ERROR",
"FAILED",
"PASSED",
"SKIPPED",
"FLAKY",
"BROKEN",
"NOT_FAILED",
):
if state == "PASSED":
text_state = "OK"
elif state == "FAILED":
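
A condensed sketch of the broken-tests bookkeeping added above, ignoring the FLAKY/retry handling: a test listed in analyzer_integration_broken_tests.txt is reported as NOT_FAILED when it unexpectedly passes and as BROKEN when it fails, instead of PASSED/FAILED.

def classify(test: str, passed: bool, broken_tests: list) -> str:
    if passed:
        return "PASSED" if test not in broken_tests else "NOT_FAILED"
    return "FAILED" if test not in broken_tests else "BROKEN"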


@ -0,0 +1,7 @@
<clickhouse>
<profiles>
<default>
<allow_experimental_analyzer>1</allow_experimental_analyzer>
</default>
</profiles>
</clickhouse>


@ -4190,6 +4190,8 @@ class ClickHouseInstance:
)
write_embedded_config("0_common_instance_users.xml", users_d_dir)
if os.environ.get("CLICKHOUSE_USE_NEW_ANALYZER") is not None:
write_embedded_config("0_common_enable_analyzer.xml", users_d_dir)
if len(self.custom_dictionaries_paths):
write_embedded_config("0_common_enable_dictionaries.xml", self.config_d_dir)


@ -283,6 +283,14 @@ if __name__ == "__main__":
help="Use tmpfs for dockerd files",
)
parser.add_argument(
"--analyzer",
action="store_true",
default=False,
dest="analyzer",
help="Use new analyzer infrastructure",
)
parser.add_argument(
"--cleanup-containers",
action="store_true",
@ -395,6 +403,10 @@ if __name__ == "__main__":
if args.keyword_expression:
args.pytest_args += ["-k", args.keyword_expression]
use_analyzer = ""
if args.analyzer:
use_analyzer = "-e CLICKHOUSE_USE_NEW_ANALYZER=1"
pytest_opts = " ".join(args.pytest_args).replace("'", "\\'")
tests_list = " ".join(args.tests_list)
cmd_base = (
@ -407,7 +419,7 @@ if __name__ == "__main__":
f"--volume={args.cases_dir}:/ClickHouse/tests/integration "
f"--volume={args.src_dir}/Server/grpc_protos:/ClickHouse/src/Server/grpc_protos "
f"--volume=/run:/run/host:ro {dockerd_internal_volume} {env_tags} {env_cleanup} "
"-e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 -e PYTHONUNBUFFERED=1 "
f"-e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 {use_analyzer} -e PYTHONUNBUFFERED=1 "
f"-e PYTEST_OPTS='{parallel_args} {pytest_opts} {tests_list} {rand_args} -vvv'"
f" {DIND_INTEGRATION_TESTS_IMAGE_NAME}:{args.docker_image_version}"
)
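
Taken together, the CI changes in this commit thread the analyzer switch end to end (the chain summarizes the diffs above; the tiny helper is illustrative):

  CHECK_NAME "Integration tests (asan, analyzer)"
    -> integration_test_check.py exports CLICKHOUSE_USE_NEW_ANALYZER=1
    -> ci-runner.py appends --analyzer to the runner options
    -> the runner passes -e CLICKHOUSE_USE_NEW_ANALYZER=1 into the docker container
    -> helpers/cluster.py writes 0_common_enable_analyzer.xml, which sets
       allow_experimental_analyzer=1 for the default user profile.

import os

def analyzer_enabled() -> bool:
    # Mirrors the checks in cluster.py and integration_test_check.py:
    # presence of the variable, not its value, is what matters.
    return os.environ.get("CLICKHOUSE_USE_NEW_ANALYZER") is not None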


@ -0,0 +1,57 @@
import os
import time
import pytest
import helpers.cluster
import helpers.test_tools
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture(scope="module")
def started_node():
cluster = helpers.cluster.ClickHouseCluster(__file__)
try:
node = cluster.add_instance("node", stay_alive=True)
cluster.start()
yield node
finally:
cluster.shutdown()
def send_signal(started_node, signal):
started_node.exec_in_container(
["bash", "-c", f"pkill -{signal} clickhouse"], user="root"
)
def wait_for_clickhouse_stop(started_node):
result = None
for attempt in range(60):
time.sleep(1)
pid = started_node.get_process_pid("clickhouse")
if pid is None:
result = "OK"
break
assert result == "OK", "ClickHouse process is still running"
def test_pkill(started_node):
if (
started_node.is_built_with_thread_sanitizer()
or started_node.is_built_with_address_sanitizer()
or started_node.is_built_with_memory_sanitizer()
):
pytest.skip("doesn't fit in timeouts for stacktrace generation")
crashes_count = 0
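# "SEGV" sends SIGSEGV by name; "4" is the numeric form of SIGILL.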
for signal in ["SEGV", "4"]:
send_signal(started_node, signal)
wait_for_clickhouse_stop(started_node)
started_node.restart_clickhouse()
crashes_count += 1
assert (
started_node.query("SELECT COUNT(*) FROM system.crash_log")
== f"{crashes_count}\n"
)


@ -0,0 +1,5 @@
1900-01-01 00:00:00.000
1962-12-08 18:11:29.123
1969-12-31 23:59:59.999
1970-01-01 00:00:00.000
1970-01-01 00:00:00.001


@ -0,0 +1,5 @@
select formatDateTime(toDateTime64('1900-01-01 00:00:00.000', 3, 'UTC'), '%F %T.%f');
select formatDateTime(toDateTime64('1962-12-08 18:11:29.123', 3, 'UTC'), '%F %T.%f');
select formatDateTime(toDateTime64('1969-12-31 23:59:59.999', 3, 'UTC'), '%F %T.%f');
select formatDateTime(toDateTime64('1970-01-01 00:00:00.000', 3, 'UTC'), '%F %T.%f');
select formatDateTime(toDateTime64('1970-01-01 00:00:00.001', 3, 'UTC'), '%F %T.%f');


@ -4,6 +4,7 @@ v23.5.4.25-stable 2023-06-29
v23.5.3.24-stable 2023-06-17
v23.5.2.7-stable 2023-06-10
v23.5.1.3174-stable 2023-06-09
v23.4.6.25-stable 2023-07-12
v23.4.5.22-stable 2023-06-29
v23.4.4.16-stable 2023-06-17
v23.4.3.48-stable 2023-06-12
