mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)

Merge pull request #34153 from ClickHouse/add_func_tests_over_s3

Add func tests run with s3

This commit is contained in: commit a2aa147ce0

36  .github/workflows/pull_request.yml (vendored)
@@ -1212,6 +1212,41 @@ jobs:
           docker kill "$(docker ps -q)" ||:
           docker rm -f "$(docker ps -a -q)" ||:
           sudo rm -fr "$TEMP_PATH"
+  FunctionalStatelessTestReleaseS3:
+    needs: [BuilderDebRelease]
+    runs-on: [self-hosted, func-tester]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Stateless tests (release, s3 storage, actions)
+          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
+          KILL_TIMEOUT=10800
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Functional test
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker kill "$(docker ps -q)" ||:
+          docker rm -f "$(docker ps -a -q)" ||:
+          sudo rm -fr "$TEMP_PATH"
   FunctionalStatelessTestAarch64:
     needs: [BuilderDebAarch64]
     runs-on: [self-hosted, func-tester-aarch64]
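The new job mirrors the existing functional-test jobs: download build reports, check out the repo, copy it into a scratch directory, and hand the check name and kill timeout to functional_test_check.py. For orientation, the "Functional test" step boils down to roughly the following shell sequence (a sketch, not the official runner; TEMP_PATH stands in for ${{runner.temp}} and the repo is assumed to be checked out at ./ClickHouse):

    # Rough local equivalent of the "Functional test" step (paths are assumptions).
    TEMP_PATH="$HOME/stateless_s3_storage"
    REPO_COPY="$TEMP_PATH/ClickHouse"
    CHECK_NAME="Stateless tests (release, s3 storage, actions)"
    KILL_TIMEOUT=10800
    sudo rm -fr "$TEMP_PATH"
    mkdir -p "$TEMP_PATH"
    cp -r "$PWD/ClickHouse" "$TEMP_PATH"
    cd "$REPO_COPY/tests/ci"
    python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"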
@@ -3034,6 +3069,7 @@ jobs:
       - FunctionalStatefulTestTsan
       - FunctionalStatefulTestMsan
       - FunctionalStatefulTestUBsan
+      - FunctionalStatelessTestReleaseS3
       - StressTestDebug
       - StressTestAsan
       - StressTestTsan
@@ -85,6 +85,10 @@ function run_tests()
         # everything in parallel except DatabaseReplicated. See below.
     fi

+    if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--s3-storage')
+    fi
+
     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
         ADDITIONAL_OPTIONS+=('--jobs')
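The guard is deliberately two-step: [[ -n ... ]] skips the branch when the variable is unset, so the numeric [[ ... -eq 1 ]] test never sees an empty string. A minimal sketch of the behaviour:

    # Sketch: the flag is appended only when the variable is set and equal to 1.
    USE_S3_STORAGE_FOR_MERGE_TREE=1
    ADDITIONAL_OPTIONS=()
    if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--s3-storage')
    fi
    echo "${ADDITIONAL_OPTIONS[@]}"   # prints: --s3-storage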
@@ -30,6 +30,7 @@ void CachedCompressedReadBuffer::initInput()

 void CachedCompressedReadBuffer::prefetch()
 {
+    initInput();
     file_in->prefetch();
 }

@@ -61,13 +61,13 @@ public:

     void setReadUntilPosition(size_t position) override
     {
-        if (file_in)
+        initInput();
         file_in->setReadUntilPosition(position);
     }

     void setReadUntilEnd() override
     {
-        if (file_in)
+        initInput();
         file_in->setReadUntilEnd();
     }
 };
@@ -174,6 +174,10 @@ void registerDiskS3(DiskFactory & factory)
         ContextPtr context,
         const DisksMap & /*map*/) -> DiskPtr {
         S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
+
+        if (uri.key.empty())
+            throw Exception("Empty S3 path specified in disk configuration", ErrorCodes::BAD_ARGUMENTS);
+
         if (uri.key.back() != '/')
             throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);

@@ -270,6 +270,9 @@ CI_CONFIG = {
     "Stateless tests (release, DatabaseReplicated, actions)": {
         "required_build": "package_release",
     },
+    "Stateless tests (release, s3 storage, actions)": {
+        "required_build": "package_release",
+    },
     "Stress test (address, actions)": {
         "required_build": "package_asan",
     },
@@ -29,6 +29,8 @@ def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total):
         result.append("USE_DATABASE_ORDINARY=1")
     if 'wide parts enabled' in check_name:
         result.append("USE_POLYMORPHIC_PARTS=1")
+    if 's3 storage' in check_name:
+        result.append("USE_S3_STORAGE_FOR_MERGE_TREE=1")

     if run_by_hash_total != 0:
         result.append(f"RUN_BY_HASH_NUM={run_by_hash_num}")
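This closes the loop between the CI config and the test image: the check name registered in ci_config.py contains "s3 storage", this function turns that substring into USE_S3_STORAGE_FOR_MERGE_TREE=1, and run_tests() above turns the variable into the --s3-storage flag. A bash rendering of the same logic (a sketch; the real code is the Python above):

    # Sketch of the name -> env -> flag plumbing.
    CHECK_NAME="Stateless tests (release, s3 storage, actions)"
    if [[ "$CHECK_NAME" == *"s3 storage"* ]]; then
        export USE_S3_STORAGE_FOR_MERGE_TREE=1
    fi
    # run_tests() then appends: ADDITIONAL_OPTIONS+=('--s3-storage')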
@@ -329,6 +329,7 @@ class FailureReason(enum.Enum):
     FAST_ONLY = "running fast tests only"
     NO_LONG = "not running long tests"
     REPLICATED_DB = "replicated-database"
+    S3_STORAGE = "s3-storage"
     BUILD = "not running for current build"

     # UNKNOWN reasons
@@ -463,6 +464,10 @@ class TestCase:
         elif tags and ('no-replicated-database' in tags) and args.replicated_database:
             return FailureReason.REPLICATED_DB

+        elif tags and ('no-s3-storage' in tags) and args.s3_storage:
+            return FailureReason.S3_STORAGE
+
         elif tags:
             for build_flag in args.build_flags:
                 if 'no-' + build_flag in tags:
@@ -1369,6 +1374,7 @@ if __name__ == '__main__':
     parser.add_argument('--client-option', nargs='+', help='Specify additional client argument')
     parser.add_argument('--print-time', action='store_true', dest='print_time', help='Print test time')
     parser.add_argument('--check-zookeeper-session', action='store_true', help='Check ZooKeeper session uptime to determine if failed test should be retried')
+    parser.add_argument('--s3-storage', action='store_true', default=False, help='Run tests over s3 storage')

     parser.add_argument('--run-by-hash-num', type=int, help='Run tests matching crc32(test_name) % run_by_hash_total == run_by_hash_num')
     parser.add_argument('--run-by-hash-total', type=int, help='Total test groups for crc32(test_name) % run_by_hash_total == run_by_hash_num')
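With the flag in place, an S3 run can also be started by hand against a server that already has the S3 disk configured; an invocation might look like this (illustrative: the positional argument is a test-name substring filter, and "merge_tree" here is just an example):

    cd tests
    ./clickhouse-test --s3-storage merge_tree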
24  tests/config/config.d/s3_storage_policy_by_default.xml (new file)

@@ -0,0 +1,24 @@
+<clickhouse>
+    <storage_configuration>
+        <disks>
+            <s3>
+                <type>s3</type>
+                <endpoint>http://localhost:11111/test/test/</endpoint>
+                <access_key_id>clickhouse</access_key_id>
+                <secret_access_key>clickhouse</secret_access_key>
+            </s3>
+        </disks>
+        <policies>
+            <s3>
+                <volumes>
+                    <main>
+                        <disk>s3</disk>
+                    </main>
+                </volumes>
+            </s3>
+        </policies>
+    </storage_configuration>
+    <merge_tree>
+        <storage_policy>s3</storage_policy>
+    </merge_tree>
+</clickhouse>
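The endpoint points at the local S3-compatible mock that the stateless test environment runs, and the <merge_tree> override makes the s3 policy the default for new MergeTree tables. Against a server that has loaded this config, the policy can also be requested explicitly and inspected (a sketch; the table name is made up):

    # Create a table pinned to the s3 policy, then confirm the policy is registered.
    clickhouse-client --query "
        CREATE TABLE t_s3_demo (key UInt64, value String)
        ENGINE = MergeTree ORDER BY key
        SETTINGS storage_policy = 's3'"
    clickhouse-client --query "
        SELECT policy_name, volume_name, disks
        FROM system.storage_policies
        WHERE policy_name = 's3'"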
@ -107,4 +107,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
|
|||||||
sudo chgrp clickhouse /var/lib/clickhouse2
|
sudo chgrp clickhouse /var/lib/clickhouse2
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
|
||||||
|
ln -sf $SRC_PATH/config.d/s3_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/
|
||||||
|
fi
|
||||||
|
|
||||||
ln -sf $SRC_PATH/client_config.xml $DEST_CLIENT_PATH/config.xml
|
ln -sf $SRC_PATH/client_config.xml $DEST_CLIENT_PATH/config.xml
|
||||||
|
@@ -1,3 +1,4 @@
+-- Tags: no-s3-storage
 SELECT '*** Not partitioned ***';

 DROP TABLE IF EXISTS not_partitioned;
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: long
+# Tags: long, no-s3-storage

 set -e

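This hunk and the ones that follow all make the same edit: they opt individual tests out of the S3 run via the Tags: header that clickhouse-test parses from the first lines of each test file, which the new --s3-storage handling (see the TestCase hunk above) turns into a skip with reason "s3-storage". A minimal tagged bash test would look like this (an illustrative skeleton, not a file from the repo):

    #!/usr/bin/env bash
    # Tags: no-s3-storage
    # clickhouse-test reads the Tags: comment above; under --s3-storage this
    # test is skipped instead of executed.
    set -e
    echo "never runs when --s3-storage is passed"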
@@ -1,4 +1,4 @@
--- Tags: long
+-- Tags: long, no-s3-storage

 DROP TABLE IF EXISTS check_system_tables;

@@ -1,4 +1,4 @@
--- Tags: no-parallel
+-- Tags: no-parallel, no-s3-storage

 drop table if exists ttl;
 set mutations_sync = 2;
@@ -1,3 +1,5 @@
+-- Tags: no-s3-storage
+-- Output slightly different plan
 drop table if exists t;

 create table t (a Int, b Int) engine = MergeTree order by (a, b) settings index_granularity = 400;
@@ -1,3 +1,4 @@
+-- Tags: no-s3-storage
 select * from system.settings where name = 'send_timeout';
 select * from system.merge_tree_settings order by length(description) limit 1;

@@ -1,3 +1,4 @@
+-- Tags: no-s3-storage
 DROP TABLE IF EXISTS test_01343;
 CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
 INSERT INTO test_01343 VALUES ('Hello, world');
@@ -1,3 +1,4 @@
+-- Tags: no-s3-storage
 DROP TABLE IF EXISTS test_01344;
 CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
 INSERT INTO test_01344 VALUES ('Hello, world');
@@ -1,3 +1,4 @@
+-- Tags: no-s3-storage
 SELECT '====array====';
 DROP TABLE IF EXISTS t_arr;
 CREATE TABLE t_arr (a Array(UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
@@ -1,3 +1,5 @@
+-- Tags: no-s3-storage
+-- Temporary supressed
 DROP TABLE IF EXISTS nested;

 SET flatten_nested = 0;