Revert "Add func tests run with s3"

Commit cbfcd45be3 (parent a2aa147ce0)
alexey-milovidov, 2022-02-01 05:46:13 +03:00, committed by GitHub
20 changed files with 7 additions and 100 deletions

View File

@@ -1212,41 +1212,6 @@ jobs:
          docker kill "$(docker ps -q)" ||:
          docker rm -f "$(docker ps -a -q)" ||:
          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseS3:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (release, s3 storage, actions)
-          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
-          KILL_TIMEOUT=10800
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          docker kill "$(docker ps -q)" ||:
-          docker rm -f "$(docker ps -a -q)" ||:
-          sudo rm -fr "$TEMP_PATH"
  FunctionalStatelessTestAarch64:
    needs: [BuilderDebAarch64]
    runs-on: [self-hosted, func-tester-aarch64]
@@ -3069,7 +3034,6 @@ jobs:
      - FunctionalStatefulTestTsan
      - FunctionalStatefulTestMsan
      - FunctionalStatefulTestUBsan
-      - FunctionalStatelessTestReleaseS3
      - StressTestDebug
      - StressTestAsan
      - StressTestTsan
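Context for the removed job: each functional-test job hands off to tests/ci/functional_test_check.py with the check name and kill timeout as positional arguments. A minimal sketch of that entry point, assuming nothing beyond the two arguments visible in the job above (the body is hypothetical):

# Hypothetical sketch of the entry point the removed job invoked;
# only the two positional arguments are taken from the workflow above.
import sys

def main() -> None:
    check_name = sys.argv[1]         # e.g. "Stateless tests (release, s3 storage, actions)"
    kill_timeout = int(sys.argv[2])  # e.g. 10800 (seconds)
    print(f"Running '{check_name}' with a kill timeout of {kill_timeout}s")
    # The real script selects a docker image, runs the tests and uploads reports.

if __name__ == "__main__":
    main()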

View File

@@ -85,10 +85,6 @@ function run_tests()
        # everything in parallel except DatabaseReplicated. See below.
    fi

-    if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
-        ADDITIONAL_OPTIONS+=('--s3-storage')
-    fi
-
    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--replicated-database')
        ADDITIONAL_OPTIONS+=('--jobs')
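The removed bash block is what translated the USE_S3_STORAGE_FOR_MERGE_TREE environment variable into an extra --s3-storage flag for the test runner. The same gating, sketched in Python for illustration (variable and flag names are from the diff; the helper function is hypothetical):

import os

def build_additional_options() -> list:
    """Hypothetical mirror of the removed bash block."""
    options = []
    # The bash code checks the variable is set and equals 1.
    if os.environ.get("USE_S3_STORAGE_FOR_MERGE_TREE") == "1":
        options.append("--s3-storage")
    if os.environ.get("USE_DATABASE_REPLICATED") == "1":
        options.append("--replicated-database")
    return options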

View File

@@ -30,7 +30,6 @@ void CachedCompressedReadBuffer::initInput()
void CachedCompressedReadBuffer::prefetch()
{
-    initInput();
    file_in->prefetch();
}

View File

@@ -61,14 +61,14 @@ public:
    void setReadUntilPosition(size_t position) override
    {
-        initInput();
-        file_in->setReadUntilPosition(position);
+        if (file_in)
+            file_in->setReadUntilPosition(position);
    }

    void setReadUntilEnd() override
    {
-        initInput();
-        file_in->setReadUntilEnd();
+        if (file_in)
+            file_in->setReadUntilEnd();
    }
};
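The two C++ hunks above trade an unconditional initInput() for a null check on file_in: the reverted code forced the underlying buffer into existence before forwarding the call, while the restored code forwards only when the buffer already exists. The difference in behaviour, modelled in Python (class and method names are illustrative, not the ClickHouse API):

import io

class LazyReader:
    """Illustrative model of the two behaviours in the hunks above."""
    def __init__(self):
        self.file_in = None

    def init_input(self):
        if self.file_in is None:
            self.file_in = io.BytesIO(b"example data")  # stand-in for the real source

    # Reverted behaviour: force initialization, then forward.
    def set_read_until_position_eager(self, position):
        self.init_input()
        self.file_in.seek(position)

    # Restored behaviour: forward only if already initialized.
    def set_read_until_position_guarded(self, position):
        if self.file_in:
            self.file_in.seek(position)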

View File

@@ -174,10 +174,6 @@ void registerDiskS3(DiskFactory & factory)
        ContextPtr context,
        const DisksMap & /*map*/) -> DiskPtr {
        S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
-        if (uri.key.empty())
-            throw Exception("Empty S3 path specified in disk configuration", ErrorCodes::BAD_ARGUMENTS);
-        if (uri.key.back() != '/')
-            throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);

View File

@@ -270,9 +270,6 @@ CI_CONFIG = {
    "Stateless tests (release, DatabaseReplicated, actions)": {
        "required_build": "package_release",
    },
-    "Stateless tests (release, s3 storage, actions)": {
-        "required_build": "package_release",
-    },
    "Stress test (address, actions)": {
        "required_build": "package_asan",
    },
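CI_CONFIG maps a human-readable check name to the build artifact that check consumes; the revert drops the S3 entry. A minimal sketch of how such a mapping is typically consumed (the lookup helper is hypothetical; the keys are from the diff):

CI_CONFIG = {
    "Stateless tests (release, DatabaseReplicated, actions)": {
        "required_build": "package_release",
    },
    "Stress test (address, actions)": {
        "required_build": "package_asan",
    },
}

def required_build(check_name: str) -> str:
    """Hypothetical helper: which build artifact a check consumes."""
    return CI_CONFIG[check_name]["required_build"]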

View File

@@ -29,8 +29,6 @@ def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total):
        result.append("USE_DATABASE_ORDINARY=1")
    if 'wide parts enabled' in check_name:
        result.append("USE_POLYMORPHIC_PARTS=1")
-    if 's3 storage' in check_name:
-        result.append("USE_S3_STORAGE_FOR_MERGE_TREE=1")
    if run_by_hash_total != 0:
        result.append(f"RUN_BY_HASH_NUM={run_by_hash_num}")

View File

@@ -329,7 +329,6 @@ class FailureReason(enum.Enum):
    FAST_ONLY = "running fast tests only"
    NO_LONG = "not running long tests"
    REPLICATED_DB = "replicated-database"
-    S3_STORAGE = "s3-storage"
    BUILD = "not running for current build"

    # UNKNOWN reasons
@@ -464,10 +463,6 @@ class TestCase:
        elif tags and ('no-replicated-database' in tags) and args.replicated_database:
            return FailureReason.REPLICATED_DB

-        elif tags and ('no-s3-storage' in tags) and args.s3_storage:
-            return FailureReason.S3_STORAGE

        elif tags:
            for build_flag in args.build_flags:
                if 'no-' + build_flag in tags:
@@ -1374,7 +1369,6 @@ if __name__ == '__main__':
    parser.add_argument('--client-option', nargs='+', help='Specify additional client argument')
    parser.add_argument('--print-time', action='store_true', dest='print_time', help='Print test time')
    parser.add_argument('--check-zookeeper-session', action='store_true', help='Check ZooKeeper session uptime to determine if failed test should be retried')
-    parser.add_argument('--s3-storage', action='store_true', default=False, help='Run tests over s3 storage')
    parser.add_argument('--run-by-hash-num', type=int, help='Run tests matching crc32(test_name) % run_by_hash_total == run_by_hash_num')
    parser.add_argument('--run-by-hash-total', type=int, help='Total test groups for crc32(test_name) % run_by_hash_total == run_by_hash_num')
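clickhouse-test matches no-<feature> tags in a test's header against the runner's command-line flags and skips the test with a FailureReason; the revert removes the --s3-storage flag, the S3_STORAGE reason, and the corresponding branch. A standalone sketch of the pattern (simplified; names are from the diff):

import enum
from types import SimpleNamespace

class FailureReason(enum.Enum):
    REPLICATED_DB = "replicated-database"
    S3_STORAGE = "s3-storage"  # member removed by this revert

def should_skip(tags, args):
    """Simplified sketch of the tag checks shown above."""
    if tags and 'no-replicated-database' in tags and args.replicated_database:
        return FailureReason.REPLICATED_DB
    if tags and 'no-s3-storage' in tags and args.s3_storage:
        return FailureReason.S3_STORAGE
    return None

args = SimpleNamespace(replicated_database=False, s3_storage=True)
print(should_skip(['no-s3-storage'], args))  # FailureReason.S3_STORAGE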

View File

@@ -1,24 +0,0 @@
-<clickhouse>
-    <storage_configuration>
-        <disks>
-            <s3>
-                <type>s3</type>
-                <endpoint>http://localhost:11111/test/test/</endpoint>
-                <access_key_id>clickhouse</access_key_id>
-                <secret_access_key>clickhouse</secret_access_key>
-            </s3>
-        </disks>
-        <policies>
-            <s3>
-                <volumes>
-                    <main>
-                        <disk>s3</disk>
-                    </main>
-                </volumes>
-            </s3>
-        </policies>
-    </storage_configuration>
-    <merge_tree>
-        <storage_policy>s3</storage_policy>
-    </merge_tree>
-</clickhouse>
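The deleted config wires a default MergeTree storage policy to an S3 disk backed by a MinIO-style endpoint on localhost:11111. For reference, a Python sketch that emits the same XML structure (all values are copied from the deleted file; the generator itself is just illustrative):

import xml.etree.ElementTree as ET

root = ET.Element("clickhouse")
storage = ET.SubElement(root, "storage_configuration")
disk = ET.SubElement(ET.SubElement(storage, "disks"), "s3")
for tag, text in [("type", "s3"),
                  ("endpoint", "http://localhost:11111/test/test/"),
                  ("access_key_id", "clickhouse"),
                  ("secret_access_key", "clickhouse")]:
    ET.SubElement(disk, tag).text = text
policy = ET.SubElement(ET.SubElement(storage, "policies"), "s3")
ET.SubElement(ET.SubElement(ET.SubElement(policy, "volumes"), "main"), "disk").text = "s3"
ET.SubElement(ET.SubElement(root, "merge_tree"), "storage_policy").text = "s3"
ET.ElementTree(root).write("s3_storage_policy_by_default.xml")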

View File

@@ -107,8 +107,4 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
    sudo chgrp clickhouse /var/lib/clickhouse2
fi

-if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
-    ln -sf $SRC_PATH/config.d/s3_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/
-fi
-
ln -sf $SRC_PATH/client_config.xml $DEST_CLIENT_PATH/config.xml
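The removed install-script block conditionally symlinked the S3 storage-policy config into the server's config.d directory. An equivalent conditional link sketched in Python (paths follow the diff's shell variables; the helper is illustrative):

import os

def link_s3_policy(src_path: str, dest_server_path: str) -> None:
    """Illustrative Python equivalent of the removed 'ln -sf' block."""
    if os.environ.get("USE_S3_STORAGE_FOR_MERGE_TREE") == "1":
        src = os.path.join(src_path, "config.d", "s3_storage_policy_by_default.xml")
        dst = os.path.join(dest_server_path, "config.d", "s3_storage_policy_by_default.xml")
        if os.path.lexists(dst):
            os.remove(dst)  # mimic -f: replace an existing link
        os.symlink(src, dst)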

View File

@@ -1,4 +1,3 @@
--- Tags: no-s3-storage
SELECT '*** Not partitioned ***';
DROP TABLE IF EXISTS not_partitioned;

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: long, no-s3-storage
+# Tags: long
set -e

View File

@@ -1,4 +1,4 @@
--- Tags: long, no-s3-storage
+-- Tags: long
DROP TABLE IF EXISTS check_system_tables;

View File

@@ -1,4 +1,4 @@
--- Tags: no-parallel, no-s3-storage
+-- Tags: no-parallel
drop table if exists ttl;
set mutations_sync = 2;

View File

@@ -1,5 +1,3 @@
--- Tags: no-s3-storage
--- Output slightly different plan
drop table if exists t;
create table t (a Int, b Int) engine = MergeTree order by (a, b) settings index_granularity = 400;

View File

@@ -1,4 +1,3 @@
--- Tags: no-s3-storage
select * from system.settings where name = 'send_timeout';
select * from system.merge_tree_settings order by length(description) limit 1;

View File

@@ -1,4 +1,3 @@
--- Tags: no-s3-storage
DROP TABLE IF EXISTS test_01343;
CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO test_01343 VALUES ('Hello, world');

View File

@@ -1,4 +1,3 @@
--- Tags: no-s3-storage
DROP TABLE IF EXISTS test_01344;
CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO test_01344 VALUES ('Hello, world');

View File

@@ -1,4 +1,3 @@
--- Tags: no-s3-storage
SELECT '====array====';
DROP TABLE IF EXISTS t_arr;
CREATE TABLE t_arr (a Array(UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

View File

@@ -1,5 +1,3 @@
--- Tags: no-s3-storage
--- Temporary supressed
DROP TABLE IF EXISTS nested;
SET flatten_nested = 0;