Mirror of https://github.com/ClickHouse/ClickHouse.git
disable more hdfs and kafka inttests

#ci_set_arm

commit 55737de0e6, parent 984c7e69a0
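
Note: every hunk below imports an is_arm() helper from helpers/cluster.py, but the diff itself never shows its definition. A minimal sketch of what such a helper could look like — an assumption based on the call sites, not the actual helpers/cluster.py source:

    import platform

    def is_arm() -> bool:
        # platform.machine() reports "aarch64" on ARM Linux
        # and "arm64" on Apple Silicon macOS
        return platform.machine().lower() in ("arm64", "aarch64")
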
@@ -1,14 +1,17 @@
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.test_tools import TSV
 
 disk_types = {
     "default": "Local",
     "disk_s3": "S3",
-    "disk_hdfs": "HDFS",
     "disk_encrypted": "S3",
 }
 
+# do not test HDFS on ARM
+if not is_arm():
+    disk_types["disk_hdfs"] = "HDFS"
+
 
 @pytest.fixture(scope="module")
 def cluster():
@@ -18,7 +21,7 @@ def cluster():
             "node",
             main_configs=["configs/storage.xml"],
             with_minio=True,
-            with_hdfs=True,
+            with_hdfs=not is_arm(),
         )
         cluster.start()
 
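
Note: this first file takes a finer-grained approach than the module-wide skips below — the test still runs on ARM, but its HDFS-specific parts are disabled: the expected "disk_hdfs" entry is added to disk_types only off ARM, and with_hdfs=not is_arm() keeps the HDFS container out of the test cluster. A self-contained sketch of the same gating pattern (is_arm() here is the hypothetical helper sketched above, inlined so the snippet runs standalone):

    import platform

    def is_arm() -> bool:
        return platform.machine().lower() in ("arm64", "aarch64")

    # Expect the optional HDFS backend only on architectures that can run it.
    disk_types = {"default": "Local", "disk_s3": "S3", "disk_encrypted": "S3"}
    if not is_arm():
        disk_types["disk_hdfs"] = "HDFS"
    print(sorted(disk_types))
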
@@ -1,5 +1,5 @@
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.test_tools import TSV
 from pyhdfs import HdfsClient
 
@@ -10,6 +10,8 @@ disk_types = {
     "disk_encrypted": "S3",
 }
 
+if is_arm():
+    pytestmark = pytest.mark.skip
 
 @pytest.fixture(scope="module")
 def cluster():
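
Note: pytestmark is pytest's module-level marker hook — when a module defines a global named pytestmark holding a mark (or list of marks), pytest applies it to every test in that file. So the if is_arm(): pytestmark = pytest.mark.skip pattern repeated throughout this commit disables entire modules on ARM without touching individual tests. A minimal, self-contained illustration (a hypothetical test file, not part of this commit; the reason string is an assumption — the commit itself uses the bare mark):

    import platform
    import pytest

    # Module-level marks apply to every test function in this file.
    if platform.machine().lower() in ("arm64", "aarch64"):
        pytestmark = pytest.mark.skip(reason="no ARM images for HDFS/Kafka")

    def test_something():
        assert 1 + 1 == 2  # collected but skipped on ARM hosts
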
@@ -8,9 +8,13 @@ from confluent_kafka.avro.cached_schema_registry_client import (
     CachedSchemaRegistryClient,
 )
 from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
-from helpers.cluster import ClickHouseCluster, ClickHouseInstance
+from helpers.cluster import ClickHouseCluster, ClickHouseInstance, is_arm
 from urllib import parse
 
+# Skip on ARM due to Confluent/Kafka
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
@@ -3,7 +3,7 @@ import os.path as p
 import pytest
 import uuid
 
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.test_tools import TSV
 from string import Template
 
@@ -14,6 +14,9 @@ instance = cluster.add_instance(
 datasource = "self"
 records = 1000
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
@@ -2,10 +2,14 @@ import time
 import logging
 import pytest
 
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
 from kafka.admin import NewTopic
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 cluster = ClickHouseCluster(__file__)
 instance = cluster.add_instance(
     "instance",
@@ -1,11 +1,15 @@
 #!/usr/bin/env python3
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 import helpers.keeper_utils as keeper_utils
 from minio.deleteobjects import DeleteObject
 
 import os
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 cluster = ClickHouseCluster(__file__)
 node_logs = cluster.add_instance(
@@ -2,10 +2,13 @@ import logging
 import sys
 
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 
 from pyhdfs import HdfsClient
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
@@ -6,6 +6,7 @@ from helpers.cluster import (
     ClickHouseCluster,
     ClickHouseInstance,
     get_docker_compose_path,
+    is_arm,
 )
 import logging
 
@@ -13,6 +14,10 @@ from . import materialized_with_ddl
 
 DOCKER_COMPOSE_PATH = get_docker_compose_path()
 
+# skip all test on arm due to no arm support in mysql57
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 cluster = ClickHouseCluster(__file__)
 mysql_node = None
 mysql8_node = None
|
@ -3,7 +3,7 @@ import time
|
|||||||
import os
|
import os
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from helpers.cluster import ClickHouseCluster
|
from helpers.cluster import ClickHouseCluster, is_arm
|
||||||
from helpers.utility import generate_values
|
from helpers.utility import generate_values
|
||||||
from helpers.wait_for_helpers import wait_for_delete_inactive_parts
|
from helpers.wait_for_helpers import wait_for_delete_inactive_parts
|
||||||
from helpers.wait_for_helpers import wait_for_delete_empty_parts
|
from helpers.wait_for_helpers import wait_for_delete_empty_parts
|
||||||
@@ -16,6 +16,10 @@ CONFIG_PATH = os.path.join(
 )
 
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 def create_table(cluster, table_name, additional_settings=None):
     node = cluster.instances["node"]
 
@@ -12,11 +12,20 @@ from typing import Literal
 import docker
 import pymysql.connections
 import pytest
-from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
+from helpers.cluster import (
+    ClickHouseCluster,
+    get_docker_compose_path,
+    is_arm,
+    run_and_check,
+)
 
 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
 DOCKER_COMPOSE_PATH = get_docker_compose_path()
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 cluster = ClickHouseCluster(__file__)
 node = cluster.add_instance(
     "node",
@@ -1,10 +1,15 @@
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 
 from helpers.network import PartitionManager
 import threading
 import time
 
+# skip all tests in the module on ARM due to HDFS
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance(
     "node1",
@@ -22,7 +22,7 @@ import kafka.errors
 import pytest
 from google.protobuf.internal.encoder import _VarintBytes
 from helpers.client import QueryRuntimeException
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.network import PartitionManager
 from helpers.test_tools import TSV
 from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
@@ -40,6 +40,8 @@ from . import kafka_pb2
 from . import social_pb2
 from . import message_with_repeated_pb2
 
+if is_arm():
+    pytestmark = pytest.mark.skip
 
 # TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
 # TODO: add test for SELECT LIMIT is working.