diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py
index b9b8ef2010d..3c4169be4de 100644
--- a/tests/integration/test_disk_types/test.py
+++ b/tests/integration/test_disk_types/test.py
@@ -1,14 +1,17 @@
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.test_tools import TSV
 
 disk_types = {
     "default": "Local",
     "disk_s3": "S3",
-    "disk_hdfs": "HDFS",
     "disk_encrypted": "S3",
 }
 
+# do not test HDFS on ARM
+if not is_arm():
+    disk_types["disk_hdfs"] = "HDFS"
+
 
 @pytest.fixture(scope="module")
 def cluster():
@@ -18,7 +21,7 @@ def cluster():
             "node",
             main_configs=["configs/storage.xml"],
             with_minio=True,
-            with_hdfs=True,
+            with_hdfs=not is_arm(),
         )
         cluster.start()
 
diff --git a/tests/integration/test_endpoint_macro_substitution/test.py b/tests/integration/test_endpoint_macro_substitution/test.py
index ee72fb9b492..22a649e2225 100644
--- a/tests/integration/test_endpoint_macro_substitution/test.py
+++ b/tests/integration/test_endpoint_macro_substitution/test.py
@@ -1,5 +1,5 @@
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.test_tools import TSV
 from pyhdfs import HdfsClient
 
@@ -10,6 +10,8 @@ disk_types = {
     "disk_encrypted": "S3",
 }
 
+if is_arm():
+    pytestmark = pytest.mark.skip
 
 @pytest.fixture(scope="module")
 def cluster():
diff --git a/tests/integration/test_format_avro_confluent/test.py b/tests/integration/test_format_avro_confluent/test.py
index 540f90ae05e..ccaaee83514 100644
--- a/tests/integration/test_format_avro_confluent/test.py
+++ b/tests/integration/test_format_avro_confluent/test.py
@@ -8,9 +8,13 @@ from confluent_kafka.avro.cached_schema_registry_client import (
     CachedSchemaRegistryClient,
 )
 from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
-from helpers.cluster import ClickHouseCluster, ClickHouseInstance
+from helpers.cluster import ClickHouseCluster, ClickHouseInstance, is_arm
 from urllib import parse
 
+# Skip on ARM due to Confluent/Kafka
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
diff --git a/tests/integration/test_jdbc_bridge/test.py b/tests/integration/test_jdbc_bridge/test.py
index 0e41cc8c8b7..c4a0a525df3 100644
--- a/tests/integration/test_jdbc_bridge/test.py
+++ b/tests/integration/test_jdbc_bridge/test.py
@@ -3,7 +3,7 @@ import os.path as p
 import pytest
 import uuid
 
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.test_tools import TSV
 from string import Template
 
@@ -14,6 +14,9 @@ instance = cluster.add_instance(
 datasource = "self"
 records = 1000
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
diff --git a/tests/integration/test_kafka_bad_messages/test.py b/tests/integration/test_kafka_bad_messages/test.py
index 954b6042305..0446ca5cb47 100644
--- a/tests/integration/test_kafka_bad_messages/test.py
+++ b/tests/integration/test_kafka_bad_messages/test.py
@@ -2,10 +2,14 @@ import time
 import logging
 import pytest
 
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
 from kafka.admin import NewTopic
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 cluster = ClickHouseCluster(__file__)
 instance = cluster.add_instance(
     "instance",
diff --git a/tests/integration/test_keeper_disks/test.py b/tests/integration/test_keeper_disks/test.py
index e41837b89b4..0c91aa03419 100644
--- a/tests/integration/test_keeper_disks/test.py
+++ b/tests/integration/test_keeper_disks/test.py
@@ -1,11 +1,15 @@
 #!/usr/bin/env python3
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 import helpers.keeper_utils as keeper_utils
 from minio.deleteobjects import DeleteObject
 
 import os
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 cluster = ClickHouseCluster(__file__)
 node_logs = cluster.add_instance(
diff --git a/tests/integration/test_log_family_hdfs/test.py b/tests/integration/test_log_family_hdfs/test.py
index e8afe364ec4..6c3d28d2e3c 100644
--- a/tests/integration/test_log_family_hdfs/test.py
+++ b/tests/integration/test_log_family_hdfs/test.py
@@ -2,10 +2,13 @@ import logging
 import sys
 
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 
 from pyhdfs import HdfsClient
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
diff --git a/tests/integration/test_materialized_mysql_database/test.py b/tests/integration/test_materialized_mysql_database/test.py
index 89c69c42adc..57e496fe737 100644
--- a/tests/integration/test_materialized_mysql_database/test.py
+++ b/tests/integration/test_materialized_mysql_database/test.py
@@ -6,6 +6,7 @@ from helpers.cluster import (
     ClickHouseCluster,
     ClickHouseInstance,
     get_docker_compose_path,
+    is_arm,
 )
 import logging
 
@@ -13,6 +14,10 @@ from . import materialized_with_ddl
 
 DOCKER_COMPOSE_PATH = get_docker_compose_path()
 
+# skip all tests on ARM due to no ARM support in mysql57
+if is_arm():
+    pytestmark = pytest.mark.skip
+
 cluster = ClickHouseCluster(__file__)
 mysql_node = None
 mysql8_node = None
diff --git a/tests/integration/test_merge_tree_hdfs/test.py b/tests/integration/test_merge_tree_hdfs/test.py
index 95b63a5c8a3..5ca7dc5feb0 100644
--- a/tests/integration/test_merge_tree_hdfs/test.py
+++ b/tests/integration/test_merge_tree_hdfs/test.py
@@ -3,7 +3,7 @@ import time
 import os
 
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.utility import generate_values
 from helpers.wait_for_helpers import wait_for_delete_inactive_parts
 from helpers.wait_for_helpers import wait_for_delete_empty_parts
@@ -16,6 +16,10 @@ CONFIG_PATH = os.path.join(
 )
 
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 def create_table(cluster, table_name, additional_settings=None):
     node = cluster.instances["node"]
 
diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py
index 7a69d07633c..e641d4c2300 100644
--- a/tests/integration/test_mysql_protocol/test.py
+++ b/tests/integration/test_mysql_protocol/test.py
@@ -12,11 +12,20 @@ from typing import Literal
 import docker
 import pymysql.connections
 import pytest
-from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check
+from helpers.cluster import (
+    ClickHouseCluster,
+    get_docker_compose_path,
+    is_arm,
+    run_and_check,
+)
 
 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
 DOCKER_COMPOSE_PATH = get_docker_compose_path()
 
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 cluster = ClickHouseCluster(__file__)
 node = cluster.add_instance(
     "node",
diff --git a/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py
index 033f02d7bde..ba3fb3e14ab 100644
--- a/tests/integration/test_redirect_url_storage/test.py
+++ b/tests/integration/test_redirect_url_storage/test.py
@@ -1,10 +1,15 @@
 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.network import PartitionManager
 
 import threading
 import time
 
+# skip all tests in the module on ARM due to HDFS
+if is_arm():
+    pytestmark = pytest.mark.skip
+
+
 cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance(
     "node1",
diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py
index dea1ea49851..5bdea179449 100644
--- a/tests/integration/test_storage_kafka/test.py
+++ b/tests/integration/test_storage_kafka/test.py
@@ -22,7 +22,7 @@ import kafka.errors
 import pytest
 from google.protobuf.internal.encoder import _VarintBytes
 from helpers.client import QueryRuntimeException
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, is_arm
 from helpers.network import PartitionManager
 from helpers.test_tools import TSV
 from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
@@ -40,6 +40,8 @@ from . import kafka_pb2
 from . import social_pb2
 from . import message_with_repeated_pb2
 
+if is_arm():
+    pytestmark = pytest.mark.skip
 
 # TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
 # TODO: add test for SELECT LIMIT is working.
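
Note: every file above imports the is_arm() helper from helpers.cluster, but the helper's body is not part of this diff. A minimal sketch of what such a helper can look like, assuming it keys off the machine architecture reported by the standard library (illustrative only, not the committed implementation):

    import platform

    def is_arm() -> bool:
        # Illustrative sketch: ARM Linux typically reports "aarch64",
        # Apple Silicon macOS reports "arm64".
        return platform.machine().lower() in ("aarch64", "arm64")

Assigning the bare mark to the module-level pytestmark variable makes pytest skip every test it collects from that module; pytest.mark.skip(reason="...") would additionally surface the reason in the test report. test_disk_types is the one exception above: rather than skipping wholesale, it drops only the HDFS entry from the expected disk set and gates the fixture with with_hdfs=not is_arm(), so the rest of the module still runs on ARM.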