Merge branch 'master' into remove_some_tests
commit 6231a0d7d7
2 contrib/NuRaft vendored

@@ -1 +1 @@
Subproject commit 95d6bbba579b3a4e4c2dede954f541ff6f3dba51
Subproject commit 2a1bf7d87b4a03561fc66fbb49cee8a288983c5d
@ -1,5 +1,5 @@
|
||||
# docker build -t yandex/clickhouse-integration-tests-runner .
|
||||
FROM ubuntu:18.04
|
||||
FROM ubuntu:20.04
|
||||
|
||||
RUN apt-get update \
|
||||
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||
@ -14,7 +14,6 @@ RUN apt-get update \
|
||||
wget \
|
||||
git \
|
||||
iproute2 \
|
||||
module-init-tools \
|
||||
cgroupfs-mount \
|
||||
python3-pip \
|
||||
tzdata \
|
||||
@ -42,7 +41,6 @@ ENV TZ=Europe/Moscow
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
ENV DOCKER_VERSION 5:19.03.13~3-0~ubuntu-bionic
|
||||
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}"
|
||||
|
||||
@ -66,17 +64,18 @@ RUN python3 -m pip install \
|
||||
dict2xml \
|
||||
dicttoxml \
|
||||
docker \
|
||||
docker-compose==1.22.0 \
|
||||
docker-compose==1.28.2 \
|
||||
grpcio \
|
||||
grpcio-tools \
|
||||
kafka-python \
|
||||
kazoo \
|
||||
minio \
|
||||
protobuf \
|
||||
psycopg2-binary==2.7.5 \
|
||||
psycopg2-binary==2.8.6 \
|
||||
pymongo \
|
||||
pytest \
|
||||
pytest-timeout \
|
||||
pytest-xdist \
|
||||
redis \
|
||||
tzlocal \
|
||||
urllib3 \
|
||||
@ -86,6 +85,7 @@ RUN python3 -m pip install \
|
||||
COPY modprobe.sh /usr/local/bin/modprobe
|
||||
COPY dockerd-entrypoint.sh /usr/local/bin/
|
||||
COPY compose/ /compose/
|
||||
COPY misc/ /misc/
|
||||
|
||||
RUN set -x \
|
||||
&& addgroup --system dockremap \
|
||||
@ -94,7 +94,6 @@ RUN set -x \
|
||||
&& echo 'dockremap:165536:65536' >> /etc/subuid \
|
||||
&& echo 'dockremap:165536:65536' >> /etc/subgid
|
||||
|
||||
VOLUME /var/lib/docker
|
||||
EXPOSE 2375
|
||||
ENTRYPOINT ["dockerd-entrypoint.sh"]
|
||||
CMD ["sh", "-c", "pytest $PYTEST_OPTS"]
|
||||
|
@ -1,7 +1,5 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
cassandra1:
|
||||
image: cassandra
|
||||
image: cassandra:4.0
|
||||
restart: always
|
||||
ports:
|
||||
- 9043:9042
|
||||
|
@ -5,6 +5,10 @@ services:
|
||||
hostname: hdfs1
|
||||
restart: always
|
||||
ports:
|
||||
- 50075:50075
|
||||
- 50070:50070
|
||||
- ${HDFS_NAME_EXTERNAL_PORT}:${HDFS_NAME_INTERNAL_PORT} #50070
|
||||
- ${HDFS_DATA_EXTERNAL_PORT}:${HDFS_DATA_INTERNAL_PORT} #50075
|
||||
entrypoint: /etc/bootstrap.sh -d
|
||||
volumes:
|
||||
- type: ${HDFS_FS:-tmpfs}
|
||||
source: ${HDFS_LOGS:-}
|
||||
target: /usr/local/hadoop/logs
|
@ -15,10 +15,11 @@ services:
|
||||
image: confluentinc/cp-kafka:5.2.0
|
||||
hostname: kafka1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
- ${KAFKA_EXTERNAL_PORT}:${KAFKA_EXTERNAL_PORT}
|
||||
environment:
|
||||
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kafka1:19092
|
||||
KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:19092
|
||||
KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:${KAFKA_EXTERNAL_PORT},OUTSIDE://kafka1:19092
|
||||
KAFKA_ADVERTISED_HOST_NAME: kafka1
|
||||
KAFKA_LISTENERS: INSIDE://0.0.0.0:${KAFKA_EXTERNAL_PORT},OUTSIDE://0.0.0.0:19092
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
|
||||
KAFKA_BROKER_ID: 1
|
||||
@ -34,7 +35,7 @@ services:
|
||||
image: confluentinc/cp-schema-registry:5.2.0
|
||||
hostname: schema-registry
|
||||
ports:
|
||||
- "8081:8081"
|
||||
- ${SCHEMA_REGISTRY_EXTERNAL_PORT}:${SCHEMA_REGISTRY_INTERNAL_PORT}
|
||||
environment:
|
||||
SCHEMA_REGISTRY_HOST_NAME: schema-registry
|
||||
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
||||
|
@ -11,10 +11,12 @@ services:
|
||||
- ${KERBERIZED_HDFS_DIR}/../../hdfs_configs/bootstrap.sh:/etc/bootstrap.sh:ro
|
||||
- ${KERBERIZED_HDFS_DIR}/secrets:/usr/local/hadoop/etc/hadoop/conf
|
||||
- ${KERBERIZED_HDFS_DIR}/secrets/krb_long.conf:/etc/krb5.conf:ro
|
||||
- type: ${KERBERIZED_HDFS_FS:-tmpfs}
|
||||
source: ${KERBERIZED_HDFS_LOGS:-}
|
||||
target: /var/log/hadoop-hdfs
|
||||
ports:
|
||||
- 1006:1006
|
||||
- 50070:50070
|
||||
- 9010:9010
|
||||
- ${KERBERIZED_HDFS_NAME_EXTERNAL_PORT}:${KERBERIZED_HDFS_NAME_INTERNAL_PORT} #50070
|
||||
- ${KERBERIZED_HDFS_DATA_EXTERNAL_PORT}:${KERBERIZED_HDFS_DATA_INTERNAL_PORT} #1006
|
||||
depends_on:
|
||||
- hdfskerberos
|
||||
entrypoint: /etc/bootstrap.sh -d
|
||||
|
@ -23,13 +23,13 @@ services:
|
||||
# restart: always
|
||||
hostname: kerberized_kafka1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
- "9093:9093"
|
||||
- ${KERBERIZED_KAFKA_EXTERNAL_PORT}:${KERBERIZED_KAFKA_EXTERNAL_PORT}
|
||||
environment:
|
||||
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://:9093
|
||||
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:9093
|
||||
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://0.0.0.0:${KERBERIZED_KAFKA_EXTERNAL_PORT}
|
||||
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:${KERBERIZED_KAFKA_EXTERNAL_PORT}
|
||||
# KAFKA_LISTENERS: INSIDE://kerberized_kafka1:9092,OUTSIDE://kerberized_kafka1:19092
|
||||
# KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kerberized_kafka1:19092
|
||||
KAFKA_ADVERTISED_HOST_NAME: kerberized_kafka1
|
||||
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
|
||||
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
|
||||
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
|
||||
|
@ -6,8 +6,8 @@ services:
|
||||
volumes:
|
||||
- data1-1:/data1
|
||||
- ${MINIO_CERTS_DIR:-}:/certs
|
||||
ports:
|
||||
- "9001:9001"
|
||||
expose:
|
||||
- ${MINIO_PORT}
|
||||
environment:
|
||||
MINIO_ACCESS_KEY: minio
|
||||
MINIO_SECRET_KEY: minio123
|
||||
@ -20,14 +20,14 @@ services:
|
||||
# HTTP proxies for Minio.
|
||||
proxy1:
|
||||
image: yandex/clickhouse-s3-proxy
|
||||
ports:
|
||||
expose:
|
||||
- "8080" # Redirect proxy port
|
||||
- "80" # Reverse proxy port
|
||||
- "443" # Reverse proxy port (secure)
|
||||
|
||||
proxy2:
|
||||
image: yandex/clickhouse-s3-proxy
|
||||
ports:
|
||||
expose:
|
||||
- "8080"
|
||||
- "80"
|
||||
- "443"
|
||||
@ -35,7 +35,7 @@ services:
|
||||
# Empty container to run proxy resolver.
|
||||
resolver:
|
||||
image: yandex/clickhouse-python-bottle
|
||||
ports:
|
||||
expose:
|
||||
- "8080"
|
||||
tty: true
|
||||
depends_on:
|
||||
|
@ -7,5 +7,5 @@ services:
|
||||
MONGO_INITDB_ROOT_USERNAME: root
|
||||
MONGO_INITDB_ROOT_PASSWORD: clickhouse
|
||||
ports:
|
||||
- 27018:27017
|
||||
- ${MONGO_EXTERNAL_PORT}:${MONGO_INTERNAL_PORT}
|
||||
command: --profile=2 --verbose
|
||||
|
@ -1,10 +1,24 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
mysql1:
|
||||
mysql57:
|
||||
image: mysql:5.7
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: clickhouse
|
||||
ports:
|
||||
- 3308:3306
|
||||
command: --server_id=100 --log-bin='mysql-bin-1.log' --default-time-zone='+3:00' --gtid-mode="ON" --enforce-gtid-consistency
|
||||
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
|
||||
DATADIR: /mysql/
|
||||
expose:
|
||||
- ${MYSQL_PORT}
|
||||
command: --server_id=100
|
||||
--log-bin='mysql-bin-1.log'
|
||||
--default-time-zone='+3:00'
|
||||
--gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
||||
--log-error=/mysql/error.log
|
||||
--general-log=ON
|
||||
--general-log-file=/mysql/general.log
|
||||
volumes:
|
||||
- type: ${MYSQL_LOGS_FS:-tmpfs}
|
||||
source: ${MYSQL_LOGS:-}
|
||||
target: /mysql/
|
@ -12,3 +12,10 @@ services:
|
||||
--gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
||||
--log-error=/var/log/mysqld/error.log
|
||||
--general-log=ON
|
||||
--general-log-file=/var/log/mysqld/general.log
|
||||
volumes:
|
||||
- type: ${MYSQL_LOGS_FS:-tmpfs}
|
||||
source: ${MYSQL_LOGS:-}
|
||||
target: /var/log/mysqld/
|
@ -0,0 +1,23 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
mysql80:
|
||||
image: mysql:8.0
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: clickhouse
|
||||
MYSQL_ROOT_HOST: ${MYSQL_ROOT_HOST}
|
||||
DATADIR: /mysql/
|
||||
expose:
|
||||
- ${MYSQL8_PORT}
|
||||
command: --server_id=100 --log-bin='mysql-bin-1.log'
|
||||
--default_authentication_plugin='mysql_native_password'
|
||||
--default-time-zone='+3:00' --gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
||||
--log-error=/mysql/error.log
|
||||
--general-log=ON
|
||||
--general-log-file=/mysql/general.log
|
||||
volumes:
|
||||
- type: ${MYSQL8_LOGS_FS:-tmpfs}
|
||||
source: ${MYSQL8_LOGS:-}
|
||||
target: /mysql/
|
@ -1,15 +0,0 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
mysql8_0:
|
||||
image: mysql:8.0
|
||||
restart: 'no'
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: clickhouse
|
||||
ports:
|
||||
- 3309:3306
|
||||
command: --server_id=100 --log-bin='mysql-bin-1.log'
|
||||
--default_authentication_plugin='mysql_native_password'
|
||||
--default-time-zone='+3:00'
|
||||
--gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
@ -1,6 +1,6 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
mysql1:
|
||||
mysql_client:
|
||||
image: mysql:5.7
|
||||
restart: always
|
||||
environment:
|
||||
|
@ -5,19 +5,64 @@ services:
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: clickhouse
|
||||
ports:
|
||||
- 3348:3306
|
||||
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
|
||||
DATADIR: /mysql/
|
||||
expose:
|
||||
- ${MYSQL_CLUSTER_PORT}
|
||||
command: --server_id=100
|
||||
--log-bin='mysql-bin-2.log'
|
||||
--default-time-zone='+3:00'
|
||||
--gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
||||
--log-error=/mysql/2_error.log
|
||||
--general-log=ON
|
||||
--general-log-file=/mysql/2_general.log
|
||||
volumes:
|
||||
- type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
|
||||
source: ${MYSQL_CLUSTER_LOGS:-}
|
||||
target: /mysql/
|
||||
mysql3:
|
||||
image: mysql:5.7
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: clickhouse
|
||||
ports:
|
||||
- 3388:3306
|
||||
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
|
||||
DATADIR: /mysql/
|
||||
expose:
|
||||
- ${MYSQL_CLUSTER_PORT}
|
||||
command: --server_id=100
|
||||
--log-bin='mysql-bin-3.log'
|
||||
--default-time-zone='+3:00'
|
||||
--gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
||||
--log-error=/mysql/3_error.log
|
||||
--general-log=ON
|
||||
--general-log-file=/mysql/3_general.log
|
||||
volumes:
|
||||
- type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
|
||||
source: ${MYSQL_CLUSTER_LOGS:-}
|
||||
target: /mysql/
|
||||
mysql4:
|
||||
image: mysql:5.7
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: clickhouse
|
||||
ports:
|
||||
- 3368:3306
|
||||
MYSQL_ROOT_HOST: ${MYSQL_CLUSTER_ROOT_HOST}
|
||||
DATADIR: /mysql/
|
||||
expose:
|
||||
- ${MYSQL_CLUSTER_PORT}
|
||||
command: --server_id=100
|
||||
--log-bin='mysql-bin-4.log'
|
||||
--default-time-zone='+3:00'
|
||||
--gtid-mode="ON"
|
||||
--enforce-gtid-consistency
|
||||
--log-error-verbosity=3
|
||||
--log-error=/mysql/4_error.log
|
||||
--general-log=ON
|
||||
--general-log-file=/mysql/4_general.log
|
||||
volumes:
|
||||
- type: ${MYSQL_CLUSTER_LOGS_FS:-tmpfs}
|
||||
source: ${MYSQL_CLUSTER_LOGS:-}
|
||||
target: /mysql/
|
@ -2,12 +2,24 @@ version: '2.3'
|
||||
services:
|
||||
postgres1:
|
||||
image: postgres
|
||||
command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_PASSWORD: mysecretpassword
|
||||
ports:
|
||||
- 5432:5432
|
||||
expose:
|
||||
- ${POSTGRES_PORT}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
default:
|
||||
aliases:
|
||||
- postgre-sql.local
|
||||
default:
|
||||
aliases:
|
||||
- postgre-sql.local
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
POSTGRES_PASSWORD: mysecretpassword
|
||||
PGDATA: /postgres/data
|
||||
volumes:
|
||||
- type: ${POSTGRES_LOGS_FS:-tmpfs}
|
||||
source: ${POSTGRES_DIR:-}
|
||||
target: /postgres/
|
@ -2,22 +2,43 @@ version: '2.3'
|
||||
services:
|
||||
postgres2:
|
||||
image: postgres
|
||||
command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
POSTGRES_PASSWORD: mysecretpassword
|
||||
ports:
|
||||
- 5421:5432
|
||||
PGDATA: /postgres/data
|
||||
expose:
|
||||
- ${POSTGRES_PORT}
|
||||
volumes:
|
||||
- type: ${POSTGRES_LOGS_FS:-tmpfs}
|
||||
source: ${POSTGRES2_DIR:-}
|
||||
target: /postgres/
|
||||
postgres3:
|
||||
image: postgres
|
||||
command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
POSTGRES_PASSWORD: mysecretpassword
|
||||
ports:
|
||||
- 5441:5432
|
||||
PGDATA: /postgres/data
|
||||
expose:
|
||||
- ${POSTGRES_PORT}
|
||||
volumes:
|
||||
- type: ${POSTGRES_LOGS_FS:-tmpfs}
|
||||
source: ${POSTGRES3_DIR:-}
|
||||
target: /postgres/
|
||||
postgres4:
|
||||
image: postgres
|
||||
command: ["postgres", "-c", "logging_collector=on", "-c", "log_directory=/postgres/logs", "-c", "log_filename=postgresql.log", "-c", "log_statement=all"]
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_HOST_AUTH_METHOD: "trust"
|
||||
POSTGRES_PASSWORD: mysecretpassword
|
||||
ports:
|
||||
- 5461:5432
|
||||
PGDATA: /postgres/data
|
||||
expose:
|
||||
- ${POSTGRES_PORT}
|
||||
volumes:
|
||||
- type: ${POSTGRES_LOGS_FS:-tmpfs}
|
||||
source: ${POSTGRES4_DIR:-}
|
||||
target: /postgres/
|
@ -2,11 +2,15 @@ version: '2.3'
|
||||
|
||||
services:
|
||||
rabbitmq1:
|
||||
image: rabbitmq:3-management
|
||||
image: rabbitmq:3-management-alpine
|
||||
hostname: rabbitmq1
|
||||
ports:
|
||||
- "5672:5672"
|
||||
- "15672:15672"
|
||||
expose:
|
||||
- ${RABBITMQ_PORT}
|
||||
environment:
|
||||
RABBITMQ_DEFAULT_USER: "root"
|
||||
RABBITMQ_DEFAULT_PASS: "clickhouse"
|
||||
RABBITMQ_LOG_BASE: /rabbitmq_logs/
|
||||
volumes:
|
||||
- type: ${RABBITMQ_LOGS_FS:-tmpfs}
|
||||
source: ${RABBITMQ_LOGS:-}
|
||||
target: /rabbitmq_logs/
|
||||
|
@ -4,5 +4,5 @@ services:
|
||||
image: redis
|
||||
restart: always
|
||||
ports:
|
||||
- 6380:6379
|
||||
- ${REDIS_EXTERNAL_PORT}:${REDIS_INTERNAL_PORT}
|
||||
command: redis-server --requirepass "clickhouse" --databases 32
|
||||
|
@ -0,0 +1,75 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
zoo1:
|
||||
image: zookeeper:3.6.2
|
||||
restart: always
|
||||
environment:
|
||||
ZOO_TICK_TIME: 500
|
||||
ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
|
||||
ZOO_MY_ID: 1
|
||||
JVMFLAGS: -Dzookeeper.forceSync=no
|
||||
ZOO_SECURE_CLIENT_PORT: $ZOO_SECURE_CLIENT_PORT
|
||||
command: ["zkServer.sh", "start-foreground"]
|
||||
entrypoint: /zookeeper-ssl-entrypoint.sh
|
||||
volumes:
|
||||
- type: bind
|
||||
source: /misc/zookeeper-ssl-entrypoint.sh
|
||||
target: /zookeeper-ssl-entrypoint.sh
|
||||
- type: bind
|
||||
source: /misc/client.crt
|
||||
target: /clickhouse-config/client.crt
|
||||
- type: ${ZK_FS:-tmpfs}
|
||||
source: ${ZK_DATA1:-}
|
||||
target: /data
|
||||
- type: ${ZK_FS:-tmpfs}
|
||||
source: ${ZK_DATA_LOG1:-}
|
||||
target: /datalog
|
||||
zoo2:
|
||||
image: zookeeper:3.6.2
|
||||
restart: always
|
||||
environment:
|
||||
ZOO_TICK_TIME: 500
|
||||
ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888
|
||||
ZOO_MY_ID: 2
|
||||
JVMFLAGS: -Dzookeeper.forceSync=no
|
||||
ZOO_SECURE_CLIENT_PORT: $ZOO_SECURE_CLIENT_PORT
|
||||
|
||||
command: ["zkServer.sh", "start-foreground"]
|
||||
entrypoint: /zookeeper-ssl-entrypoint.sh
|
||||
volumes:
|
||||
- type: bind
|
||||
source: /misc/zookeeper-ssl-entrypoint.sh
|
||||
target: /zookeeper-ssl-entrypoint.sh
|
||||
- type: bind
|
||||
source: /misc/client.crt
|
||||
target: /clickhouse-config/client.crt
|
||||
- type: ${ZK_FS:-tmpfs}
|
||||
source: ${ZK_DATA2:-}
|
||||
target: /data
|
||||
- type: ${ZK_FS:-tmpfs}
|
||||
source: ${ZK_DATA_LOG2:-}
|
||||
target: /datalog
|
||||
zoo3:
|
||||
image: zookeeper:3.6.2
|
||||
restart: always
|
||||
environment:
|
||||
ZOO_TICK_TIME: 500
|
||||
ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
|
||||
ZOO_MY_ID: 3
|
||||
JVMFLAGS: -Dzookeeper.forceSync=no
|
||||
ZOO_SECURE_CLIENT_PORT: $ZOO_SECURE_CLIENT_PORT
|
||||
command: ["zkServer.sh", "start-foreground"]
|
||||
entrypoint: /zookeeper-ssl-entrypoint.sh
|
||||
volumes:
|
||||
- type: bind
|
||||
source: /misc/zookeeper-ssl-entrypoint.sh
|
||||
target: /zookeeper-ssl-entrypoint.sh
|
||||
- type: bind
|
||||
source: /misc/client.crt
|
||||
target: /clickhouse-config/client.crt
|
||||
- type: ${ZK_FS:-tmpfs}
|
||||
source: ${ZK_DATA3:-}
|
||||
target: /data
|
||||
- type: ${ZK_FS:-tmpfs}
|
||||
source: ${ZK_DATA_LOG3:-}
|
||||
target: /datalog
|
@ -2,17 +2,17 @@
|
||||
set -e
|
||||
|
||||
mkdir -p /etc/docker/
|
||||
cat > /etc/docker/daemon.json << EOF
|
||||
{
|
||||
echo '{
|
||||
"ipv6": true,
|
||||
"fixed-cidr-v6": "fd00::/8",
|
||||
"ip-forward": true,
|
||||
"log-level": "debug",
|
||||
"storage-driver": "overlay2",
|
||||
"insecure-registries" : ["dockerhub-proxy.sas.yp-c.yandex.net:5000"],
|
||||
"registry-mirrors" : ["http://dockerhub-proxy.sas.yp-c.yandex.net:5000"]
|
||||
}
|
||||
EOF
|
||||
}' | dd of=/etc/docker/daemon.json
|
||||
|
||||
dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 &>/var/log/somefile &
|
||||
dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --default-address-pool base=172.17.0.0/12,size=24 &>/ClickHouse/tests/integration/dockerd.log &
|
||||
|
||||
set +e
|
||||
reties=0
|
||||
|
19 docker/test/integration/runner/misc/client.crt Normal file
@ -0,0 +1,19 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
|
||||
BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow
|
||||
FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI
|
||||
4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T
|
||||
4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU
|
||||
7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj
|
||||
sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg
|
||||
pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC
|
||||
kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw
|
||||
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j
|
||||
4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr
|
||||
85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C
|
||||
L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD
|
||||
+UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L
|
||||
P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp
|
||||
0g==
|
||||
-----END CERTIFICATE-----
|
@ -81,8 +81,8 @@ if [[ ! -f "$ZOO_DATA_DIR/myid" ]]; then
|
||||
echo "${ZOO_MY_ID:-1}" > "$ZOO_DATA_DIR/myid"
|
||||
fi
|
||||
|
||||
mkdir -p $(dirname $ZOO_SSL_KEYSTORE_LOCATION)
|
||||
mkdir -p $(dirname $ZOO_SSL_TRUSTSTORE_LOCATION)
|
||||
mkdir -p "$(dirname $ZOO_SSL_KEYSTORE_LOCATION)"
|
||||
mkdir -p "$(dirname $ZOO_SSL_TRUSTSTORE_LOCATION)"
|
||||
|
||||
if [[ ! -f "$ZOO_SSL_KEYSTORE_LOCATION" ]]; then
|
||||
keytool -genkeypair -alias zookeeper -keyalg RSA -validity 365 -keysize 2048 -dname "cn=zookeeper" -keypass password -keystore $ZOO_SSL_KEYSTORE_LOCATION -storepass password -deststoretype pkcs12
|
@@ -94,4 +94,6 @@ select * from products limit 1;
└───────────────┴─────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/dictionary/) <!--hide-->
**See Also**

- [Dictionary function](../../../sql-reference/table-functions/dictionary.md#dictionary-function)
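
The table function referenced above can query the same dictionary directly. A one-line sketch, assuming the `products` dictionary from the example earlier on this page:

``` sql
SELECT * FROM dictionary('products') LIMIT 1;
```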
@@ -123,6 +123,19 @@ The `Insert` command creates one or more blocks (parts). When inserting into Rep
A large number of `replicated_deduplication_window` slows down `Inserts` because it needs to compare more entries.
The hash sum is calculated from the composition of the field names and types and the data of the inserted part (stream of bytes).

## non_replicated_deduplication_window {#non-replicated-deduplication-window}

The number of the most recently inserted blocks in the non-replicated [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table for which hash sums are stored to check for duplicates.

Possible values:

- Any positive integer.
- 0 (disable deduplication).

Default value: 0.

A deduplication mechanism is used, similar to replicated tables (see the [replicated_deduplication_window](#replicated-deduplication-window) setting). The hash sums of the created parts are written to a local file on disk.
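
A minimal sketch (not part of this patch) of enabling the setting on a non-replicated table; the table definition and the window size are illustrative assumptions:

``` sql
-- Illustrative only: table name, columns, and window size are assumptions.
CREATE TABLE events_local
(
    dt DateTime,
    id UInt64
)
ENGINE = MergeTree
ORDER BY id
SETTINGS non_replicated_deduplication_window = 100;

-- With the window enabled, re-inserting an identical block is skipped as a duplicate.
INSERT INTO events_local VALUES ('2021-01-01 00:00:00', 1);
INSERT INTO events_local VALUES ('2021-01-01 00:00:00', 1);
```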
## replicated_deduplication_window_seconds {#replicated-deduplication-window-seconds}

The number of seconds after which the hash sums of the inserted blocks are removed from Zookeeper.
@ -506,3 +506,256 @@ Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >=
|
||||
## sumMapFiltered(keys_to_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values}
|
||||
|
||||
Same behavior as [sumMap](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) except that an array of keys is passed as a parameter. This can be especially useful when working with a high cardinality of keys.
|
||||
|
||||
## sequenceNextNode {#sequenceNextNode}
|
||||
|
||||
Returns the value of the next event that matched an event chain.
|
||||
|
||||
_This is an experimental function; use `SET allow_experimental_funnel_functions = 1` to enable it._
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
sequenceNextNode(direction, base)(timestamp, event_column, base_condition, event1, event2, event3, ...)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
- `direction` - The direction in which to navigate the event chain.
|
||||
- forward : Moving forward
|
||||
- backward: Moving backward
|
||||
|
||||
- `base` - Used to set the base point.
|
||||
- head : Set the base point to the first event
|
||||
- tail : Set the base point to the last event
|
||||
- first_match : Set the base point to the first matched event1
|
||||
- last_match : Set the base point to the last matched event1
|
||||
|
||||
**Arguments**
|
||||
- `timestamp` — Name of the column containing the timestamp. Data types supported: `Date`, `DateTime` and other unsigned integer types.
|
||||
- `event_column` — Name of the column containing the value of the next event to be returned. Data types supported: `String` and `Nullable(String)`
|
||||
- `base_condition` — Condition that the base point must fulfill.
|
||||
- `cond` — Conditions describing the chain of events. `UInt8`
|
||||
|
||||
**Returned value**
|
||||
- `event_column[next_index]` - If the pattern is matched and the next value exists.
- `NULL` - If the pattern isn’t matched or the next value doesn't exist.
|
||||
|
||||
Type: `Nullable(String)`.
|
||||
|
||||
**Example**
|
||||
|
||||
It can be used when events are A->B->C->E->F and you want to know the event following B->C, which is E.
|
||||
|
||||
The query searching for the event following A->B:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE test_flow (
|
||||
dt DateTime,
|
||||
id int,
|
||||
page String)
|
||||
ENGINE = MergeTree()
|
||||
PARTITION BY toYYYYMMDD(dt)
|
||||
ORDER BY id;
|
||||
|
||||
INSERT INTO test_flow VALUES (1, 1, 'A') (2, 1, 'B') (3, 1, 'C') (4, 1, 'E') (5, 1, 'F');
|
||||
|
||||
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'A', page = 'A', page = 'B') as next_flow FROM test_flow GROUP BY id;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─id─┬─next_flow─┐
|
||||
│ 1 │ C │
|
||||
└────┴───────────┘
|
||||
```
|
||||
|
||||
**Behavior for `forward` and `head`**
|
||||
|
||||
```SQL
|
||||
ALTER TABLE test_flow DELETE WHERE 1 = 1 settings mutations_sync = 1;
|
||||
|
||||
INSERT INTO test_flow VALUES (1, 1, 'Home') (2, 1, 'Gift') (3, 1, 'Exit');
|
||||
INSERT INTO test_flow VALUES (1, 2, 'Home') (2, 2, 'Home') (3, 2, 'Gift') (4, 2, 'Basket');
|
||||
INSERT INTO test_flow VALUES (1, 3, 'Gift') (2, 3, 'Home') (3, 3, 'Gift') (4, 3, 'Basket');
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('forward', 'head')(dt, page, page = 'Home', page = 'Home', page = 'Gift') FROM test_flow GROUP BY id;
|
||||
|
||||
dt id page
|
||||
1970-01-01 09:00:01 1 Home // Base point, Matched with Home
|
||||
1970-01-01 09:00:02 1 Gift // Matched with Gift
|
||||
1970-01-01 09:00:03 1 Exit // The result
|
||||
|
||||
1970-01-01 09:00:01 2 Home // Base point, Matched with Home
|
||||
1970-01-01 09:00:02 2 Home // Unmatched with Gift
|
||||
1970-01-01 09:00:03 2 Gift
|
||||
1970-01-01 09:00:04 2 Basket
|
||||
|
||||
1970-01-01 09:00:01 3 Gift // Base point, Unmatched with Home
|
||||
1970-01-01 09:00:02 3 Home
|
||||
1970-01-01 09:00:03 3 Gift
|
||||
1970-01-01 09:00:04 3 Basket
|
||||
```
|
||||
|
||||
**Behavior for `backward` and `tail`**
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, page = 'Basket', page = 'Basket', page = 'Gift') FROM test_flow GROUP BY id;
|
||||
|
||||
dt id page
|
||||
1970-01-01 09:00:01 1 Home
|
||||
1970-01-01 09:00:02 1 Gift
|
||||
1970-01-01 09:00:03 1 Exit // Base point, Unmatched with Basket
|
||||
|
||||
1970-01-01 09:00:01 2 Home
|
||||
1970-01-01 09:00:02 2 Home // The result
|
||||
1970-01-01 09:00:03 2 Gift // Matched with Gift
|
||||
1970-01-01 09:00:04 2 Basket // Base point, Matched with Basket
|
||||
|
||||
1970-01-01 09:00:01 3 Gift
|
||||
1970-01-01 09:00:02 3 Home // The result
|
||||
1970-01-01 09:00:03 3 Gift // Base point, Matched with Gift
|
||||
1970-01-01 09:00:04 3 Basket // Base point, Matched with Basket
|
||||
```
|
||||
|
||||
|
||||
**Behavior for `forward` and `first_match`**
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id;
|
||||
|
||||
dt id page
|
||||
1970-01-01 09:00:01 1 Home
|
||||
1970-01-01 09:00:02 1 Gift // Base point
|
||||
1970-01-01 09:00:03 1 Exit // The result
|
||||
|
||||
1970-01-01 09:00:01 2 Home
|
||||
1970-01-01 09:00:02 2 Home
|
||||
1970-01-01 09:00:03 2 Gift // Base point
|
||||
1970-01-01 09:00:04 2 Basket // The result
|
||||
|
||||
1970-01-01 09:00:01 3 Gift // Base point
|
||||
1970-01-01 09:00:02 3 Home // The result
|
||||
1970-01-01 09:00:03 3 Gift
|
||||
1970-01-01 09:00:04 3 Basket
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id;
|
||||
|
||||
dt id page
|
||||
1970-01-01 09:00:01 1 Home
|
||||
1970-01-01 09:00:02 1 Gift // Base point
|
||||
1970-01-01 09:00:03 1 Exit // Unmatched with Home
|
||||
|
||||
1970-01-01 09:00:01 2 Home
|
||||
1970-01-01 09:00:02 2 Home
|
||||
1970-01-01 09:00:03 2 Gift // Base point
|
||||
1970-01-01 09:00:04 2 Basket // Unmatched with Home
|
||||
|
||||
1970-01-01 09:00:01 3 Gift // Base point
|
||||
1970-01-01 09:00:02 3 Home // Matched with Home
|
||||
1970-01-01 09:00:03 3 Gift // The result
|
||||
1970-01-01 09:00:04 3 Basket
|
||||
```
|
||||
|
||||
|
||||
**Behavior for `backward` and `last_match`**
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift') FROM test_flow GROUP BY id;
|
||||
|
||||
dt id page
|
||||
1970-01-01 09:00:01 1 Home // The result
|
||||
1970-01-01 09:00:02 1 Gift // Base point
|
||||
1970-01-01 09:00:03 1 Exit
|
||||
|
||||
1970-01-01 09:00:01 2 Home
|
||||
1970-01-01 09:00:02 2 Home // The result
|
||||
1970-01-01 09:00:03 2 Gift // Base point
|
||||
1970-01-01 09:00:04 2 Basket
|
||||
|
||||
1970-01-01 09:00:01 3 Gift
|
||||
1970-01-01 09:00:02 3 Home // The result
|
||||
1970-01-01 09:00:03 3 Gift // Base point
|
||||
1970-01-01 09:00:04 3 Basket
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, page = 'Gift', page = 'Gift', page = 'Home') FROM test_flow GROUP BY id;
|
||||
|
||||
dt id page
|
||||
1970-01-01 09:00:01 1 Home // Matched with Home, the result is null
|
||||
1970-01-01 09:00:02 1 Gift // Base point
|
||||
1970-01-01 09:00:03 1 Exit
|
||||
|
||||
1970-01-01 09:00:01 2 Home // The result
|
||||
1970-01-01 09:00:02 2 Home // Matched with Home
|
||||
1970-01-01 09:00:03 2 Gift // Base point
|
||||
1970-01-01 09:00:04 2 Basket
|
||||
|
||||
1970-01-01 09:00:01 3 Gift // The result
|
||||
1970-01-01 09:00:02 3 Home // Matched with Home
|
||||
1970-01-01 09:00:03 3 Gift // Base point
|
||||
1970-01-01 09:00:04 3 Basket
|
||||
```
|
||||
|
||||
|
||||
**Behavior for `base_condition`**
|
||||
|
||||
```SQL
|
||||
CREATE TABLE test_flow_basecond
|
||||
(
|
||||
`dt` DateTime,
|
||||
`id` int,
|
||||
`page` String,
|
||||
`ref` String
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMMDD(dt)
|
||||
ORDER BY id
|
||||
|
||||
INSERT INTO test_flow_basecond VALUES (1, 1, 'A', 'ref4') (2, 1, 'A', 'ref3') (3, 1, 'B', 'ref2') (4, 1, 'B', 'ref1');
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('forward', 'head')(dt, page, ref = 'ref1', page = 'A') FROM test_flow_basecond GROUP BY id;
|
||||
|
||||
dt id page ref
|
||||
1970-01-01 09:00:01 1 A ref4 // The head can't be the base point because the ref column of the head doesn't match 'ref1'.
|
||||
1970-01-01 09:00:02 1 A ref3
|
||||
1970-01-01 09:00:03 1 B ref2
|
||||
1970-01-01 09:00:04 1 B ref1
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('backward', 'tail')(dt, page, ref = 'ref4', page = 'B') FROM test_flow_basecond GROUP BY id;
|
||||
|
||||
dt id page ref
|
||||
1970-01-01 09:00:01 1 A ref4
|
||||
1970-01-01 09:00:02 1 A ref3
|
||||
1970-01-01 09:00:03 1 B ref2
|
||||
1970-01-01 09:00:04 1 B ref1 // The tail can't be the base point because the ref column of the tail doesn't match 'ref4'.
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('forward', 'first_match')(dt, page, ref = 'ref3', page = 'A') FROM test_flow_basecond GROUP BY id;
|
||||
|
||||
dt id page ref
|
||||
1970-01-01 09:00:01 1 A ref4 // This row can't be the base point because the ref column doesn't match 'ref3'.
|
||||
1970-01-01 09:00:02 1 A ref3 // Base point
|
||||
1970-01-01 09:00:03 1 B ref2 // The result
|
||||
1970-01-01 09:00:04 1 B ref1
|
||||
```
|
||||
|
||||
```SQL
|
||||
SELECT id, sequenceNextNode('backward', 'last_match')(dt, page, ref = 'ref2', page = 'B') FROM test_flow_basecond GROUP BY id;
|
||||
|
||||
dt id page ref
|
||||
1970-01-01 09:00:01 1 A ref4
|
||||
1970-01-01 09:00:02 1 A ref3 // The result
|
||||
1970-01-01 09:00:03 1 B ref2 // Base point
|
||||
1970-01-01 09:00:04 1 B ref1 // This row can't be the base point because the ref column doesn't match 'ref2'.
|
||||
```
|
||||
|
@@ -98,6 +98,10 @@ Setting fields:

When a dictionary with source `FILE` is created via a DDL command (`CREATE DICTIONARY ...`), the source file must be located in the `user_files` directory to prevent DB users from accessing arbitrary files on the ClickHouse node.
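
A minimal sketch (not from the patch) of such a DDL-created dictionary, reusing the `./user_files/os.tsv` source shown earlier on this page; the dictionary name, attributes, layout, and lifetime are illustrative assumptions:

``` sql
-- Illustrative only: attribute names, layout, and lifetime are assumptions.
-- The source file must be placed under the server's user_files directory.
CREATE DICTIONARY os_dict
(
    id UInt64,
    os_name String
)
PRIMARY KEY id
SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360);
```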
**See Also**

- [Dictionary function](../../../sql-reference/table-functions/dictionary.md#dictionary-function)

## Executable File {#dicts-external_dicts_dict_sources-executable}

Working with executable files depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data.
59 docs/en/sql-reference/table-functions/dictionary.md Normal file
@ -0,0 +1,59 @@
|
||||
---
|
||||
toc_priority: 54
|
||||
toc_title: dictionary function
|
||||
---
|
||||
|
||||
# dictionary {#dictionary-function}
|
||||
|
||||
Displays the [dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) data as a ClickHouse table. Works the same way as [Dictionary](../../engines/table-engines/special/dictionary.md) engine.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
dictionary('dict')
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `dict` — A dictionary name. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
A ClickHouse table.
|
||||
|
||||
**Example**
|
||||
|
||||
Input table `dictionary_source_table`:
|
||||
|
||||
``` text
|
||||
┌─id─┬─value─┐
|
||||
│ 0 │ 0 │
|
||||
│ 1 │ 1 │
|
||||
└────┴───────┘
|
||||
```
|
||||
|
||||
Create a dictionary:
|
||||
|
||||
``` sql
|
||||
CREATE DICTIONARY new_dictionary(id UInt64, value UInt64 DEFAULT 0) PRIMARY KEY id
|
||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dictionary_source_table')) LAYOUT(DIRECT());
|
||||
```
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM dictionary('new_dictionary');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─id─┬─value─┐
|
||||
│ 0 │ 0 │
|
||||
│ 1 │ 1 │
|
||||
└────┴───────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [Dictionary engine](../../engines/table-engines/special/dictionary.md#dictionary)
|
@ -90,3 +90,6 @@ select * from products limit 1;
|
||||
└───────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Функция dictionary](../../../sql-reference/table-functions/dictionary.md#dictionary-function)
|
||||
|
@ -120,6 +120,19 @@ Eсли суммарное число активных кусков во все
|
||||
Команда `Insert` создает один или несколько блоков (кусков). При вставке в Replicated таблицы ClickHouse для [дедупликации вставок](../../engines/table-engines/mergetree-family/replication.md) записывает в Zookeeper хеш-суммы созданных кусков. Но хранятся хеш-суммы не всех кусков, а только последние `replicated_deduplication_window`. Наиболее старые хеш-суммы удаляются из Zookeeper.
|
||||
Большое число `replicated_deduplication_window` замедляет `Insert`-ы. Хеш-сумма рассчитывается от композиции имен и типов полей, а также данных вставленного куска (потока байт).
|
||||
|
||||
## non_replicated_deduplication_window {#non-replicated-deduplication-window}
|
||||
|
||||
Количество последних вставленных блоков в нереплицированной [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) таблице, для которых хранятся хеш-суммы для проверки дубликатов.
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- Положительное целое число.
|
||||
- 0 (дедупликация отключена).
|
||||
|
||||
Значение по умолчанию: 0.
|
||||
|
||||
Используется механизм дедупликации, аналогичный реплицированным таблицам (см. описание настройки [replicated_deduplication_window](#replicated-deduplication-window)). Хеш-суммы вставленных кусков записываются в локальный файл на диске.
|
||||
|
||||
## replicated_deduplication_window_seconds {#replicated-deduplication-window-seconds}
|
||||
|
||||
Число секунд, после которых хеш-суммы вставленных блоков удаляются из Zookeeper.
|
||||
|
@ -97,6 +97,10 @@ SOURCE(FILE(path './user_files/os.tsv' format 'TabSeparated'))
|
||||
|
||||
Если словарь с источником `FILE` создается с помощью DDL-команды (`CREATE DICTIONARY ...`), источник словаря должен быть расположен в каталоге `user_files`. Иначе пользователи базы данных будут иметь доступ к произвольному файлу на узле ClickHouse.
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Функция dictionary](../../../sql-reference/table-functions/dictionary.md#dictionary-function)
|
||||
|
||||
## Исполняемый файл {#dicts-external_dicts_dict_sources-executable}
|
||||
|
||||
Работа с исполняемым файлом зависит от [размещения словаря в памяти](external-dicts-dict-layout.md). Если тип размещения словаря `cache` и `complex_key_cache`, то ClickHouse запрашивает необходимые ключи, отправляя запрос в `STDIN` исполняемого файла.
|
||||
|
59 docs/ru/sql-reference/table-functions/dictionary.md Normal file
@ -0,0 +1,59 @@
|
||||
---
|
||||
toc_priority: 54
|
||||
toc_title: dictionary
|
||||
---
|
||||
|
||||
# dictionary {#dictionary-function}
|
||||
|
||||
Отображает данные [словаря](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) как таблицу ClickHouse. Работает аналогично движку [Dictionary](../../engines/table-engines/special/dictionary.md).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
dictionary('dict')
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `dict` — имя словаря. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Таблица ClickHouse.
|
||||
|
||||
**Пример**
|
||||
|
||||
Входная таблица `dictionary_source_table`:
|
||||
|
||||
``` text
|
||||
┌─id─┬─value─┐
|
||||
│ 0 │ 0 │
|
||||
│ 1 │ 1 │
|
||||
└────┴───────┘
|
||||
```
|
||||
|
||||
Создаем словарь:
|
||||
|
||||
``` sql
|
||||
CREATE DICTIONARY new_dictionary(id UInt64, value UInt64 DEFAULT 0) PRIMARY KEY id
|
||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dictionary_source_table')) LAYOUT(DIRECT());
|
||||
```
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM dictionary('new_dictionary');
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─id─┬─value─┐
|
||||
│ 0 │ 0 │
|
||||
│ 1 │ 1 │
|
||||
└────┴───────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Движок Dictionary](../../engines/table-engines/special/dictionary.md#dictionary)
|
@ -27,7 +27,7 @@ pymdown-extensions==8.0
|
||||
python-slugify==4.0.1
|
||||
PyYAML==5.4.1
|
||||
repackage==0.7.3
|
||||
requests==2.24.0
|
||||
requests==2.25.1
|
||||
singledispatch==3.4.0.3
|
||||
six==1.15.0
|
||||
soupsieve==2.0.1
|
||||
|
@ -3,6 +3,9 @@
|
||||
#include <Core/Settings.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/Operators.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -90,7 +93,7 @@ void Suggest::loadImpl(Connection & connection, const ConnectionTimeouts & timeo
|
||||
/// NOTE: Once you will update the completion list,
|
||||
/// do not forget to update 01676_clickhouse_client_autocomplete.sh
|
||||
|
||||
std::stringstream query; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
|
||||
WriteBufferFromOwnString query;
|
||||
query << "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM ("
|
||||
"SELECT name FROM system.functions"
|
||||
" UNION ALL "
|
||||
|
144 src/AggregateFunctions/AggregateFunctionSequenceNextNode.cpp Normal file
@ -0,0 +1,144 @@
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/AggregateFunctionSequenceNextNode.h>
|
||||
#include <AggregateFunctions/Helpers.h>
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
#include <Core/Settings.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Common/CurrentThread.h>
|
||||
#include <ext/range.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
constexpr size_t max_events_size = 64;
|
||||
|
||||
constexpr size_t min_required_args = 3;
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int UNKNOWN_AGGREGATE_FUNCTION;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
template <typename T>
|
||||
inline AggregateFunctionPtr createAggregateFunctionSequenceNodeImpl(
|
||||
const DataTypePtr data_type, const DataTypes & argument_types, SequenceDirection direction, SequenceBase base)
|
||||
{
|
||||
return std::make_shared<SequenceNextNodeImpl<T, NodeString<max_events_size>>>(
|
||||
data_type, argument_types, base, direction, min_required_args);
|
||||
}
|
||||
|
||||
AggregateFunctionPtr
|
||||
createAggregateFunctionSequenceNode(const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings * settings)
|
||||
{
|
||||
if (settings == nullptr || !settings->allow_experimental_funnel_functions)
|
||||
{
|
||||
throw Exception(
|
||||
"Aggregate function " + name + " is experimental. Set `allow_experimental_funnel_functions` setting to enable it",
|
||||
ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
|
||||
}
|
||||
|
||||
if (parameters.size() < 2)
|
||||
throw Exception("Aggregate function '" + name + "' requires 2 parameters (direction, head)",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
auto expected_param_type = Field::Types::Which::String;
|
||||
if (parameters.at(0).getType() != expected_param_type || parameters.at(1).getType() != expected_param_type)
|
||||
throw Exception("Aggregate function '" + name + "' requires 'String' parameters",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
String param_dir = parameters.at(0).safeGet<String>();
|
||||
std::unordered_map<std::string, SequenceDirection> seq_dir_mapping{
|
||||
{"forward", SequenceDirection::Forward},
|
||||
{"backward", SequenceDirection::Backward},
|
||||
};
|
||||
if (!seq_dir_mapping.contains(param_dir))
|
||||
throw Exception{"Aggregate function " + name + " doesn't support a parameter: " + param_dir, ErrorCodes::BAD_ARGUMENTS};
|
||||
SequenceDirection direction = seq_dir_mapping[param_dir];
|
||||
|
||||
String param_base = parameters.at(1).safeGet<String>();
|
||||
std::unordered_map<std::string, SequenceBase> seq_base_mapping{
|
||||
{"head", SequenceBase::Head},
|
||||
{"tail", SequenceBase::Tail},
|
||||
{"first_match", SequenceBase::FirstMatch},
|
||||
{"last_match", SequenceBase::LastMatch},
|
||||
};
|
||||
if (!seq_base_mapping.contains(param_base))
|
||||
throw Exception{"Aggregate function " + name + " doesn't support a parameter: " + param_base, ErrorCodes::BAD_ARGUMENTS};
|
||||
SequenceBase base = seq_base_mapping[param_base];
|
||||
|
||||
if ((base == SequenceBase::Head && direction == SequenceDirection::Backward) ||
|
||||
(base == SequenceBase::Tail && direction == SequenceDirection::Forward))
|
||||
throw Exception(fmt::format(
|
||||
"Invalid argument combination of '{}' with '{}'", param_base, param_dir), ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
if (argument_types.size() < min_required_args)
|
||||
throw Exception("Aggregate function " + name + " requires at least " + toString(min_required_args) + " arguments.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
bool is_base_match_type = base == SequenceBase::FirstMatch || base == SequenceBase::LastMatch;
|
||||
if (is_base_match_type && argument_types.size() < min_required_args + 1)
|
||||
throw Exception(
|
||||
"Aggregate function " + name + " requires at least " + toString(min_required_args + 1) + " arguments when base is first_match or last_match.",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
if (argument_types.size() > max_events_size + min_required_args)
|
||||
throw Exception(fmt::format(
|
||||
"Aggregate function '{}' requires at most {} (timestamp, value_column, ...{} events) arguments.",
|
||||
name, max_events_size + min_required_args, max_events_size), ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
if (const auto * cond_arg = argument_types[2].get(); cond_arg && !isUInt8(cond_arg))
|
||||
throw Exception("Illegal type " + cond_arg->getName() + " of third argument of aggregate function "
|
||||
+ name + ", must be UInt8", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
for (const auto i : ext::range(min_required_args, argument_types.size()))
|
||||
{
|
||||
const auto * cond_arg = argument_types[i].get();
|
||||
if (!isUInt8(cond_arg))
|
||||
throw Exception(fmt::format(
|
||||
"Illegal type '{}' of {} argument of aggregate function '{}', must be UInt8", cond_arg->getName(), i + 1, name),
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
}
|
||||
|
||||
if (WhichDataType(argument_types[1].get()).idx != TypeIndex::String)
|
||||
throw Exception{"Illegal type " + argument_types[1].get()->getName()
|
||||
+ " of second argument of aggregate function " + name + ", must be String",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
||||
|
||||
DataTypePtr data_type = makeNullable(argument_types[1]);
|
||||
|
||||
WhichDataType timestamp_type(argument_types[0].get());
|
||||
if (timestamp_type.idx == TypeIndex::UInt8)
|
||||
return createAggregateFunctionSequenceNodeImpl<UInt8>(data_type, argument_types, direction, base);
|
||||
if (timestamp_type.idx == TypeIndex::UInt16)
|
||||
return createAggregateFunctionSequenceNodeImpl<UInt16>(data_type, argument_types, direction, base);
|
||||
if (timestamp_type.idx == TypeIndex::UInt32)
|
||||
return createAggregateFunctionSequenceNodeImpl<UInt32>(data_type, argument_types, direction, base);
|
||||
if (timestamp_type.idx == TypeIndex::UInt64)
|
||||
return createAggregateFunctionSequenceNodeImpl<UInt64>(data_type, argument_types, direction, base);
|
||||
if (timestamp_type.isDate())
|
||||
return createAggregateFunctionSequenceNodeImpl<DataTypeDate::FieldType>(data_type, argument_types, direction, base);
|
||||
if (timestamp_type.isDateTime())
|
||||
return createAggregateFunctionSequenceNodeImpl<DataTypeDateTime::FieldType>(data_type, argument_types, direction, base);
|
||||
|
||||
throw Exception{"Illegal type " + argument_types.front().get()->getName()
|
||||
+ " of first argument of aggregate function " + name + ", must be Unsigned Number, Date, DateTime",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void registerAggregateFunctionSequenceNextNode(AggregateFunctionFactory & factory)
|
||||
{
|
||||
AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = false };
|
||||
factory.registerFunction("sequenceNextNode", { createAggregateFunctionSequenceNode, properties });
|
||||
}
|
||||
|
||||
}
|
426 src/AggregateFunctions/AggregateFunctionSequenceNextNode.h Normal file
@ -0,0 +1,426 @@
|
||||
#pragma once
|
||||
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/Operators.h>
|
||||
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <Columns/ColumnNullable.h>
|
||||
|
||||
#include <Common/ArenaAllocator.h>
|
||||
#include <Common/assert_cast.h>
|
||||
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/AggregateFunctionNull.h>
|
||||
|
||||
#include <type_traits>
|
||||
#include <bitset>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
struct Settings;
|
||||
|
||||
enum class SequenceDirection
|
||||
{
|
||||
Forward,
|
||||
Backward,
|
||||
};
|
||||
|
||||
enum SequenceBase
|
||||
{
|
||||
Head,
|
||||
Tail,
|
||||
FirstMatch,
|
||||
LastMatch,
|
||||
};
|
||||
|
||||
/// NodeBase used to implement a linked list for storage of SequenceNextNodeImpl
|
||||
template <typename Node, size_t MaxEventsSize>
|
||||
struct NodeBase
|
||||
{
|
||||
UInt64 size; /// size of payload
|
||||
|
||||
DataTypeDateTime::FieldType event_time;
|
||||
std::bitset<MaxEventsSize> events_bitset;
|
||||
bool can_be_base;
|
||||
|
||||
char * data() { return reinterpret_cast<char *>(this) + sizeof(Node); }
|
||||
|
||||
const char * data() const { return reinterpret_cast<const char *>(this) + sizeof(Node); }
|
||||
|
||||
Node * clone(Arena * arena) const
|
||||
{
|
||||
return reinterpret_cast<Node *>(
|
||||
const_cast<char *>(arena->alignedInsert(reinterpret_cast<const char *>(this), sizeof(Node) + size, alignof(Node))));
|
||||
}
|
||||
|
||||
void write(WriteBuffer & buf) const
|
||||
{
|
||||
writeVarUInt(size, buf);
|
||||
buf.write(data(), size);
|
||||
|
||||
writeBinary(event_time, buf);
|
||||
UInt64 ulong_bitset = events_bitset.to_ulong();
|
||||
writeBinary(ulong_bitset, buf);
|
||||
writeBinary(can_be_base, buf);
|
||||
}
|
||||
|
||||
static Node * read(ReadBuffer & buf, Arena * arena)
|
||||
{
|
||||
UInt64 size;
|
||||
readVarUInt(size, buf);
|
||||
|
||||
Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
|
||||
node->size = size;
|
||||
buf.read(node->data(), size);
|
||||
|
||||
readBinary(node->event_time, buf);
|
||||
UInt64 ulong_bitset;
|
||||
readBinary(ulong_bitset, buf);
|
||||
node->events_bitset = ulong_bitset;
|
||||
readBinary(node->can_be_base, buf);
|
||||
|
||||
return node;
|
||||
}
|
||||
};
|
||||
|
||||
/// It stores String, timestamp, bitset of matched events.
|
||||
template <size_t MaxEventsSize>
|
||||
struct NodeString : public NodeBase<NodeString<MaxEventsSize>, MaxEventsSize>
|
||||
{
|
||||
using Node = NodeString<MaxEventsSize>;
|
||||
|
||||
static Node * allocate(const IColumn & column, size_t row_num, Arena * arena)
|
||||
{
|
||||
StringRef string = assert_cast<const ColumnString &>(column).getDataAt(row_num);
|
||||
|
||||
Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + string.size, alignof(Node)));
|
||||
node->size = string.size;
|
||||
memcpy(node->data(), string.data, string.size);
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
void insertInto(IColumn & column)
|
||||
{
|
||||
assert_cast<ColumnString &>(column).insertData(this->data(), this->size);
|
||||
}
|
||||
|
||||
bool compare(const Node * rhs) const
|
||||
{
|
||||
auto cmp = strncmp(this->data(), rhs->data(), std::min(this->size, rhs->size));
|
||||
return (cmp == 0) ? this->size < rhs->size : cmp < 0;
|
||||
}
|
||||
};
|
||||
|
||||
/// TODO : Support other types than string
|
||||
template <typename Node>
|
||||
struct SequenceNextNodeGeneralData
|
||||
{
|
||||
using Allocator = MixedAlignedArenaAllocator<alignof(Node *), 4096>;
|
||||
using Array = PODArray<Node *, 32, Allocator>;
|
||||
|
||||
Array value;
|
||||
bool sorted = false;
|
||||
|
||||
struct Comparator final
|
||||
{
|
||||
bool operator()(const Node * lhs, const Node * rhs) const
|
||||
{
|
||||
return lhs->event_time == rhs->event_time ? lhs->compare(rhs) : lhs->event_time < rhs->event_time;
|
||||
}
|
||||
};
|
||||
|
||||
void sort()
|
||||
{
|
||||
if (!sorted)
|
||||
{
|
||||
std::stable_sort(std::begin(value), std::end(value), Comparator{});
|
||||
sorted = true;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/// Implementation of sequenceFirstNode
|
||||
template <typename T, typename Node>
|
||||
class SequenceNextNodeImpl final
|
||||
: public IAggregateFunctionDataHelper<SequenceNextNodeGeneralData<Node>, SequenceNextNodeImpl<T, Node>>
|
||||
{
|
||||
using Self = SequenceNextNodeImpl<T, Node>;
|
||||
|
||||
using Data = SequenceNextNodeGeneralData<Node>;
|
||||
static Data & data(AggregateDataPtr place) { return *reinterpret_cast<Data *>(place); }
|
||||
static const Data & data(ConstAggregateDataPtr place) { return *reinterpret_cast<const Data *>(place); }
|
||||
|
||||
static constexpr size_t base_cond_column_idx = 2;
|
||||
static constexpr size_t event_column_idx = 1;
|
||||
|
||||
SequenceBase seq_base_kind;
|
||||
SequenceDirection seq_direction;
|
||||
const size_t min_required_args;
|
||||
|
||||
DataTypePtr & data_type;
|
||||
UInt8 events_size;
|
||||
UInt64 max_elems;
|
||||
public:
|
||||
SequenceNextNodeImpl(
|
||||
const DataTypePtr & data_type_,
|
||||
const DataTypes & arguments,
|
||||
SequenceBase seq_base_kind_,
|
||||
SequenceDirection seq_direction_,
|
||||
size_t min_required_args_,
|
||||
UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<SequenceNextNodeGeneralData<Node>, Self>({data_type_}, {})
|
||||
, seq_base_kind(seq_base_kind_)
|
||||
, seq_direction(seq_direction_)
|
||||
, min_required_args(min_required_args_)
|
||||
, data_type(this->argument_types[0])
|
||||
, events_size(arguments.size() - min_required_args)
|
||||
, max_elems(max_elems_)
|
||||
{
|
||||
}
|
||||
|
||||
String getName() const override { return "sequenceNextNode"; }
|
||||
|
||||
DataTypePtr getReturnType() const override { return data_type; }
|
||||
|
||||
AggregateFunctionPtr getOwnNullAdapter(
|
||||
const AggregateFunctionPtr & nested_function, const DataTypes & arguments, const Array & params,
|
||||
const AggregateFunctionProperties &) const override
|
||||
{
|
||||
/// Even though some values are mapped to aggregating key, it could return nulls for the below case.
|
||||
/// aggregated events: [A -> B -> C]
|
||||
/// events to find: [C -> D]
|
||||
/// [C -> D] is not matched to 'A -> B -> C' so that it returns null.
|
||||
return std::make_shared<AggregateFunctionNullVariadic<false, false, true>>(nested_function, arguments, params);
|
||||
}
|
||||
|
||||
void insert(Data & a, const Node * v, Arena * arena) const
|
||||
{
|
||||
++a.total_values;
|
||||
a.value.push_back(v->clone(arena), arena);
|
||||
}
|
||||
|
||||
void create(AggregateDataPtr place) const override
|
||||
{
|
||||
new (place) Data;
|
||||
}
|
||||
|
||||
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
|
||||
{
|
||||
Node * node = Node::allocate(*columns[event_column_idx], row_num, arena);
|
||||
|
||||
const auto timestamp = assert_cast<const ColumnVector<T> *>(columns[0])->getData()[row_num];
|
||||
|
||||
/// The events_bitset variable stores matched events in the form of bitset.
|
||||
/// Each Nth-bit indicates that the Nth-event are matched.
|
||||
/// For example, if event1 and event3 are matched, then the value of events_bitset is 0x00000005.
|
||||
/// 0x00000000
|
||||
/// + 1 (bit of event1)
|
||||
/// + 4 (bit of event3)
|
||||
node->events_bitset.reset();
|
||||
for (UInt8 i = 0; i < events_size; ++i)
|
||||
if (assert_cast<const ColumnVector<UInt8> *>(columns[min_required_args + i])->getData()[row_num])
|
||||
node->events_bitset.set(i);
|
||||
node->event_time = timestamp;
|
||||
|
||||
node->can_be_base = assert_cast<const ColumnVector<UInt8> *>(columns[base_cond_column_idx])->getData()[row_num];
|
||||
|
||||
data(place).value.push_back(node, arena);
|
||||
}
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
if (data(rhs).value.empty())
|
||||
return;
|
||||
|
||||
if (data(place).value.size() >= max_elems)
|
||||
return;
|
||||
|
||||
auto & a = data(place).value;
|
||||
auto & b = data(rhs).value;
|
||||
const auto a_size = a.size();
|
||||
|
||||
const UInt64 new_elems = std::min(data(rhs).value.size(), static_cast<size_t>(max_elems) - data(place).value.size());
|
||||
for (UInt64 i = 0; i < new_elems; ++i)
|
||||
a.push_back(b[i]->clone(arena), arena);
|
||||
|
||||
/// Either sort whole container or do so partially merging ranges afterwards
|
||||
using Comparator = typename SequenceNextNodeGeneralData<Node>::Comparator;
|
||||
|
||||
if (!data(place).sorted && !data(rhs).sorted)
|
||||
std::stable_sort(std::begin(a), std::end(a), Comparator{});
|
||||
else
|
||||
{
|
||||
const auto begin = std::begin(a);
|
||||
const auto middle = std::next(begin, a_size);
|
||||
const auto end = std::end(a);
|
||||
|
||||
if (!data(place).sorted)
|
||||
std::stable_sort(begin, middle, Comparator{});
|
||||
|
||||
if (!data(rhs).sorted)
|
||||
std::stable_sort(middle, end, Comparator{});
|
||||
|
||||
std::inplace_merge(begin, middle, end, Comparator{});
|
||||
}
|
||||
|
||||
data(place).sorted = true;
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
|
||||
{
|
||||
/// Temporarily do a const_cast to sort the values. It helps to reduce the computational burden on the initiator node.
|
||||
this->data(const_cast<AggregateDataPtr>(place)).sort();
|
||||
|
||||
writeBinary(data(place).sorted, buf);
|
||||
|
||||
auto & value = data(place).value;
|
||||
|
||||
size_t size = std::min(static_cast<size_t>(events_size + 1), value.size());
|
||||
switch (seq_base_kind)
|
||||
{
|
||||
case SequenceBase::Head:
|
||||
writeVarUInt(size, buf);
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
value[i]->write(buf);
|
||||
break;
|
||||
|
||||
case SequenceBase::Tail:
|
||||
writeVarUInt(size, buf);
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
value[value.size() - size + i]->write(buf);
|
||||
break;
|
||||
|
||||
case SequenceBase::FirstMatch:
|
||||
case SequenceBase::LastMatch:
|
||||
writeVarUInt(value.size(), buf);
|
||||
for (auto & node : value)
|
||||
node->write(buf);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
|
||||
{
|
||||
readBinary(data(place).sorted, buf);
|
||||
|
||||
UInt64 size;
|
||||
readVarUInt(size, buf);
|
||||
|
||||
if (unlikely(size == 0))
|
||||
return;
|
||||
|
||||
auto & value = data(place).value;
|
||||
|
||||
value.resize(size, arena);
|
||||
for (UInt64 i = 0; i < size; ++i)
|
||||
value[i] = Node::read(buf, arena);
|
||||
}
|
||||
|
||||
inline std::optional<size_t> getBaseIndex(Data & data) const
|
||||
{
|
||||
if (data.value.size() == 0)
|
||||
return {};
|
||||
|
||||
switch (seq_base_kind)
|
||||
{
|
||||
case SequenceBase::Head:
|
||||
if (data.value[0]->can_be_base)
|
||||
return 0;
|
||||
break;
|
||||
|
||||
case SequenceBase::Tail:
|
||||
if (data.value[data.value.size() - 1]->can_be_base)
|
||||
return data.value.size() - 1;
|
||||
break;
|
||||
|
||||
case SequenceBase::FirstMatch:
|
||||
for (size_t i = 0; i < data.value.size(); ++i)
|
||||
{
|
||||
if (data.value[i]->events_bitset.test(0) && data.value[i]->can_be_base)
|
||||
return i;
|
||||
}
|
||||
break;
|
||||
|
||||
case SequenceBase::LastMatch:
|
||||
for (size_t i = 0; i < data.value.size(); ++i)
|
||||
{
|
||||
auto reversed_i = data.value.size() - i - 1;
|
||||
if (data.value[reversed_i]->events_bitset.test(0) && data.value[reversed_i]->can_be_base)
|
||||
return reversed_i;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
/// This method returns the index of the next node that matched the events.
/// Matched events in the chain are represented as a bitmask.
/// The first matched event is 0x00000001, the second one is 0x00000002, the third one is 0x00000004, and so on.
|
||||
UInt32 getNextNodeIndex(Data & data) const
|
||||
{
|
||||
const UInt32 unmatched_idx = data.value.size();
|
||||
|
||||
if (data.value.size() <= events_size)
|
||||
return unmatched_idx;
|
||||
|
||||
data.sort();
|
||||
|
||||
std::optional<size_t> base_opt = getBaseIndex(data);
|
||||
if (!base_opt.has_value())
|
||||
return unmatched_idx;
|
||||
UInt32 base = static_cast<UInt32>(base_opt.value());
|
||||
|
||||
if (events_size == 0)
|
||||
return data.value.size() > 0 ? base : unmatched_idx;
|
||||
|
||||
UInt32 i = 0;
|
||||
switch (seq_direction)
|
||||
{
|
||||
case SequenceDirection::Forward:
|
||||
for (i = 0; i < events_size && base + i < data.value.size(); ++i)
|
||||
if (!data.value[base + i]->events_bitset.test(i))
|
||||
break;
|
||||
return (i == events_size) ? base + i : unmatched_idx;
|
||||
|
||||
case SequenceDirection::Backward:
|
||||
for (i = 0; i < events_size && i < base; ++i)
|
||||
if (!data.value[base - i]->events_bitset.test(i))
|
||||
break;
|
||||
return (i == events_size) ? base - i : unmatched_idx;
|
||||
}
|
||||
__builtin_unreachable();
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
auto & value = data(place).value;
|
||||
|
||||
UInt32 event_idx = getNextNodeIndex(this->data(place));
|
||||
if (event_idx < value.size())
|
||||
{
|
||||
ColumnNullable & to_concrete = assert_cast<ColumnNullable &>(to);
|
||||
value[event_idx]->insertInto(to_concrete.getNestedColumn());
|
||||
to_concrete.getNullMapData().push_back(0);
|
||||
}
|
||||
else
|
||||
{
|
||||
to.insertDefault();
|
||||
}
|
||||
}
|
||||
|
||||
bool allocatesMemoryInArena() const override { return true; }
|
||||
};
|
||||
|
||||
}
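The bitmask scheme described in the comments above (one bit per tracked event, scanning forward or backward from a base row) can be exercised in isolation. A minimal standalone sketch, not part of this patch; the 32-event cap, the row contents and the helper name are assumptions for illustration:

#include <bitset>
#include <cstddef>
#include <iostream>
#include <vector>

// One mask per aggregated row: bit N is set when the row satisfied event condition N.
using EventMask = std::bitset<32>;

// Forward matching in the spirit of getNextNodeIndex(): starting at `base`,
// row base + i must have bit i set for every i in [0, events_size);
// on success the "next node" would be the row at base + events_size.
static bool matchesForward(const std::vector<EventMask> & rows, size_t base, size_t events_size)
{
    for (size_t i = 0; i < events_size; ++i)
        if (base + i >= rows.size() || !rows[base + i].test(i))
            return false;
    return true;
}

int main()
{
    std::vector<EventMask> rows(4);
    rows[0].set(0);          // row 0 matched event1
    rows[1].set(1);          // row 1 matched event2
    rows[2].set(0).set(2);   // row 2 matched event1 and event3 -> 0x00000005

    std::cout << rows[2].to_ulong() << '\n';         // 5, as in the events_bitset comment
    std::cout << matchesForward(rows, 0, 2) << '\n'; // 1: event1 at row 0, event2 at row 1
}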
|
@ -17,7 +17,6 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int UNKNOWN_AGGREGATE_FUNCTION;
|
||||
}
|
||||
|
||||
namespace
|
||||
@ -25,15 +24,8 @@ namespace
|
||||
|
||||
template <template <typename> class Data>
|
||||
AggregateFunctionPtr
|
||||
createAggregateFunctionWindowFunnel(const std::string & name, const DataTypes & arguments, const Array & params, const Settings * settings)
|
||||
createAggregateFunctionWindowFunnel(const std::string & name, const DataTypes & arguments, const Array & params, const Settings *)
|
||||
{
|
||||
if (settings == nullptr || !settings->allow_experimental_funnel_functions)
|
||||
{
|
||||
throw Exception(
|
||||
"Aggregate function " + name + " is experimental. Set `allow_experimental_funnel_functions` setting to enable it",
|
||||
ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION);
|
||||
}
|
||||
|
||||
if (params.empty())
|
||||
throw Exception{"Aggregate function " + name + " requires at least one parameter: <window>, [option, [option, ...]]", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH};
|
||||
|
||||
|
@ -48,6 +48,7 @@ void registerAggregateFunctionRankCorrelation(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionMannWhitney(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionWelchTTest(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionStudentTTest(AggregateFunctionFactory &);
|
||||
void registerAggregateFunctionSequenceNextNode(AggregateFunctionFactory &);
|
||||
|
||||
class AggregateFunctionCombinatorFactory;
|
||||
void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &);
|
||||
@ -109,6 +110,7 @@ void registerAggregateFunctions()
|
||||
registerAggregateFunctionAggThrow(factory);
|
||||
registerAggregateFunctionRankCorrelation(factory);
|
||||
registerAggregateFunctionMannWhitney(factory);
|
||||
registerAggregateFunctionSequenceNextNode(factory);
|
||||
registerAggregateFunctionWelchTTest(factory);
|
||||
registerAggregateFunctionStudentTTest(factory);
|
||||
|
||||
|
@ -45,6 +45,7 @@ SRCS(
|
||||
AggregateFunctionRetention.cpp
|
||||
AggregateFunctionSegmentLengthSum.cpp
|
||||
AggregateFunctionSequenceMatch.cpp
|
||||
AggregateFunctionSequenceNextNode.cpp
|
||||
AggregateFunctionSimpleLinearRegression.cpp
|
||||
AggregateFunctionSimpleState.cpp
|
||||
AggregateFunctionState.cpp
|
||||
|
@ -35,7 +35,6 @@ ColumnNullable::ColumnNullable(MutableColumnPtr && nested_column_, MutableColumn
|
||||
throw Exception{"ColumnNullable cannot have constant null map", ErrorCodes::ILLEGAL_COLUMN};
|
||||
}
|
||||
|
||||
|
||||
void ColumnNullable::updateHashWithValue(size_t n, SipHash & hash) const
|
||||
{
|
||||
const auto & arr = getNullMapData();
|
||||
|
108
src/Common/Throttler.cpp
Normal file
@ -0,0 +1,108 @@
|
||||
#include <Common/Throttler.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <cmath>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event ThrottlerSleepMicroseconds;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LIMIT_EXCEEDED;
|
||||
}
|
||||
|
||||
/// Just 10^9.
|
||||
static constexpr auto NS = 1000000000UL;
|
||||
|
||||
/// Tracking window. Actually the size is not really important. We just want to avoid
/// throttling when there have been no actions for a long period of time.
|
||||
static const double window_ns = 7UL * NS;
|
||||
|
||||
void Throttler::add(size_t amount)
|
||||
{
|
||||
size_t new_count;
|
||||
/// This outer variable is always equal to smoothed_speed.
/// We use it to avoid a race condition.
|
||||
double current_speed = 0;
|
||||
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
auto now = clock_gettime_ns();
|
||||
/// If prev_ns is equal to zero (first `add` call) we know nothing about the speed
/// and don't track anything.
|
||||
if (max_speed && prev_ns != 0)
|
||||
{
|
||||
/// Time spent processing the given amount of bytes
|
||||
double time_spent = now - prev_ns;
|
||||
|
||||
/// The speed in bytes per second is equal to amount / time_spent in seconds
|
||||
auto new_speed = amount / (time_spent / NS);
|
||||
|
||||
/// We want to make old values of speed less important for our smoothed value,
/// so we decay it with a coefficient.
|
||||
auto decay_coeff = std::pow(0.5, time_spent / window_ns);
|
||||
|
||||
/// Weighted average between previous and new speed
|
||||
smoothed_speed = smoothed_speed * decay_coeff + (1 - decay_coeff) * new_speed;
|
||||
current_speed = smoothed_speed;
|
||||
}
|
||||
|
||||
count += amount;
|
||||
new_count = count;
|
||||
prev_ns = now;
|
||||
}
|
||||
|
||||
if (limit && new_count > limit)
|
||||
throw Exception(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED);
|
||||
|
||||
if (max_speed && current_speed > max_speed)
|
||||
{
|
||||
/// If we were too fast, then we have to sleep until our smoothed speed becomes <= max_speed
|
||||
int64_t sleep_time = -window_ns * std::log2(max_speed / current_speed);
|
||||
|
||||
if (sleep_time > 0)
|
||||
{
|
||||
accumulated_sleep += sleep_time;
|
||||
|
||||
sleepForNanoseconds(sleep_time);
|
||||
|
||||
accumulated_sleep -= sleep_time;
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time / 1000UL);
|
||||
}
|
||||
}
|
||||
|
||||
if (parent)
|
||||
parent->add(amount);
|
||||
}
|
||||
|
||||
void Throttler::reset()
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
count = 0;
|
||||
accumulated_sleep = 0;
|
||||
smoothed_speed = 0;
|
||||
prev_ns = 0;
|
||||
}
|
||||
|
||||
bool Throttler::isThrottling() const
|
||||
{
|
||||
if (accumulated_sleep != 0)
|
||||
return true;
|
||||
|
||||
if (parent)
|
||||
return parent->isThrottling();
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
}
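The update in Throttler::add is a plain exponentially weighted average whose decay depends on the elapsed time relative to the 7-second window, and the sleep duration inverts that decay. A hedged sketch of just the arithmetic, with made-up byte counts, timings and speed cap:

#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
    constexpr double NS = 1e9;
    constexpr double window_ns = 7 * NS;   // same hardcoded window as above
    const double max_speed = 10e6;         // assumed cap: 10 MB/s

    double smoothed_speed = 0;

    // Pretend two add() calls: 4 MB after 200 ms, then 8 MB after 300 ms.
    const double samples[][2] = {{4e6, 0.2 * NS}, {8e6, 0.3 * NS}};
    for (const auto & sample : samples)
    {
        const double amount = sample[0];
        const double time_spent = sample[1];
        const double new_speed = amount / (time_spent / NS);
        const double decay = std::pow(0.5, time_spent / window_ns);
        smoothed_speed = smoothed_speed * decay + (1 - decay) * new_speed;
    }
    std::cout << "smoothed speed: " << smoothed_speed << " bytes/s\n";

    if (smoothed_speed > max_speed)
    {
        // Same formula as Throttler::add: sleep until the smoothed speed decays below max_speed.
        const auto sleep_ns = static_cast<int64_t>(-window_ns * std::log2(max_speed / smoothed_speed));
        std::cout << "would sleep for " << sleep_ns << " ns\n";
    }
}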
|
@ -2,32 +2,16 @@
|
||||
|
||||
#include <mutex>
|
||||
#include <memory>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <common/sleep.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event ThrottlerSleepMicroseconds;
|
||||
}
|
||||
|
||||
#include <atomic>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LIMIT_EXCEEDED;
|
||||
}
|
||||
|
||||
|
||||
/** Allows you to limit the speed of something (in entities per second) using sleep.
|
||||
* Specifics of work:
|
||||
* - only the average speed is considered, from the moment of the first call of `add` function;
|
||||
* if there were periods with low speed, then during some time after them, the speed will be higher;
|
||||
* Tracks exponentially (pow of 1/2) smoothed speed with hardcoded window.
|
||||
* See more comments in .cpp file.
|
||||
*
|
||||
* Also allows you to set a limit on the maximum number of entities. If exceeded, an exception will be thrown.
|
||||
*/
|
||||
@ -41,49 +25,9 @@ public:
|
||||
const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {}
|
||||
|
||||
void add(const size_t amount)
|
||||
{
|
||||
size_t new_count;
|
||||
UInt64 elapsed_ns = 0;
|
||||
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
if (max_speed)
|
||||
{
|
||||
if (0 == count)
|
||||
{
|
||||
watch.start();
|
||||
elapsed_ns = 0;
|
||||
}
|
||||
else
|
||||
elapsed_ns = watch.elapsed();
|
||||
}
|
||||
|
||||
count += amount;
|
||||
new_count = count;
|
||||
}
|
||||
|
||||
if (limit && new_count > limit)
|
||||
throw Exception(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED);
|
||||
|
||||
if (max_speed)
|
||||
{
|
||||
/// How much time to wait for the average speed to become `max_speed`.
|
||||
UInt64 desired_ns = new_count * 1000000000 / max_speed;
|
||||
|
||||
if (desired_ns > elapsed_ns)
|
||||
{
|
||||
UInt64 sleep_ns = desired_ns - elapsed_ns;
|
||||
sleepForNanoseconds(sleep_ns);
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_ns / 1000UL);
|
||||
}
|
||||
}
|
||||
|
||||
if (parent)
|
||||
parent->add(amount);
|
||||
}
|
||||
/// Calculates the smoothed speed, sleeps if required and throws exception on
|
||||
/// limit overflow.
|
||||
void add(size_t amount);
|
||||
|
||||
/// Not thread safe
|
||||
void setParent(const std::shared_ptr<Throttler> & parent_)
|
||||
@ -91,21 +35,23 @@ public:
|
||||
parent = parent_;
|
||||
}
|
||||
|
||||
void reset()
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
/// Reset all of the throttler's internal stats
|
||||
void reset();
|
||||
|
||||
count = 0;
|
||||
watch.reset();
|
||||
}
|
||||
/// Whether the throttler has already accumulated some sleep time and is currently throttling.
|
||||
bool isThrottling() const;
|
||||
|
||||
private:
|
||||
size_t count = 0;
|
||||
const size_t max_speed = 0;
|
||||
const UInt64 limit = 0; /// 0 - not limited.
|
||||
size_t count{0};
|
||||
const size_t max_speed{0};
|
||||
const uint64_t limit{0}; /// 0 - not limited.
|
||||
const char * limit_exceeded_exception_message = nullptr;
|
||||
Stopwatch watch {CLOCK_MONOTONIC_COARSE};
|
||||
std::mutex mutex;
|
||||
std::atomic<uint64_t> accumulated_sleep{0};
|
||||
/// Smoothed value of current speed. Updated in `add` method.
|
||||
double smoothed_speed{0};
|
||||
/// previous `add` call time (in nanoseconds)
|
||||
uint64_t prev_ns{0};
|
||||
|
||||
/// Used to implement a hierarchy of throttlers
|
||||
std::shared_ptr<Throttler> parent;
|
||||
|
@ -80,6 +80,7 @@ SRCS(
|
||||
ThreadPool.cpp
|
||||
ThreadProfileEvents.cpp
|
||||
ThreadStatus.cpp
|
||||
Throttler.cpp
|
||||
TimerDescriptor.cpp
|
||||
TraceCollector.cpp
|
||||
UTF8Helpers.cpp
|
||||
|
@ -83,6 +83,8 @@ class IColumn;
|
||||
M(UInt64, background_schedule_pool_size, 16, "Number of threads performing background tasks for replicated tables, dns cache updates. Only has meaning at server startup.", 0) \
|
||||
M(UInt64, background_message_broker_schedule_pool_size, 16, "Number of threads performing background tasks for message streaming. Only has meaning at server startup.", 0) \
|
||||
M(UInt64, background_distributed_schedule_pool_size, 16, "Number of threads performing background tasks for distributed sends. Only has meaning at server startup.", 0) \
|
||||
M(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited. Only has meaning at server startup.", 0) \
|
||||
M(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited. Only has meaning at server startup.", 0) \
|
||||
\
|
||||
M(Milliseconds, distributed_directory_monitor_sleep_time_ms, 100, "Sleep time for StorageDistributed DirectoryMonitors, in case of any errors delay grows exponentially.", 0) \
|
||||
M(Milliseconds, distributed_directory_monitor_max_sleep_time_ms, 30000, "Maximum sleep time for StorageDistributed DirectoryMonitors, it limits exponential growth too.", 0) \
|
||||
@ -468,13 +470,15 @@ class IColumn;
|
||||
M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \
|
||||
M(UInt64, offset, 0, "Offset on read rows from the most 'end' result for select query", 0) \
|
||||
\
|
||||
/** Experimental functions */ \
|
||||
M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \
|
||||
\
|
||||
\
|
||||
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
|
||||
M(UInt64, max_memory_usage_for_all_queries, 0, "Obsolete setting, does nothing.", 0) \
|
||||
M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing.", 0) \
|
||||
M(Bool, enable_debug_queries, false, "Obsolete setting, does nothing.", 0) \
|
||||
M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing.", 0) \
|
||||
M(Bool, allow_experimental_funnel_functions, true, "Obsolete setting, does nothing.", 0) \
|
||||
M(Bool, allow_experimental_bigint_types, true, "Obsolete setting, does nothing.", 0) \
|
||||
M(HandleKafkaErrorMode, handle_kafka_error_mode, HandleKafkaErrorMode::DEFAULT, "Obsolete setting, does nothing.", 0) \
|
||||
M(Bool, database_replicated_ddl_output, true, "Obsolete setting, does nothing.", 0) \
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/Throttler.h>
|
||||
#include <IO/ReadBuffer.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
#include <IO/copyData.h>
|
||||
@ -14,7 +15,7 @@ namespace ErrorCodes
|
||||
namespace
|
||||
{
|
||||
|
||||
void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t bytes, const std::atomic<int> * is_cancelled)
|
||||
void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t bytes, const std::atomic<int> * is_cancelled, ThrottlerPtr throttler)
|
||||
{
|
||||
/// If read to the end of the buffer, eof() either fills the buffer with new data and moves the cursor to the beginning, or returns false.
|
||||
while (bytes > 0 && !from.eof())
|
||||
@ -27,13 +28,16 @@ void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t
|
||||
to.write(from.position(), count);
|
||||
from.position() += count;
|
||||
bytes -= count;
|
||||
|
||||
if (throttler)
|
||||
throttler->add(count);
|
||||
}
|
||||
|
||||
if (check_bytes && bytes > 0)
|
||||
throw Exception("Attempt to read after EOF.", ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF);
|
||||
}
|
||||
|
||||
void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t bytes, std::function<void()> cancellation_hook)
|
||||
void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t bytes, std::function<void()> cancellation_hook, ThrottlerPtr throttler)
|
||||
{
|
||||
/// If read to the end of the buffer, eof() either fills the buffer with new data and moves the cursor to the beginning, or returns false.
|
||||
while (bytes > 0 && !from.eof())
|
||||
@ -46,6 +50,9 @@ void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t
|
||||
to.write(from.position(), count);
|
||||
from.position() += count;
|
||||
bytes -= count;
|
||||
|
||||
if (throttler)
|
||||
throttler->add(count);
|
||||
}
|
||||
|
||||
if (check_bytes && bytes > 0)
|
||||
@ -56,32 +63,42 @@ void copyDataImpl(ReadBuffer & from, WriteBuffer & to, bool check_bytes, size_t
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to)
|
||||
{
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), nullptr);
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), nullptr, nullptr);
|
||||
}
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, const std::atomic<int> & is_cancelled)
|
||||
{
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), &is_cancelled);
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), &is_cancelled, nullptr);
|
||||
}
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, std::function<void()> cancellation_hook)
|
||||
{
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), cancellation_hook);
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), cancellation_hook, nullptr);
|
||||
}
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, size_t bytes)
|
||||
{
|
||||
copyDataImpl(from, to, true, bytes, nullptr);
|
||||
copyDataImpl(from, to, true, bytes, nullptr, nullptr);
|
||||
}
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, size_t bytes, const std::atomic<int> & is_cancelled)
|
||||
{
|
||||
copyDataImpl(from, to, true, bytes, &is_cancelled);
|
||||
copyDataImpl(from, to, true, bytes, &is_cancelled, nullptr);
|
||||
}
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, size_t bytes, std::function<void()> cancellation_hook)
|
||||
{
|
||||
copyDataImpl(from, to, true, bytes, cancellation_hook);
|
||||
copyDataImpl(from, to, true, bytes, cancellation_hook, nullptr);
|
||||
}
|
||||
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, const std::atomic<int> & is_cancelled, ThrottlerPtr throttler)
|
||||
{
|
||||
copyDataImpl(from, to, false, std::numeric_limits<size_t>::max(), &is_cancelled, throttler);
|
||||
}
|
||||
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, size_t bytes, const std::atomic<int> & is_cancelled, ThrottlerPtr throttler)
|
||||
{
|
||||
copyDataImpl(from, to, true, bytes, &is_cancelled, throttler);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -9,22 +9,26 @@ namespace DB
|
||||
|
||||
class ReadBuffer;
|
||||
class WriteBuffer;
|
||||
class Throttler;
|
||||
|
||||
using ThrottlerPtr = std::shared_ptr<Throttler>;
|
||||
|
||||
|
||||
/** Copies data from ReadBuffer to WriteBuffer, all that is.
|
||||
*/
|
||||
/// Copies data from ReadBuffer to WriteBuffer, all that is.
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to);
|
||||
|
||||
/** Copies `bytes` bytes from ReadBuffer to WriteBuffer. If there are no `bytes` bytes, then throws an exception.
|
||||
*/
|
||||
/// Copies `bytes` bytes from ReadBuffer to WriteBuffer. If there are no `bytes` bytes, then throws an exception.
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, size_t bytes);
|
||||
|
||||
/** The same, with the condition to cancel.
|
||||
*/
|
||||
/// The same, with the condition to cancel.
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, const std::atomic<int> & is_cancelled);
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, size_t bytes, const std::atomic<int> & is_cancelled);
|
||||
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, std::function<void()> cancellation_hook);
|
||||
void copyData(ReadBuffer & from, WriteBuffer & to, size_t bytes, std::function<void()> cancellation_hook);
|
||||
|
||||
/// Same as above, but also uses a throttler to limit the maximum speed
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, const std::atomic<int> & is_cancelled, ThrottlerPtr throttler);
|
||||
void copyDataWithThrottler(ReadBuffer & from, WriteBuffer & to, size_t bytes, const std::atomic<int> & is_cancelled, ThrottlerPtr throttler);
|
||||
|
||||
}
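A hypothetical call site for the new throttled overloads; the file paths, the 50 MB/s figure and the helper name are illustrative only, and error handling is omitted:

#include <atomic>
#include <memory>
#include <string>

#include <Common/Throttler.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/copyData.h>

// Copy a file while capping the transfer rate at roughly 50 MB/s.
void copyFileThrottled(const std::string & from_path, const std::string & to_path)
{
    DB::ReadBufferFromFile in(from_path);
    DB::WriteBufferFromFile out(to_path);

    std::atomic<int> is_cancelled{0};
    auto throttler = std::make_shared<DB::Throttler>(50'000'000 /* bytes per second */);

    DB::copyDataWithThrottler(in, out, is_cancelled, throttler);
    out.next(); // flush the write buffer
}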
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/formatReadable.h>
|
||||
#include <Common/Throttler.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
#include <Coordination/KeeperStorageDispatcher.h>
|
||||
#include <Compression/ICompressionCodec.h>
|
||||
@ -363,6 +364,10 @@ struct ContextSharedPart
|
||||
mutable std::optional<BackgroundSchedulePool> schedule_pool; /// A thread pool that can run different jobs in background (used in replicated tables)
|
||||
mutable std::optional<BackgroundSchedulePool> distributed_schedule_pool; /// A thread pool that can run different jobs in background (used for distributed sends)
|
||||
mutable std::optional<BackgroundSchedulePool> message_broker_schedule_pool; /// A thread pool that can run different jobs in background (used for message brokers, like RabbitMQ and Kafka)
|
||||
|
||||
mutable ThrottlerPtr replicated_fetches_throttler; /// A server-wide throttler for replicated fetches
|
||||
mutable ThrottlerPtr replicated_sends_throttler; /// A server-wide throttler for replicated sends
|
||||
|
||||
MultiVersion<Macros> macros; /// Substitutions extracted from config.
|
||||
std::unique_ptr<DDLWorker> ddl_worker; /// Process ddl commands from zk.
|
||||
/// Rules for selecting the compression settings, depending on the size of the part.
|
||||
@ -1628,6 +1633,26 @@ BackgroundSchedulePool & Context::getMessageBrokerSchedulePool() const
|
||||
return *shared->message_broker_schedule_pool;
|
||||
}
|
||||
|
||||
ThrottlerPtr Context::getReplicatedFetchesThrottler() const
|
||||
{
|
||||
auto lock = getLock();
|
||||
if (!shared->replicated_fetches_throttler)
|
||||
shared->replicated_fetches_throttler = std::make_shared<Throttler>(
|
||||
settings.max_replicated_fetches_network_bandwidth_for_server);
|
||||
|
||||
return shared->replicated_fetches_throttler;
|
||||
}
|
||||
|
||||
ThrottlerPtr Context::getReplicatedSendsThrottler() const
|
||||
{
|
||||
auto lock = getLock();
|
||||
if (!shared->replicated_sends_throttler)
|
||||
shared->replicated_sends_throttler = std::make_shared<Throttler>(
|
||||
settings.max_replicated_sends_network_bandwidth_for_server);
|
||||
|
||||
return shared->replicated_sends_throttler;
|
||||
}
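Throttler keeps a parent pointer and charges it on every add(), so the server-wide throttlers created here can be layered with a narrower per-operation limit. A hedged sketch of that wiring; the 100 MB/s and 10 MB/s figures are invented:

#include <memory>
#include <Common/Throttler.h>

void exampleThrottlerHierarchy()
{
    // Server-wide budget shared by all replicated fetches.
    auto server_wide = std::make_shared<DB::Throttler>(100'000'000);

    // Narrower budget for a single fetch; its add() also charges the parent.
    auto per_fetch = std::make_shared<DB::Throttler>(10'000'000);
    per_fetch->setParent(server_wide); // not thread safe, set before use

    per_fetch->add(1'000'000); // accounts 1 MB against both limits
}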
|
||||
|
||||
bool Context::hasDistributedDDL() const
|
||||
{
|
||||
return getConfigRef().has("distributed_ddl");
|
||||
|
@ -113,6 +113,9 @@ using VolumePtr = std::shared_ptr<IVolume>;
|
||||
struct NamedSession;
|
||||
struct BackgroundTaskSchedulingSettings;
|
||||
|
||||
class Throttler;
|
||||
using ThrottlerPtr = std::shared_ptr<Throttler>;
|
||||
|
||||
class ZooKeeperMetadataTransaction;
|
||||
using ZooKeeperMetadataTransactionPtr = std::shared_ptr<ZooKeeperMetadataTransaction>;
|
||||
|
||||
@ -657,6 +660,9 @@ public:
|
||||
BackgroundSchedulePool & getMessageBrokerSchedulePool() const;
|
||||
BackgroundSchedulePool & getDistributedSchedulePool() const;
|
||||
|
||||
ThrottlerPtr getReplicatedFetchesThrottler() const;
|
||||
ThrottlerPtr getReplicatedSendsThrottler() const;
|
||||
|
||||
/// Has distributed_ddl configuration or not.
|
||||
bool hasDistributedDDL() const;
|
||||
void setDDLWorker(std::unique_ptr<DDLWorker> ddl_worker);
|
||||
|
@ -4,11 +4,14 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
ASTPtr ASTProjectionDeclaration::clone() const
|
||||
{
|
||||
auto clone = std::make_shared<ASTProjectionDeclaration>(*this);
|
||||
clone->cloneChildren();
|
||||
return clone;
|
||||
auto res = std::make_shared<ASTProjectionDeclaration>();
|
||||
res->name = name;
|
||||
if (query)
|
||||
res->set(res->query, query->clone());
|
||||
return res;
|
||||
}
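The rewritten clone() re-attaches the child through set() instead of copying the whole node, which keeps the raw query pointer and the children list pointing at the same freshly cloned object. A simplified sketch of that ownership pattern; the types and the set() helper are illustrative stand-ins, not the actual IAST API:

#include <memory>
#include <vector>

struct Node
{
    std::vector<std::shared_ptr<Node>> children;

    // Store a non-owning pointer in `field` and keep ownership in `children`,
    // mirroring how ASTProjectionDeclaration holds `IAST * query` plus IAST::children.
    template <typename T>
    void set(T *& field, std::shared_ptr<Node> child)
    {
        field = static_cast<T *>(child.get());
        children.push_back(std::move(child));
    }
};

struct ProjectionDecl : Node
{
    Node * query = nullptr;

    std::shared_ptr<ProjectionDecl> clone() const
    {
        auto res = std::make_shared<ProjectionDecl>();
        if (query)
            res->set(res->query, std::make_shared<Node>(*query)); // stand-in for query->clone()
        return res;
    }
};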
|
||||
|
||||
|
||||
|
@ -12,7 +12,7 @@ class ASTProjectionDeclaration : public IAST
|
||||
{
|
||||
public:
|
||||
String name;
|
||||
ASTPtr query;
|
||||
IAST * query;
|
||||
|
||||
/** Get the text that identifies this element. */
|
||||
String getID(char) const override { return "Projection"; }
|
||||
|
@ -37,7 +37,6 @@ ASTPtr ASTProjectionSelectQuery::clone() const
|
||||
*/
|
||||
CLONE(Expression::WITH);
|
||||
CLONE(Expression::SELECT);
|
||||
CLONE(Expression::WHERE);
|
||||
CLONE(Expression::GROUP_BY);
|
||||
CLONE(Expression::ORDER_BY);
|
||||
|
||||
@ -47,13 +46,6 @@ ASTPtr ASTProjectionSelectQuery::clone() const
|
||||
}
|
||||
|
||||
|
||||
void ASTProjectionSelectQuery::updateTreeHashImpl(SipHash & hash_state) const
|
||||
{
|
||||
hash_state.update(distinct);
|
||||
IAST::updateTreeHashImpl(hash_state);
|
||||
}
|
||||
|
||||
|
||||
void ASTProjectionSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
|
||||
{
|
||||
frame.current_select = this;
|
||||
@ -67,16 +59,10 @@ void ASTProjectionSelectQuery::formatImpl(const FormatSettings & s, FormatState
|
||||
s.ostr << s.nl_or_ws;
|
||||
}
|
||||
|
||||
s.ostr << (s.hilite ? hilite_keyword : "") << indent_str << "SELECT " << (distinct ? "DISTINCT " : "") << (s.hilite ? hilite_none : "");
|
||||
s.ostr << (s.hilite ? hilite_keyword : "") << indent_str << "SELECT " << (s.hilite ? hilite_none : "");
|
||||
|
||||
s.one_line ? select()->formatImpl(s, state, frame) : select()->as<ASTExpressionList &>().formatImplMultiline(s, state, frame);
|
||||
|
||||
if (where())
|
||||
{
|
||||
s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "WHERE " << (s.hilite ? hilite_none : "");
|
||||
where()->formatImpl(s, state, frame);
|
||||
}
|
||||
|
||||
if (groupBy())
|
||||
{
|
||||
s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "GROUP BY " << (s.hilite ? hilite_none : "");
|
||||
@ -129,8 +115,6 @@ ASTPtr ASTProjectionSelectQuery::cloneToASTSelect() const
|
||||
select_query->setExpression(ASTSelectQuery::Expression::WITH, with()->clone());
|
||||
if (select())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::SELECT, select()->clone());
|
||||
if (where())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::WHERE, where()->clone());
|
||||
if (groupBy())
|
||||
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, groupBy()->clone());
|
||||
// Get rid of orderBy. It's used for projection definition only
|
||||
|
@ -15,7 +15,6 @@ public:
|
||||
{
|
||||
WITH,
|
||||
SELECT,
|
||||
WHERE,
|
||||
GROUP_BY,
|
||||
ORDER_BY,
|
||||
};
|
||||
@ -25,14 +24,10 @@ public:
|
||||
|
||||
ASTPtr clone() const override;
|
||||
|
||||
bool distinct = false;
|
||||
|
||||
ASTPtr & refSelect() { return getExpression(Expression::SELECT); }
|
||||
ASTPtr & refWhere() { return getExpression(Expression::WHERE); }
|
||||
|
||||
const ASTPtr with() const { return getExpression(Expression::WITH); }
|
||||
const ASTPtr select() const { return getExpression(Expression::SELECT); }
|
||||
const ASTPtr where() const { return getExpression(Expression::WHERE); }
|
||||
const ASTPtr groupBy() const { return getExpression(Expression::GROUP_BY); }
|
||||
const ASTPtr orderBy() const { return getExpression(Expression::ORDER_BY); }
|
||||
|
||||
@ -47,8 +42,6 @@ public:
|
||||
return {};
|
||||
}
|
||||
|
||||
void updateTreeHashImpl(SipHash & hash_state) const override;
|
||||
|
||||
ASTPtr cloneToASTSelect() const;
|
||||
|
||||
protected:
|
||||
|
@ -91,6 +91,15 @@ PtrTo<AlterTableClause> AlterTableClause::createAddIndex(bool if_not_exists, Ptr
|
||||
return query;
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createAddProjection(bool if_not_exists, PtrTo<TableElementExpr> element, PtrTo<Identifier> after)
|
||||
{
|
||||
assert(element->getType() == TableElementExpr::ExprType::PROJECTION);
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::ADD_PROJECTION, {element, after}));
|
||||
query->if_not_exists = if_not_exists;
|
||||
return query;
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createAttach(PtrTo<PartitionClause> clause, PtrTo<TableIdentifier> from)
|
||||
{
|
||||
@ -98,9 +107,23 @@ PtrTo<AlterTableClause> AlterTableClause::createAttach(PtrTo<PartitionClause> cl
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createClear(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in)
|
||||
PtrTo<AlterTableClause> AlterTableClause::createClearColumn(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in)
|
||||
{
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::CLEAR, {identifier, in}));
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::CLEAR_COLUMN, {identifier, in}));
|
||||
query->if_exists = if_exists;
|
||||
return query;
|
||||
}
|
||||
|
||||
PtrTo<AlterTableClause> AlterTableClause::createClearIndex(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in)
|
||||
{
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::CLEAR_INDEX, {identifier, in}));
|
||||
query->if_exists = if_exists;
|
||||
return query;
|
||||
}
|
||||
|
||||
PtrTo<AlterTableClause> AlterTableClause::createClearProjection(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in)
|
||||
{
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::CLEAR_PROJECTION, {identifier, in}));
|
||||
query->if_exists = if_exists;
|
||||
return query;
|
||||
}
|
||||
@ -149,6 +172,14 @@ PtrTo<AlterTableClause> AlterTableClause::createDropIndex(bool if_exists, PtrTo<
|
||||
return query;
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createDropProjection(bool if_exists, PtrTo<Identifier> identifier)
|
||||
{
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::DROP_PROJECTION, {identifier}));
|
||||
query->if_exists = if_exists;
|
||||
return query;
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createDropPartition(PtrTo<PartitionClause> clause)
|
||||
{
|
||||
@ -161,6 +192,22 @@ PtrTo<AlterTableClause> AlterTableClause::createFreezePartition(PtrTo<PartitionC
|
||||
return PtrTo<AlterTableClause>(new AlterTableClause(ClauseType::FREEZE_PARTITION, {clause}));
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createMaterializeIndex(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in)
|
||||
{
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::MATERIALIZE_INDEX, {identifier, in}));
|
||||
query->if_exists = if_exists;
|
||||
return query;
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createMaterializeProjection(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in)
|
||||
{
|
||||
PtrTo<AlterTableClause> query(new AlterTableClause(ClauseType::MATERIALIZE_PROJECTION, {identifier, in}));
|
||||
query->if_exists = if_exists;
|
||||
return query;
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<AlterTableClause> AlterTableClause::createModify(bool if_exists, PtrTo<TableElementExpr> element)
|
||||
{
|
||||
@ -256,6 +303,13 @@ ASTPtr AlterTableClause::convertToOld() const
|
||||
if (has(AFTER)) command->index = get(AFTER)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::ADD_PROJECTION:
|
||||
command->type = ASTAlterCommand::ADD_PROJECTION;
|
||||
command->if_not_exists = if_not_exists;
|
||||
command->projection_decl = get(ELEMENT)->convertToOld();
|
||||
if (has(AFTER)) command->projection = get(AFTER)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::ATTACH:
|
||||
command->type = ASTAlterCommand::ATTACH_PARTITION;
|
||||
command->partition = get(PARTITION)->convertToOld();
|
||||
@ -271,12 +325,30 @@ ASTPtr AlterTableClause::convertToOld() const
|
||||
}
|
||||
break;
|
||||
|
||||
case ClauseType::CLEAR:
|
||||
case ClauseType::CLEAR_COLUMN:
|
||||
command->type = ASTAlterCommand::DROP_COLUMN;
|
||||
command->if_exists = if_exists;
|
||||
command->clear_column = true;
|
||||
command->detach = false;
|
||||
command->column = get(COLUMN)->convertToOld();
|
||||
command->column = get(ELEMENT)->convertToOld();
|
||||
if (has(IN)) command->partition = get(IN)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::CLEAR_INDEX:
|
||||
command->type = ASTAlterCommand::DROP_INDEX;
|
||||
command->if_exists = if_exists;
|
||||
command->clear_index = true;
|
||||
command->detach = false;
|
||||
command->index = get(ELEMENT)->convertToOld();
|
||||
if (has(IN)) command->partition = get(IN)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::CLEAR_PROJECTION:
|
||||
command->type = ASTAlterCommand::DROP_PROJECTION;
|
||||
command->if_exists = if_exists;
|
||||
command->clear_projection = true;
|
||||
command->detach = false;
|
||||
command->projection = get(ELEMENT)->convertToOld();
|
||||
if (has(IN)) command->partition = get(IN)->convertToOld();
|
||||
break;
|
||||
|
||||
@ -315,14 +387,21 @@ ASTPtr AlterTableClause::convertToOld() const
|
||||
command->type = ASTAlterCommand::DROP_COLUMN;
|
||||
command->if_exists = if_exists;
|
||||
command->detach = false;
|
||||
command->column = get(COLUMN)->convertToOld();
|
||||
command->column = get(ELEMENT)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::DROP_INDEX:
|
||||
command->type = ASTAlterCommand::DROP_INDEX;
|
||||
command->if_exists = if_exists;
|
||||
command->detach = false;
|
||||
command->index = get(COLUMN)->convertToOld();
|
||||
command->index = get(ELEMENT)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::DROP_PROJECTION:
|
||||
command->type = ASTAlterCommand::DROP_PROJECTION;
|
||||
command->if_exists = if_exists;
|
||||
command->detach = false;
|
||||
command->projection = get(ELEMENT)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::DROP_PARTITION:
|
||||
@ -340,6 +419,20 @@ ASTPtr AlterTableClause::convertToOld() const
|
||||
command->type = ASTAlterCommand::FREEZE_ALL;
|
||||
break;
|
||||
|
||||
case ClauseType::MATERIALIZE_INDEX:
|
||||
command->type = ASTAlterCommand::MATERIALIZE_INDEX;
|
||||
command->if_exists = if_exists;
|
||||
command->index = get(ELEMENT)->convertToOld();
|
||||
if (has(IN)) command->partition = get(IN)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::MATERIALIZE_PROJECTION:
|
||||
command->type = ASTAlterCommand::MATERIALIZE_PROJECTION;
|
||||
command->if_exists = if_exists;
|
||||
command->projection = get(ELEMENT)->convertToOld();
|
||||
if (has(IN)) command->partition = get(IN)->convertToOld();
|
||||
break;
|
||||
|
||||
case ClauseType::MODIFY:
|
||||
command->type = ASTAlterCommand::MODIFY_COLUMN;
|
||||
command->if_exists = if_exists;
|
||||
@ -511,16 +604,34 @@ antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAddIndex(ClickHouseParser::
|
||||
return AlterTableClause::createAddIndex(!!ctx->IF(), visit(ctx->tableIndexDfnt()), after);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAddProjection(ClickHouseParser::AlterTableClauseAddProjectionContext * ctx)
|
||||
{
|
||||
auto after = ctx->AFTER() ? visit(ctx->nestedIdentifier()).as<PtrTo<Identifier>>() : nullptr;
|
||||
return AlterTableClause::createAddProjection(!!ctx->IF(), visit(ctx->tableProjectionDfnt()), after);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseAttach(ClickHouseParser::AlterTableClauseAttachContext *ctx)
|
||||
{
|
||||
auto from = ctx->tableIdentifier() ? visit(ctx->tableIdentifier()).as<PtrTo<TableIdentifier>>() : nullptr;
|
||||
return AlterTableClause::createAttach(visit(ctx->partitionClause()), from);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClear(ClickHouseParser::AlterTableClauseClearContext * ctx)
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClearColumn(ClickHouseParser::AlterTableClauseClearColumnContext * ctx)
|
||||
{
|
||||
auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as<PtrTo<PartitionClause>>() : nullptr;
|
||||
return AlterTableClause::createClear(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
|
||||
return AlterTableClause::createClearColumn(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClearIndex(ClickHouseParser::AlterTableClauseClearIndexContext * ctx)
|
||||
{
|
||||
auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as<PtrTo<PartitionClause>>() : nullptr;
|
||||
return AlterTableClause::createClearIndex(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseClearProjection(ClickHouseParser::AlterTableClauseClearProjectionContext * ctx)
|
||||
{
|
||||
auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as<PtrTo<PartitionClause>>() : nullptr;
|
||||
return AlterTableClause::createClearProjection(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseComment(ClickHouseParser::AlterTableClauseCommentContext * ctx)
|
||||
@ -548,6 +659,11 @@ antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropIndex(ClickHouseParser:
|
||||
return AlterTableClause::createDropIndex(!!ctx->IF(), visit(ctx->nestedIdentifier()));
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropProjection(ClickHouseParser::AlterTableClauseDropProjectionContext * ctx)
|
||||
{
|
||||
return AlterTableClause::createDropProjection(!!ctx->IF(), visit(ctx->nestedIdentifier()));
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseDropPartition(ClickHouseParser::AlterTableClauseDropPartitionContext *ctx)
|
||||
{
|
||||
return AlterTableClause::createDropPartition(visit(ctx->partitionClause()));
|
||||
@ -559,6 +675,18 @@ antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseFreezePartition(ClickHouseP
|
||||
return AlterTableClause::createFreezePartition(clause);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseMaterializeIndex(ClickHouseParser::AlterTableClauseMaterializeIndexContext * ctx)
|
||||
{
|
||||
auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as<PtrTo<PartitionClause>>() : nullptr;
|
||||
return AlterTableClause::createMaterializeIndex(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseMaterializeProjection(ClickHouseParser::AlterTableClauseMaterializeProjectionContext * ctx)
|
||||
{
|
||||
auto partition = ctx->partitionClause() ? visit(ctx->partitionClause()).as<PtrTo<PartitionClause>>() : nullptr;
|
||||
return AlterTableClause::createMaterializeProjection(!!ctx->IF(), visit(ctx->nestedIdentifier()), partition);
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitAlterTableClauseModify(ClickHouseParser::AlterTableClauseModifyContext * ctx)
|
||||
{
|
||||
return AlterTableClause::createModify(!!ctx->IF(), visit(ctx->tableColumnDfnt()));
|
||||
|
@ -61,16 +61,22 @@ class AlterTableClause : public INode
|
||||
public:
|
||||
static PtrTo<AlterTableClause> createAddColumn(bool if_not_exists, PtrTo<TableElementExpr> element, PtrTo<Identifier> after);
|
||||
static PtrTo<AlterTableClause> createAddIndex(bool if_not_exists, PtrTo<TableElementExpr> element, PtrTo<Identifier> after);
|
||||
static PtrTo<AlterTableClause> createAddProjection(bool if_not_exists, PtrTo<TableElementExpr> element, PtrTo<Identifier> after);
|
||||
static PtrTo<AlterTableClause> createAttach(PtrTo<PartitionClause> clause, PtrTo<TableIdentifier> from);
|
||||
static PtrTo<AlterTableClause> createClear(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in);
|
||||
static PtrTo<AlterTableClause> createClearColumn(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in);
|
||||
static PtrTo<AlterTableClause> createClearIndex(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in);
|
||||
static PtrTo<AlterTableClause> createClearProjection(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in);
|
||||
static PtrTo<AlterTableClause> createCodec(bool if_exists, PtrTo<Identifier> identifier, PtrTo<CodecExpr> codec);
|
||||
static PtrTo<AlterTableClause> createComment(bool if_exists, PtrTo<Identifier> identifier, PtrTo<StringLiteral> comment);
|
||||
static PtrTo<AlterTableClause> createDelete(PtrTo<ColumnExpr> expr);
|
||||
static PtrTo<AlterTableClause> createDetach(PtrTo<PartitionClause> clause);
|
||||
static PtrTo<AlterTableClause> createDropColumn(bool if_exists, PtrTo<Identifier> identifier);
|
||||
static PtrTo<AlterTableClause> createDropIndex(bool if_exists, PtrTo<Identifier> identifier);
|
||||
static PtrTo<AlterTableClause> createDropProjection(bool if_exists, PtrTo<Identifier> identifier);
|
||||
static PtrTo<AlterTableClause> createDropPartition(PtrTo<PartitionClause> clause);
|
||||
static PtrTo<AlterTableClause> createFreezePartition(PtrTo<PartitionClause> clause);
|
||||
static PtrTo<AlterTableClause> createMaterializeIndex(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in);
|
||||
static PtrTo<AlterTableClause> createMaterializeProjection(bool if_exists, PtrTo<Identifier> identifier, PtrTo<PartitionClause> in);
|
||||
static PtrTo<AlterTableClause> createModify(bool if_exists, PtrTo<TableElementExpr> element);
|
||||
static PtrTo<AlterTableClause> createMovePartitionToDisk(PtrTo<PartitionClause> clause, PtrTo<StringLiteral> literal);
|
||||
static PtrTo<AlterTableClause> createMovePartitionToTable(PtrTo<PartitionClause> clause, PtrTo<TableIdentifier> identifier);
|
||||
@ -88,19 +94,19 @@ class AlterTableClause : public INode
|
||||
private:
|
||||
enum ChildIndex : UInt8
|
||||
{
|
||||
// ADD COLUMN or INDEX
|
||||
ELEMENT = 0, // TableElementExpr
|
||||
// ADD COLUMN, INDEX or PROJECTION
|
||||
ELEMENT = 0, // TableElementExpr (COLUMN, CONSTRAINT, INDEX, PROJECTION)
|
||||
AFTER = 1, // Identifier (optional)
|
||||
|
||||
// ATTACH/REPLACE
|
||||
PARTITION = 0, // PartitionClause
|
||||
FROM = 1, // TableIdentifier (optional)
|
||||
|
||||
// CLEAR
|
||||
COLUMN = 0, // Identifier
|
||||
// CLEAR COLUMN, INDEX or PROJECTION
|
||||
IN = 1, // PartitionClause
|
||||
|
||||
// CODEC
|
||||
// CODEC, COMMENT and RENAME
|
||||
COLUMN = 0, // Identifier
|
||||
CODEC = 1, // CodecExpr
|
||||
|
||||
// COMMENT
|
||||
@ -127,16 +133,22 @@ class AlterTableClause : public INode
|
||||
{
|
||||
ADD_COLUMN,
|
||||
ADD_INDEX,
|
||||
ADD_PROJECTION,
|
||||
ATTACH,
|
||||
CLEAR,
|
||||
CLEAR_COLUMN,
|
||||
CLEAR_INDEX,
|
||||
CLEAR_PROJECTION,
|
||||
CODEC,
|
||||
COMMENT,
|
||||
DELETE,
|
||||
DETACH,
|
||||
DROP_COLUMN,
|
||||
DROP_INDEX,
|
||||
DROP_PROJECTION,
|
||||
DROP_PARTITION,
|
||||
FREEZE_PARTITION,
|
||||
MATERIALIZE_INDEX,
|
||||
MATERIALIZE_PROJECTION,
|
||||
MODIFY,
|
||||
MOVE_PARTITION_TO_DISK,
|
||||
MOVE_PARTITION_TO_TABLE,
|
||||
|
@ -48,6 +48,7 @@ ASTPtr TableSchemaClause::convertToOld() const
|
||||
auto column_list = std::make_shared<ASTExpressionList>();
|
||||
auto constraint_list = std::make_shared<ASTExpressionList>();
|
||||
auto index_list = std::make_shared<ASTExpressionList>();
|
||||
auto projection_list = std::make_shared<ASTExpressionList>();
|
||||
|
||||
for (const auto & element : get(ELEMENTS)->as<TableElementList &>())
|
||||
{
|
||||
@ -62,12 +63,16 @@ ASTPtr TableSchemaClause::convertToOld() const
|
||||
case TableElementExpr::ExprType::INDEX:
|
||||
index_list->children.push_back(element->convertToOld());
|
||||
break;
|
||||
case TableElementExpr::ExprType::PROJECTION:
|
||||
projection_list->children.push_back(element->convertToOld());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!column_list->children.empty()) columns->set(columns->columns, column_list);
|
||||
if (!constraint_list->children.empty()) columns->set(columns->constraints, constraint_list);
|
||||
if (!index_list->children.empty()) columns->set(columns->indices, index_list);
|
||||
if (!projection_list->children.empty()) columns->set(columns->projections, projection_list);
|
||||
|
||||
return columns;
|
||||
}
|
||||
|
@ -1,7 +1,9 @@
|
||||
#include <Parsers/New/AST/SelectUnionQuery.h>
|
||||
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTProjectionSelectQuery.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
#include <Parsers/ASTSelectWithUnionQuery.h>
|
||||
#include <Parsers/ASTSetQuery.h>
|
||||
@ -102,6 +104,59 @@ ASTPtr SettingsClause::convertToOld() const
|
||||
return expr;
|
||||
}
|
||||
|
||||
// PROJECTION SELECT Clause
|
||||
|
||||
ProjectionSelectStmt::ProjectionSelectStmt(PtrTo<ColumnExprList> expr_list)
|
||||
: INode(MAX_INDEX)
|
||||
{
|
||||
set(COLUMNS, expr_list);
|
||||
}
|
||||
|
||||
void ProjectionSelectStmt::setWithClause(PtrTo<WithClause> clause)
|
||||
{
|
||||
set(WITH, clause);
|
||||
}
|
||||
|
||||
void ProjectionSelectStmt::setGroupByClause(PtrTo<GroupByClause> clause)
|
||||
{
|
||||
set(GROUP_BY, clause);
|
||||
}
|
||||
|
||||
void ProjectionSelectStmt::setOrderByClause(PtrTo<ProjectionOrderByClause> clause)
|
||||
{
|
||||
set(ORDER_BY, clause);
|
||||
}
|
||||
|
||||
ASTPtr ProjectionSelectStmt::convertToOld() const
|
||||
{
|
||||
auto old_select = std::make_shared<ASTProjectionSelectQuery>();
|
||||
|
||||
old_select->setExpression(ASTProjectionSelectQuery::Expression::SELECT, get(COLUMNS)->convertToOld());
|
||||
|
||||
if (has(WITH)) old_select->setExpression(ASTProjectionSelectQuery::Expression::WITH, get(WITH)->convertToOld());
|
||||
if (has(GROUP_BY)) old_select->setExpression(ASTProjectionSelectQuery::Expression::GROUP_BY, get(GROUP_BY)->convertToOld());
|
||||
if (has(ORDER_BY))
|
||||
{
|
||||
ASTPtr order_expression;
|
||||
auto expr_list = get(ORDER_BY)->convertToOld();
|
||||
if (expr_list->children.size() == 1)
|
||||
{
|
||||
order_expression = expr_list->children.front();
|
||||
}
|
||||
else
|
||||
{
|
||||
auto function_node = std::make_shared<ASTFunction>();
|
||||
function_node->name = "tuple";
|
||||
function_node->arguments = expr_list;
|
||||
function_node->children.push_back(expr_list);
|
||||
order_expression = function_node;
|
||||
}
|
||||
old_select->setExpression(ASTProjectionSelectQuery::Expression::ORDER_BY, std::move(order_expression));
|
||||
}
|
||||
|
||||
return old_select;
|
||||
}
|
||||
|
||||
// SELECT Statement
|
||||
|
||||
SelectStmt::SelectStmt(bool distinct_, ModifierType type, bool totals, PtrTo<ColumnExprList> expr_list)
|
||||
@ -302,6 +357,11 @@ antlrcpp::Any ParseTreeVisitor::visitOrderByClause(ClickHouseParser::OrderByClau
|
||||
return std::make_shared<OrderByClause>(visit(ctx->orderExprList()).as<PtrTo<OrderExprList>>());
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitProjectionOrderByClause(ClickHouseParser::ProjectionOrderByClauseContext *ctx)
|
||||
{
|
||||
return std::make_shared<ProjectionOrderByClause>(visit(ctx->columnExprList()).as<PtrTo<ColumnExprList>>());
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitLimitByClause(ClickHouseParser::LimitByClauseContext *ctx)
|
||||
{
|
||||
return std::make_shared<LimitByClause>(visit(ctx->limitExpr()), visit(ctx->columnExprList()));
|
||||
@ -317,6 +377,18 @@ antlrcpp::Any ParseTreeVisitor::visitSettingsClause(ClickHouseParser::SettingsCl
|
||||
return std::make_shared<SettingsClause>(visit(ctx->settingExprList()).as<PtrTo<SettingExprList>>());
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitProjectionSelectStmt(ClickHouseParser::ProjectionSelectStmtContext *ctx)
|
||||
{
|
||||
PtrTo<ColumnExprList> column_list = visit(ctx->columnExprList());
|
||||
auto select_stmt = std::make_shared<ProjectionSelectStmt>(column_list);
|
||||
|
||||
if (ctx->withClause()) select_stmt->setWithClause(visit(ctx->withClause()));
|
||||
if (ctx->groupByClause()) select_stmt->setGroupByClause(visit(ctx->groupByClause()));
|
||||
if (ctx->projectionOrderByClause()) select_stmt->setOrderByClause(visit(ctx->projectionOrderByClause()));
|
||||
|
||||
return select_stmt;
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitSelectStmt(ClickHouseParser::SelectStmtContext *ctx)
|
||||
{
|
||||
SelectStmt::ModifierType type = SelectStmt::ModifierType::NONE;
|
||||
|
@ -97,6 +97,29 @@ class SettingsClause : public INode
|
||||
|
||||
// Statement
|
||||
|
||||
class ProjectionSelectStmt : public INode
|
||||
{
|
||||
public:
|
||||
ProjectionSelectStmt(PtrTo<ColumnExprList> expr_list);
|
||||
|
||||
void setWithClause(PtrTo<WithClause> clause);
|
||||
void setGroupByClause(PtrTo<GroupByClause> clause);
|
||||
void setOrderByClause(PtrTo<ProjectionOrderByClause> clause);
|
||||
|
||||
ASTPtr convertToOld() const override;
|
||||
|
||||
private:
|
||||
enum ChildIndex : UInt8
|
||||
{
|
||||
COLUMNS = 0, // ColumnExprList
|
||||
WITH, // WithClause (optional)
|
||||
GROUP_BY, // GroupByClause (optional)
|
||||
ORDER_BY, // OrderByClause (optional)
|
||||
|
||||
MAX_INDEX,
|
||||
};
|
||||
};
|
||||
|
||||
class SelectStmt : public INode
|
||||
{
|
||||
public:
|
||||
|
@ -4,10 +4,12 @@
|
||||
#include <Parsers/ASTConstraintDeclaration.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Parsers/ASTIndexDeclaration.h>
|
||||
#include <Parsers/ASTProjectionDeclaration.h>
|
||||
#include <Parsers/New/AST/ColumnExpr.h>
|
||||
#include <Parsers/New/AST/ColumnTypeExpr.h>
|
||||
#include <Parsers/New/AST/Identifier.h>
|
||||
#include <Parsers/New/AST/Literal.h>
|
||||
#include <Parsers/New/AST/SelectUnionQuery.h>
|
||||
#include <Parsers/New/ParseTreeVisitor.h>
|
||||
|
||||
|
||||
@ -81,6 +83,13 @@ TableElementExpr::createIndex(PtrTo<Identifier> name, PtrTo<ColumnExpr> expr, Pt
|
||||
return PtrTo<TableElementExpr>(new TableElementExpr(ExprType::INDEX, {name, expr, type, granularity}));
|
||||
}
|
||||
|
||||
// static
|
||||
PtrTo<TableElementExpr>
|
||||
TableElementExpr::createProjection(PtrTo<Identifier> name, PtrTo<ProjectionSelectStmt> query)
|
||||
{
|
||||
return PtrTo<TableElementExpr>(new TableElementExpr(ExprType::PROJECTION, {name, query}));
|
||||
}
|
||||
|
||||
TableElementExpr::TableElementExpr(ExprType type, PtrList exprs) : INode(exprs), expr_type(type)
|
||||
{
|
||||
}
|
||||
@ -152,6 +161,15 @@ ASTPtr TableElementExpr::convertToOld() const
|
||||
expr->set(expr->type, get(INDEX_TYPE)->convertToOld());
|
||||
expr->granularity = get<NumberLiteral>(GRANULARITY)->as<UInt64>().value_or(0); // FIXME: throw exception instead of default.
|
||||
|
||||
return expr;
|
||||
}
|
||||
case ExprType::PROJECTION:
|
||||
{
|
||||
auto expr = std::make_shared<ASTProjectionDeclaration>();
|
||||
|
||||
expr->name = get<Identifier>(NAME)->getName();
|
||||
expr->set(expr->query, get(QUERY)->convertToOld());
|
||||
|
||||
return expr;
|
||||
}
|
||||
}
|
||||
@ -222,6 +240,11 @@ antlrcpp::Any ParseTreeVisitor::visitTableElementExprIndex(ClickHouseParser::Tab
|
||||
return visit(ctx->tableIndexDfnt());
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitTableElementExprProjection(ClickHouseParser::TableElementExprProjectionContext *ctx)
|
||||
{
|
||||
return visit(ctx->tableProjectionDfnt());
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitTableIndexDfnt(ClickHouseParser::TableIndexDfntContext *ctx)
|
||||
{
|
||||
return TableElementExpr::createIndex(
|
||||
@ -231,4 +254,11 @@ antlrcpp::Any ParseTreeVisitor::visitTableIndexDfnt(ClickHouseParser::TableIndex
|
||||
Literal::createNumber(ctx->DECIMAL_LITERAL()));
|
||||
}
|
||||
|
||||
antlrcpp::Any ParseTreeVisitor::visitTableProjectionDfnt(ClickHouseParser::TableProjectionDfntContext *ctx)
|
||||
{
|
||||
return TableElementExpr::createProjection(
|
||||
visit(ctx->nestedIdentifier()),
|
||||
visit(ctx->projectionSelectStmt()));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -68,6 +68,7 @@ class TableElementExpr : public INode
|
||||
COLUMN,
|
||||
CONSTRAINT,
|
||||
INDEX,
|
||||
PROJECTION,
|
||||
};
|
||||
|
||||
static PtrTo<TableElementExpr> createColumn(
|
||||
@ -83,6 +84,9 @@ class TableElementExpr : public INode
|
||||
static PtrTo<TableElementExpr>
|
||||
createIndex(PtrTo<Identifier> name, PtrTo<ColumnExpr> expr, PtrTo<ColumnTypeExpr> type, PtrTo<NumberLiteral> granularity);
|
||||
|
||||
static PtrTo<TableElementExpr>
|
||||
createProjection(PtrTo<Identifier> name, PtrTo<ProjectionSelectStmt> query);
|
||||
|
||||
auto getType() const { return expr_type; }
|
||||
|
||||
ASTPtr convertToOld() const override;
|
||||
@ -106,6 +110,9 @@ class TableElementExpr : public INode
|
||||
EXPR = 1, // ColumnExpr
|
||||
INDEX_TYPE = 2, // ColumnTypeExpr
|
||||
GRANULARITY = 3, // NumberLiteral
|
||||
|
||||
// PROJECTION
|
||||
QUERY = 1, // ProjectionSelectStmt
|
||||
};
|
||||
|
||||
const ExprType expr_type;
|
||||
|
@ -47,6 +47,7 @@ class PartitionClause;
|
||||
class Query;
|
||||
class RatioExpr;
|
||||
class TableSchemaClause;
|
||||
class ProjectionSelectStmt;
|
||||
class SelectStmt;
|
||||
class SelectUnionQuery;
|
||||
class SettingExpr;
|
||||
@ -81,6 +82,7 @@ using TTLExprList = List<TTLExpr>;
|
||||
using ClusterClause = SimpleClause<StringLiteral>;
|
||||
using DestinationClause = SimpleClause<TableIdentifier>;
|
||||
using OrderByClause = SimpleClause<OrderExprList>;
|
||||
using ProjectionOrderByClause = SimpleClause<ColumnExprList>;
|
||||
using PrimaryKeyClause = SimpleClause<ColumnExpr>;
|
||||
using TTLClause = SimpleClause<TTLExprList>;
|
||||
using UUIDClause = SimpleClause<StringLiteral>;
|
||||
|
File diff suppressed because it is too large
@ -102,6 +102,7 @@ LIMIT: L I M I T;
LIVE: L I V E;
LOCAL: L O C A L;
LOGS: L O G S;
MATERIALIZE: M A T E R I A L I Z E;
MATERIALIZED: M A T E R I A L I Z E D;
MAX: M A X;
MERGES: M E R G E S;
@ -127,6 +128,7 @@ PARTITION: P A R T I T I O N;
POPULATE: P O P U L A T E;
PREWHERE: P R E W H E R E;
PRIMARY: P R I M A R Y;
PROJECTION: P R O J E C T I O N;
QUARTER: Q U A R T E R;
RANGE: R A N G E;
RELOAD: R E L O A D;
@ -13,51 +13,51 @@ namespace DB {
|
||||
class ClickHouseLexer : public antlr4::Lexer {
|
||||
public:
|
||||
enum {
|
||||
ADD = 1, AFTER = 2, ALIAS = 3, ALL = 4, ALTER = 5, AND = 6, ANTI = 7,
|
||||
ANY = 8, ARRAY = 9, AS = 10, ASCENDING = 11, ASOF = 12, ASYNC = 13,
|
||||
ATTACH = 14, BETWEEN = 15, BOTH = 16, BY = 17, CASE = 18, CAST = 19,
|
||||
CHECK = 20, CLEAR = 21, CLUSTER = 22, CODEC = 23, COLLATE = 24, COLUMN = 25,
|
||||
COMMENT = 26, CONSTRAINT = 27, CREATE = 28, CROSS = 29, CUBE = 30, DATABASE = 31,
|
||||
DATABASES = 32, DATE = 33, DAY = 34, DEDUPLICATE = 35, DEFAULT = 36,
|
||||
DELAY = 37, DELETE = 38, DESC = 39, DESCENDING = 40, DESCRIBE = 41,
|
||||
DETACH = 42, DICTIONARIES = 43, DICTIONARY = 44, DISK = 45, DISTINCT = 46,
|
||||
DISTRIBUTED = 47, DROP = 48, ELSE = 49, END = 50, ENGINE = 51, EVENTS = 52,
|
||||
EXISTS = 53, EXPLAIN = 54, EXPRESSION = 55, EXTRACT = 56, FETCHES = 57,
|
||||
FINAL = 58, FIRST = 59, FLUSH = 60, FOR = 61, FORMAT = 62, FREEZE = 63,
|
||||
FROM = 64, FULL = 65, FUNCTION = 66, GLOBAL = 67, GRANULARITY = 68,
|
||||
GROUP = 69, HAVING = 70, HIERARCHICAL = 71, HOUR = 72, ID = 73, IF = 74,
|
||||
ILIKE = 75, IN = 76, INDEX = 77, INF = 78, INJECTIVE = 79, INNER = 80,
|
||||
INSERT = 81, INTERVAL = 82, INTO = 83, IS = 84, IS_OBJECT_ID = 85, JOIN = 86,
|
||||
KEY = 87, KILL = 88, LAST = 89, LAYOUT = 90, LEADING = 91, LEFT = 92,
|
||||
LIFETIME = 93, LIKE = 94, LIMIT = 95, LIVE = 96, LOCAL = 97, LOGS = 98,
|
||||
MATERIALIZED = 99, MAX = 100, MERGES = 101, MIN = 102, MINUTE = 103,
|
||||
MODIFY = 104, MONTH = 105, MOVE = 106, MUTATION = 107, NAN_SQL = 108,
|
||||
NO = 109, NOT = 110, NULL_SQL = 111, NULLS = 112, OFFSET = 113, ON = 114,
|
||||
OPTIMIZE = 115, OR = 116, ORDER = 117, OUTER = 118, OUTFILE = 119, PARTITION = 120,
|
||||
POPULATE = 121, PREWHERE = 122, PRIMARY = 123, QUARTER = 124, RANGE = 125,
|
||||
RELOAD = 126, REMOVE = 127, RENAME = 128, REPLACE = 129, REPLICA = 130,
|
||||
REPLICATED = 131, RIGHT = 132, ROLLUP = 133, SAMPLE = 134, SECOND = 135,
|
||||
SELECT = 136, SEMI = 137, SENDS = 138, SET = 139, SETTINGS = 140, SHOW = 141,
|
||||
SOURCE = 142, START = 143, STOP = 144, SUBSTRING = 145, SYNC = 146,
|
||||
SYNTAX = 147, SYSTEM = 148, TABLE = 149, TABLES = 150, TEMPORARY = 151,
|
||||
TEST = 152, THEN = 153, TIES = 154, TIMEOUT = 155, TIMESTAMP = 156,
|
||||
TO = 157, TOP = 158, TOTALS = 159, TRAILING = 160, TRIM = 161, TRUNCATE = 162,
|
||||
TTL = 163, TYPE = 164, UNION = 165, UPDATE = 166, USE = 167, USING = 168,
|
||||
UUID = 169, VALUES = 170, VIEW = 171, VOLUME = 172, WATCH = 173, WEEK = 174,
|
||||
WHEN = 175, WHERE = 176, WITH = 177, YEAR = 178, JSON_FALSE = 179, JSON_TRUE = 180,
|
||||
IDENTIFIER = 181, FLOATING_LITERAL = 182, OCTAL_LITERAL = 183, DECIMAL_LITERAL = 184,
|
||||
HEXADECIMAL_LITERAL = 185, STRING_LITERAL = 186, ARROW = 187, ASTERISK = 188,
|
||||
BACKQUOTE = 189, BACKSLASH = 190, COLON = 191, COMMA = 192, CONCAT = 193,
|
||||
DASH = 194, DOT = 195, EQ_DOUBLE = 196, EQ_SINGLE = 197, GE = 198, GT = 199,
|
||||
LBRACE = 200, LBRACKET = 201, LE = 202, LPAREN = 203, LT = 204, NOT_EQ = 205,
|
||||
PERCENT = 206, PLUS = 207, QUERY = 208, QUOTE_DOUBLE = 209, QUOTE_SINGLE = 210,
|
||||
RBRACE = 211, RBRACKET = 212, RPAREN = 213, SEMICOLON = 214, SLASH = 215,
|
||||
UNDERSCORE = 216, MULTI_LINE_COMMENT = 217, SINGLE_LINE_COMMENT = 218,
|
||||
WHITESPACE = 219
|
||||
ADD = 1, AFTER = 2, ALIAS = 3, ALL = 4, ALTER = 5, AND = 6, ANTI = 7,
|
||||
ANY = 8, ARRAY = 9, AS = 10, ASCENDING = 11, ASOF = 12, ASYNC = 13,
|
||||
ATTACH = 14, BETWEEN = 15, BOTH = 16, BY = 17, CASE = 18, CAST = 19,
|
||||
CHECK = 20, CLEAR = 21, CLUSTER = 22, CODEC = 23, COLLATE = 24, COLUMN = 25,
|
||||
COMMENT = 26, CONSTRAINT = 27, CREATE = 28, CROSS = 29, CUBE = 30, DATABASE = 31,
|
||||
DATABASES = 32, DATE = 33, DAY = 34, DEDUPLICATE = 35, DEFAULT = 36,
|
||||
DELAY = 37, DELETE = 38, DESC = 39, DESCENDING = 40, DESCRIBE = 41,
|
||||
DETACH = 42, DICTIONARIES = 43, DICTIONARY = 44, DISK = 45, DISTINCT = 46,
|
||||
DISTRIBUTED = 47, DROP = 48, ELSE = 49, END = 50, ENGINE = 51, EVENTS = 52,
|
||||
EXISTS = 53, EXPLAIN = 54, EXPRESSION = 55, EXTRACT = 56, FETCHES = 57,
|
||||
FINAL = 58, FIRST = 59, FLUSH = 60, FOR = 61, FORMAT = 62, FREEZE = 63,
|
||||
FROM = 64, FULL = 65, FUNCTION = 66, GLOBAL = 67, GRANULARITY = 68,
|
||||
GROUP = 69, HAVING = 70, HIERARCHICAL = 71, HOUR = 72, ID = 73, IF = 74,
|
||||
ILIKE = 75, IN = 76, INDEX = 77, INF = 78, INJECTIVE = 79, INNER = 80,
|
||||
INSERT = 81, INTERVAL = 82, INTO = 83, IS = 84, IS_OBJECT_ID = 85, JOIN = 86,
|
||||
KEY = 87, KILL = 88, LAST = 89, LAYOUT = 90, LEADING = 91, LEFT = 92,
|
||||
LIFETIME = 93, LIKE = 94, LIMIT = 95, LIVE = 96, LOCAL = 97, LOGS = 98,
|
||||
MATERIALIZED = 99, MATERIALIZE = 100, MAX = 101, MERGES = 102, MIN = 103,
|
||||
MINUTE = 104, MODIFY = 105, MONTH = 106, MOVE = 107, MUTATION = 108,
|
||||
NAN_SQL = 109, NO = 110, NOT = 111, NULL_SQL = 112, NULLS = 113, OFFSET = 114,
|
||||
ON = 115, OPTIMIZE = 116, OR = 117, ORDER = 118, OUTER = 119, OUTFILE = 120,
|
||||
PARTITION = 121, POPULATE = 122, PREWHERE = 123, PRIMARY = 124, PROJECTION = 125,
|
||||
QUARTER = 126, RANGE = 127, RELOAD = 128, REMOVE = 129, RENAME = 130,
|
||||
REPLACE = 131, REPLICA = 132, REPLICATED = 133, RIGHT = 134, ROLLUP = 135,
|
||||
SAMPLE = 136, SECOND = 137, SELECT = 138, SEMI = 139, SENDS = 140, SET = 141,
|
||||
SETTINGS = 142, SHOW = 143, SOURCE = 144, START = 145, STOP = 146, SUBSTRING = 147,
|
||||
SYNC = 148, SYNTAX = 149, SYSTEM = 150, TABLE = 151, TABLES = 152, TEMPORARY = 153,
|
||||
TEST = 154, THEN = 155, TIES = 156, TIMEOUT = 157, TIMESTAMP = 158,
|
||||
TO = 159, TOP = 160, TOTALS = 161, TRAILING = 162, TRIM = 163, TRUNCATE = 164,
|
||||
TTL = 165, TYPE = 166, UNION = 167, UPDATE = 168, USE = 169, USING = 170,
|
||||
UUID = 171, VALUES = 172, VIEW = 173, VOLUME = 174, WATCH = 175, WEEK = 176,
|
||||
WHEN = 177, WHERE = 178, WITH = 179, YEAR = 180, JSON_FALSE = 181, JSON_TRUE = 182,
|
||||
IDENTIFIER = 183, FLOATING_LITERAL = 184, OCTAL_LITERAL = 185, DECIMAL_LITERAL = 186,
|
||||
HEXADECIMAL_LITERAL = 187, STRING_LITERAL = 188, ARROW = 189, ASTERISK = 190,
|
||||
BACKQUOTE = 191, BACKSLASH = 192, COLON = 193, COMMA = 194, CONCAT = 195,
|
||||
DASH = 196, DOT = 197, EQ_DOUBLE = 198, EQ_SINGLE = 199, GE = 200, GT = 201,
|
||||
LBRACE = 202, LBRACKET = 203, LE = 204, LPAREN = 205, LT = 206, NOT_EQ = 207,
|
||||
PERCENT = 208, PLUS = 209, QUERY = 210, QUOTE_DOUBLE = 211, QUOTE_SINGLE = 212,
|
||||
RBRACE = 213, RBRACKET = 214, RPAREN = 215, SEMICOLON = 216, SLASH = 217,
|
||||
UNDERSCORE = 218, MULTI_LINE_COMMENT = 219, SINGLE_LINE_COMMENT = 220,
|
||||
WHITESPACE = 221
|
||||
};
|
||||
|
||||
ClickHouseLexer(antlr4::CharStream *input);
|
||||
~ClickHouseLexer() override;
|
||||
~ClickHouseLexer();
|
||||
|
||||
virtual std::string getGrammarFileName() const override;
|
||||
virtual const std::vector<std::string>& getRuleNames() const override;
|
||||
|
File diff suppressed because it is too large
@ -35,31 +35,37 @@ alterStmt
    ;

alterTableClause
    : ADD COLUMN (IF NOT EXISTS)? tableColumnDfnt (AFTER nestedIdentifier)?  # AlterTableClauseAddColumn
    | ADD INDEX (IF NOT EXISTS)? tableIndexDfnt (AFTER nestedIdentifier)?  # AlterTableClauseAddIndex
    | ATTACH partitionClause (FROM tableIdentifier)?  # AlterTableClauseAttach
    | CLEAR COLUMN (IF EXISTS)? nestedIdentifier (IN partitionClause)?  # AlterTableClauseClear
    | COMMENT COLUMN (IF EXISTS)? nestedIdentifier STRING_LITERAL  # AlterTableClauseComment
    | DELETE WHERE columnExpr  # AlterTableClauseDelete
    | DETACH partitionClause  # AlterTableClauseDetach
    | DROP COLUMN (IF EXISTS)? nestedIdentifier  # AlterTableClauseDropColumn
    | DROP INDEX (IF EXISTS)? nestedIdentifier  # AlterTableClauseDropIndex
    | DROP partitionClause  # AlterTableClauseDropPartition
    | FREEZE partitionClause?  # AlterTableClauseFreezePartition
    | MODIFY COLUMN (IF EXISTS)? nestedIdentifier codecExpr  # AlterTableClauseModifyCodec
    | MODIFY COLUMN (IF EXISTS)? nestedIdentifier COMMENT STRING_LITERAL  # AlterTableClauseModifyComment
    | MODIFY COLUMN (IF EXISTS)? nestedIdentifier REMOVE tableColumnPropertyType  # AlterTableClauseModifyRemove
    | MODIFY COLUMN (IF EXISTS)? tableColumnDfnt  # AlterTableClauseModify
    | MODIFY ORDER BY columnExpr  # AlterTableClauseModifyOrderBy
    | MODIFY ttlClause  # AlterTableClauseModifyTTL
    : ADD COLUMN (IF NOT EXISTS)? tableColumnDfnt (AFTER nestedIdentifier)?  # AlterTableClauseAddColumn
    | ADD INDEX (IF NOT EXISTS)? tableIndexDfnt (AFTER nestedIdentifier)?  # AlterTableClauseAddIndex
    | ADD PROJECTION (IF NOT EXISTS)? tableProjectionDfnt (AFTER nestedIdentifier)?  # AlterTableClauseAddProjection
    | ATTACH partitionClause (FROM tableIdentifier)?  # AlterTableClauseAttach
    | CLEAR COLUMN (IF EXISTS)? nestedIdentifier (IN partitionClause)?  # AlterTableClauseClearColumn
    | CLEAR INDEX (IF EXISTS)? nestedIdentifier (IN partitionClause)?  # AlterTableClauseClearIndex
    | CLEAR PROJECTION (IF EXISTS)? nestedIdentifier (IN partitionClause)?  # AlterTableClauseClearProjection
    | COMMENT COLUMN (IF EXISTS)? nestedIdentifier STRING_LITERAL  # AlterTableClauseComment
    | DELETE WHERE columnExpr  # AlterTableClauseDelete
    | DETACH partitionClause  # AlterTableClauseDetach
    | DROP COLUMN (IF EXISTS)? nestedIdentifier  # AlterTableClauseDropColumn
    | DROP INDEX (IF EXISTS)? nestedIdentifier  # AlterTableClauseDropIndex
    | DROP PROJECTION (IF EXISTS)? nestedIdentifier  # AlterTableClauseDropProjection
    | DROP partitionClause  # AlterTableClauseDropPartition
    | FREEZE partitionClause?  # AlterTableClauseFreezePartition
    | MATERIALIZE INDEX (IF EXISTS)? nestedIdentifier (IN partitionClause)?  # AlterTableClauseMaterializeIndex
    | MATERIALIZE PROJECTION (IF EXISTS)? nestedIdentifier (IN partitionClause)?  # AlterTableClauseMaterializeProjection
    | MODIFY COLUMN (IF EXISTS)? nestedIdentifier codecExpr  # AlterTableClauseModifyCodec
    | MODIFY COLUMN (IF EXISTS)? nestedIdentifier COMMENT STRING_LITERAL  # AlterTableClauseModifyComment
    | MODIFY COLUMN (IF EXISTS)? nestedIdentifier REMOVE tableColumnPropertyType  # AlterTableClauseModifyRemove
    | MODIFY COLUMN (IF EXISTS)? tableColumnDfnt  # AlterTableClauseModify
    | MODIFY ORDER BY columnExpr  # AlterTableClauseModifyOrderBy
    | MODIFY ttlClause  # AlterTableClauseModifyTTL
    | MOVE partitionClause ( TO DISK STRING_LITERAL
                           | TO VOLUME STRING_LITERAL
                           | TO TABLE tableIdentifier
                           )  # AlterTableClauseMovePartition
    | REMOVE TTL  # AlterTableClauseRemoveTTL
    | RENAME COLUMN (IF EXISTS)? nestedIdentifier TO nestedIdentifier  # AlterTableClauseRename
    | REPLACE partitionClause FROM tableIdentifier  # AlterTableClauseReplace
    | UPDATE assignmentExprList whereClause  # AlterTableClauseUpdate
                           )  # AlterTableClauseMovePartition
    | REMOVE TTL  # AlterTableClauseRemoveTTL
    | RENAME COLUMN (IF EXISTS)? nestedIdentifier TO nestedIdentifier  # AlterTableClauseRename
    | REPLACE partitionClause FROM tableIdentifier  # AlterTableClauseReplace
    | UPDATE assignmentExprList whereClause  # AlterTableClauseUpdate
    ;

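The PROJECTION alternatives added to alterTableClause above are grammar only; the generated ClickHouseLexer/ClickHouseParser classes that appear later in this diff are what consume them. A minimal sketch of wiring the pieces together (the include paths, the antlr4-runtime.h umbrella header and queryStmt as the entry rule are assumptions, not taken from this commit):

    #include <string>
    #include <antlr4-runtime.h>                // ANTLR4 C++ runtime umbrella header (assumed)
    #include <Parsers/New/ClickHouseLexer.h>   // generated lexer, path assumed
    #include <Parsers/New/ClickHouseParser.h>  // generated parser, path assumed

    // Hypothetical driver: run one of the new ALTER ... PROJECTION statements through the
    // generated parser; the resulting parse tree is what ParseTreeVisitor later turns into AST.
    void parseAlterAddProjection()
    {
        std::string text = "ALTER TABLE t ADD PROJECTION p (SELECT a, sum(b) GROUP BY a);";
        antlr4::ANTLRInputStream input(text);
        DB::ClickHouseLexer lexer(&input);
        antlr4::CommonTokenStream tokens(&lexer);
        DB::ClickHouseParser parser(&tokens);
        auto * tree = parser.queryStmt();      // entry rule name is an assumption
        (void) tree;                           // a real caller would visit the tree
    }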
assignmentExprList: assignmentExpr (COMMA assignmentExpr)*;
@ -154,6 +160,7 @@ tableElementExpr
    : tableColumnDfnt  # TableElementExprColumn
    | CONSTRAINT identifier CHECK columnExpr  # TableElementExprConstraint
    | INDEX tableIndexDfnt  # TableElementExprIndex
    | PROJECTION tableProjectionDfnt  # TableElementExprProjection
    ;
tableColumnDfnt
    : nestedIdentifier columnTypeExpr tableColumnPropertyExpr? (COMMENT STRING_LITERAL)? codecExpr? (TTL columnExpr)?
@ -161,6 +168,7 @@ tableColumnDfnt
    ;
tableColumnPropertyExpr: (DEFAULT | MATERIALIZED | ALIAS) columnExpr;
tableIndexDfnt: nestedIdentifier columnExpr TYPE columnTypeExpr GRANULARITY DECIMAL_LITERAL;
tableProjectionDfnt: nestedIdentifier projectionSelectStmt;
codecExpr: CODEC LPAREN codecArgExpr (COMMA codecArgExpr)* RPAREN;
codecArgExpr: identifier (LPAREN columnExprList? RPAREN)?;
ttlExpr: columnExpr (DELETE | TO DISK STRING_LITERAL | TO VOLUME STRING_LITERAL)?;
@ -212,6 +220,17 @@ optimizeStmt: OPTIMIZE TABLE tableIdentifier clusterClause? partitionClause? FIN

renameStmt: RENAME TABLE tableIdentifier TO tableIdentifier (COMMA tableIdentifier TO tableIdentifier)* clusterClause?;

// PROJECTION SELECT statement

projectionSelectStmt:
    LPAREN
    withClause?
    SELECT columnExprList
    groupByClause?
    projectionOrderByClause?
    RPAREN
    ;

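Read together, tableProjectionDfnt and projectionSelectStmt above restrict a projection body to an optional WITH, a SELECT list, an optional GROUP BY and an optional projection ORDER BY, all inside parentheses; there is no FROM, WHERE or JOIN. Illustrative strings only, derived by reading the rule (not test data from this commit):

    #include <string>
    #include <vector>

    // Shapes the projectionSelectStmt rule accepts ...
    static const std::vector<std::string> accepted_projection_bodies = {
        "(SELECT a, b)",
        "(SELECT a, sum(b) GROUP BY a)",
        "(WITH a + 1 AS c SELECT c ORDER BY c)",
    };

    // ... and shapes it rejects, because the corresponding clauses are absent from the rule.
    static const std::vector<std::string> rejected_projection_bodies = {
        "(SELECT a FROM t)",
        "(SELECT a WHERE a > 0)",
    };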
// SELECT statement

selectUnionStmt: selectStmtWithParens (UNION ALL selectStmtWithParens)*;
@ -240,6 +259,7 @@ whereClause: WHERE columnExpr;
groupByClause: GROUP BY ((CUBE | ROLLUP) LPAREN columnExprList RPAREN | columnExprList);
havingClause: HAVING columnExpr;
orderByClause: ORDER BY orderExprList;
projectionOrderByClause: ORDER BY columnExprList;
limitByClause: LIMIT limitExpr BY columnExprList;
limitClause: LIMIT limitExpr (WITH TIES)?;
settingsClause: SETTINGS settingExprList;
@ -436,7 +456,7 @@ keyword
    | DISTRIBUTED | DROP | ELSE | END | ENGINE | EVENTS | EXISTS | EXPLAIN | EXPRESSION | EXTRACT | FETCHES | FINAL | FIRST | FLUSH | FOR
    | FORMAT | FREEZE | FROM | FULL | FUNCTION | GLOBAL | GRANULARITY | GROUP | HAVING | HIERARCHICAL | ID | IF | ILIKE | IN | INDEX
    | INJECTIVE | INNER | INSERT | INTERVAL | INTO | IS | IS_OBJECT_ID | JOIN | JSON_FALSE | JSON_TRUE | KEY | KILL | LAST | LAYOUT
    | LEADING | LEFT | LIFETIME | LIKE | LIMIT | LIVE | LOCAL | LOGS | MATERIALIZED | MAX | MERGES | MIN | MODIFY | MOVE | MUTATION | NO
    | LEADING | LEFT | LIFETIME | LIKE | LIMIT | LIVE | LOCAL | LOGS | MATERIALIZE | MATERIALIZED | MAX | MERGES | MIN | MODIFY | MOVE | MUTATION | NO
    | NOT | NULLS | OFFSET | ON | OPTIMIZE | OR | ORDER | OUTER | OUTFILE | PARTITION | POPULATE | PREWHERE | PRIMARY | RANGE | RELOAD
    | REMOVE | RENAME | REPLACE | REPLICA | REPLICATED | RIGHT | ROLLUP | SAMPLE | SELECT | SEMI | SENDS | SET | SETTINGS | SHOW | SOURCE
    | START | STOP | SUBSTRING | SYNC | SYNTAX | SYSTEM | TABLE | TABLES | TEMPORARY | TEST | THEN | TIES | TIMEOUT | TIMESTAMP | TOTALS
@ -30,30 +30,30 @@ public:
|
||||
INSERT = 81, INTERVAL = 82, INTO = 83, IS = 84, IS_OBJECT_ID = 85, JOIN = 86,
|
||||
KEY = 87, KILL = 88, LAST = 89, LAYOUT = 90, LEADING = 91, LEFT = 92,
|
||||
LIFETIME = 93, LIKE = 94, LIMIT = 95, LIVE = 96, LOCAL = 97, LOGS = 98,
|
||||
MATERIALIZED = 99, MAX = 100, MERGES = 101, MIN = 102, MINUTE = 103,
|
||||
MODIFY = 104, MONTH = 105, MOVE = 106, MUTATION = 107, NAN_SQL = 108,
|
||||
NO = 109, NOT = 110, NULL_SQL = 111, NULLS = 112, OFFSET = 113, ON = 114,
|
||||
OPTIMIZE = 115, OR = 116, ORDER = 117, OUTER = 118, OUTFILE = 119, PARTITION = 120,
|
||||
POPULATE = 121, PREWHERE = 122, PRIMARY = 123, QUARTER = 124, RANGE = 125,
|
||||
RELOAD = 126, REMOVE = 127, RENAME = 128, REPLACE = 129, REPLICA = 130,
|
||||
REPLICATED = 131, RIGHT = 132, ROLLUP = 133, SAMPLE = 134, SECOND = 135,
|
||||
SELECT = 136, SEMI = 137, SENDS = 138, SET = 139, SETTINGS = 140, SHOW = 141,
|
||||
SOURCE = 142, START = 143, STOP = 144, SUBSTRING = 145, SYNC = 146,
|
||||
SYNTAX = 147, SYSTEM = 148, TABLE = 149, TABLES = 150, TEMPORARY = 151,
|
||||
TEST = 152, THEN = 153, TIES = 154, TIMEOUT = 155, TIMESTAMP = 156,
|
||||
TO = 157, TOP = 158, TOTALS = 159, TRAILING = 160, TRIM = 161, TRUNCATE = 162,
|
||||
TTL = 163, TYPE = 164, UNION = 165, UPDATE = 166, USE = 167, USING = 168,
|
||||
UUID = 169, VALUES = 170, VIEW = 171, VOLUME = 172, WATCH = 173, WEEK = 174,
|
||||
WHEN = 175, WHERE = 176, WITH = 177, YEAR = 178, JSON_FALSE = 179, JSON_TRUE = 180,
|
||||
IDENTIFIER = 181, FLOATING_LITERAL = 182, OCTAL_LITERAL = 183, DECIMAL_LITERAL = 184,
|
||||
HEXADECIMAL_LITERAL = 185, STRING_LITERAL = 186, ARROW = 187, ASTERISK = 188,
|
||||
BACKQUOTE = 189, BACKSLASH = 190, COLON = 191, COMMA = 192, CONCAT = 193,
|
||||
DASH = 194, DOT = 195, EQ_DOUBLE = 196, EQ_SINGLE = 197, GE = 198, GT = 199,
|
||||
LBRACE = 200, LBRACKET = 201, LE = 202, LPAREN = 203, LT = 204, NOT_EQ = 205,
|
||||
PERCENT = 206, PLUS = 207, QUERY = 208, QUOTE_DOUBLE = 209, QUOTE_SINGLE = 210,
|
||||
RBRACE = 211, RBRACKET = 212, RPAREN = 213, SEMICOLON = 214, SLASH = 215,
|
||||
UNDERSCORE = 216, MULTI_LINE_COMMENT = 217, SINGLE_LINE_COMMENT = 218,
|
||||
WHITESPACE = 219
|
||||
MATERIALIZED = 99, MATERIALIZE = 100, MAX = 101, MERGES = 102, MIN = 103,
|
||||
MINUTE = 104, MODIFY = 105, MONTH = 106, MOVE = 107, MUTATION = 108,
|
||||
NAN_SQL = 109, NO = 110, NOT = 111, NULL_SQL = 112, NULLS = 113, OFFSET = 114,
|
||||
ON = 115, OPTIMIZE = 116, OR = 117, ORDER = 118, OUTER = 119, OUTFILE = 120,
|
||||
PARTITION = 121, POPULATE = 122, PREWHERE = 123, PRIMARY = 124, PROJECTION = 125,
|
||||
QUARTER = 126, RANGE = 127, RELOAD = 128, REMOVE = 129, RENAME = 130,
|
||||
REPLACE = 131, REPLICA = 132, REPLICATED = 133, RIGHT = 134, ROLLUP = 135,
|
||||
SAMPLE = 136, SECOND = 137, SELECT = 138, SEMI = 139, SENDS = 140, SET = 141,
|
||||
SETTINGS = 142, SHOW = 143, SOURCE = 144, START = 145, STOP = 146, SUBSTRING = 147,
|
||||
SYNC = 148, SYNTAX = 149, SYSTEM = 150, TABLE = 151, TABLES = 152, TEMPORARY = 153,
|
||||
TEST = 154, THEN = 155, TIES = 156, TIMEOUT = 157, TIMESTAMP = 158,
|
||||
TO = 159, TOP = 160, TOTALS = 161, TRAILING = 162, TRIM = 163, TRUNCATE = 164,
|
||||
TTL = 165, TYPE = 166, UNION = 167, UPDATE = 168, USE = 169, USING = 170,
|
||||
UUID = 171, VALUES = 172, VIEW = 173, VOLUME = 174, WATCH = 175, WEEK = 176,
|
||||
WHEN = 177, WHERE = 178, WITH = 179, YEAR = 180, JSON_FALSE = 181, JSON_TRUE = 182,
|
||||
IDENTIFIER = 183, FLOATING_LITERAL = 184, OCTAL_LITERAL = 185, DECIMAL_LITERAL = 186,
|
||||
HEXADECIMAL_LITERAL = 187, STRING_LITERAL = 188, ARROW = 189, ASTERISK = 190,
|
||||
BACKQUOTE = 191, BACKSLASH = 192, COLON = 193, COMMA = 194, CONCAT = 195,
|
||||
DASH = 196, DOT = 197, EQ_DOUBLE = 198, EQ_SINGLE = 199, GE = 200, GT = 201,
|
||||
LBRACE = 202, LBRACKET = 203, LE = 204, LPAREN = 205, LT = 206, NOT_EQ = 207,
|
||||
PERCENT = 208, PLUS = 209, QUERY = 210, QUOTE_DOUBLE = 211, QUOTE_SINGLE = 212,
|
||||
RBRACE = 213, RBRACKET = 214, RPAREN = 215, SEMICOLON = 216, SLASH = 217,
|
||||
UNDERSCORE = 218, MULTI_LINE_COMMENT = 219, SINGLE_LINE_COMMENT = 220,
|
||||
WHITESPACE = 221
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -68,30 +68,31 @@ public:
|
||||
RuleEngineClause = 26, RulePartitionByClause = 27, RulePrimaryKeyClause = 28,
|
||||
RuleSampleByClause = 29, RuleTtlClause = 30, RuleEngineExpr = 31, RuleTableElementExpr = 32,
|
||||
RuleTableColumnDfnt = 33, RuleTableColumnPropertyExpr = 34, RuleTableIndexDfnt = 35,
|
||||
RuleCodecExpr = 36, RuleCodecArgExpr = 37, RuleTtlExpr = 38, RuleDescribeStmt = 39,
|
||||
RuleDropStmt = 40, RuleExistsStmt = 41, RuleExplainStmt = 42, RuleInsertStmt = 43,
|
||||
RuleColumnsClause = 44, RuleDataClause = 45, RuleKillStmt = 46, RuleOptimizeStmt = 47,
|
||||
RuleRenameStmt = 48, RuleSelectUnionStmt = 49, RuleSelectStmtWithParens = 50,
|
||||
RuleSelectStmt = 51, RuleWithClause = 52, RuleTopClause = 53, RuleFromClause = 54,
|
||||
RuleArrayJoinClause = 55, RulePrewhereClause = 56, RuleWhereClause = 57,
|
||||
RuleGroupByClause = 58, RuleHavingClause = 59, RuleOrderByClause = 60,
|
||||
RuleLimitByClause = 61, RuleLimitClause = 62, RuleSettingsClause = 63,
|
||||
RuleJoinExpr = 64, RuleJoinOp = 65, RuleJoinOpCross = 66, RuleJoinConstraintClause = 67,
|
||||
RuleSampleClause = 68, RuleLimitExpr = 69, RuleOrderExprList = 70, RuleOrderExpr = 71,
|
||||
RuleRatioExpr = 72, RuleSettingExprList = 73, RuleSettingExpr = 74,
|
||||
RuleSetStmt = 75, RuleShowStmt = 76, RuleSystemStmt = 77, RuleTruncateStmt = 78,
|
||||
RuleUseStmt = 79, RuleWatchStmt = 80, RuleColumnTypeExpr = 81, RuleColumnExprList = 82,
|
||||
RuleColumnsExpr = 83, RuleColumnExpr = 84, RuleColumnArgList = 85, RuleColumnArgExpr = 86,
|
||||
RuleColumnLambdaExpr = 87, RuleColumnIdentifier = 88, RuleNestedIdentifier = 89,
|
||||
RuleTableExpr = 90, RuleTableFunctionExpr = 91, RuleTableIdentifier = 92,
|
||||
RuleTableArgList = 93, RuleTableArgExpr = 94, RuleDatabaseIdentifier = 95,
|
||||
RuleFloatingLiteral = 96, RuleNumberLiteral = 97, RuleLiteral = 98,
|
||||
RuleInterval = 99, RuleKeyword = 100, RuleKeywordForAlias = 101, RuleAlias = 102,
|
||||
RuleIdentifier = 103, RuleIdentifierOrNull = 104, RuleEnumValue = 105
|
||||
RuleTableProjectionDfnt = 36, RuleCodecExpr = 37, RuleCodecArgExpr = 38,
|
||||
RuleTtlExpr = 39, RuleDescribeStmt = 40, RuleDropStmt = 41, RuleExistsStmt = 42,
|
||||
RuleExplainStmt = 43, RuleInsertStmt = 44, RuleColumnsClause = 45, RuleDataClause = 46,
|
||||
RuleKillStmt = 47, RuleOptimizeStmt = 48, RuleRenameStmt = 49, RuleProjectionSelectStmt = 50,
|
||||
RuleSelectUnionStmt = 51, RuleSelectStmtWithParens = 52, RuleSelectStmt = 53,
|
||||
RuleWithClause = 54, RuleTopClause = 55, RuleFromClause = 56, RuleArrayJoinClause = 57,
|
||||
RulePrewhereClause = 58, RuleWhereClause = 59, RuleGroupByClause = 60,
|
||||
RuleHavingClause = 61, RuleOrderByClause = 62, RuleProjectionOrderByClause = 63,
|
||||
RuleLimitByClause = 64, RuleLimitClause = 65, RuleSettingsClause = 66,
|
||||
RuleJoinExpr = 67, RuleJoinOp = 68, RuleJoinOpCross = 69, RuleJoinConstraintClause = 70,
|
||||
RuleSampleClause = 71, RuleLimitExpr = 72, RuleOrderExprList = 73, RuleOrderExpr = 74,
|
||||
RuleRatioExpr = 75, RuleSettingExprList = 76, RuleSettingExpr = 77,
|
||||
RuleSetStmt = 78, RuleShowStmt = 79, RuleSystemStmt = 80, RuleTruncateStmt = 81,
|
||||
RuleUseStmt = 82, RuleWatchStmt = 83, RuleColumnTypeExpr = 84, RuleColumnExprList = 85,
|
||||
RuleColumnsExpr = 86, RuleColumnExpr = 87, RuleColumnArgList = 88, RuleColumnArgExpr = 89,
|
||||
RuleColumnLambdaExpr = 90, RuleColumnIdentifier = 91, RuleNestedIdentifier = 92,
|
||||
RuleTableExpr = 93, RuleTableFunctionExpr = 94, RuleTableIdentifier = 95,
|
||||
RuleTableArgList = 96, RuleTableArgExpr = 97, RuleDatabaseIdentifier = 98,
|
||||
RuleFloatingLiteral = 99, RuleNumberLiteral = 100, RuleLiteral = 101,
|
||||
RuleInterval = 102, RuleKeyword = 103, RuleKeywordForAlias = 104, RuleAlias = 105,
|
||||
RuleIdentifier = 106, RuleIdentifierOrNull = 107, RuleEnumValue = 108
|
||||
};
|
||||
|
||||
ClickHouseParser(antlr4::TokenStream *input);
|
||||
~ClickHouseParser() override;
|
||||
~ClickHouseParser();
|
||||
|
||||
virtual std::string getGrammarFileName() const override;
|
||||
virtual const antlr4::atn::ATN& getATN() const override { return _atn; };
|
||||
@ -136,6 +137,7 @@ public:
|
||||
class TableColumnDfntContext;
|
||||
class TableColumnPropertyExprContext;
|
||||
class TableIndexDfntContext;
|
||||
class TableProjectionDfntContext;
|
||||
class CodecExprContext;
|
||||
class CodecArgExprContext;
|
||||
class TtlExprContext;
|
||||
@ -149,6 +151,7 @@ public:
|
||||
class KillStmtContext;
|
||||
class OptimizeStmtContext;
|
||||
class RenameStmtContext;
|
||||
class ProjectionSelectStmtContext;
|
||||
class SelectUnionStmtContext;
|
||||
class SelectStmtWithParensContext;
|
||||
class SelectStmtContext;
|
||||
@ -161,6 +164,7 @@ public:
|
||||
class GroupByClauseContext;
|
||||
class HavingClauseContext;
|
||||
class OrderByClauseContext;
|
||||
class ProjectionOrderByClauseContext;
|
||||
class LimitByClauseContext;
|
||||
class LimitClauseContext;
|
||||
class SettingsClauseContext;
|
||||
@ -313,44 +317,6 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseRenameContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseRenameContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *RENAME();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
std::vector<NestedIdentifierContext *> nestedIdentifier();
|
||||
NestedIdentifierContext* nestedIdentifier(size_t i);
|
||||
antlr4::tree::TerminalNode *TO();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseFreezePartitionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseFreezePartitionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *FREEZE();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MODIFY();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
TableColumnDfntContext *tableColumnDfnt();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyOrderByContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyOrderByContext(AlterTableClauseContext *ctx);
|
||||
@ -363,16 +329,6 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseRemoveTTLContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseRemoveTTLContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *REMOVE();
|
||||
antlr4::tree::TerminalNode *TTL();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseUpdateContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseUpdateContext(AlterTableClauseContext *ctx);
|
||||
@ -384,6 +340,21 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseClearProjectionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseClearProjectionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *CLEAR();
|
||||
antlr4::tree::TerminalNode *PROJECTION();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *IN();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyRemoveContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyRemoveContext(AlterTableClauseContext *ctx);
|
||||
@ -410,20 +381,6 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyCodecContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyCodecContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MODIFY();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
CodecExprContext *codecExpr();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseCommentContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseCommentContext(AlterTableClauseContext *ctx);
|
||||
@ -438,18 +395,6 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseAttachContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseAttachContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *ATTACH();
|
||||
PartitionClauseContext *partitionClause();
|
||||
antlr4::tree::TerminalNode *FROM();
|
||||
TableIdentifierContext *tableIdentifier();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseDropColumnContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseDropColumnContext(AlterTableClauseContext *ctx);
|
||||
@ -463,21 +408,6 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseClearContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseClearContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *CLEAR();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *IN();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseDetachContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseDetachContext(AlterTableClauseContext *ctx);
|
||||
@ -488,19 +418,6 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseDropIndexContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseDropIndexContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *DROP();
|
||||
antlr4::tree::TerminalNode *INDEX();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseAddIndexContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseAddIndexContext(AlterTableClauseContext *ctx);
|
||||
@ -527,6 +444,182 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseMaterializeIndexContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseMaterializeIndexContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MATERIALIZE();
|
||||
antlr4::tree::TerminalNode *INDEX();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *IN();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseMaterializeProjectionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseMaterializeProjectionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MATERIALIZE();
|
||||
antlr4::tree::TerminalNode *PROJECTION();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *IN();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseMovePartitionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseMovePartitionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MOVE();
|
||||
PartitionClauseContext *partitionClause();
|
||||
antlr4::tree::TerminalNode *TO();
|
||||
antlr4::tree::TerminalNode *DISK();
|
||||
antlr4::tree::TerminalNode *STRING_LITERAL();
|
||||
antlr4::tree::TerminalNode *VOLUME();
|
||||
antlr4::tree::TerminalNode *TABLE();
|
||||
TableIdentifierContext *tableIdentifier();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseRenameContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseRenameContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *RENAME();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
std::vector<NestedIdentifierContext *> nestedIdentifier();
|
||||
NestedIdentifierContext* nestedIdentifier(size_t i);
|
||||
antlr4::tree::TerminalNode *TO();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseFreezePartitionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseFreezePartitionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *FREEZE();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseClearColumnContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseClearColumnContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *CLEAR();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *IN();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MODIFY();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
TableColumnDfntContext *tableColumnDfnt();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseClearIndexContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseClearIndexContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *CLEAR();
|
||||
antlr4::tree::TerminalNode *INDEX();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *IN();
|
||||
PartitionClauseContext *partitionClause();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseRemoveTTLContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseRemoveTTLContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *REMOVE();
|
||||
antlr4::tree::TerminalNode *TTL();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyCodecContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyCodecContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MODIFY();
|
||||
antlr4::tree::TerminalNode *COLUMN();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
CodecExprContext *codecExpr();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseAttachContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseAttachContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *ATTACH();
|
||||
PartitionClauseContext *partitionClause();
|
||||
antlr4::tree::TerminalNode *FROM();
|
||||
TableIdentifierContext *tableIdentifier();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseDropProjectionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseDropProjectionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *DROP();
|
||||
antlr4::tree::TerminalNode *PROJECTION();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseDropIndexContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseDropIndexContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *DROP();
|
||||
antlr4::tree::TerminalNode *INDEX();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseModifyCommentContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseModifyCommentContext(AlterTableClauseContext *ctx);
|
||||
@ -552,18 +645,18 @@ public:
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class AlterTableClauseMovePartitionContext : public AlterTableClauseContext {
|
||||
class AlterTableClauseAddProjectionContext : public AlterTableClauseContext {
|
||||
public:
|
||||
AlterTableClauseMovePartitionContext(AlterTableClauseContext *ctx);
|
||||
AlterTableClauseAddProjectionContext(AlterTableClauseContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *MOVE();
|
||||
PartitionClauseContext *partitionClause();
|
||||
antlr4::tree::TerminalNode *TO();
|
||||
antlr4::tree::TerminalNode *DISK();
|
||||
antlr4::tree::TerminalNode *STRING_LITERAL();
|
||||
antlr4::tree::TerminalNode *VOLUME();
|
||||
antlr4::tree::TerminalNode *TABLE();
|
||||
TableIdentifierContext *tableIdentifier();
|
||||
antlr4::tree::TerminalNode *ADD();
|
||||
antlr4::tree::TerminalNode *PROJECTION();
|
||||
TableProjectionDfntContext *tableProjectionDfnt();
|
||||
antlr4::tree::TerminalNode *IF();
|
||||
antlr4::tree::TerminalNode *NOT();
|
||||
antlr4::tree::TerminalNode *EXISTS();
|
||||
antlr4::tree::TerminalNode *AFTER();
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
@ -1257,6 +1350,16 @@ public:
|
||||
|
||||
};
|
||||
|
||||
class TableElementExprProjectionContext : public TableElementExprContext {
|
||||
public:
|
||||
TableElementExprProjectionContext(TableElementExprContext *ctx);
|
||||
|
||||
antlr4::tree::TerminalNode *PROJECTION();
|
||||
TableProjectionDfntContext *tableProjectionDfnt();
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
};
|
||||
|
||||
class TableElementExprConstraintContext : public TableElementExprContext {
|
||||
public:
|
||||
TableElementExprConstraintContext(TableElementExprContext *ctx);
|
||||
@ -1344,6 +1447,20 @@ public:
|
||||
|
||||
TableIndexDfntContext* tableIndexDfnt();
|
||||
|
||||
class TableProjectionDfntContext : public antlr4::ParserRuleContext {
|
||||
public:
|
||||
TableProjectionDfntContext(antlr4::ParserRuleContext *parent, size_t invokingState);
|
||||
virtual size_t getRuleIndex() const override;
|
||||
NestedIdentifierContext *nestedIdentifier();
|
||||
ProjectionSelectStmtContext *projectionSelectStmt();
|
||||
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
|
||||
};
|
||||
|
||||
TableProjectionDfntContext* tableProjectionDfnt();
|
||||
|
||||
class CodecExprContext : public antlr4::ParserRuleContext {
|
||||
public:
|
||||
CodecExprContext(antlr4::ParserRuleContext *parent, size_t invokingState);
|
||||
@ -1671,6 +1788,25 @@ public:
|
||||
|
||||
RenameStmtContext* renameStmt();
|
||||
|
||||
class ProjectionSelectStmtContext : public antlr4::ParserRuleContext {
|
||||
public:
|
||||
ProjectionSelectStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
|
||||
virtual size_t getRuleIndex() const override;
|
||||
antlr4::tree::TerminalNode *LPAREN();
|
||||
antlr4::tree::TerminalNode *SELECT();
|
||||
ColumnExprListContext *columnExprList();
|
||||
antlr4::tree::TerminalNode *RPAREN();
|
||||
WithClauseContext *withClause();
|
||||
GroupByClauseContext *groupByClause();
|
||||
ProjectionOrderByClauseContext *projectionOrderByClause();
|
||||
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
|
||||
};
|
||||
|
||||
ProjectionSelectStmtContext* projectionSelectStmt();
|
||||
|
||||
class SelectUnionStmtContext : public antlr4::ParserRuleContext {
|
||||
public:
|
||||
SelectUnionStmtContext(antlr4::ParserRuleContext *parent, size_t invokingState);
|
||||
@ -1874,6 +2010,21 @@ public:
|
||||
|
||||
OrderByClauseContext* orderByClause();
|
||||
|
||||
class ProjectionOrderByClauseContext : public antlr4::ParserRuleContext {
|
||||
public:
|
||||
ProjectionOrderByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
|
||||
virtual size_t getRuleIndex() const override;
|
||||
antlr4::tree::TerminalNode *ORDER();
|
||||
antlr4::tree::TerminalNode *BY();
|
||||
ColumnExprListContext *columnExprList();
|
||||
|
||||
|
||||
virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
|
||||
|
||||
};
|
||||
|
||||
ProjectionOrderByClauseContext* projectionOrderByClause();
|
||||
|
||||
class LimitByClauseContext : public antlr4::ParserRuleContext {
|
||||
public:
|
||||
LimitByClauseContext(antlr4::ParserRuleContext *parent, size_t invokingState);
|
||||
@ -3273,6 +3424,7 @@ public:
|
||||
antlr4::tree::TerminalNode *LIVE();
|
||||
antlr4::tree::TerminalNode *LOCAL();
|
||||
antlr4::tree::TerminalNode *LOGS();
|
||||
antlr4::tree::TerminalNode *MATERIALIZE();
|
||||
antlr4::tree::TerminalNode *MATERIALIZED();
|
||||
antlr4::tree::TerminalNode *MAX();
|
||||
antlr4::tree::TerminalNode *MERGES();
|
||||
|
@ -30,9 +30,15 @@ public:
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseAddIndex(ClickHouseParser::AlterTableClauseAddIndexContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseAddProjection(ClickHouseParser::AlterTableClauseAddProjectionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseAttach(ClickHouseParser::AlterTableClauseAttachContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseClear(ClickHouseParser::AlterTableClauseClearContext *context) = 0;
|
||||
virtual antlrcpp::Any visitAlterTableClauseClearColumn(ClickHouseParser::AlterTableClauseClearColumnContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseClearIndex(ClickHouseParser::AlterTableClauseClearIndexContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseClearProjection(ClickHouseParser::AlterTableClauseClearProjectionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseComment(ClickHouseParser::AlterTableClauseCommentContext *context) = 0;
|
||||
|
||||
@ -44,10 +50,16 @@ public:
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseDropIndex(ClickHouseParser::AlterTableClauseDropIndexContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseDropProjection(ClickHouseParser::AlterTableClauseDropProjectionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseDropPartition(ClickHouseParser::AlterTableClauseDropPartitionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseFreezePartition(ClickHouseParser::AlterTableClauseFreezePartitionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseMaterializeIndex(ClickHouseParser::AlterTableClauseMaterializeIndexContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseMaterializeProjection(ClickHouseParser::AlterTableClauseMaterializeProjectionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseModifyCodec(ClickHouseParser::AlterTableClauseModifyCodecContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitAlterTableClauseModifyComment(ClickHouseParser::AlterTableClauseModifyCommentContext *context) = 0;
|
||||
@ -146,12 +158,16 @@ public:
|
||||
|
||||
virtual antlrcpp::Any visitTableElementExprIndex(ClickHouseParser::TableElementExprIndexContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitTableElementExprProjection(ClickHouseParser::TableElementExprProjectionContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitTableColumnDfnt(ClickHouseParser::TableColumnDfntContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitTableColumnPropertyExpr(ClickHouseParser::TableColumnPropertyExprContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitTableIndexDfnt(ClickHouseParser::TableIndexDfntContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitTableProjectionDfnt(ClickHouseParser::TableProjectionDfntContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitCodecExpr(ClickHouseParser::CodecExprContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitCodecArgExpr(ClickHouseParser::CodecArgExprContext *context) = 0;
|
||||
@ -186,6 +202,8 @@ public:
|
||||
|
||||
virtual antlrcpp::Any visitRenameStmt(ClickHouseParser::RenameStmtContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitProjectionSelectStmt(ClickHouseParser::ProjectionSelectStmtContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitSelectUnionStmt(ClickHouseParser::SelectUnionStmtContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitSelectStmtWithParens(ClickHouseParser::SelectStmtWithParensContext *context) = 0;
|
||||
@ -210,6 +228,8 @@ public:
|
||||
|
||||
virtual antlrcpp::Any visitOrderByClause(ClickHouseParser::OrderByClauseContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitProjectionOrderByClause(ClickHouseParser::ProjectionOrderByClauseContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitLimitByClause(ClickHouseParser::LimitByClauseContext *context) = 0;
|
||||
|
||||
virtual antlrcpp::Any visitLimitClause(ClickHouseParser::LimitClauseContext *context) = 0;
|
||||
|
@ -19,15 +19,21 @@ public:
|
||||
// AlterTableQuery
|
||||
antlrcpp::Any visitAlterTableClauseAddColumn(ClickHouseParser::AlterTableClauseAddColumnContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseAddIndex(ClickHouseParser::AlterTableClauseAddIndexContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseAddProjection(ClickHouseParser::AlterTableClauseAddProjectionContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseAttach(ClickHouseParser::AlterTableClauseAttachContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseClear(ClickHouseParser::AlterTableClauseClearContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseClearColumn(ClickHouseParser::AlterTableClauseClearColumnContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseClearIndex(ClickHouseParser::AlterTableClauseClearIndexContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseClearProjection(ClickHouseParser::AlterTableClauseClearProjectionContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseComment(ClickHouseParser::AlterTableClauseCommentContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseDelete(ClickHouseParser::AlterTableClauseDeleteContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseDetach(ClickHouseParser::AlterTableClauseDetachContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseDropColumn(ClickHouseParser::AlterTableClauseDropColumnContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseDropIndex(ClickHouseParser::AlterTableClauseDropIndexContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseDropProjection(ClickHouseParser::AlterTableClauseDropProjectionContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseDropPartition(ClickHouseParser::AlterTableClauseDropPartitionContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseFreezePartition(ClickHouseParser::AlterTableClauseFreezePartitionContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseMaterializeIndex(ClickHouseParser::AlterTableClauseMaterializeIndexContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseMaterializeProjection(ClickHouseParser::AlterTableClauseMaterializeProjectionContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseModify(ClickHouseParser::AlterTableClauseModifyContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseModifyCodec(ClickHouseParser::AlterTableClauseModifyCodecContext * ctx) override;
|
||||
antlrcpp::Any visitAlterTableClauseModifyComment(ClickHouseParser::AlterTableClauseModifyCommentContext * ctx) override;
|
||||
@ -162,6 +168,7 @@ public:
|
||||
antlrcpp::Any visitRenameStmt(ClickHouseParser::RenameStmtContext * ctx) override;
|
||||
|
||||
// SelectUnionQuery
|
||||
antlrcpp::Any visitProjectionSelectStmt(ClickHouseParser::ProjectionSelectStmtContext * ctx) override;
|
||||
antlrcpp::Any visitSelectStmt(ClickHouseParser::SelectStmtContext * ctx) override;
|
||||
antlrcpp::Any visitSelectStmtWithParens(ClickHouseParser::SelectStmtWithParensContext * ctx) override;
|
||||
antlrcpp::Any visitSelectUnionStmt(ClickHouseParser::SelectUnionStmtContext * ctx) override;
|
||||
@ -190,7 +197,9 @@ public:
|
||||
antlrcpp::Any visitTableElementExprColumn(ClickHouseParser::TableElementExprColumnContext * ctx) override;
|
||||
antlrcpp::Any visitTableElementExprConstraint(ClickHouseParser::TableElementExprConstraintContext * ctx) override;
|
||||
antlrcpp::Any visitTableElementExprIndex(ClickHouseParser::TableElementExprIndexContext * ctx) override;
|
||||
antlrcpp::Any visitTableElementExprProjection(ClickHouseParser::TableElementExprProjectionContext * ctx) override;
|
||||
antlrcpp::Any visitTableIndexDfnt(ClickHouseParser::TableIndexDfntContext * ctx) override;
|
||||
antlrcpp::Any visitTableProjectionDfnt(ClickHouseParser::TableProjectionDfntContext * ctx) override;
|
||||
|
||||
// TableExpr
|
||||
antlrcpp::Any visitTableArgExpr(ClickHouseParser::TableArgExprContext * ctx) override;
|
||||
@ -236,6 +245,7 @@ public:
|
||||
antlrcpp::Any visitGroupByClause(ClickHouseParser::GroupByClauseContext *ctx) override;
|
||||
antlrcpp::Any visitHavingClause(ClickHouseParser::HavingClauseContext *ctx) override;
|
||||
antlrcpp::Any visitOrderByClause(ClickHouseParser::OrderByClauseContext *ctx) override;
|
||||
antlrcpp::Any visitProjectionOrderByClause(ClickHouseParser::ProjectionOrderByClauseContext *ctx) override;
|
||||
antlrcpp::Any visitLimitByClause(ClickHouseParser::LimitByClauseContext *ctx) override;
|
||||
antlrcpp::Any visitLimitClause(ClickHouseParser::LimitClauseContext *ctx) override;
|
||||
antlrcpp::Any visitSettingsClause(ClickHouseParser::SettingsClauseContext *ctx) override;
|
||||
|
@ -177,8 +177,7 @@ bool ParserProjectionDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected &

    auto projection = std::make_shared<ASTProjectionDeclaration>();
    projection->name = name->as<ASTIdentifier &>().name();
    projection->query = query;
    projection->children.emplace_back(projection->query);
    projection->set(projection->query, query);
    node = projection;

    return true;

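The hunk above replaces the manual query/children wiring in ParserProjectionDeclaration with a single set() call, which assigns the member and registers the child node in one step. A self-contained analogy of that pattern (plain C++, not the actual IAST interface):

    #include <memory>
    #include <vector>

    struct Node
    {
        std::vector<std::shared_ptr<Node>> children;  // owned child nodes
        Node * query = nullptr;                       // convenience pointer into children

        // Store the raw pointer and record ownership together, so the two cannot diverge.
        void set(Node *& field, const std::shared_ptr<Node> & child)
        {
            children.push_back(child);
            field = child.get();
        }
    };

    int main()
    {
        auto projection = std::make_shared<Node>();
        auto query = std::make_shared<Node>();
        projection->set(projection->query, query);  // one call instead of two separate steps
    }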
@ -17,20 +17,15 @@ bool ParserProjectionSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &

    ParserKeyword s_with("WITH");
    ParserKeyword s_select("SELECT");
    ParserKeyword s_distinct("DISTINCT");
    ParserKeyword s_where("WHERE");
    ParserKeyword s_group_by("GROUP BY");
    ParserKeyword s_order_by("ORDER BY");

    ParserNotEmptyExpressionList exp_list(false);
    ParserNotEmptyExpressionList exp_list_for_with_clause(false);
    ParserNotEmptyExpressionList exp_list_for_select_clause(true); /// Allows aliases without AS keyword.
    ParserExpressionWithOptionalAlias exp_elem(false);
    ParserExpression order_expression_p;

    ASTPtr with_expression_list;
    ASTPtr select_expression_list;
    ASTPtr where_expression;
    ASTPtr group_expression_list;
    ASTPtr order_expression;

@ -48,21 +43,10 @@ bool ParserProjectionSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &
        if (!s_select.ignore(pos, expected))
            return false;

        if (s_distinct.ignore(pos, expected))
            select_query->distinct = true;

        if (!exp_list_for_select_clause.parse(pos, select_expression_list, expected))
            return false;
    }

    // TODO: wait for condition normalizer to land
    /// WHERE expr
    // if (s_where.ignore(pos, expected))
    // {
    //     if (!exp_elem.parse(pos, where_expression, expected))
    //         return false;
    // }

    // If group by is specified, AggregatingMergeTree engine is used, and the group by keys are implied to be order by keys
    if (s_group_by.ignore(pos, expected))
    {
@ -70,6 +54,7 @@ bool ParserProjectionSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &
                .parse(pos, group_expression_list, expected))
            return false;
    }

    if (s_order_by.ignore(pos, expected))
    {
        ASTPtr expr_list;
@ -92,7 +77,6 @@ bool ParserProjectionSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &

    select_query->setExpression(ASTProjectionSelectQuery::Expression::WITH, std::move(with_expression_list));
    select_query->setExpression(ASTProjectionSelectQuery::Expression::SELECT, std::move(select_expression_list));
    // select_query->setExpression(ASTProjectionSelectQuery::Expression::WHERE, std::move(where_expression));
    select_query->setExpression(ASTProjectionSelectQuery::Expression::GROUP_BY, std::move(group_expression_list));
    select_query->setExpression(ASTProjectionSelectQuery::Expression::ORDER_BY, std::move(order_expression));
    return true;

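The hand-written ParserProjectionSelectQuery above is reached when a CREATE TABLE statement declares a projection among its table elements. A hypothetical driver (not part of the commit; the parseQuery(parser, text, max_query_size, max_parser_depth) signature and the header paths are assumed):

    #include <string>
    #include <Parsers/ParserCreateQuery.h>  // path assumed
    #include <Parsers/parseQuery.h>         // path assumed

    // Parse a CREATE TABLE whose element list contains a projection declaration;
    // ParserProjectionDeclaration / ParserProjectionSelectQuery handle the PROJECTION part.
    DB::ASTPtr parseCreateWithProjection()
    {
        const std::string query =
            "CREATE TABLE t (x UInt64, y UInt64, "
            "PROJECTION p (SELECT x, sum(y) GROUP BY x)) "
            "ENGINE = MergeTree ORDER BY x";
        DB::ParserCreateQuery parser;
        return DB::parseQuery(parser, query, 0, 0);
    }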
@ -219,7 +219,7 @@ void MySQLHandler::finishHandshake(MySQLProtocol::ConnectionPhase::HandshakeResp
            int ret = socket().receiveBytes(buf + pos, packet_size - pos);
            if (ret == 0)
            {
                throw Exception("Cannot read all data. Bytes read: " + std::to_string(pos) + ". Bytes expected: 3.", ErrorCodes::CANNOT_READ_ALL_DATA);
                throw Exception("Cannot read all data. Bytes read: " + std::to_string(pos) + ". Bytes expected: 3", ErrorCodes::CANNOT_READ_ALL_DATA);
            }
            pos += ret;
        }

@ -116,8 +116,8 @@ try
            }
            catch (...)
            {
                tryLogCurrentException(__PRETTY_FUNCTION__);
                CurrentMetrics::values[pool_config.tasks_metric]--;
                tryLogCurrentException(__PRETTY_FUNCTION__);
                scheduleTask(/* with_backoff = */ true);
            }
        });
@ -128,8 +128,8 @@ try
    catch (...)
    {
        /// With our Pool settings scheduleOrThrowOnError shouldn't throw exceptions, but for safety catch added here
        tryLogCurrentException(__PRETTY_FUNCTION__);
        CurrentMetrics::values[pool_config.tasks_metric]--;
        tryLogCurrentException(__PRETTY_FUNCTION__);
        scheduleTask(/* with_backoff = */ true);
    }
}

@ -9,6 +9,7 @@
|
||||
#include <Storages/MergeTree/MergeTreeDataPartInMemory.h>
|
||||
#include <Storages/MergeTree/MergedBlockOutputStream.h>
|
||||
#include <Storages/MergeTree/ReplicatedFetchList.h>
|
||||
#include <Storages/StorageReplicatedMergeTree.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/NetException.h>
|
||||
#include <IO/createReadBufferFromFileBase.h>
|
||||
@ -86,6 +87,10 @@ struct ReplicatedFetchReadCallback
|
||||
|
||||
}
|
||||
|
||||
|
||||
Service::Service(StorageReplicatedMergeTree & data_) :
|
||||
data(data_), log(&Poco::Logger::get(data.getLogName() + " (Replicated PartsService)")) {}
|
||||
|
||||
std::string Service::getId(const std::string & node_id) const
|
||||
{
|
||||
return getEndpointId(node_id);
|
||||
@ -243,6 +248,8 @@ void Service::sendPartFromMemory(
|
||||
NativeBlockOutputStream block_out(out, 0, metadata_snapshot->getSampleBlock());
|
||||
part->checksums.write(out);
|
||||
block_out.write(part_in_memory->block);
|
||||
|
||||
data.getSendsThrottler()->add(part_in_memory->block.bytes());
|
||||
}
|
||||
|
||||
MergeTreeData::DataPart::Checksums Service::sendPartFromDisk(
|
||||
@ -298,7 +305,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk(
|
||||
|
||||
auto file_in = disk->readFile(path);
|
||||
HashingWriteBuffer hashing_out(out);
|
||||
copyData(*file_in, hashing_out, blocker.getCounter());
|
||||
copyDataWithThrottler(*file_in, hashing_out, blocker.getCounter(), data.getSendsThrottler());
|
||||
|
||||
if (blocker.isCancelled())
|
||||
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
|
||||
@ -354,7 +361,7 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB
|
||||
|
||||
auto file_in = createReadBufferFromFileBase(metadata_file, 0, 0, 0, nullptr, DBMS_DEFAULT_BUFFER_SIZE);
|
||||
HashingWriteBuffer hashing_out(out);
|
||||
copyData(*file_in, hashing_out, blocker.getCounter());
|
||||
copyDataWithThrottler(*file_in, hashing_out, blocker.getCounter(), data.getSendsThrottler());
|
||||
if (blocker.isCancelled())
|
||||
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
|
||||
|
||||
@ -388,6 +395,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
|
||||
const String & user,
|
||||
const String & password,
|
||||
const String & interserver_scheme,
|
||||
ThrottlerPtr throttler,
|
||||
bool to_detached,
|
||||
const String & tmp_prefix_,
|
||||
std::optional<CurrentlySubmergingEmergingTagger> * tagger_ptr,
|
||||
@ -514,7 +522,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
|
||||
|
||||
try
|
||||
{
|
||||
return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, std::move(disks_s3), in);
|
||||
return downloadPartToS3(part_name, replica_path, to_detached, tmp_prefix_, std::move(disks_s3), in, throttler);
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
@ -522,7 +530,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
|
||||
throw;
|
||||
/// Try again but without S3 copy
|
||||
return fetchPart(metadata_snapshot, context, part_name, replica_path, host, port, timeouts,
|
||||
user, password, interserver_scheme, to_detached, tmp_prefix_, nullptr, false);
|
||||
user, password, interserver_scheme, throttler, to_detached, tmp_prefix_, nullptr, false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -585,8 +593,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
|
||||
|
||||
MergeTreeData::DataPart::Checksums checksums;
|
||||
return part_type == "InMemory"
|
||||
? downloadPartToMemory(part_name, part_uuid, metadata_snapshot, context, std::move(reservation), in, projections)
|
||||
: downloadPartToDisk(part_name, replica_path, to_detached, tmp_prefix_, sync, reservation->getDisk(), in, projections, checksums);
|
||||
? downloadPartToMemory(part_name, part_uuid, metadata_snapshot, context, std::move(reservation), in, projections, throttler)
|
||||
: downloadPartToDisk(part_name, replica_path, to_detached, tmp_prefix_, sync, reservation->getDisk(), in, projections, checksums, throttler);
|
||||
}
|
||||
|
||||
MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory(
|
||||
@ -596,7 +604,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory(
|
||||
ContextPtr context,
|
||||
ReservationPtr reservation,
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
size_t projections)
|
||||
size_t projections,
|
||||
ThrottlerPtr throttler)
|
||||
{
|
||||
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, reservation->getDisk(), 0);
|
||||
MergeTreeData::MutableDataPartPtr new_data_part =
|
||||
@ -612,6 +621,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory(
|
||||
|
||||
NativeBlockInputStream block_in(in, 0);
|
||||
auto block = block_in.read();
|
||||
throttler->add(block.bytes());
|
||||
|
||||
MergeTreePartInfo new_part_info("all", 0, 0, 0);
|
||||
MergeTreeData::MutableDataPartPtr new_projection_part =
|
||||
@ -643,6 +653,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToMemory(
|
||||
|
||||
NativeBlockInputStream block_in(in, 0);
|
||||
auto block = block_in.read();
|
||||
throttler->add(block.bytes());
|
||||
|
||||
new_data_part->uuid = part_uuid;
|
||||
new_data_part->is_temp = true;
|
||||
@ -666,7 +677,8 @@ void Fetcher::downloadBaseOrProjectionPartToDisk(
|
||||
bool sync,
|
||||
DiskPtr disk,
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
MergeTreeData::DataPart::Checksums & checksums) const
|
||||
MergeTreeData::DataPart::Checksums & checksums,
|
||||
ThrottlerPtr throttler) const
|
||||
{
|
||||
size_t files;
|
||||
readBinary(files, in);
|
||||
@ -689,7 +701,7 @@ void Fetcher::downloadBaseOrProjectionPartToDisk(
|
||||
|
||||
auto file_out = disk->writeFile(fs::path(part_download_path) / file_name);
|
||||
HashingWriteBuffer hashing_out(*file_out);
|
||||
copyData(in, hashing_out, file_size, blocker.getCounter());
|
||||
copyDataWithThrottler(in, hashing_out, file_size, blocker.getCounter(), throttler);
|
||||
|
||||
if (blocker.isCancelled())
|
||||
{
|
||||
@ -726,7 +738,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk(
|
||||
DiskPtr disk,
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
size_t projections,
|
||||
MergeTreeData::DataPart::Checksums & checksums)
|
||||
MergeTreeData::DataPart::Checksums & checksums,
|
||||
ThrottlerPtr throttler)
|
||||
{
|
||||
static const String TMP_PREFIX = "tmp_fetch_";
|
||||
String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_;
|
||||
@ -763,13 +776,13 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk(
|
||||
MergeTreeData::DataPart::Checksums projection_checksum;
|
||||
disk->createDirectories(part_download_path + projection_name + ".proj/");
|
||||
downloadBaseOrProjectionPartToDisk(
|
||||
replica_path, part_download_path + projection_name + ".proj/", sync, disk, in, projection_checksum);
|
||||
replica_path, part_download_path + projection_name + ".proj/", sync, disk, in, projection_checksum, throttler);
|
||||
checksums.addFile(
|
||||
projection_name + ".proj", projection_checksum.getTotalSizeOnDisk(), projection_checksum.getTotalChecksumUInt128());
|
||||
}
|
||||
|
||||
// Download the base part
|
||||
downloadBaseOrProjectionPartToDisk(replica_path, part_download_path, sync, disk, in, checksums);
|
||||
downloadBaseOrProjectionPartToDisk(replica_path, part_download_path, sync, disk, in, checksums, throttler);
|
||||
|
||||
assertEOF(in);
|
||||
auto volume = std::make_shared<SingleDiskVolume>("volume_" + part_name, disk, 0);
|
||||
@ -787,8 +800,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3(
|
||||
bool to_detached,
|
||||
const String & tmp_prefix_,
|
||||
const Disks & disks_s3,
|
||||
PooledReadWriteBufferFromHTTP & in
|
||||
)
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
ThrottlerPtr throttler)
|
||||
{
|
||||
if (disks_s3.empty())
|
||||
throw Exception("No S3 disks anymore", ErrorCodes::LOGICAL_ERROR);
|
||||
@ -841,7 +854,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3(
|
||||
|
||||
HashingWriteBuffer hashing_out(*file_out);
|
||||
|
||||
copyData(in, hashing_out, file_size, blocker.getCounter());
|
||||
copyDataWithThrottler(in, hashing_out, file_size, blocker.getCounter(), throttler);
|
||||
|
||||
if (blocker.isCancelled())
|
||||
{
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <IO/copyData.h>
|
||||
#include <IO/ConnectionTimeouts.h>
|
||||
#include <IO/ReadWriteBufferFromHTTP.h>
|
||||
#include <Common/Throttler.h>
|
||||
|
||||
|
||||
namespace zkutil
|
||||
@ -18,15 +19,17 @@ namespace zkutil
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class StorageReplicatedMergeTree;
|
||||
|
||||
namespace DataPartsExchange
|
||||
{
|
||||
|
||||
/** Service for sending parts from the table *MergeTree.
|
||||
/** Service for sending parts from the table *ReplicatedMergeTree.
|
||||
*/
|
||||
class Service final : public InterserverIOEndpoint
|
||||
{
|
||||
public:
|
||||
explicit Service(MergeTreeData & data_) : data(data_), log(&Poco::Logger::get(data.getLogName() + " (Replicated PartsService)")) {}
|
||||
explicit Service(StorageReplicatedMergeTree & data_);
|
||||
|
||||
Service(const Service &) = delete;
|
||||
Service & operator=(const Service &) = delete;
|
||||
@ -51,7 +54,7 @@ private:
|
||||
|
||||
/// StorageReplicatedMergeTree::shutdown() waits for all parts exchange handlers to finish,
|
||||
/// so Service will never access dangling reference to storage
|
||||
MergeTreeData & data;
|
||||
StorageReplicatedMergeTree & data;
|
||||
Poco::Logger * log;
|
||||
};
|
||||
|
||||
@ -74,6 +77,7 @@ public:
|
||||
const String & user,
|
||||
const String & password,
|
||||
const String & interserver_scheme,
|
||||
ThrottlerPtr throttler,
|
||||
bool to_detached = false,
|
||||
const String & tmp_prefix_ = "",
|
||||
std::optional<CurrentlySubmergingEmergingTagger> * tagger_ptr = nullptr,
|
||||
@ -90,7 +94,9 @@ private:
|
||||
bool sync,
|
||||
DiskPtr disk,
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
MergeTreeData::DataPart::Checksums & checksums) const;
|
||||
MergeTreeData::DataPart::Checksums & checksums,
|
||||
ThrottlerPtr throttler) const;
|
||||
|
||||
|
||||
MergeTreeData::MutableDataPartPtr downloadPartToDisk(
|
||||
const String & part_name,
|
||||
@ -101,7 +107,8 @@ private:
|
||||
DiskPtr disk,
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
size_t projections,
|
||||
MergeTreeData::DataPart::Checksums & checksums);
|
||||
MergeTreeData::DataPart::Checksums & checksums,
|
||||
ThrottlerPtr throttler);
|
||||
|
||||
MergeTreeData::MutableDataPartPtr downloadPartToMemory(
|
||||
const String & part_name,
|
||||
@ -110,7 +117,8 @@ private:
|
||||
ContextPtr context,
|
||||
ReservationPtr reservation,
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
size_t projections);
|
||||
size_t projections,
|
||||
ThrottlerPtr throttler);
|
||||
|
||||
MergeTreeData::MutableDataPartPtr downloadPartToS3(
|
||||
const String & part_name,
|
||||
@ -118,7 +126,8 @@ private:
|
||||
bool to_detached,
|
||||
const String & tmp_prefix_,
|
||||
const Disks & disks_s3,
|
||||
PooledReadWriteBufferFromHTTP & in);
|
||||
PooledReadWriteBufferFromHTTP & in,
|
||||
ThrottlerPtr throttler);
|
||||
|
||||
MergeTreeData & data;
|
||||
Poco::Logger * log;
|
||||
|
@ -92,6 +92,8 @@ struct Settings;
|
||||
M(Bool, replicated_can_become_leader, true, "If true, Replicated tables replicas on this node will try to acquire leadership.", 0) \
|
||||
M(Seconds, zookeeper_session_expiration_check_period, 60, "ZooKeeper session expiration check period, in seconds.", 0) \
|
||||
M(Bool, detach_old_local_parts_when_cloning_replica, 1, "Do not remove old local parts when repairing lost replica.", 0) \
|
||||
M(UInt64, max_replicated_fetches_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
|
||||
M(UInt64, max_replicated_sends_network_bandwidth, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
|
||||
\
|
||||
/** Check delay of replicas settings. */ \
|
||||
M(UInt64, min_relative_delay_to_measure, 120, "Calculate relative replica delay only if absolute delay is not less than this value.", 0) \
|
||||
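Both settings above are expressed in bytes per second, and zero disables the limit. The actual Throttler class is not shown in this diff; the following is only a rough Python sketch of the semantics that copyDataWithThrottler relies on (add() accounts for transferred bytes and sleeps once the average speed exceeds the cap):

import time

class SimpleThrottler:
    """Rough sketch only: bytes-per-second limiter, 0 means unlimited."""
    def __init__(self, max_bytes_per_second=0):
        self.limit = max_bytes_per_second
        self.total = 0
        self.start = time.monotonic()

    def add(self, amount):
        # Called after each copied chunk, as copyDataWithThrottler does in the diff.
        self.total += amount
        if not self.limit:
            return
        expected_elapsed = self.total / self.limit   # seconds the transfer should have taken
        actual_elapsed = time.monotonic() - self.start
        if expected_elapsed > actual_elapsed:
            time.sleep(expected_elapsed - actual_elapsed)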
|
@ -841,7 +841,7 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
|
||||
|
||||
size_t block_rows = block_to_write.rows();
|
||||
size_t block_bytes = block_to_write.bytes();
|
||||
size_t block_allocated_bytes = block_to_write.allocatedBytes();
|
||||
size_t block_allocated_bytes_delta = block_to_write.allocatedBytes() - buffer.data.allocatedBytes();
|
||||
|
||||
CurrentMetrics::sub(CurrentMetrics::StorageBufferRows, block_rows);
|
||||
CurrentMetrics::sub(CurrentMetrics::StorageBufferBytes, block_bytes);
|
||||
@ -851,7 +851,7 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
|
||||
if (!destination_id)
|
||||
{
|
||||
total_writes.rows -= block_rows;
|
||||
total_writes.bytes -= block_allocated_bytes;
|
||||
total_writes.bytes -= block_allocated_bytes_delta;
|
||||
|
||||
LOG_DEBUG(log, "Flushing buffer with {} rows (discarded), {} bytes, age {} seconds {}.", rows, bytes, time_passed, (check_thresholds ? "(bg)" : "(direct)"));
|
||||
return;
|
||||
@ -890,7 +890,7 @@ void StorageBuffer::flushBuffer(Buffer & buffer, bool check_thresholds, bool loc
|
||||
}
|
||||
|
||||
total_writes.rows -= block_rows;
|
||||
total_writes.bytes -= block_allocated_bytes;
|
||||
total_writes.bytes -= block_allocated_bytes_delta;
|
||||
|
||||
UInt64 milliseconds = watch.elapsedMilliseconds();
|
||||
LOG_DEBUG(log, "Flushing buffer with {} rows, {} bytes, age {} seconds, took {} ms {}.", rows, bytes, time_passed, milliseconds, (check_thresholds ? "(bg)" : "(direct)"));
|
||||
|
@ -1,22 +1,25 @@
|
||||
#include <Storages/StorageJoin.h>
|
||||
#include <Storages/StorageFactory.h>
|
||||
#include <Storages/StorageSet.h>
|
||||
#include <Interpreters/HashJoin.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
#include <Parsers/ASTSetQuery.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Core/ColumnNumbers.h>
|
||||
#include <DataStreams/IBlockInputStream.h>
|
||||
#include <DataTypes/NestedUtils.h>
|
||||
#include <Disks/IDisk.h>
|
||||
#include <Interpreters/joinDispatch.h>
|
||||
#include <Interpreters/MutationsInterpreter.h>
|
||||
#include <Interpreters/TableJoin.h>
|
||||
#include <Interpreters/castColumn.h>
|
||||
#include <Common/assert_cast.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Poco/String.h> /// toLower
|
||||
#include <Common/Exception.h>
|
||||
|
||||
#include <Compression/CompressedWriteBuffer.h>
|
||||
#include <Processors/Sources/SourceWithProgress.h>
|
||||
#include <Processors/Pipe.h>
|
||||
#include <Poco/String.h> /// toLower
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -65,10 +68,18 @@ StorageJoin::StorageJoin(
|
||||
restore();
|
||||
}
|
||||
|
||||
BlockOutputStreamPtr StorageJoin::write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context)
|
||||
{
|
||||
std::lock_guard mutate_lock(mutate_mutex);
|
||||
return StorageSetOrJoinBase::write(query, metadata_snapshot, context);
|
||||
}
|
||||
|
||||
void StorageJoin::truncate(
|
||||
const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder&)
|
||||
{
|
||||
std::lock_guard mutate_lock(mutate_mutex);
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
disk->removeRecursive(path);
|
||||
disk->createDirectories(path);
|
||||
disk->createDirectories(path + "tmp/");
|
||||
@ -77,6 +88,70 @@ void StorageJoin::truncate(
|
||||
join = std::make_shared<HashJoin>(table_join, metadata_snapshot->getSampleBlock().sortColumns(), overwrite);
|
||||
}
|
||||
|
||||
void StorageJoin::checkMutationIsPossible(const MutationCommands & commands, const Settings & /* settings */) const
|
||||
{
|
||||
for (const auto & command : commands)
|
||||
if (command.type != MutationCommand::DELETE)
|
||||
throw Exception("Table engine Join supports only DELETE mutations", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context)
|
||||
{
|
||||
/// First acquire the lock for mutation, which blocks changes of data.
|
||||
/// We cannot acquire rwlock here, because a read lock is needed
|
||||
/// for execution of mutation interpreter.
|
||||
std::lock_guard mutate_lock(mutate_mutex);
|
||||
|
||||
constexpr auto tmp_backup_file_name = "tmp/mut.bin";
|
||||
auto metadata_snapshot = getInMemoryMetadataPtr();
|
||||
|
||||
auto backup_buf = disk->writeFile(path + tmp_backup_file_name);
|
||||
auto compressed_backup_buf = CompressedWriteBuffer(*backup_buf);
|
||||
auto backup_stream = NativeBlockOutputStream(compressed_backup_buf, 0, metadata_snapshot->getSampleBlock());
|
||||
|
||||
auto new_data = std::make_shared<HashJoin>(table_join, metadata_snapshot->getSampleBlock().sortColumns(), overwrite);
|
||||
|
||||
// New scope controls lifetime of InputStream.
|
||||
{
|
||||
auto storage_ptr = DatabaseCatalog::instance().getTable(getStorageID(), context);
|
||||
auto interpreter = std::make_unique<MutationsInterpreter>(storage_ptr, metadata_snapshot, commands, context, true);
|
||||
auto in = interpreter->execute();
|
||||
in->readPrefix();
|
||||
|
||||
while (const Block & block = in->read())
|
||||
{
|
||||
new_data->addJoinedBlock(block, true);
|
||||
if (persistent)
|
||||
backup_stream.write(block);
|
||||
}
|
||||
|
||||
in->readSuffix();
|
||||
}
|
||||
|
||||
/// Now acquire the exclusive lock and modify the storage.
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
join = std::move(new_data);
|
||||
increment = 1;
|
||||
|
||||
if (persistent)
|
||||
{
|
||||
backup_stream.flush();
|
||||
compressed_backup_buf.next();
|
||||
backup_buf->next();
|
||||
backup_buf->finalize();
|
||||
|
||||
std::vector<std::string> files;
|
||||
disk->listFiles(path, files);
|
||||
for (const auto & file_name: files)
|
||||
{
|
||||
if (file_name.ends_with(".bin"))
|
||||
disk->removeFileIfExists(path + file_name);
|
||||
}
|
||||
|
||||
disk->replaceFile(path + tmp_backup_file_name, path + std::to_string(increment) + ".bin");
|
||||
}
|
||||
}
|
||||
|
||||
HashJoinPtr StorageJoin::getJoinLocked(std::shared_ptr<TableJoin> analyzed_join) const
|
||||
{
|
||||
|
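A minimal usage sketch for the new DELETE-only mutations of the Join engine, in the integration-test style used elsewhere in this commit (the node and table names are hypothetical; the assertion assumes the mutation is applied synchronously inside the ALTER call, since mutate() above runs directly):

def test_join_delete_sketch(node):
    node.query("CREATE TABLE j (k UInt64, v String) ENGINE = Join(ANY, LEFT, k)")
    node.query("INSERT INTO j VALUES (1, 'a'), (2, 'b')")
    # Only DELETE mutations are allowed; anything else fails checkMutationIsPossible.
    node.query("ALTER TABLE j DELETE WHERE k = 1")
    assert node.query("SELECT joinGet('j', 'v', toUInt64(2))").strip() == 'b'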
@ -29,6 +29,10 @@ public:
|
||||
|
||||
void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &) override;
|
||||
|
||||
/// Only delete is supported.
|
||||
void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override;
|
||||
void mutate(const MutationCommands & commands, ContextPtr context) override;
|
||||
|
||||
/// Return instance of HashJoin holding lock that protects from insertions to StorageJoin.
|
||||
/// HashJoin relies on structure of hash table that's why we need to return it with locked mutex.
|
||||
HashJoinPtr getJoinLocked(std::shared_ptr<TableJoin> analyzed_join) const;
|
||||
@ -41,6 +45,8 @@ public:
|
||||
/// (but not during processing whole query, it's safe for joinGet that doesn't involve `used_flags` from HashJoin)
|
||||
ColumnWithTypeAndName joinGet(const Block & block, const Block & block_with_columns_to_add) const;
|
||||
|
||||
BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) override;
|
||||
|
||||
Pipe read(
|
||||
const Names & column_names,
|
||||
const StorageMetadataPtr & /*metadata_snapshot*/,
|
||||
@ -68,6 +74,7 @@ private:
|
||||
/// Protect state for concurrent use in insertFromBlock and joinBlock.
|
||||
/// Lock is stored in HashJoin instance during query and blocks concurrent insertions.
|
||||
mutable std::shared_mutex rwlock;
|
||||
mutable std::mutex mutate_mutex;
|
||||
|
||||
void insertBlock(const Block & block) override;
|
||||
void finishInsert() override {}
|
||||
|
@ -290,6 +290,8 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
|
||||
, part_moves_between_shards_orchestrator(*this)
|
||||
, allow_renaming(allow_renaming_)
|
||||
, replicated_fetches_pool_size(getContext()->getSettingsRef().background_fetches_pool_size)
|
||||
, replicated_fetches_throttler(std::make_shared<Throttler>(getSettings()->max_replicated_fetches_network_bandwidth, getContext()->getReplicatedFetchesThrottler()))
|
||||
, replicated_sends_throttler(std::make_shared<Throttler>(getSettings()->max_replicated_sends_network_bandwidth, getContext()->getReplicatedSendsThrottler()))
|
||||
{
|
||||
queue_updating_task = getContext()->getSchedulePool().createTask(
|
||||
getStorageID().getFullTableName() + " (StorageReplicatedMergeTree::queueUpdatingTask)", [this]{ queueUpdatingTask(); });
|
||||
@ -2507,7 +2509,8 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
|
||||
|
||||
part_desc->res_part = fetcher.fetchPart(
|
||||
metadata_snapshot, getContext(), part_desc->found_new_part_name, source_replica_path,
|
||||
address.host, address.replication_port, timeouts, credentials->getUser(), credentials->getPassword(), interserver_scheme, false, TMP_PREFIX + "fetch_");
|
||||
address.host, address.replication_port, timeouts, credentials->getUser(), credentials->getPassword(),
|
||||
interserver_scheme, replicated_fetches_throttler, false, TMP_PREFIX + "fetch_");
|
||||
|
||||
/// TODO: check columns_version of fetched part
|
||||
|
||||
@ -2624,7 +2627,8 @@ void StorageReplicatedMergeTree::executeClonePartFromShard(const LogEntry & entr
|
||||
return fetcher.fetchPart(
|
||||
metadata_snapshot, getContext(), entry.new_part_name, source_replica_path,
|
||||
address.host, address.replication_port,
|
||||
timeouts, credentials->getUser(), credentials->getPassword(), interserver_scheme, true);
|
||||
timeouts, credentials->getUser(), credentials->getPassword(), interserver_scheme,
|
||||
replicated_fetches_throttler, true);
|
||||
};
|
||||
|
||||
part = get_part();
|
||||
@ -3163,6 +3167,13 @@ bool StorageReplicatedMergeTree::canExecuteFetch(const ReplicatedMergeTreeLogEnt
|
||||
return false;
|
||||
}
|
||||
|
||||
if (replicated_fetches_throttler->isThrottling())
|
||||
{
|
||||
disable_reason = fmt::format("Not executing fetch of part {} because fetches are already throttled by the network settings "
|
||||
"<max_replicated_fetches_network_bandwidth> or <max_replicated_fetches_network_bandwidth_for_server>.", entry.new_part_name);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -4031,6 +4042,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora
|
||||
credentials->getUser(),
|
||||
credentials->getPassword(),
|
||||
interserver_scheme,
|
||||
replicated_fetches_throttler,
|
||||
to_detached,
|
||||
"",
|
||||
&tagger_ptr,
|
||||
@ -4180,7 +4192,8 @@ bool StorageReplicatedMergeTree::fetchExistsPart(const String & part_name, const
|
||||
return fetcher.fetchPart(
|
||||
metadata_snapshot, getContext(), part_name, source_replica_path,
|
||||
address.host, address.replication_port,
|
||||
timeouts, credentials->getUser(), credentials->getPassword(), interserver_scheme, false, "", nullptr, true,
|
||||
timeouts, credentials->getUser(), credentials->getPassword(),
|
||||
interserver_scheme, replicated_fetches_throttler, false, "", nullptr, true,
|
||||
replaced_disk);
|
||||
};
|
||||
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <Interpreters/PartLog.h>
|
||||
#include <Common/randomSeed.h>
|
||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||
#include <Common/Throttler.h>
|
||||
#include <Core/BackgroundSchedulePool.h>
|
||||
#include <Processors/Pipe.h>
|
||||
#include <Storages/MergeTree/BackgroundJobsExecutor.h>
|
||||
@ -239,6 +240,18 @@ public:
|
||||
/// Get best replica having this partition on S3
|
||||
String getSharedDataReplica(const IMergeTreeDataPart & part) const;
|
||||
|
||||
/// Get throttler for replicated fetches
|
||||
ThrottlerPtr getFetchesThrottler() const
|
||||
{
|
||||
return replicated_fetches_throttler;
|
||||
}
|
||||
|
||||
/// Get throttler for replicated sends
|
||||
ThrottlerPtr getSendsThrottler() const
|
||||
{
|
||||
return replicated_sends_throttler;
|
||||
}
|
||||
|
||||
private:
|
||||
/// Get a sequential consistent view of current parts.
|
||||
ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock getMaxAddedBlocks() const;
|
||||
@ -363,6 +376,11 @@ private:
|
||||
|
||||
const size_t replicated_fetches_pool_size;
|
||||
|
||||
/// Throttlers used in DataPartsExchange to limit the maximum fetch/send
|
||||
/// speed.
|
||||
ThrottlerPtr replicated_fetches_throttler;
|
||||
ThrottlerPtr replicated_sends_throttler;
|
||||
|
||||
template <class Func>
|
||||
void foreachCommittedParts(Func && func, bool select_sequential_consistency) const;
|
||||
|
||||
|
@ -11,10 +11,13 @@ import json
|
||||
import csv
|
||||
|
||||
|
||||
MAX_RETRY = 2
|
||||
MAX_RETRY = 3
|
||||
NUM_WORKERS = 5
|
||||
SLEEP_BETWEEN_RETRIES = 5
|
||||
PARALLEL_GROUP_SIZE = 100
|
||||
CLICKHOUSE_BINARY_PATH = "/usr/bin/clickhouse"
|
||||
CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH = "/usr/bin/clickhouse-odbc-bridge"
|
||||
DOCKERD_LOGS_PATH = "/ClickHouse/tests/integration/dockerd.log"
|
||||
CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH = "/usr/bin/clickhouse-library-bridge"
|
||||
|
||||
TRIES_COUNT = 10
|
||||
@ -50,6 +53,11 @@ def filter_existing_tests(tests_to_run, repo_path):
|
||||
def _get_deselect_option(tests):
|
||||
return ' '.join(['--deselect {}'.format(t) for t in tests])
|
||||
|
||||
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
|
||||
def chunks(lst, n):
|
||||
"""Yield successive n-sized chunks from lst."""
|
||||
for i in range(0, len(lst), n):
|
||||
yield lst[i:i + n]
|
||||
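The new chunks() helper is what later slices the parallel-safe tests into groups of PARALLEL_GROUP_SIZE; a quick illustration (test names are made up):

tests = ["test_a/test.py::test_1", "test_b/test.py::test_2", "test_c/test.py::test_3"]
groups = {"parallel{}".format(i): group for i, group in enumerate(chunks(tests, 2))}
# groups == {"parallel0": [first two tests], "parallel1": [last test]}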
|
||||
def parse_test_results_output(fname):
|
||||
read = False
|
||||
@ -82,7 +90,7 @@ def get_counters(output):
|
||||
else:
|
||||
logging.info("Strange line %s", line)
|
||||
else:
|
||||
logging.info("Strange line %s")
|
||||
logging.info("Strange line %s", line)
|
||||
return {k: list(v) for k, v in counters.items()}
|
||||
|
||||
|
||||
@ -253,7 +261,7 @@ class ClickhouseIntegrationTestsRunner:
|
||||
|
||||
def _get_all_tests(self, repo_path):
|
||||
image_cmd = self._get_runner_image_cmd(repo_path)
|
||||
cmd = "cd {}/tests/integration && ./runner {} ' --setup-plan' | grep '::' | sed 's/ (fixtures used:.*//g' | sed 's/^ *//g' > all_tests.txt".format(repo_path, image_cmd)
|
||||
cmd = "cd {}/tests/integration && ./runner --tmpfs {} ' --setup-plan' | grep '::' | sed 's/ (fixtures used:.*//g' | sed 's/^ *//g' | sed 's/ *$//g' | sort -u > all_tests.txt".format(repo_path, image_cmd)
|
||||
logging.info("Getting all tests with cmd '%s'", cmd)
|
||||
subprocess.check_call(cmd, shell=True) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL
|
||||
|
||||
@ -267,6 +275,16 @@ class ClickhouseIntegrationTestsRunner:
|
||||
all_tests.append(line.strip())
|
||||
return list(sorted(all_tests))
|
||||
|
||||
def _get_parallel_tests_skip_list(self, repo_path):
|
||||
skip_list_file_path = "{}/tests/integration/parallel_skip.json".format(repo_path)
|
||||
if not os.path.isfile(skip_list_file_path) or os.path.getsize(skip_list_file_path) == 0:
|
||||
raise Exception("There is something wrong with getting all tests list: file '{}' is empty or does not exist.".format(skip_list_file_path))
|
||||
|
||||
skip_list_tests = []
|
||||
with open(skip_list_file_path, "r") as skip_list_file:
|
||||
skip_list_tests = json.load(skip_list_file)
|
||||
return list(sorted(skip_list_tests))
|
||||
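parallel_skip.json is just a JSON array of test ids that must not run in parallel. run_impl below uses it to split the discovered tests into a sequential set and a parallel set, roughly as follows (names shortened for illustration):

all_tests = ["test_x/test.py::test_1", "test_y/test.py::test_2"]
parallel_skip_tests = ["test_y/test.py::test_2"]   # contents of parallel_skip.json

sequential = [t for t in all_tests if t in parallel_skip_tests]       # grouped by file, run one by one
parallel = [t for t in all_tests if t not in parallel_skip_tests]     # chunked and run with --parallel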
|
||||
def group_test_by_file(self, tests):
|
||||
result = {}
|
||||
for test in tests:
|
||||
@ -286,7 +304,6 @@ class ClickhouseIntegrationTestsRunner:
|
||||
if test in main_counters["ERROR"]:
|
||||
main_counters["ERROR"].remove(test)
|
||||
is_flaky = True
|
||||
|
||||
if is_flaky:
|
||||
main_counters["FLAKY"].append(test)
|
||||
else:
|
||||
@ -319,7 +336,7 @@ class ClickhouseIntegrationTestsRunner:
|
||||
logging.info("Cannot run with custom docker image version :(")
|
||||
return image_cmd
|
||||
|
||||
def run_test_group(self, repo_path, test_group, tests_in_group, num_tries):
|
||||
def run_test_group(self, repo_path, test_group, tests_in_group, num_tries, num_workers):
|
||||
counters = {
|
||||
"ERROR": [],
|
||||
"PASSED": [],
|
||||
@ -359,8 +376,9 @@ class ClickhouseIntegrationTestsRunner:
|
||||
test_names.add(test_name)
|
||||
|
||||
test_cmd = ' '.join([test for test in sorted(test_names)])
|
||||
cmd = "cd {}/tests/integration && ./runner {} '-ss {} -rfEp --color=no --durations=0 {}' | tee {}".format(
|
||||
repo_path, image_cmd, test_cmd, _get_deselect_option(self.should_skip_tests()), output_path)
|
||||
parallel_cmd = " --parallel {} ".format(num_workers) if num_workers > 0 else ""
|
||||
cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-ss -rfEp --color=no --durations=0 {}' | tee {}".format(
|
||||
repo_path, image_cmd, test_cmd, parallel_cmd, _get_deselect_option(self.should_skip_tests()), output_path)
|
||||
|
||||
with open(log_path, 'w') as log:
|
||||
logging.info("Executing cmd: %s", cmd)
|
||||
@ -412,7 +430,7 @@ class ClickhouseIntegrationTestsRunner:
|
||||
for i in range(TRIES_COUNT):
|
||||
final_retry += 1
|
||||
logging.info("Running tests for the %s time", i)
|
||||
counters, tests_times, log_paths = self.run_test_group(repo_path, "flaky", tests_to_run, 1)
|
||||
counters, tests_times, log_paths = self.run_test_group(repo_path, "flaky", tests_to_run, 1, 1)
|
||||
logs += log_paths
|
||||
if counters["FAILED"]:
|
||||
logging.info("Found failed tests: %s", ' '.join(counters["FAILED"]))
|
||||
@ -439,6 +457,10 @@ class ClickhouseIntegrationTestsRunner:
|
||||
self._compress_logs("{}/tests/integration".format(repo_path), test_logs)
|
||||
logging.info("Compression finished")
|
||||
|
||||
result_path_dockerd_logs = os.path.join(str(self.path()), "dockerd.log")
|
||||
if os.path.exists(result_path_dockerd_logs):
|
||||
shutil.copy(DOCKERD_LOGS_PATH, result_path_dockerd_logs)
|
||||
|
||||
test_result = []
|
||||
for state in ("ERROR", "FAILED", "PASSED", "SKIPPED", "FLAKY"):
|
||||
if state == "PASSED":
|
||||
@ -459,8 +481,19 @@ class ClickhouseIntegrationTestsRunner:
|
||||
self._install_clickhouse(build_path)
|
||||
logging.info("Dump iptables before run %s", subprocess.check_output("iptables -L", shell=True))
|
||||
all_tests = self._get_all_tests(repo_path)
|
||||
parallel_skip_tests = self._get_parallel_tests_skip_list(repo_path)
|
||||
logging.info("Found %s tests first 3 %s", len(all_tests), ' '.join(all_tests[:3]))
|
||||
grouped_tests = self.group_test_by_file(all_tests)
|
||||
filtered_sequential_tests = list(filter(lambda test: test in all_tests, parallel_skip_tests))
|
||||
filtered_parallel_tests = list(filter(lambda test: test not in parallel_skip_tests, all_tests))
|
||||
not_found_tests = list(filter(lambda test: test not in all_tests, parallel_skip_tests))
|
||||
logging.info("Found %s tests first 3 %s, parallel %s, other %s", len(all_tests), ' '.join(all_tests[:3]), len(filtered_parallel_tests), len(filtered_sequential_tests))
|
||||
logging.info("Not found %s tests first 3 %s", len(not_found_tests), ' '.join(not_found_tests[:3]))
|
||||
|
||||
grouped_tests = self.group_test_by_file(filtered_sequential_tests)
|
||||
i = 0
|
||||
for par_group in chunks(filtered_parallel_tests, PARALLEL_GROUP_SIZE):
|
||||
grouped_tests["parallel{}".format(i)] = par_group
|
||||
i+=1
|
||||
logging.info("Found %s tests groups", len(grouped_tests))
|
||||
|
||||
counters = {
|
||||
@ -482,7 +515,7 @@ class ClickhouseIntegrationTestsRunner:
|
||||
|
||||
for group, tests in items_to_run:
|
||||
logging.info("Running test group %s countaining %s tests", group, len(tests))
|
||||
group_counters, group_test_times, log_paths = self.run_test_group(repo_path, group, tests, MAX_RETRY)
|
||||
group_counters, group_test_times, log_paths = self.run_test_group(repo_path, group, tests, MAX_RETRY, NUM_WORKERS)
|
||||
total_tests = 0
|
||||
for counter, value in group_counters.items():
|
||||
logging.info("Tests from group %s stats, %s count %s", group, counter, len(value))
|
||||
@ -504,6 +537,10 @@ class ClickhouseIntegrationTestsRunner:
|
||||
self._compress_logs("{}/tests/integration".format(repo_path), test_logs)
|
||||
logging.info("Compression finished")
|
||||
|
||||
result_path_dockerd_logs = os.path.join(str(self.path()), "dockerd.log")
|
||||
if os.path.exists(result_path_dockerd_logs):
|
||||
shutil.copy(DOCKERD_LOGS_PATH, result_path_dockerd_logs)
|
||||
|
||||
if counters["FAILED"] or counters["ERROR"]:
|
||||
logging.info("Overall status failure, because we have tests in FAILED or ERROR state")
|
||||
result_state = "failure"
|
||||
|
@ -101,8 +101,8 @@ class CommandRequest:
|
||||
self.stdout_file.seek(0)
|
||||
self.stderr_file.seek(0)
|
||||
|
||||
stdout = self.stdout_file.read().decode()
|
||||
stderr = self.stderr_file.read().decode()
|
||||
stdout = self.stdout_file.read().decode('utf-8', errors='replace')
|
||||
stderr = self.stderr_file.read().decode('utf-8', errors='replace')
|
||||
|
||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||
raise QueryTimeoutExceedException('Client timed out!')
|
||||
@ -118,8 +118,8 @@ class CommandRequest:
|
||||
self.stdout_file.seek(0)
|
||||
self.stderr_file.seek(0)
|
||||
|
||||
stdout = self.stdout_file.read().decode()
|
||||
stderr = self.stderr_file.read().decode()
|
||||
stdout = self.stdout_file.read().decode('utf-8', errors='replace')
|
||||
stderr = self.stderr_file.read().decode('utf-8', errors='replace')
|
||||
|
||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||
raise QueryTimeoutExceedException('Client timed out!')
|
||||
@ -134,8 +134,8 @@ class CommandRequest:
|
||||
self.stdout_file.seek(0)
|
||||
self.stderr_file.seek(0)
|
||||
|
||||
stdout = self.stdout_file.read().decode()
|
||||
stderr = self.stderr_file.read().decode()
|
||||
stdout = self.stdout_file.read().decode('utf-8', errors='replace')
|
||||
stderr = self.stderr_file.read().decode('utf-8', errors='replace')
|
||||
|
||||
if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
|
||||
raise QueryTimeoutExceedException('Client timed out!')
|
||||
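The errors='replace' change above matters when a client emits bytes that are not valid UTF-8 (binary output, a truncated multi-byte sequence); a strict decode() would raise UnicodeDecodeError and hide the real test failure. For example:

raw = b"log line with broken utf-8 \xff\xfe tail"
# raw.decode() would raise UnicodeDecodeError here.
print(raw.decode('utf-8', errors='replace'))   # invalid bytes become U+FFFD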
|
File diff suppressed because it is too large
@ -7,12 +7,12 @@ class Layout(object):
|
||||
'flat': '<flat/>',
|
||||
'hashed': '<hashed/>',
|
||||
'cache': '<cache><size_in_cells>128</size_in_cells></cache>',
|
||||
'ssd_cache': '<ssd_cache><path>/etc/clickhouse/dictionaries/all</path></ssd_cache>',
|
||||
'ssd_cache': '<ssd_cache><path>/etc/clickhouse-server/dictionaries/all</path></ssd_cache>',
|
||||
'complex_key_hashed': '<complex_key_hashed/>',
|
||||
'complex_key_hashed_one_key': '<complex_key_hashed/>',
|
||||
'complex_key_hashed_two_keys': '<complex_key_hashed/>',
|
||||
'complex_key_cache': '<complex_key_cache><size_in_cells>128</size_in_cells></complex_key_cache>',
|
||||
'complex_key_ssd_cache': '<complex_key_ssd_cache><path>/etc/clickhouse/dictionaries/all</path></complex_key_ssd_cache>',
|
||||
'complex_key_ssd_cache': '<complex_key_ssd_cache><path>/etc/clickhouse-server/dictionaries/all</path></complex_key_ssd_cache>',
|
||||
'range_hashed': '<range_hashed/>',
|
||||
'direct': '<direct/>',
|
||||
'complex_key_direct': '<complex_key_direct/>'
|
||||
|
@ -9,6 +9,7 @@ import cassandra.cluster
|
||||
import pymongo
|
||||
import pymysql.cursors
|
||||
import redis
|
||||
import logging
|
||||
from tzlocal import get_localzone
|
||||
|
||||
|
||||
@ -59,6 +60,7 @@ class SourceMySQL(ExternalSource):
|
||||
}
|
||||
|
||||
def create_mysql_conn(self):
|
||||
logging.debug(f"pymysql connect {self.user}, {self.password}, {self.internal_hostname}, {self.internal_port}")
|
||||
self.connection = pymysql.connect(
|
||||
user=self.user,
|
||||
password=self.password,
|
||||
@ -98,8 +100,11 @@ class SourceMySQL(ExternalSource):
|
||||
)
|
||||
|
||||
def prepare(self, structure, table_name, cluster):
|
||||
if self.internal_hostname is None:
|
||||
self.internal_hostname = cluster.mysql_ip
|
||||
self.create_mysql_conn()
|
||||
self.execute_mysql_query("create database if not exists test default character set 'utf8'")
|
||||
self.execute_mysql_query("drop table if exists test.{}".format(table_name))
|
||||
fields_strs = []
|
||||
for field in structure.keys + structure.ordinary_fields + structure.range_fields:
|
||||
fields_strs.append(field.name + ' ' + self.TYPE_MAPPING[field.field_type])
|
||||
@ -457,6 +462,9 @@ class SourceCassandra(ExternalSource):
|
||||
)
|
||||
|
||||
def prepare(self, structure, table_name, cluster):
|
||||
if self.internal_hostname is None:
|
||||
self.internal_hostname = cluster.cassandra_ip
|
||||
|
||||
self.client = cassandra.cluster.Cluster([self.internal_hostname], port=self.internal_port)
|
||||
self.session = self.client.connect()
|
||||
self.session.execute(
|
||||
|
@ -10,16 +10,6 @@ import socket
|
||||
import tempfile
|
||||
import logging
|
||||
import os
|
||||
|
||||
g_dns_hook = None
|
||||
|
||||
def custom_getaddrinfo(*args):
|
||||
# print("from custom_getaddrinfo g_dns_hook is None ", g_dns_hook is None)
|
||||
ret = g_dns_hook.custom_getaddrinfo(*args)
|
||||
# print("g_dns_hook.custom_getaddrinfo result", ret)
|
||||
return ret
|
||||
|
||||
|
||||
class mk_krb_conf(object):
|
||||
def __init__(self, krb_conf, kdc_ip):
|
||||
self.krb_conf = krb_conf
|
||||
@ -37,32 +27,6 @@ class mk_krb_conf(object):
|
||||
if self.amended_krb_conf is not None:
|
||||
self.amended_krb_conf.close()
|
||||
|
||||
# tweak dns resolution to connect to localhost where api_host is in URL
|
||||
class dns_hook(object):
|
||||
def __init__(self, hdfs_api):
|
||||
# print("dns_hook.init ", hdfs_api.kerberized, hdfs_api.host, hdfs_api.data_port, hdfs_api.proxy_port)
|
||||
self.hdfs_api = hdfs_api
|
||||
def __enter__(self):
|
||||
global g_dns_hook
|
||||
g_dns_hook = self
|
||||
# print("g_dns_hook is None ", g_dns_hook is None)
|
||||
self.original_getaddrinfo = socket.getaddrinfo
|
||||
socket.getaddrinfo = custom_getaddrinfo
|
||||
return self
|
||||
def __exit__(self, type, value, traceback):
|
||||
global g_dns_hook
|
||||
g_dns_hook = None
|
||||
socket.getaddrinfo = self.original_getaddrinfo
|
||||
def custom_getaddrinfo(self, *args):
|
||||
(hostname, port) = args[:2]
|
||||
# print("top of custom_getaddrinfo", hostname, port)
|
||||
|
||||
if hostname == self.hdfs_api.host and (port == self.hdfs_api.data_port or port == self.hdfs_api.proxy_port):
|
||||
# print("dns_hook substitute")
|
||||
return [(socket.AF_INET, 1, 6, '', ("127.0.0.1", port))]
|
||||
else:
|
||||
return self.original_getaddrinfo(*args)
|
||||
|
||||
class HDFSApi(object):
|
||||
def __init__(self, user, timeout=100, kerberized=False, principal=None,
|
||||
keytab=None, krb_conf=None,
|
||||
@ -84,12 +48,15 @@ class HDFSApi(object):
|
||||
# logging.basicConfig(level=logging.DEBUG)
|
||||
# logging.getLogger().setLevel(logging.DEBUG)
|
||||
# requests_log = logging.getLogger("requests.packages.urllib3")
|
||||
# requests_log.setLevel(logging.DEBUG)
|
||||
# requests_log.setLevel(logging.INFO)
|
||||
# requests_log.propagate = True
|
||||
# kerb_log = logging.getLogger("requests_kerberos")
|
||||
# kerb_log.setLevel(logging.DEBUG)
|
||||
# kerb_log.propagate = True
|
||||
|
||||
if kerberized:
|
||||
self._run_kinit()
|
||||
self.kerberos_auth = reqkerb.HTTPKerberosAuth(mutual_authentication=reqkerb.DISABLED, hostname_override=self.host, principal=self.principal)
|
||||
self.kerberos_auth = reqkerb.HTTPKerberosAuth(mutual_authentication=reqkerb.DISABLED, hostname_override="kerberizedhdfs1", principal=self.principal)
|
||||
if self.kerberos_auth is None:
|
||||
print("failed to obtain kerberos_auth")
|
||||
else:
|
||||
@ -100,51 +67,64 @@ class HDFSApi(object):
|
||||
raise Exception("kerberos principal and keytab are required")
|
||||
|
||||
with mk_krb_conf(self.krb_conf, self.kdc_ip) as instantiated_krb_conf:
|
||||
# print("instantiated_krb_conf ", instantiated_krb_conf)
|
||||
logging.debug("instantiated_krb_conf {}".format(instantiated_krb_conf))
|
||||
|
||||
os.environ["KRB5_CONFIG"] = instantiated_krb_conf
|
||||
|
||||
cmd = "(kinit -R -t {keytab} -k {principal} || (sleep 5 && kinit -R -t {keytab} -k {principal})) ; klist".format(instantiated_krb_conf=instantiated_krb_conf, keytab=self.keytab, principal=self.principal)
|
||||
|
||||
# print(cmd)
|
||||
|
||||
start = time.time()
|
||||
|
||||
while time.time() - start < self.timeout:
|
||||
try:
|
||||
subprocess.call(cmd, shell=True)
|
||||
print("KDC started, kinit successfully run")
|
||||
res = subprocess.run(cmd, shell=True)
|
||||
if res.returncode != 0:
|
||||
# check_call(...) from subprocess does not print stderr, so we do it manually
|
||||
logging.debug('Stderr:\n{}\n'.format(res.stderr.decode('utf-8')))
|
||||
logging.debug('Stdout:\n{}\n'.format(res.stdout.decode('utf-8')))
|
||||
logging.debug('Env:\n{}\n'.format(env))
|
||||
raise Exception('Command {} return non-zero code {}: {}'.format(args, res.returncode, res.stderr.decode('utf-8')))
|
||||
|
||||
logging.debug("KDC started, kinit successfully run")
|
||||
return
|
||||
except Exception as ex:
|
||||
print("Can't run kinit ... waiting {}".format(str(ex)))
|
||||
logging.debug("Can't run kinit ... waiting {}".format(str(ex)))
|
||||
time.sleep(1)
|
||||
|
||||
raise Exception("Kinit running failure")
|
||||
|
||||
def req_wrapper(self, func, expected_code, cnt=2, **kwargs):
|
||||
with dns_hook(self):
|
||||
for i in range(0, cnt):
|
||||
response_data = func(**kwargs)
|
||||
if response_data.status_code == expected_code:
|
||||
return response_data
|
||||
else:
|
||||
print("unexpected response_data.status_code {}", response_data.status_code)
|
||||
for i in range(0, cnt):
|
||||
response_data = func(**kwargs)
|
||||
if response_data.status_code == expected_code:
|
||||
return response_data
|
||||
else:
|
||||
print("unexpected response_data.status_code {}", response_data.status_code)
|
||||
response_data.raise_for_status()
|
||||
|
||||
|
||||
def read_data(self, path, universal_newlines=True):
|
||||
logging.debug("read_data protocol:{} host:{} port:{} path: {}".format(self.protocol, self.host, self.proxy_port, path))
|
||||
response = self.req_wrapper(requests.get, 307, url="{protocol}://{host}:{port}/webhdfs/v1{path}?op=OPEN".format(protocol=self.protocol, host=self.host, port=self.proxy_port, path=path), headers={'host': 'localhost'}, allow_redirects=False, verify=False, auth=self.kerberos_auth)
|
||||
# additional_params = '&'.join(response.headers['Location'].split('&')[1:2])
|
||||
url = "{location}".format(location=response.headers['Location'])
|
||||
# print("redirected to ", url)
|
||||
response_data = self.req_wrapper(requests.get, 200, url=url,
|
||||
headers={'host': 'localhost'},
|
||||
verify=False, auth=self.kerberos_auth)
|
||||
location = None
|
||||
if self.kerberized:
|
||||
location = response.headers['Location'].replace("kerberizedhdfs1:1006", "{}:{}".format(self.host, self.data_port))
|
||||
else:
|
||||
location = response.headers['Location'].replace("hdfs1:50075", "{}:{}".format(self.host, self.data_port))
|
||||
logging.debug("redirected to {}".format(location))
|
||||
|
||||
response_data = self.req_wrapper(requests.get, 200, url=location, headers={'host': 'localhost'},
|
||||
verify=False, auth=self.kerberos_auth)
|
||||
|
||||
if universal_newlines:
|
||||
return response_data.text
|
||||
else:
|
||||
return response_data.content
|
||||
|
||||
def write_data(self, path, content):
|
||||
logging.debug("write_data protocol:{} host:{} port:{} path: {} user:{}, principal:{}".format(
|
||||
self.protocol, self.host, self.proxy_port, path, self.user, self.principal))
|
||||
named_file = NamedTemporaryFile(mode='wb+')
|
||||
fpath = named_file.name
|
||||
if isinstance(content, str):
|
||||
@ -152,36 +132,36 @@ class HDFSApi(object):
|
||||
named_file.write(content)
|
||||
named_file.flush()
|
||||
|
||||
if self.kerberized:
|
||||
self._run_kinit()
|
||||
self.kerberos_auth = reqkerb.HTTPKerberosAuth(mutual_authentication=reqkerb.DISABLED, hostname_override=self.host, principal=self.principal)
|
||||
# print(self.kerberos_auth)
|
||||
|
||||
response = self.req_wrapper(requests.put, 307,
|
||||
url="{protocol}://{host}:{port}/webhdfs/v1{path}?op=CREATE".format(
|
||||
protocol=self.protocol, host=self.host,
|
||||
port=self.proxy_port,
|
||||
path=path, user=self.user),
|
||||
allow_redirects=False,
|
||||
headers={'host': 'localhost'},
|
||||
params={'overwrite' : 'true'},
|
||||
verify=False, auth=self.kerberos_auth
|
||||
)
|
||||
additional_params = '&'.join(
|
||||
response.headers['Location'].split('&')[1:2] + ["user.name={}".format(self.user), "overwrite=true"])
|
||||
url="{protocol}://{host}:{port}/webhdfs/v1{path}?op=CREATE".format(protocol=self.protocol, host='localhost',
|
||||
port=self.proxy_port,
|
||||
path=path, user=self.user),
|
||||
allow_redirects=False,
|
||||
headers={'host': 'localhost'},
|
||||
params={'overwrite' : 'true'},
|
||||
verify=False, auth=self.kerberos_auth
|
||||
)
|
||||
|
||||
logging.debug("HDFS api response:{}".format(response.headers))
|
||||
|
||||
# additional_params = '&'.join(
|
||||
# response.headers['Location'].split('&')[1:2] + ["user.name={}".format(self.user), "overwrite=true"])
|
||||
if self.kerberized:
|
||||
location = response.headers['Location'].replace("kerberizedhdfs1:1006", "{}:{}".format(self.host, self.data_port))
|
||||
else:
|
||||
location = response.headers['Location'].replace("hdfs1:50075", "{}:{}".format(self.host, self.data_port))
|
||||
|
||||
with open(fpath, mode="rb") as fh:
|
||||
file_data = fh.read()
|
||||
protocol = "http" # self.protocol
|
||||
response = self.req_wrapper(requests.put, 201,
|
||||
url="{location}".format(
|
||||
location=response.headers['Location']),
|
||||
data=file_data,
|
||||
headers={'content-type':'text/plain', 'host': 'localhost'},
|
||||
params={'file': path, 'user.name' : self.user},
|
||||
allow_redirects=False, verify=False, auth=self.kerberos_auth
|
||||
url="{location}".format(location=location),
|
||||
data=file_data,
|
||||
headers={'content-type':'text/plain', 'host': 'localhost'},
|
||||
params={'file': path, 'user.name' : self.user},
|
||||
allow_redirects=False, verify=False, auth=self.kerberos_auth
|
||||
)
|
||||
# print(response)
|
||||
logging.debug(response)
|
||||
|
||||
|
||||
def write_gzip_data(self, path, content):
|
||||
|
@ -1,7 +1,7 @@
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
import logging
|
||||
import docker
|
||||
|
||||
|
||||
@ -129,12 +129,12 @@ class _NetworkManager:
|
||||
def add_iptables_rule(self, **kwargs):
|
||||
cmd = ['iptables', '-I', 'DOCKER-USER', '1']
|
||||
cmd.extend(self._iptables_cmd_suffix(**kwargs))
|
||||
self._exec_run(cmd, privileged=True)
|
||||
self._exec_run_with_retry(cmd, retry_count=3, privileged=True)
|
||||
|
||||
def delete_iptables_rule(self, **kwargs):
|
||||
cmd = ['iptables', '-D', 'DOCKER-USER']
|
||||
cmd.extend(self._iptables_cmd_suffix(**kwargs))
|
||||
self._exec_run(cmd, privileged=True)
|
||||
self._exec_run_with_retry(cmd, retry_count=3, privileged=True)
|
||||
|
||||
@staticmethod
|
||||
def _iptables_cmd_suffix(
|
||||
@ -159,12 +159,12 @@ class _NetworkManager:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
container_expire_timeout=50, container_exit_timeout=60):
|
||||
container_expire_timeout=50, container_exit_timeout=60, docker_api_version=os.environ.get("DOCKER_API_VERSION")):
|
||||
|
||||
self.container_expire_timeout = container_expire_timeout
|
||||
self.container_exit_timeout = container_exit_timeout
|
||||
|
||||
self._docker_client = docker.from_env(version=os.environ.get("DOCKER_API_VERSION"))
|
||||
self._docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=docker_api_version, timeout=600)
|
||||
|
||||
self._container = None
|
||||
|
||||
@ -207,6 +207,13 @@ class _NetworkManager:
|
||||
|
||||
return self._container
|
||||
|
||||
def _exec_run_with_retry(self, cmd, retry_count, **kwargs):
|
||||
for i in range(retry_count):
|
||||
try:
|
||||
self._exec_run(cmd, **kwargs)
|
||||
except subprocess.CalledProcessError as e:
|
||||
logging.error(f"_exec_run failed for {cmd}, {e}")
|
||||
|
||||
def _exec_run(self, cmd, **kwargs):
|
||||
container = self._ensure_container()
|
||||
|
||||
@ -219,3 +226,61 @@ class _NetworkManager:
|
||||
raise subprocess.CalledProcessError(exit_code, cmd)
|
||||
|
||||
return output
|
||||
|
||||
# Approximately measure network I/O speed for an interface
|
||||
class NetThroughput(object):
|
||||
def __init__(self, node, interface="eth0"):
|
||||
self.interface = interface
|
||||
self.node = node
|
||||
try:
|
||||
check = subprocess.check_output(f'grep "^ *{self.interface}:" /proc/net/dev', shell=True)
|
||||
if not check:
|
||||
raise Exception(f"No such interface {self.interface} found in /proc/net/dev")
|
||||
except:
|
||||
raise Exception(f"No such interface {self.interface} found in /proc/net/dev")
|
||||
|
||||
self.current_in = self._get_in_bytes()
|
||||
self.current_out = self._get_out_bytes()
|
||||
self.measure_time = time.time()
|
||||
|
||||
def _get_in_bytes(self):
|
||||
try:
|
||||
result = self.node.exec_in_container(['bash', '-c', f'awk "/^ *{self.interface}:/"\' {{ if ($1 ~ /.*:[0-9][0-9]*/) {{ sub(/^.*:/, "") ; print $1 }} else {{ print $2 }} }}\' /proc/net/dev'])
|
||||
except:
|
||||
raise Exception(f"Cannot receive in bytes from /proc/net/dev for interface {self.interface}")
|
||||
|
||||
try:
|
||||
return int(result)
|
||||
except:
|
||||
raise Exception(f"Got non-numeric in bytes '{result}' from /proc/net/dev for interface {self.interface}")
|
||||
|
||||
def _get_out_bytes(self):
|
||||
try:
|
||||
result = self.node.exec_in_container(['bash', '-c', f'awk "/^ *{self.interface}:/"\' {{ if ($1 ~ /.*:[0-9][0-9]*/) {{ print $9 }} else {{ print $10 }} }}\' /proc/net/dev'])
|
||||
except:
|
||||
raise Exception(f"Cannot receive out bytes from /proc/net/dev for interface {self.interface}")
|
||||
|
||||
try:
|
||||
return int(result)
|
||||
except:
|
||||
raise Exception(f"Got non-numeric out bytes '{result}' from /proc/net/dev for interface {self.interface}")
|
||||
|
||||
def measure_speed(self, measure='bytes'):
|
||||
new_in = self._get_in_bytes()
|
||||
new_out = self._get_out_bytes()
|
||||
current_time = time.time()
|
||||
in_speed = (new_in - self.current_in) / (current_time - self.measure_time)
|
||||
out_speed = (new_out - self.current_out) / (current_time - self.measure_time)
|
||||
|
||||
self.current_out = new_out
|
||||
self.current_in = new_in
|
||||
self.measure_time = current_time
|
||||
|
||||
if measure == 'bytes':
|
||||
return in_speed, out_speed
|
||||
elif measure == 'kilobytes':
|
||||
return in_speed / 1024., out_speed / 1024.
|
||||
elif measure == 'megabytes':
|
||||
return in_speed / (1024 * 1024), out_speed / (1024 * 1024)
|
||||
else:
|
||||
raise Exception(f"Unknown measure {measure}")
|
||||
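A typical use of the new NetThroughput helper would be to sample an instance's interface around a transfer and inspect the observed rate, for example (the node fixture and the scenario are hypothetical, not part of this commit):

import time

def sample_fetch_speed(node, seconds=5):
    net = NetThroughput(node, interface="eth0")
    time.sleep(seconds)   # let the replicated fetch run for a while
    in_mb_s, out_mb_s = net.measure_speed('megabytes')
    return in_mb_s, out_mb_s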
|
17
tests/integration/helpers/zookeeper_secure_config.xml
Normal file
@ -0,0 +1,17 @@
|
||||
<yandex>
|
||||
<zookeeper>
|
||||
<node index="1">
|
||||
<host>zoo1</host>
|
||||
<port>2281</port>
|
||||
</node>
|
||||
<node index="2">
|
||||
<host>zoo2</host>
|
||||
<port>2281</port>
|
||||
</node>
|
||||
<node index="3">
|
||||
<host>zoo3</host>
|
||||
<port>2281</port>
|
||||
</node>
|
||||
<session_timeout_ms>3000</session_timeout_ms>
|
||||
</zookeeper>
|
||||
</yandex>
|
260
tests/integration/parallel.json
Normal file
@ -0,0 +1,260 @@
|
||||
[
|
||||
"test_atomic_drop_table/test.py::test_atomic_delete_with_stopped_zookeeper",
|
||||
"test_attach_without_fetching/test.py::test_attach_without_fetching",
|
||||
"test_broken_part_during_merge/test.py::test_merge_and_part_corruption",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_attach_without_zk",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_cleanup_dir_after_bad_zk_conn",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_cleanup_dir_after_wrong_replica_name",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_cleanup_dir_after_wrong_zk_path",
|
||||
"test_consistent_parts_after_clone_replica/test.py::test_inconsistent_parts_if_drop_while_replica_not_active",
|
||||
"test_cross_replication/test.py::test",
|
||||
"test_ddl_worker_non_leader/test.py::test_non_leader_replica",
|
||||
"test_delayed_replica_failover/test.py::test",
|
||||
"test_dictionaries_update_field/test.py::test_update_field[complex_key_hashed_update_field_dictionary-HASHED]",
|
||||
"test_dictionaries_update_field/test.py::test_update_field[flat_update_field_dictionary-FLAT]",
|
||||
"test_dictionaries_update_field/test.py::test_update_field[simple_key_hashed_update_field_dictionary-HASHED]",
|
||||
"test_dictionary_allow_read_expired_keys/test_default_reading.py::test_default_reading",
|
||||
"test_dictionary_allow_read_expired_keys/test_default_string.py::test_return_real_values",
|
||||
"test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py::test_simple_dict_get_or_default",
|
||||
"test_dictionary_allow_read_expired_keys/test_dict_get.py::test_simple_dict_get",
|
||||
"test_disabled_mysql_server/test.py::test_disabled_mysql_server",
|
||||
"test_distributed_ddl_on_cross_replication/test.py::test_alter_ddl",
|
||||
"test_distributed_ddl_on_cross_replication/test.py::test_atomic_database",
|
||||
"test_distributed_ddl_parallel/test.py::test_all_in_parallel",
|
||||
"test_distributed_ddl_parallel/test.py::test_slow_dict_load_7",
|
||||
"test_distributed_ddl_parallel/test.py::test_smoke",
|
||||
"test_distributed_ddl_parallel/test.py::test_smoke_parallel",
|
||||
"test_distributed_ddl_parallel/test.py::test_smoke_parallel_dict_reload",
|
||||
"test_distributed_ddl_parallel/test.py::test_two_in_parallel_two_queued",
|
||||
"test_distributed_ddl_password/test.py::test_alter",
|
||||
"test_distributed_ddl_password/test.py::test_truncate",
|
||||
"test_distributed_ddl/test.py::test_allowed_databases[configs]",
|
||||
"test_distributed_ddl/test.py::test_allowed_databases[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_create_as_select[configs]",
|
||||
"test_distributed_ddl/test.py::test_create_as_select[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_create_reserved[configs]",
|
||||
"test_distributed_ddl/test.py::test_create_reserved[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_create_view[configs]",
|
||||
"test_distributed_ddl/test.py::test_create_view[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_default_database[configs]",
|
||||
"test_distributed_ddl/test.py::test_default_database[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_detach_query[configs]",
|
||||
"test_distributed_ddl/test.py::test_detach_query[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_implicit_macros[configs]",
|
||||
"test_distributed_ddl/test.py::test_implicit_macros[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_kill_query[configs]",
|
||||
"test_distributed_ddl/test.py::test_kill_query[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_macro[configs]",
|
||||
"test_distributed_ddl/test.py::test_macro[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_on_connection_loss[configs]",
|
||||
"test_distributed_ddl/test.py::test_on_connection_loss[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_on_server_fail[configs]",
|
||||
"test_distributed_ddl/test.py::test_on_server_fail[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_on_session_expired[configs]",
|
||||
"test_distributed_ddl/test.py::test_on_session_expired[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_optimize_query[configs]",
|
||||
"test_distributed_ddl/test.py::test_optimize_query[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_rename[configs]",
|
||||
"test_distributed_ddl/test.py::test_rename[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_replicated_without_arguments[configs]",
|
||||
"test_distributed_ddl/test.py::test_replicated_without_arguments[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_simple_alters[configs]",
|
||||
"test_distributed_ddl/test.py::test_simple_alters[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_socket_timeout[configs]",
|
||||
"test_distributed_ddl/test.py::test_socket_timeout[configs_secure]",
|
||||
"test_distributed_ddl/test_replicated_alter.py::test_replicated_alters[configs]",
|
||||
"test_distributed_ddl/test_replicated_alter.py::test_replicated_alters[configs_secure]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node2-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node2-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node2-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node2-remote]",
|
||||
"test_drop_replica/test.py::test_drop_replica",
|
||||
"test_hedged_requests_parallel/test.py::test_combination1",
|
||||
"test_hedged_requests_parallel/test.py::test_combination2",
|
||||
"test_hedged_requests_parallel/test.py::test_query_with_no_data_to_sample",
|
||||
"test_hedged_requests_parallel/test.py::test_send_data",
|
||||
"test_hedged_requests_parallel/test.py::test_send_table_status_sleep",
|
||||
"test_hedged_requests/test.py::test_combination1",
|
||||
"test_hedged_requests/test.py::test_combination2",
|
||||
"test_hedged_requests/test.py::test_combination3",
|
||||
"test_hedged_requests/test.py::test_combination4",
|
||||
"test_hedged_requests/test.py::test_long_query",
|
||||
"test_hedged_requests/test.py::test_receive_timeout1",
|
||||
"test_hedged_requests/test.py::test_receive_timeout2",
|
||||
"test_hedged_requests/test.py::test_send_data",
|
||||
"test_hedged_requests/test.py::test_send_data2",
|
||||
"test_hedged_requests/test.py::test_send_table_status_sleep",
|
||||
"test_hedged_requests/test.py::test_send_table_status_sleep2",
|
||||
"test_hedged_requests/test.py::test_stuck_replica",
|
||||
"test_https_replication/test.py::test_both_http",
|
||||
"test_https_replication/test.py::test_both_https",
|
||||
"test_https_replication/test.py::test_mixed_protocol",
|
||||
"test_https_replication/test.py::test_replication_after_partition",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_async_inserts_into_local_shard",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_sync",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_sync_fails_with_timeout",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_sync_with_disabled_timeout",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_without_sync_ignores_timeout",
|
||||
"test_insert_into_distributed/test.py::test_inserts_batching",
|
||||
"test_insert_into_distributed/test.py::test_inserts_local",
|
||||
"test_insert_into_distributed/test.py::test_inserts_low_cardinality",
|
||||
"test_insert_into_distributed/test.py::test_inserts_single_replica_internal_replication",
|
||||
"test_insert_into_distributed/test.py::test_inserts_single_replica_local_internal_replication",
|
||||
"test_insert_into_distributed/test.py::test_inserts_single_replica_no_internal_replication",
|
||||
"test_insert_into_distributed/test.py::test_prefer_localhost_replica",
|
||||
"test_insert_into_distributed/test.py::test_reconnect",
|
||||
"test_insert_into_distributed/test.py::test_table_function",
|
||||
"test_insert_into_distributed_through_materialized_view/test.py::test_inserts_batching SKIPPED",
|
||||
"test_insert_into_distributed_through_materialized_view/test.py::test_inserts_local",
|
||||
"test_insert_into_distributed_through_materialized_view/test.py::test_reconnect",
|
||||
"test_keeper_multinode_blocade_leader/test.py::test_blocade_leader",
|
||||
"test_keeper_multinode_blocade_leader/test.py::test_blocade_leader_twice",
|
||||
"test_keeper_multinode_simple/test.py::test_follower_restart",
|
||||
"test_keeper_multinode_simple/test.py::test_read_write_multinode",
|
||||
"test_keeper_multinode_simple/test.py::test_session_expiration",
|
||||
"test_keeper_multinode_simple/test.py::test_simple_replicated_table",
|
||||
"test_keeper_multinode_simple/test.py::test_watch_on_follower",
|
||||
"test_limited_replicated_fetches/test.py::test_limited_fetches",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_multi_table_update[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_multi_table_update[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_settings[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_settings[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_system_parts_table[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_system_parts_table[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_system_tables_table[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_system_tables_table[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node1]",
|
||||
"test_parts_delete_zookeeper/test.py::test_merge_doesnt_work_without_zookeeper",
|
||||
"test_polymorphic_parts/test.py::test_compact_parts_only",
|
||||
"test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_compact-Compact]",
|
||||
"test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_wide-Wide]",
|
||||
"test_polymorphic_parts/test.py::test_in_memory",
|
||||
"test_polymorphic_parts/test.py::test_in_memory_alters",
|
||||
"test_polymorphic_parts/test.py::test_in_memory_deduplication",
|
||||
"test_polymorphic_parts/test.py::test_in_memory_wal_rotate",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node0-second_node0]",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node1-second_node1]",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_diff_versions_2 SKIPPED",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_diff_versions SKIPPED",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_index",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_non_adaptive",
|
||||
"test_quorum_inserts_parallel/test.py::test_parallel_quorum_actually_parallel",
|
||||
"test_quorum_inserts_parallel/test.py::test_parallel_quorum_actually_quorum",
|
||||
"test_random_inserts/test.py::test_insert_multithreaded",
|
||||
"test_random_inserts/test.py::test_random_inserts",
|
||||
"test_reload_clusters_config/test.py::test_add_cluster",
|
||||
"test_reload_clusters_config/test.py::test_delete_cluster",
|
||||
"test_reload_clusters_config/test.py::test_simple_reload",
|
||||
"test_reload_clusters_config/test.py::test_update_one_cluster",
|
||||
"test_replace_partition/test.py::test_drop_failover",
|
||||
"test_replace_partition/test.py::test_normal_work",
|
||||
"test_replace_partition/test.py::test_replace_after_replace_failover",
|
||||
"test_replicated_database/test.py::test_alters_from_different_replicas",
|
||||
"test_replicated_database/test.py::test_create_replicated_table",
|
||||
"test_replicated_database/test.py::test_recover_staled_replica",
|
||||
"test_replicated_database/test.py::test_simple_alter_table[MergeTree]",
|
||||
"test_replicated_database/test.py::test_simple_alter_table[ReplicatedMergeTree]",
|
||||
"test_replicated_database/test.py::test_startup_without_zk",
|
||||
"test_replicated_fetches_timeouts/test.py::test_no_stall",
|
||||
"test_storage_kafka/test.py::test_bad_reschedule",
|
||||
"test_storage_kafka/test.py::test_commits_of_unprocessed_messages_on_drop",
|
||||
"test_storage_kafka/test.py::test_exception_from_destructor",
|
||||
"test_storage_kafka/test.py::test_kafka_commit_on_block_write",
|
||||
"test_storage_kafka/test.py::test_kafka_consumer_hang",
|
||||
"test_storage_kafka/test.py::test_kafka_consumer_hang2",
|
||||
"test_storage_kafka/test.py::test_kafka_csv_with_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_csv_with_thread_per_consumer",
|
||||
"test_storage_kafka/test.py::test_kafka_duplicates_when_commit_failed",
|
||||
"test_storage_kafka/test.py::test_kafka_engine_put_errors_to_stream",
|
||||
"test_storage_kafka/test.py::test_kafka_engine_put_errors_to_stream_with_random_malformed_json",
|
||||
"test_storage_kafka/test.py::test_kafka_flush_by_block_size",
|
||||
"test_storage_kafka/test.py::test_kafka_flush_by_time",
|
||||
"test_storage_kafka/test.py::test_kafka_flush_on_big_message",
|
||||
"test_storage_kafka/test.py::test_kafka_formats",
|
||||
"test_storage_kafka/test.py::test_kafka_formats_with_broken_message",
|
||||
"test_storage_kafka/test.py::test_kafka_insert",
|
||||
"test_storage_kafka/test.py::test_kafka_issue11308",
|
||||
"test_storage_kafka/test.py::test_kafka_issue14202",
|
||||
"test_storage_kafka/test.py::test_kafka_issue4116",
|
||||
"test_storage_kafka/test.py::test_kafka_json_as_string",
|
||||
"test_storage_kafka/test.py::test_kafka_json_without_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_lot_of_partitions_partial_commit_of_bulk",
|
||||
"test_storage_kafka/test.py::test_kafka_many_materialized_views",
|
||||
"test_storage_kafka/test.py::test_kafka_materialized_view",
|
||||
"test_storage_kafka/test.py::test_kafka_materialized_view_with_subquery",
|
||||
"test_storage_kafka/test.py::test_kafka_no_holes_when_write_suffix_failed",
|
||||
"test_storage_kafka/test.py::test_kafka_produce_consume",
|
||||
"test_storage_kafka/test.py::test_kafka_produce_key_timestamp",
|
||||
"test_storage_kafka/test.py::test_kafka_protobuf",
|
||||
"test_storage_kafka/test.py::test_kafka_protobuf_no_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_rebalance",
|
||||
"test_storage_kafka/test.py::test_kafka_select_empty",
|
||||
"test_storage_kafka/test.py::test_kafka_settings_new_syntax",
|
||||
"test_storage_kafka/test.py::test_kafka_settings_old_syntax",
|
||||
"test_storage_kafka/test.py::test_kafka_string_field_on_first_position_in_protobuf",
|
||||
"test_storage_kafka/test.py::test_kafka_tsv_with_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_unavailable",
|
||||
"test_storage_kafka/test.py::test_kafka_virtual_columns",
|
||||
"test_storage_kafka/test.py::test_kafka_virtual_columns2",
|
||||
"test_storage_kafka/test.py::test_kafka_virtual_columns_with_materialized_view",
|
||||
"test_storage_kafka/test.py::test_librdkafka_compression",
|
||||
"test_storage_kafka/test.py::test_premature_flush_on_eof",
|
||||
"test_storage_kerberized_kafka/test.py::test_kafka_json_as_string",
|
||||
"test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_no_kdc",
|
||||
"test_system_clusters_actual_information/test.py::test",
|
||||
"test_system_metrics/test.py::test_readonly_metrics",
|
||||
"test_system_replicated_fetches/test.py::test_system_replicated_fetches"
]

6 tests/integration/parallel.readme Normal file
@@ -0,0 +1,6 @@
# Parallel tests skip list is currently generated manually. All tests except those in parallel_skip.json will run in parallel.
# The current list is generated with the following commands.
# 1. Generate the full test list, as in a CI run:
./runner ' --setup-plan' | grep '::' | sed 's/ (fixtures used:.*//g' | sed 's/^ *//g' | sed 's/ *$//g' | sort -u > all_tests.txt
# 2. Filter the tests that are known not to run in parallel:
cat all_tests.txt | grep '^test_replicated_database\|^test_disabled_mysql_server\|^test_distributed_ddl\|^test_distributed_ddl\|^test_quorum_inserts_parallel\|^test_ddl_worker_non_leader\|^test_consistent_parts_after_clone_replica\|^test_materialize_mysql_database\|^test_atomic_drop_table\|^test_distributed_respect_user_timeouts\|^test_storage_kafka\|^test_replace_partition\|^test_replicated_fetches_timeouts\|^test_system_clusters_actual_information\|^test_delayed_replica_failover\|^test_limited_replicated_fetches\|^test_hedged_requests\|^test_insert_into_distributed\|^test_insert_into_distributed_through_materialized_view\|^test_drop_replica\|^test_attach_without_fetching\|^test_system_replicated_fetches\|^test_cross_replication\|^test_dictionary_allow_read_expired_keys\|^test_dictionary_allow_read_expired_keys\|^test_dictionary_allow_read_expired_keys\|^test_insert_into_distributed_sync_async\|^test_hedged_requests_parallel\|^test_dictionaries_update_field\|^test_broken_part_during_merge\|^test_random_inserts\|^test_reload_clusters_config\|^test_parts_delete_zookeeper\|^test_polymorphic_parts\|^test_keeper_multinode_simple\|^test_https_replication\|^test_storage_kerberized_kafka\|^test_cleanup_dir_after_bad_zk_conn\|^test_system_metrics\|^test_keeper_multinode_blocade_leader' | awk '{$1=$1;print}' | jq -R -n '[inputs] | .' > parallel_skip.json
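For readers who want to reuse the generated files outside CI, the following is a minimal, hypothetical sketch (not part of this commit; it only assumes the all_tests.txt and parallel_skip.json files produced by the commands above) that splits the full test set into a parallel batch and a sequential batch:

import json

# Load the full test list from step 1 and the skip list from step 2.
with open("all_tests.txt") as f:
    all_tests = {line.strip() for line in f if line.strip()}
with open("parallel_skip.json") as f:
    skip = set(json.load(f))

parallel = sorted(all_tests - skip)    # safe to run under pytest-xdist
sequential = sorted(all_tests & skip)  # run these one at a time
print("{} parallel, {} sequential".format(len(parallel), len(sequential)))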
262 tests/integration/parallel_skip.json Normal file
@@ -0,0 +1,262 @@
[
|
||||
"test_host_ip_change/test.py::test_dns_cache_update",
|
||||
"test_host_ip_change/test.py::test_ip_change_drop_dns_cache",
|
||||
"test_host_ip_change/test.py::test_ip_change_update_dns_cache",
|
||||
"test_host_ip_change/test.py::test_user_access_ip_change[node0]",
|
||||
"test_host_ip_change/test.py::test_user_access_ip_change[node1]",
|
||||
"test_atomic_drop_table/test.py::test_atomic_delete_with_stopped_zookeeper",
|
||||
"test_attach_without_fetching/test.py::test_attach_without_fetching",
|
||||
"test_broken_part_during_merge/test.py::test_merge_and_part_corruption",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_attach_without_zk",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_cleanup_dir_after_bad_zk_conn",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_cleanup_dir_after_wrong_replica_name",
|
||||
"test_cleanup_dir_after_bad_zk_conn/test.py::test_cleanup_dir_after_wrong_zk_path",
|
||||
"test_consistent_parts_after_clone_replica/test.py::test_inconsistent_parts_if_drop_while_replica_not_active",
|
||||
"test_cross_replication/test.py::test",
|
||||
"test_ddl_worker_non_leader/test.py::test_non_leader_replica",
|
||||
"test_delayed_replica_failover/test.py::test",
|
||||
"test_dictionaries_update_field/test.py::test_update_field[complex_key_hashed_update_field_dictionary-HASHED]",
|
||||
"test_dictionaries_update_field/test.py::test_update_field[flat_update_field_dictionary-FLAT]",
|
||||
"test_dictionaries_update_field/test.py::test_update_field[simple_key_hashed_update_field_dictionary-HASHED]",
|
||||
"test_dictionary_allow_read_expired_keys/test_default_reading.py::test_default_reading",
|
||||
"test_dictionary_allow_read_expired_keys/test_default_string.py::test_return_real_values",
|
||||
"test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py::test_simple_dict_get_or_default",
|
||||
"test_dictionary_allow_read_expired_keys/test_dict_get.py::test_simple_dict_get",
|
||||
"test_disabled_mysql_server/test.py::test_disabled_mysql_server",
|
||||
"test_distributed_ddl_on_cross_replication/test.py::test_alter_ddl",
|
||||
"test_distributed_ddl_on_cross_replication/test.py::test_atomic_database",
|
||||
"test_distributed_ddl_parallel/test.py::test_all_in_parallel",
|
||||
"test_distributed_ddl_parallel/test.py::test_slow_dict_load_7",
|
||||
"test_distributed_ddl_parallel/test.py::test_smoke",
|
||||
"test_distributed_ddl_parallel/test.py::test_smoke_parallel",
|
||||
"test_distributed_ddl_parallel/test.py::test_smoke_parallel_dict_reload",
|
||||
"test_distributed_ddl_parallel/test.py::test_two_in_parallel_two_queued",
|
||||
"test_distributed_ddl_password/test.py::test_alter",
|
||||
"test_distributed_ddl_password/test.py::test_truncate",
|
||||
"test_distributed_ddl/test.py::test_allowed_databases[configs]",
|
||||
"test_distributed_ddl/test.py::test_allowed_databases[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_create_as_select[configs]",
|
||||
"test_distributed_ddl/test.py::test_create_as_select[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_create_reserved[configs]",
|
||||
"test_distributed_ddl/test.py::test_create_reserved[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_create_view[configs]",
|
||||
"test_distributed_ddl/test.py::test_create_view[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_default_database[configs]",
|
||||
"test_distributed_ddl/test.py::test_default_database[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_detach_query[configs]",
|
||||
"test_distributed_ddl/test.py::test_detach_query[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_implicit_macros[configs]",
|
||||
"test_distributed_ddl/test.py::test_implicit_macros[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_kill_query[configs]",
|
||||
"test_distributed_ddl/test.py::test_kill_query[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_macro[configs]",
|
||||
"test_distributed_ddl/test.py::test_macro[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_on_connection_loss[configs]",
|
||||
"test_distributed_ddl/test.py::test_on_connection_loss[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_on_server_fail[configs]",
|
||||
"test_distributed_ddl/test.py::test_on_server_fail[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_on_session_expired[configs]",
|
||||
"test_distributed_ddl/test.py::test_on_session_expired[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_optimize_query[configs]",
|
||||
"test_distributed_ddl/test.py::test_optimize_query[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_rename[configs]",
|
||||
"test_distributed_ddl/test.py::test_rename[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_replicated_without_arguments[configs]",
|
||||
"test_distributed_ddl/test.py::test_replicated_without_arguments[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_simple_alters[configs]",
|
||||
"test_distributed_ddl/test.py::test_simple_alters[configs_secure]",
|
||||
"test_distributed_ddl/test.py::test_socket_timeout[configs]",
|
||||
"test_distributed_ddl/test.py::test_socket_timeout[configs_secure]",
|
||||
"test_distributed_ddl/test_replicated_alter.py::test_replicated_alters[configs]",
|
||||
"test_distributed_ddl/test_replicated_alter.py::test_replicated_alters[configs_secure]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-default-node2-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs-ready_to_wait-node2-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-default-node2-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node1-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node1-remote]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node2-distributed]",
|
||||
"test_distributed_respect_user_timeouts/test.py::test_reconnect[configs_secure-ready_to_wait-node2-remote]",
|
||||
"test_drop_replica/test.py::test_drop_replica",
|
||||
"test_hedged_requests_parallel/test.py::test_combination1",
|
||||
"test_hedged_requests_parallel/test.py::test_combination2",
|
||||
"test_hedged_requests_parallel/test.py::test_query_with_no_data_to_sample",
|
||||
"test_hedged_requests_parallel/test.py::test_send_data",
|
||||
"test_hedged_requests_parallel/test.py::test_send_table_status_sleep",
|
||||
"test_hedged_requests/test.py::test_combination1",
|
||||
"test_hedged_requests/test.py::test_combination2",
|
||||
"test_hedged_requests/test.py::test_combination3",
|
||||
"test_hedged_requests/test.py::test_combination4",
|
||||
"test_hedged_requests/test.py::test_long_query",
|
||||
"test_hedged_requests/test.py::test_receive_timeout1",
|
||||
"test_hedged_requests/test.py::test_receive_timeout2",
|
||||
"test_hedged_requests/test.py::test_send_data",
|
||||
"test_hedged_requests/test.py::test_send_data2",
|
||||
"test_hedged_requests/test.py::test_send_table_status_sleep",
|
||||
"test_hedged_requests/test.py::test_send_table_status_sleep2",
|
||||
"test_hedged_requests/test.py::test_stuck_replica",
|
||||
"test_https_replication/test.py::test_both_http",
|
||||
"test_https_replication/test.py::test_both_https",
|
||||
"test_https_replication/test.py::test_mixed_protocol",
|
||||
"test_https_replication/test.py::test_replication_after_partition",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_async_inserts_into_local_shard",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_sync",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_sync_fails_with_timeout",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_sync_with_disabled_timeout",
|
||||
"test_insert_into_distributed_sync_async/test.py::test_insertion_without_sync_ignores_timeout",
|
||||
"test_insert_into_distributed/test.py::test_inserts_batching",
|
||||
"test_insert_into_distributed/test.py::test_inserts_local",
|
||||
"test_insert_into_distributed/test.py::test_inserts_low_cardinality",
|
||||
"test_insert_into_distributed/test.py::test_inserts_single_replica_internal_replication",
|
||||
"test_insert_into_distributed/test.py::test_inserts_single_replica_local_internal_replication",
|
||||
"test_insert_into_distributed/test.py::test_inserts_single_replica_no_internal_replication",
|
||||
"test_insert_into_distributed/test.py::test_prefer_localhost_replica",
|
||||
"test_insert_into_distributed/test.py::test_reconnect",
|
||||
"test_insert_into_distributed/test.py::test_table_function",
|
||||
"test_insert_into_distributed_through_materialized_view/test.py::test_inserts_local",
|
||||
"test_insert_into_distributed_through_materialized_view/test.py::test_reconnect",
|
||||
"test_keeper_multinode_blocade_leader/test.py::test_blocade_leader",
|
||||
"test_keeper_multinode_blocade_leader/test.py::test_blocade_leader_twice",
|
||||
"test_keeper_multinode_simple/test.py::test_follower_restart",
|
||||
"test_keeper_multinode_simple/test.py::test_read_write_multinode",
|
||||
"test_keeper_multinode_simple/test.py::test_session_expiration",
|
||||
"test_keeper_multinode_simple/test.py::test_simple_replicated_table",
|
||||
"test_keeper_multinode_simple/test.py::test_watch_on_follower",
|
||||
"test_limited_replicated_fetches/test.py::test_limited_fetches",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_clickhouse_killed_while_insert_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_insert_with_modify_binlog_checksum_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_empty_transaction_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_ddl_with_mysql_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_dml_with_mysql_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_materialize_database_err_sync_user_privs_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_multi_table_update[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_multi_table_update[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_killed_while_insert_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_kill_sync_thread_restore_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_settings[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_mysql_settings[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_network_partition_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_5_7[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_5_7[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_8_0[atomic]",
|
||||
"test_materialize_mysql_database/test.py::test_select_without_columns_8_0[ordinary]",
|
||||
"test_materialize_mysql_database/test.py::test_system_parts_table[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_system_parts_table[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_system_tables_table[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_system_tables_table[clickhouse_node1]",
|
||||
"test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node0]",
|
||||
"test_materialize_mysql_database/test.py::test_utf8mb4[clickhouse_node1]",
|
||||
"test_parts_delete_zookeeper/test.py::test_merge_doesnt_work_without_zookeeper",
|
||||
"test_polymorphic_parts/test.py::test_compact_parts_only",
|
||||
"test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_compact-Compact]",
|
||||
"test_polymorphic_parts/test.py::test_different_part_types_on_replicas[polymorphic_table_wide-Wide]",
|
||||
"test_polymorphic_parts/test.py::test_in_memory",
|
||||
"test_polymorphic_parts/test.py::test_in_memory_alters",
|
||||
"test_polymorphic_parts/test.py::test_in_memory_deduplication",
|
||||
"test_polymorphic_parts/test.py::test_in_memory_wal_rotate",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node0-second_node0]",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_basics[first_node1-second_node1]",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_index",
|
||||
"test_polymorphic_parts/test.py::test_polymorphic_parts_non_adaptive",
|
||||
"test_quorum_inserts_parallel/test.py::test_parallel_quorum_actually_parallel",
|
||||
"test_quorum_inserts_parallel/test.py::test_parallel_quorum_actually_quorum",
|
||||
"test_random_inserts/test.py::test_insert_multithreaded",
|
||||
"test_random_inserts/test.py::test_random_inserts",
|
||||
"test_reload_clusters_config/test.py::test_add_cluster",
|
||||
"test_reload_clusters_config/test.py::test_delete_cluster",
|
||||
"test_reload_clusters_config/test.py::test_simple_reload",
|
||||
"test_reload_clusters_config/test.py::test_update_one_cluster",
|
||||
"test_replace_partition/test.py::test_drop_failover",
|
||||
"test_replace_partition/test.py::test_normal_work",
|
||||
"test_replace_partition/test.py::test_replace_after_replace_failover",
|
||||
"test_replicated_database/test.py::test_alters_from_different_replicas",
|
||||
"test_replicated_database/test.py::test_create_replicated_table",
|
||||
"test_replicated_database/test.py::test_recover_staled_replica",
|
||||
"test_replicated_database/test.py::test_simple_alter_table[MergeTree]",
|
||||
"test_replicated_database/test.py::test_simple_alter_table[ReplicatedMergeTree]",
|
||||
"test_replicated_database/test.py::test_startup_without_zk",
|
||||
"test_replicated_fetches_timeouts/test.py::test_no_stall",
|
||||
"test_storage_kafka/test.py::test_bad_reschedule",
|
||||
"test_storage_kafka/test.py::test_commits_of_unprocessed_messages_on_drop",
|
||||
"test_storage_kafka/test.py::test_exception_from_destructor",
|
||||
"test_storage_kafka/test.py::test_kafka_commit_on_block_write",
|
||||
"test_storage_kafka/test.py::test_kafka_consumer_hang",
|
||||
"test_storage_kafka/test.py::test_kafka_consumer_hang2",
|
||||
"test_storage_kafka/test.py::test_kafka_csv_with_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_csv_with_thread_per_consumer",
|
||||
"test_storage_kafka/test.py::test_kafka_duplicates_when_commit_failed",
|
||||
"test_storage_kafka/test.py::test_kafka_engine_put_errors_to_stream",
|
||||
"test_storage_kafka/test.py::test_kafka_engine_put_errors_to_stream_with_random_malformed_json",
|
||||
"test_storage_kafka/test.py::test_kafka_flush_by_block_size",
|
||||
"test_storage_kafka/test.py::test_kafka_flush_by_time",
|
||||
"test_storage_kafka/test.py::test_kafka_flush_on_big_message",
|
||||
"test_storage_kafka/test.py::test_kafka_formats",
|
||||
"test_storage_kafka/test.py::test_kafka_formats_with_broken_message",
|
||||
"test_storage_kafka/test.py::test_kafka_insert",
|
||||
"test_storage_kafka/test.py::test_kafka_issue11308",
|
||||
"test_storage_kafka/test.py::test_kafka_issue14202",
|
||||
"test_storage_kafka/test.py::test_kafka_issue4116",
|
||||
"test_storage_kafka/test.py::test_kafka_json_as_string",
|
||||
"test_storage_kafka/test.py::test_kafka_json_without_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_lot_of_partitions_partial_commit_of_bulk",
|
||||
"test_storage_kafka/test.py::test_kafka_many_materialized_views",
|
||||
"test_storage_kafka/test.py::test_kafka_materialized_view",
|
||||
"test_storage_kafka/test.py::test_kafka_materialized_view_with_subquery",
|
||||
"test_storage_kafka/test.py::test_kafka_no_holes_when_write_suffix_failed",
|
||||
"test_storage_kafka/test.py::test_kafka_produce_consume",
|
||||
"test_storage_kafka/test.py::test_kafka_produce_key_timestamp",
|
||||
"test_storage_kafka/test.py::test_kafka_protobuf",
|
||||
"test_storage_kafka/test.py::test_kafka_protobuf_no_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_rebalance",
|
||||
"test_storage_kafka/test.py::test_kafka_select_empty",
|
||||
"test_storage_kafka/test.py::test_kafka_settings_new_syntax",
|
||||
"test_storage_kafka/test.py::test_kafka_settings_old_syntax",
|
||||
"test_storage_kafka/test.py::test_kafka_string_field_on_first_position_in_protobuf",
|
||||
"test_storage_kafka/test.py::test_kafka_tsv_with_delimiter",
|
||||
"test_storage_kafka/test.py::test_kafka_unavailable",
|
||||
"test_storage_kafka/test.py::test_kafka_virtual_columns",
|
||||
"test_storage_kafka/test.py::test_kafka_virtual_columns2",
|
||||
"test_storage_kafka/test.py::test_kafka_virtual_columns_with_materialized_view",
|
||||
"test_storage_kafka/test.py::test_librdkafka_compression",
|
||||
"test_storage_kafka/test.py::test_premature_flush_on_eof",
|
||||
"test_storage_kerberized_kafka/test.py::test_kafka_json_as_string",
|
||||
"test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_no_kdc",
|
||||
"test_system_clusters_actual_information/test.py::test",
|
||||
"test_system_metrics/test.py::test_readonly_metrics",
|
||||
"test_system_replicated_fetches/test.py::test_system_replicated_fetches"
|
||||
]

tests/integration/pytest.ini
@@ -1,6 +1,13 @@
[pytest]
python_files = test*.py
norecursedirs = _instances
timeout = 300
timeout = 1800
junit_duration_report = call
junit_suite_name = integration
log_cli = 1
log_cli_level = CRITICAL
log_cli_format = %%(asctime)s [%(levelname)8s] %(funcName)s %(message)s (%(filename)s:%(lineno)s)
log_file = pytest.log
log_file_level = DEBUG
log_file_format = %(asctime)s [%(levelname)8s] %(funcName)s %(message)s (%(filename)s:%(lineno)s)
log_file_date_format=%Y-%m-%d %H:%M:%S
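The hunk above raises the per-test timeout from 300 to 1800 seconds and turns on CLI and file logging for the parallel runs. As a hypothetical convenience (not part of the commit), the effective values can be dumped with a few lines of Python; interpolation has to be disabled because of the %(...)s placeholders in the log formats:

import configparser

# Read the integration tests pytest.ini; interpolation=None keeps configparser
# from choking on the %(asctime)s style placeholders.
cfg = configparser.ConfigParser(interpolation=None)
cfg.read("pytest.ini")
for key, value in cfg["pytest"].items():
    print("{} = {}".format(key, value))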
tests/integration/runner
@@ -27,7 +27,7 @@ def check_args_and_update_paths(args):
else:
CLICKHOUSE_ROOT = args.clickhouse_root
else:
logging.info("ClickHouse root is not set. Will use {}".format(DEFAULT_CLICKHOUSE_ROOT))
logging.info("ClickHouse root is not set. Will use %s" % (DEFAULT_CLICKHOUSE_ROOT))
CLICKHOUSE_ROOT = DEFAULT_CLICKHOUSE_ROOT

if not os.path.isabs(args.binary):
@@ -48,21 +48,21 @@ def check_args_and_update_paths(args):
args.base_configs_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.base_configs_dir))
else:
args.base_configs_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, CONFIG_DIR_IN_REPO))
logging.info("Base configs dir is not set. Will use {}".format(args.base_configs_dir))
logging.info("Base configs dir is not set. Will use %s" % (args.base_configs_dir))

if args.cases_dir:
if not os.path.isabs(args.cases_dir):
args.cases_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.cases_dir))
else:
args.cases_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, INTERGATION_DIR_IN_REPO))
logging.info("Cases dir is not set. Will use {}".format(args.cases_dir))
logging.info("Cases dir is not set. Will use %s" % (args.cases_dir))

if args.src_dir:
if not os.path.isabs(args.src_dir):
args.src_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.src_dir))
else:
args.src_dir = os.path.abspath(os.path.join(CLICKHOUSE_ROOT, SRC_DIR_IN_REPO))
logging.info("src dir is not set. Will use {}".format(args.src_dir))
logging.info("src dir is not set. Will use %s" % (args.src_dir))

logging.info("base_configs_dir: {}, binary: {}, cases_dir: {} ".format(args.base_configs_dir, args.binary, args.cases_dir))

@@ -70,6 +70,10 @@ def check_args_and_update_paths(args):
if not os.path.exists(path):
raise Exception("Path {} doesn't exist".format(path))

if args.dockerd_volume:
if not os.path.isabs(args.dockerd_volume):
args.src_dir = os.path.abspath(os.path.join(CURRENT_WORK_DIR, args.dockerd_volume))

if (not os.path.exists(os.path.join(args.base_configs_dir, "config.xml"))) and (not os.path.exists(os.path.join(args.base_configs_dir, "config.yaml"))):
raise Exception("No configs.xml or configs.yaml in {}".format(args.base_configs_dir))

@@ -143,6 +147,11 @@ if __name__ == "__main__":
default=False,
help="Don't use net host in parent docker container")

parser.add_argument(
"--network",
default='host',
help="Set network driver for runnner container")

parser.add_argument(
"--docker-image-version",
default="latest",
@@ -153,14 +162,48 @@ if __name__ == "__main__":
action="append",
help="Set non-default tags for images used in docker compose recipes(yandex/my_container:my_tag)")

parser.add_argument(
"-n", "--parallel",
action="store",
dest="parallel",
help="Parallelism")

parser.add_argument(
"-t", "--tests_list",
action="store",
nargs='+',
default=[],
dest="tests_list",
help="List of tests to run")

parser.add_argument(
"--tmpfs",
action='store_true',
default=False,
dest="tmpfs",
help="Use tmpfs for dockerd files")

parser.add_argument(
"--dockerd-volume-dir",
action='store',
dest="dockerd_volume",
help="Bind volume to this dir to use for dockerd files")

parser.add_argument('pytest_args', nargs='*', help="args for pytest command")

args = parser.parse_args()

check_args_and_update_paths(args)

parallel_args = ""
if args.parallel:
parallel_args += "--dist=loadfile"
parallel_args += " -n {}".format(args.parallel)

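As a quick illustration of what the block above produces (the value is illustrative, not taken from the commit): passing "--parallel 10" to the runner yields the pytest-xdist options "--dist=loadfile -n 10", where --dist=loadfile keeps all tests from one file on the same worker.

# Sketch: the value of parallel_args for an assumed "--parallel 10" invocation.
parallel = "10"  # stand-in for args.parallel
parallel_args = ""
if parallel:
    parallel_args += "--dist=loadfile"
    parallel_args += " -n {}".format(parallel)
assert parallel_args == "--dist=loadfile -n 10"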
net = ""
if not args.disable_net_host:
if args.network:
net = "--net={}".format(args.network)
elif not args.disable_net_host:
net = "--net=host"

env_tags = ""
@@ -183,25 +226,34 @@ if __name__ == "__main__":
elif image == "yandex/clickhouse-kerberos-kdc":
env_tags += "-e {}={}".format("DOCKER_KERBEROS_KDC_TAG", tag)
else:
logging.info("Unknown image {}".format(image))
logging.info("Unknown image %s" % (image))

# create named volume which will be used inside to store images and other docker related files,
# to avoid redownloading it every time
#
# should be removed manually when not needed
subprocess.check_call('docker volume create {name}_volume'.format(name=CONTAINER_NAME), shell=True)
dockerd_internal_volume = ""
if args.tmpfs:
dockerd_internal_volume = "--tmpfs /var/lib/docker -e DOCKER_RAMDISK=true"
elif args.dockerd_volume:
dockerd_internal_volume = "--mount type=bind,source={},target=/var/lib/docker".format(args.dockerd_volume)
else:
subprocess.check_call('docker volume create {name}_volume'.format(name=CONTAINER_NAME), shell=True)
dockerd_internal_volume = "--volume={}_volume:/var/lib/docker".format(CONTAINER_NAME)

# enable tty mode & interactive for docker if we have real tty
tty = ""
if sys.stdout.isatty() and sys.stdin.isatty():
tty = "-it"


cmd = "docker run {net} {tty} --rm --name {name} --privileged \
--volume={odbc_bridge_bin}:/clickhouse-odbc-bridge --volume={bin}:/clickhouse \
--volume={library_bridge_bin}:/clickhouse-library-bridge --volume={bin}:/clickhouse \
--volume={base_cfg}:/clickhouse-config --volume={cases_dir}:/ClickHouse/tests/integration \
--volume={src_dir}/Server/grpc_protos:/ClickHouse/src/Server/grpc_protos \
--volume={name}_volume:/var/lib/docker {env_tags} -e PYTEST_OPTS='{opts}' {img} {command}".format(
{dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \
{env_tags} -e PYTEST_OPTS='{parallel} {opts} {tests_list}' {img} {command}".format(
net=net,
tty=tty,
bin=args.binary,
@@ -211,7 +263,10 @@ if __name__ == "__main__":
cases_dir=args.cases_dir,
src_dir=args.src_dir,
env_tags=env_tags,
parallel=parallel_args,
opts=' '.join(args.pytest_args),
tests_list=' '.join(args.tests_list),
dockerd_internal_volume=dockerd_internal_volume,
img=DIND_INTEGRATION_TESTS_IMAGE_NAME + ":" + args.docker_image_version,
name=CONTAINER_NAME,
command=args.command

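To make the assembled command easier to picture, here is a rough, purely illustrative sketch of what the format() call above produces for a hypothetical "./runner --parallel 10 --tmpfs -t test_storage_kafka" invocation; the paths, container name, image name and trailing command are placeholders, not values taken from this commit:

# Illustrative only: approximate shape of the "docker run" command built above.
example_cmd = (
    "docker run --net=host -it --rm --name <container_name> --privileged "
    "--volume=/path/to/clickhouse-odbc-bridge:/clickhouse-odbc-bridge "
    "--volume=/path/to/clickhouse:/clickhouse "
    "--volume=/path/to/clickhouse-library-bridge:/clickhouse-library-bridge "
    "--volume=/path/to/base/config:/clickhouse-config "
    "--volume=/path/to/ClickHouse/tests/integration:/ClickHouse/tests/integration "
    "--volume=/path/to/ClickHouse/src/Server/grpc_protos:/ClickHouse/src/Server/grpc_protos "
    "--tmpfs /var/lib/docker -e DOCKER_RAMDISK=true "             # from --tmpfs
    "-e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 "
    "-e PYTEST_OPTS='--dist=loadfile -n 10 test_storage_kafka' "  # {parallel} {opts} {tests_list}
    "<integration-tests-runner-image>:latest <command>"
)
print(example_cmd)

The /var/lib/docker part is the interesting piece: depending on the flags it is backed by tmpfs, a bind mount, or a named volume, so images pulled by docker-compose inside the container either survive between runs or, with --tmpfs, deliberately do not.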
@@ -13,9 +13,6 @@ node = cluster.add_instance('node', main_configs=[
'configs/asynchronous_metrics_update_period_s.xml',
])

logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())

@pytest.fixture(scope='module', autouse=True)
def start_cluster():
try:

@@ -18,7 +18,7 @@ def started_cluster():


def test_access_control_on_cluster():
ch1.query_with_retry("CREATE USER Alex ON CLUSTER 'cluster'", retry_count=3)
ch1.query_with_retry("CREATE USER IF NOT EXISTS Alex ON CLUSTER 'cluster'", retry_count=5)
assert ch1.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n"
assert ch2.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n"
assert ch3.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n"

@@ -0,0 +1,4 @@
<yandex>
<merge_tree replace="replace">
</merge_tree>
</yandex>

408 tests/integration/test_adaptive_granularity/test.py Normal file
@@ -0,0 +1,408 @@
|
||||
import time
|
||||
|
||||
import logging
|
||||
import pytest
|
||||
from helpers.client import QueryRuntimeException, QueryTimeoutExceedException
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import assert_eq_with_retry
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
|
||||
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
|
||||
|
||||
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', with_installed_binary=True)
|
||||
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
|
||||
|
||||
node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', with_installed_binary=True)
|
||||
node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
|
||||
|
||||
node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True)
|
||||
node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)
|
||||
|
||||
node9 = cluster.add_instance('node9', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/merge_tree_settings.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)
|
||||
node10 = cluster.add_instance('node10', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/merge_tree_settings.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True)
|
||||
|
||||
node11 = cluster.add_instance('node11', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)
|
||||
node12 = cluster.add_instance('node12', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)
|
||||
|
||||
|
||||
def prepare_single_pair_with_setting(first_node, second_node, group):
|
||||
for node in (first_node, second_node):
|
||||
node.query("CREATE DATABASE IF NOT EXISTS test")
|
||||
|
||||
first_node.query("DROP TABLE IF EXISTS table_by_default")
|
||||
second_node.query("DROP TABLE IF EXISTS table_by_default")
|
||||
first_node.query("DROP TABLE IF EXISTS table_with_fixed_granularity")
|
||||
second_node.query("DROP TABLE IF EXISTS table_with_fixed_granularity")
|
||||
|
||||
|
||||
# Two tables with adaptive granularity
|
||||
first_node.query(
|
||||
'''
|
||||
CREATE TABLE table_by_default(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_by_default', '1')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 10485760
|
||||
'''.format(g=group))
|
||||
|
||||
second_node.query(
|
||||
'''
|
||||
CREATE TABLE table_by_default(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_by_default', '2')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 10485760
|
||||
'''.format(g=group))
|
||||
|
||||
# Two tables with fixed granularity
|
||||
first_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_fixed_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_fixed_granularity', '1')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 0
|
||||
'''.format(g=group))
|
||||
|
||||
second_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_fixed_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_fixed_granularity', '2')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 0
|
||||
'''.format(g=group))
|
||||
|
||||
# Two tables with different granularity
|
||||
with pytest.raises(QueryRuntimeException):
|
||||
first_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_different_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_different_granularity', '1')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 10485760
|
||||
'''.format(g=group))
|
||||
|
||||
second_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_different_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_different_granularity', '2')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 0
|
||||
'''.format(g=group))
|
||||
|
||||
# Two tables with different granularity, but enabled mixed parts
|
||||
first_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_mixed_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_mixed_granularity', '1')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 10485760, enable_mixed_granularity_parts=1
|
||||
'''.format(g=group))
|
||||
|
||||
second_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_mixed_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_mixed_granularity', '2')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 0, enable_mixed_granularity_parts=1
|
||||
'''.format(g=group))
|
||||
|
||||
|
||||
def prepare_single_pair_without_setting(first_node, second_node, group):
|
||||
for node in (first_node, second_node):
|
||||
node.query("CREATE DATABASE IF NOT EXISTS test")
|
||||
|
||||
# Two tables with fixed granularity
|
||||
first_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_fixed_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_fixed_granularity', '1')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
'''.format(g=group))
|
||||
|
||||
second_node.query(
|
||||
'''
|
||||
CREATE TABLE table_with_fixed_granularity(date Date, id UInt32, dummy UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{g}/table_with_fixed_granularity', '2')
|
||||
PARTITION BY toYYYYMM(date)
|
||||
ORDER BY id
|
||||
SETTINGS index_granularity_bytes = 0
|
||||
'''.format(g=group))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def start_static_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
prepare_single_pair_with_setting(node1, node2, "shard1")
|
||||
prepare_single_pair_with_setting(node3, node4, "shard2")
|
||||
prepare_single_pair_without_setting(node5, node6, "shard3")
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
('first_node', 'second_node', 'table'),
|
||||
[
|
||||
(node1, node2, 'table_by_default'),
|
||||
(node1, node2, 'table_with_fixed_granularity'),
|
||||
(node3, node4, 'table_by_default'),
|
||||
(node3, node4, 'table_with_fixed_granularity'),
|
||||
(node5, node6, 'table_with_fixed_granularity'),
|
||||
]
|
||||
)
|
||||
def test_different_versions_cluster(start_static_cluster, first_node, second_node, table):
|
||||
counter = 1
    for n1, n2 in ((first_node, second_node), (second_node, first_node)):
        n1.query("INSERT INTO {tbl} VALUES (toDate('2018-10-01'), {c1}, 333), (toDate('2018-10-02'), {c2}, 444)".format(
            tbl=table, c1=counter * 2, c2=counter * 2 + 1))
        n2.query("SYSTEM SYNC REPLICA {tbl}".format(tbl=table))
        assert_eq_with_retry(n2, "SELECT count() from {tbl}".format(tbl=table), str(counter * 2))
        n1.query("DETACH TABLE {tbl}".format(tbl=table))
        n2.query("DETACH TABLE {tbl}".format(tbl=table))
        n1.query("ATTACH TABLE {tbl}".format(tbl=table))
        n2.query("ATTACH TABLE {tbl}".format(tbl=table))
        assert_eq_with_retry(n1, "SELECT count() from {tbl}".format(tbl=table), str(counter * 2))
        assert_eq_with_retry(n2, "SELECT count() from {tbl}".format(tbl=table), str(counter * 2))
        n1.query("OPTIMIZE TABLE {tbl} FINAL".format(tbl=table))
        n2.query("SYSTEM SYNC REPLICA {tbl}".format(tbl=table))
        assert_eq_with_retry(n1, "SELECT count() from {tbl}".format(tbl=table), str(counter * 2))
        assert_eq_with_retry(n2, "SELECT count() from {tbl}".format(tbl=table), str(counter * 2))
        counter += 1


@pytest.fixture(scope="module")
def start_dynamic_cluster():
    try:
        cluster.start()
        node7.query(
            '''
            CREATE TABLE table_with_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/7/table_with_default_granularity', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            ''')

        node7.query(
            '''
            CREATE TABLE table_with_adaptive_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/7/table_with_adaptive_default_granularity', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            SETTINGS index_granularity_bytes=10485760
            ''')

        node8.query(
            '''
            CREATE TABLE table_with_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/8/table_with_default_granularity', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            ''')

        node9.query(
            '''
            CREATE TABLE table_with_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/9/table_with_default_granularity', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            ''')

        node10.query(
            '''
            CREATE TABLE table_with_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/10/table_with_default_granularity', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            ''')

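        # node11 and node12 are replicas '1' and '2' of the same table (ZooKeeper path
        # .../test/shard11/...); the two-node upgrade test below uses this pair.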
        node11.query(
            '''
            CREATE TABLE table_with_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard11/table_with_default_granularity', '1')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            ''')

        node12.query(
            '''
            CREATE TABLE table_with_default_granularity(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard11/table_with_default_granularity', '2')
            PARTITION BY toYYYYMM(date)
            ORDER BY id
            ''')

        yield cluster

    finally:
        cluster.shutdown()


@pytest.mark.parametrize(
    ('n', 'tables'),
    [
        (node7, ['table_with_default_granularity', 'table_with_adaptive_default_granularity']),
        (node8, ['table_with_default_granularity']),
    ]
)
def test_version_single_node_update(start_dynamic_cluster, n, tables):
    for table in tables:
        n.query(
            "INSERT INTO {tbl} VALUES (toDate('2018-10-01'), 1, 333), (toDate('2018-10-02'), 2, 444)".format(tbl=table))
    n.restart_with_latest_version()
    for table in tables:
        assert n.query("SELECT count() from {tbl}".format(tbl=table)) == '2\n'
        n.query(
            "INSERT INTO {tbl} VALUES (toDate('2018-10-01'), 3, 333), (toDate('2018-10-02'), 4, 444)".format(tbl=table))
        assert n.query("SELECT count() from {tbl}".format(tbl=table)) == '4\n'


@pytest.mark.parametrize(
    ('node',),
    [
        (node9,),
        (node10,)
    ]
)
def test_mixed_granularity_single_node(start_dynamic_cluster, node):
    node.query(
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 1, 333), (toDate('2018-10-02'), 2, 444)")
    node.query(
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-09-01'), 1, 333), (toDate('2018-09-02'), 2, 444)")

    path_to_part = node.query(
        "SELECT path FROM system.parts WHERE table = 'table_with_default_granularity' AND active=1 ORDER BY partition DESC LIMIT 1").strip()

    result = node.exec_in_container(["bash", "-c", "find {p} -name '*.mrk*'".format(
        p=path_to_part)])  # check that we have non-adaptive files
    logging.info(f"path {path_to_part} result\n {result}")

    def callback(n):
        new_config = """
        <yandex><merge_tree>
            <enable_mixed_granularity_parts>1</enable_mixed_granularity_parts>
            <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
        </merge_tree></yandex>"""

        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config)
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config)

    node.restart_with_latest_version(callback_onstop=callback)
    node.query("SYSTEM RELOAD CONFIG")
    assert_eq_with_retry(node,
                         "SELECT value FROM system.merge_tree_settings WHERE name='enable_mixed_granularity_parts'",
                         '1')
    assert node.query("SELECT count() from table_with_default_granularity") == '4\n'
    node.query(
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 3, 333), (toDate('2018-10-02'), 4, 444)")
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'
    node.query("OPTIMIZE TABLE table_with_default_granularity PARTITION 201810 FINAL")
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'
    path_to_merged_part = node.query(
        "SELECT path FROM system.parts WHERE table = 'table_with_default_granularity' AND active=1 ORDER BY partition DESC LIMIT 1").strip()
    node.exec_in_container(["bash", "-c", "find {p} -name '*.mrk2' | grep '.*'".format(
        p=path_to_merged_part)])  # check that we have adaptive files

    path_to_old_part = node.query(
        "SELECT path FROM system.parts WHERE table = 'table_with_default_granularity' AND active=1 ORDER BY partition ASC LIMIT 1").strip()

    node.exec_in_container(["bash", "-c", "find {p} -name '*.mrk' | grep '.*'".format(
        p=path_to_old_part)])  # check that we have non-adaptive files

    node.query("ALTER TABLE table_with_default_granularity UPDATE dummy = dummy + 1 WHERE 1")
    # still works
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'

    node.query("ALTER TABLE table_with_default_granularity MODIFY COLUMN dummy String")
    node.query("ALTER TABLE table_with_default_granularity ADD COLUMN dummy2 Float64")

    # still works
    assert node.query("SELECT count() from table_with_default_granularity") == '6\n'


@pytest.mark.skip(reason="flaky")
def test_version_update_two_nodes(start_dynamic_cluster):
    node11.query(
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 1, 333), (toDate('2018-10-02'), 2, 444)")
    node12.query("SYSTEM SYNC REPLICA table_with_default_granularity", timeout=20)
    assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '2\n'

    def callback(n):
        new_config = """
        <yandex><merge_tree>
            <enable_mixed_granularity_parts>0</enable_mixed_granularity_parts>
            <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
        </merge_tree></yandex>"""

        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config)
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config)

    node12.restart_with_latest_version(callback_onstop=callback)

    node12.query(
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 3, 333), (toDate('2018-10-02'), 4, 444)")
    node11.query("SYSTEM SYNC REPLICA table_with_default_granularity", timeout=20)
    assert node11.query("SELECT COUNT() FROM table_with_default_granularity") == '4\n'

    node12.query(
        '''
        CREATE TABLE table_with_default_granularity_new(date Date, id UInt32, dummy UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard11/table_with_default_granularity_new', '2')
        PARTITION BY toYYYYMM(date)
        ORDER BY id
        ''')

    node11.query(
        '''
        CREATE TABLE table_with_default_granularity_new(date Date, id UInt32, dummy UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard11/table_with_default_granularity_new', '1')
        PARTITION BY toYYYYMM(date)
        ORDER BY id
        ''')

    node12.query(
        "INSERT INTO table_with_default_granularity_new VALUES (toDate('2018-10-01'), 1, 333), (toDate('2018-10-02'), 2, 444)")

    with pytest.raises(QueryTimeoutExceedException):
        node11.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=20)
    node12.query(
        "INSERT INTO table_with_default_granularity_new VALUES (toDate('2018-10-01'), 3, 333), (toDate('2018-10-02'), 4, 444)")

    node11.restart_with_latest_version(callback_onstop=callback)  # just to be sure

    for i in range(3):
        try:
            node11.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=120)
            node12.query("SYSTEM SYNC REPLICA table_with_default_granularity_new", timeout=120)
            break
        except Exception as ex:
            print("Exception during replica sync", ex)
            node11.query("SYSTEM RESTART REPLICA table_with_default_granularity_new")
            node12.query("SYSTEM RESTART REPLICA table_with_default_granularity_new")
            time.sleep(2 * i)

    assert node11.query("SELECT COUNT() FROM table_with_default_granularity_new") == "4\n"
    assert node12.query("SELECT COUNT() FROM table_with_default_granularity_new") == "4\n"

    node11.query(
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-10-01'), 5, 333), (toDate('2018-10-02'), 6, 444)")
    for i in range(3):
        try:
            node12.query("SYSTEM SYNC REPLICA table_with_default_granularity", timeout=120)
            break
        except Exception as ex:
            print("Exception during replica sync", ex)
            node11.query("SYSTEM RESTART REPLICA table_with_default_granularity")
            node12.query("SYSTEM RESTART REPLICA table_with_default_granularity")
            time.sleep(2 * i)

    assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '6\n'
@ -0,0 +1,53 @@
import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)

# no adaptive granularity by default
node3 = cluster.add_instance('node3', image='yandex/clickhouse-server', tag='19.9.5.36', with_installed_binary=True,
                             stay_alive=True)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


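# The two replicas deliberately use different granularity settings: node1 creates the table
# with fixed granularity (index_granularity_bytes = 0), while node2 creates it with mixed
# granularity parts disabled.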
def test_attach_detach(start_cluster):
    node1.query("""
        CREATE TABLE test (key UInt64)
        ENGINE = ReplicatedMergeTree('/clickhouse/test', '1')
        ORDER BY tuple()
        SETTINGS index_granularity_bytes = 0""")

    node1.query("INSERT INTO test VALUES (1), (2)")

    node2.query("""
        CREATE TABLE test (key UInt64)
        ENGINE = ReplicatedMergeTree('/clickhouse/test', '2')
        ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 0""")

    node2.query("INSERT INTO test VALUES (3), (4)")

    node1.query_with_retry("SYSTEM SYNC REPLICA test", timeout=10)
    node2.query_with_retry("SYSTEM SYNC REPLICA test", timeout=10)

    assert node1.query("SELECT COUNT() FROM test") == "4\n"
    assert node2.query("SELECT COUNT() FROM test") == "4\n"

    node1.query("DETACH TABLE test")
    node2.query("DETACH TABLE test")

    node1.query("ATTACH TABLE test")
    node2.query("ATTACH TABLE test")

    assert node1.query("SELECT COUNT() FROM test") == "4\n"
    assert node2.query("SELECT COUNT() FROM test") == "4\n"