mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-09-20 08:40:50 +00:00
Merge remote-tracking branch 'origin/master' into HEAD
This commit is contained in:
commit
40209f9ed2
1
.gitmodules
vendored
1
.gitmodules
vendored
@ -186,4 +186,3 @@
|
||||
[submodule "contrib/cyrus-sasl"]
|
||||
path = contrib/cyrus-sasl
|
||||
url = https://github.com/cyrusimap/cyrus-sasl
|
||||
branch = cyrus-sasl-2.1
|
||||
|
@ -17,5 +17,5 @@ ClickHouse is an open-source column-oriented database management system that all
|
||||
|
||||
## Upcoming Events
|
||||
|
||||
* [eBay migrating from Druid](https://us02web.zoom.us/webinar/register/tZMkfu6rpjItHtaQ1DXcgPWcSOnmM73HLGKL) on September 23, 2020.
|
||||
* [ClickHouse for Edge Analytics](https://ones2020.sched.com/event/bWPs) on September 29, 2020.
|
||||
* [ClickHouse online meetup (in Russian)](https://clck.ru/R2zB9) on October 1, 2020.
|
||||
|
@ -14,6 +14,8 @@ TRIES=3
|
||||
AMD64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
|
||||
AARCH64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
|
||||
|
||||
# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.
|
||||
|
||||
FASTER_DOWNLOAD=wget
|
||||
if command -v axel >/dev/null; then
|
||||
FASTER_DOWNLOAD=axel
|
||||
@ -36,14 +38,6 @@ if [[ ! -f clickhouse ]]; then
|
||||
$FASTER_DOWNLOAD "$AMD64_BIN_URL"
|
||||
elif [[ $CPU == aarch64 ]]; then
|
||||
$FASTER_DOWNLOAD "$AARCH64_BIN_URL"
|
||||
|
||||
# Download configs. ARM version has no embedded configs.
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
|
||||
mkdir config.d
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/access_control.xml -O config.d/access_control.xml
|
||||
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
|
||||
else
|
||||
echo "Unsupported CPU type: $CPU"
|
||||
exit 1
|
||||
@ -64,6 +58,8 @@ if [[ ! -d data ]]; then
|
||||
tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET
|
||||
fi
|
||||
|
||||
uptime
|
||||
|
||||
echo "Starting clickhouse-server"
|
||||
|
||||
./clickhouse server > server.log 2>&1 &
|
||||
@ -105,9 +101,12 @@ echo
|
||||
echo "Benchmark complete. System info:"
|
||||
echo
|
||||
|
||||
echo '----Version and build id--------'
|
||||
./clickhouse local --query "SELECT version(), buildId()"
|
||||
echo '----Version, build id-----------'
|
||||
./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
|
||||
./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
|
||||
./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
|
||||
echo '----CPU-------------------------'
|
||||
cat /proc/cpuinfo | grep -i -F 'model name' | uniq
|
||||
lscpu
|
||||
echo '----Block Devices---------------'
|
||||
lsblk
|
||||
|
@ -14,10 +14,10 @@ if (NOT ENABLE_RDKAFKA)
|
||||
return()
|
||||
endif()
|
||||
|
||||
if (NOT ARCH_ARM)
|
||||
if (NOT ARCH_ARM AND USE_LIBGSASL)
|
||||
option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka instead of the bundled" ${NOT_UNBUNDLED})
|
||||
elseif(USE_INTERNAL_RDKAFKA_LIBRARY)
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal librdkafka with ARCH_ARM=${ARCH_ARM}")
|
||||
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal librdkafka with ARCH_ARM=${ARCH_ARM} AND USE_LIBGSASL=${USE_LIBGSASL}")
|
||||
endif ()
|
||||
|
||||
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cppkafka/CMakeLists.txt")
|
||||
|
2
contrib/cyrus-sasl
vendored
2
contrib/cyrus-sasl
vendored
@ -1 +1 @@
|
||||
Subproject commit 9995bf9d8e14f58934d9313ac64f13780d6dd3c9
|
||||
Subproject commit 6054630889fd1cd8d0659573d69badcee1e23a00
|
2
contrib/protobuf
vendored
2
contrib/protobuf
vendored
@ -1 +1 @@
|
||||
Subproject commit d6a10dd3db55d8f7f9e464db9151874cde1f79ec
|
||||
Subproject commit 445d1ae73a450b1e94622e7040989aa2048402e3
|
@ -11,3 +11,7 @@ else ()
|
||||
endif ()
|
||||
|
||||
add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}")
|
||||
|
||||
# We don't want to stop compilation on warnings in protobuf's headers.
|
||||
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake
|
||||
set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES ${protobuf_SOURCE_DIR}/src)
|
||||
|
4
debian/rules
vendored
4
debian/rules
vendored
@ -36,8 +36,8 @@ endif
|
||||
|
||||
CMAKE_FLAGS += -DENABLE_UTILS=0
|
||||
|
||||
DEB_CC ?= $(shell which gcc-9 gcc-8 gcc | head -n1)
|
||||
DEB_CXX ?= $(shell which g++-9 g++-8 g++ | head -n1)
|
||||
DEB_CC ?= $(shell which gcc-10 gcc-9 gcc | head -n1)
|
||||
DEB_CXX ?= $(shell which g++-10 g++-9 g++ | head -n1)
|
||||
|
||||
ifdef DEB_CXX
|
||||
DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
|
||||
|
@ -133,10 +133,6 @@
|
||||
"name": "yandex/clickhouse-postgresql-java-client",
|
||||
"dependent": []
|
||||
},
|
||||
"docker/test/integration/kerberos_kdc": {
|
||||
"name": "yandex/clickhouse-kerberos-kdc",
|
||||
"dependent": []
|
||||
},
|
||||
"docker/test/base": {
|
||||
"name": "yandex/clickhouse-test-base",
|
||||
"dependent": [
|
||||
|
@ -89,7 +89,8 @@ EOT
|
||||
fi
|
||||
|
||||
if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
|
||||
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG &
|
||||
# Listen only on localhost until the initialization is done
|
||||
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
|
||||
pid="$!"
|
||||
|
||||
# check if clickhouse is ready to accept connections
|
||||
|
@ -97,7 +97,7 @@ ccache --zero-stats ||:
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt
|
||||
ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
|
||||
time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
|
||||
ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt
|
||||
|
||||
|
||||
@ -111,35 +111,11 @@ ln -s /test_output /var/log/clickhouse-server
|
||||
cp "$CLICKHOUSE_DIR/programs/server/config.xml" /etc/clickhouse-server/
|
||||
cp "$CLICKHOUSE_DIR/programs/server/users.xml" /etc/clickhouse-server/
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
# install tests config
|
||||
$CLICKHOUSE_DIR/tests/config/install.sh
|
||||
# doesn't support SSL
|
||||
rm -f /etc/clickhouse-server/config.d/secure_ports.xml
|
||||
|
||||
# Keep original query_masking_rules.xml
|
||||
ln -s --backup=simple --suffix=_original.xml /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
# Kill the server in case we are running locally and not in docker
|
||||
kill_clickhouse
|
||||
@ -216,7 +192,7 @@ TESTS_TO_SKIP=(
|
||||
01460_DistributedFilesToInsert
|
||||
)
|
||||
|
||||
clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
|
||||
time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
|
||||
|
||||
|
||||
# substr is to remove semicolon after test name
|
||||
@ -234,7 +210,7 @@ then
|
||||
kill_clickhouse
|
||||
|
||||
# Clean the data so that there is no interference from the previous test run.
|
||||
rm -rvf /var/lib/clickhouse ||:
|
||||
rm -rf /var/lib/clickhouse ||:
|
||||
mkdir /var/lib/clickhouse
|
||||
|
||||
clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
|
||||
|
@ -48,7 +48,7 @@ function configure
|
||||
cp -av "$repo_dir"/programs/server/config* db
|
||||
cp -av "$repo_dir"/programs/server/user* db
|
||||
# TODO figure out which ones are needed
|
||||
cp -av "$repo_dir"/tests/config/listen.xml db/config.d
|
||||
cp -av "$repo_dir"/tests/config/config.d/listen.xml db/config.d
|
||||
cp -av "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
|
||||
}
|
||||
|
||||
|
@ -16,8 +16,7 @@ RUN apt-get update \
|
||||
odbc-postgresql \
|
||||
sqlite3 \
|
||||
curl \
|
||||
tar \
|
||||
krb5-user
|
||||
tar
|
||||
RUN rm -rf \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/cache/debconf \
|
||||
|
@ -1,15 +0,0 @@
|
||||
# docker build -t yandex/clickhouse-kerberos-kdc .
|
||||
|
||||
FROM centos:6.6
|
||||
# old OS to make is faster and smaller
|
||||
|
||||
RUN yum install -y krb5-server krb5-libs krb5-auth-dialog krb5-workstation
|
||||
|
||||
EXPOSE 88 749
|
||||
|
||||
RUN touch /config.sh
|
||||
# should be overwritten e.g. via docker_compose volumes
|
||||
# volumes: /some_path/my_kerberos_config.sh:/config.sh:ro
|
||||
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "/config.sh"]
|
@ -1,59 +0,0 @@
|
||||
version: '2.3'
|
||||
|
||||
services:
|
||||
kafka_kerberized_zookeeper:
|
||||
image: confluentinc/cp-zookeeper:5.2.0
|
||||
# restart: always
|
||||
hostname: kafka_kerberized_zookeeper
|
||||
environment:
|
||||
ZOOKEEPER_SERVER_ID: 1
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_SERVERS: "kafka_kerberized_zookeeper:2888:3888"
|
||||
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/zookeeper_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dsun.security.krb5.debug=true"
|
||||
volumes:
|
||||
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
|
||||
- /dev/urandom:/dev/random
|
||||
depends_on:
|
||||
- kafka_kerberos
|
||||
security_opt:
|
||||
- label:disable
|
||||
|
||||
kerberized_kafka1:
|
||||
image: confluentinc/cp-kafka:5.2.0
|
||||
# restart: always
|
||||
hostname: kerberized_kafka1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
- "9093:9093"
|
||||
environment:
|
||||
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://:9093
|
||||
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:9093
|
||||
# KAFKA_LISTENERS: INSIDE://kerberized_kafka1:9092,OUTSIDE://kerberized_kafka1:19092
|
||||
# KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kerberized_kafka1:19092
|
||||
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
|
||||
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
|
||||
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: OUTSIDE:SASL_PLAINTEXT,UNSECURED_OUTSIDE:PLAINTEXT,UNSECURED_INSIDE:PLAINTEXT,
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: OUTSIDE
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: "kafka_kerberized_zookeeper:2181"
|
||||
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/broker_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dsun.security.krb5.debug=true"
|
||||
volumes:
|
||||
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
|
||||
- /dev/urandom:/dev/random
|
||||
depends_on:
|
||||
- kafka_kerberized_zookeeper
|
||||
- kafka_kerberos
|
||||
security_opt:
|
||||
- label:disable
|
||||
|
||||
kafka_kerberos:
|
||||
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG}
|
||||
hostname: kafka_kerberos
|
||||
volumes:
|
||||
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
|
||||
- ${KERBERIZED_KAFKA_DIR}/../../kerberos_image_config.sh:/config.sh
|
||||
- /dev/urandom:/dev/random
|
||||
ports: [88, 749]
|
@ -27,7 +27,6 @@ export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest}
|
||||
export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
|
||||
export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
|
||||
export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
|
||||
export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}
|
||||
|
||||
cd /ClickHouse/tests/integration
|
||||
exec "$@"
|
||||
|
@ -37,10 +37,11 @@ def tsv_escape(s):
|
||||
parser = argparse.ArgumentParser(description='Run performance test.')
|
||||
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
|
||||
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
|
||||
parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.")
|
||||
parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.")
|
||||
parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.")
|
||||
parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.")
|
||||
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
|
||||
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
|
||||
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
|
||||
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
|
||||
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
|
||||
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
|
||||
@ -217,10 +218,21 @@ for t in threads:
|
||||
|
||||
reportStageEnd('create')
|
||||
|
||||
# Run the queries in randomized order, but preserve their indexes as specified
|
||||
# in the test XML. To avoid using too much time, limit the number of queries
|
||||
# we run per test.
|
||||
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries or len(test_queries)))
|
||||
# By default, test all queries.
|
||||
queries_to_run = range(0, len(test_queries))
|
||||
|
||||
if args.max_queries:
|
||||
# If specified, test a limited number of queries chosen at random.
|
||||
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries))
|
||||
|
||||
if args.queries_to_run:
|
||||
# Run the specified queries, with some sanity check.
|
||||
for i in args.queries_to_run:
|
||||
if i < 0 or i >= len(test_queries):
|
||||
print(f'There is no query no. "{i}" in this test, only [{0}-{len(test_queries) - 1}] are present')
|
||||
exit(1)
|
||||
|
||||
queries_to_run = args.queries_to_run
|
||||
|
||||
# Run test queries.
|
||||
for query_index in queries_to_run:
|
||||
|
@ -8,26 +8,8 @@ dpkg -i package_folder/clickhouse-server_*.deb
|
||||
dpkg -i package_folder/clickhouse-client_*.deb
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/
|
||||
fi
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
function start()
|
||||
{
|
||||
|
@ -48,28 +48,8 @@ mkdir -p /var/lib/clickhouse
|
||||
mkdir -p /var/log/clickhouse-server
|
||||
chmod 777 -R /var/log/clickhouse-server/
|
||||
|
||||
# Temorary way to keep CI green while moving dictionaries to separate directory
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
chmod 777 -R /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
|
||||
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load those if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
function start()
|
||||
{
|
||||
|
@ -21,9 +21,7 @@ RUN apt-get update -y \
|
||||
telnet \
|
||||
tree \
|
||||
unixodbc \
|
||||
wget \
|
||||
zookeeper \
|
||||
zookeeperd
|
||||
wget
|
||||
|
||||
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
|
||||
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
|
||||
|
@ -8,48 +8,9 @@ dpkg -i package_folder/clickhouse-server_*.deb
|
||||
dpkg -i package_folder/clickhouse-client_*.deb
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
|
||||
fi
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/
|
||||
fi
|
||||
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
service zookeeper start
|
||||
sleep 5
|
||||
service clickhouse-server start && sleep 5
|
||||
|
||||
if cat /usr/bin/clickhouse-test | grep -q -- "--use-skip-list"; then
|
||||
|
@ -66,9 +66,7 @@ RUN apt-get --allow-unauthenticated update -y \
|
||||
unixodbc \
|
||||
unixodbc-dev \
|
||||
wget \
|
||||
zlib1g-dev \
|
||||
zookeeper \
|
||||
zookeeperd
|
||||
zlib1g-dev
|
||||
|
||||
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
|
||||
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
|
||||
|
@ -8,48 +8,9 @@ dpkg -i package_folder/clickhouse-server_*.deb
|
||||
dpkg -i package_folder/clickhouse-client_*.deb
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
|
||||
fi
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/
|
||||
fi
|
||||
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
service zookeeper start
|
||||
sleep 5
|
||||
service clickhouse-server start && sleep 5
|
||||
|
||||
if cat /usr/bin/clickhouse-test | grep -q -- "--use-skip-list"; then
|
||||
|
@ -11,8 +11,6 @@ RUN apt-get update -y \
|
||||
tzdata \
|
||||
fakeroot \
|
||||
debhelper \
|
||||
zookeeper \
|
||||
zookeeperd \
|
||||
expect \
|
||||
python \
|
||||
python-lxml \
|
||||
|
@ -39,41 +39,8 @@ mkdir -p /var/log/clickhouse-server
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
chmod 777 -R /var/log/clickhouse-server/
|
||||
|
||||
# Temorary way to keep CI green while moving dictionaries to separate directory
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
chmod 777 -R /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
|
||||
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
service zookeeper start
|
||||
sleep 5
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
start_clickhouse
|
||||
|
||||
|
@ -39,9 +39,8 @@ function start()
|
||||
done
|
||||
}
|
||||
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
|
||||
|
||||
|
@ -35,7 +35,7 @@ RUN apt-get update \
|
||||
ENV TZ=Europe/Moscow
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
RUN pip3 install urllib3 testflows==1.6.42 docker-compose docker dicttoxml kazoo tzlocal
|
||||
RUN pip3 install urllib3 testflows==1.6.48 docker-compose docker dicttoxml kazoo tzlocal
|
||||
|
||||
ENV DOCKER_CHANNEL stable
|
||||
ENV DOCKER_VERSION 17.09.1-ce
|
||||
|
@ -165,22 +165,6 @@ Similar to GraphiteMergeTree, the Kafka engine supports extended configuration u
|
||||
|
||||
For a list of possible configuration options, see the [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). Use the underscore (`_`) instead of a dot in the ClickHouse configuration. For example, `check.crcs=true` will be `<check_crcs>true</check_crcs>`.
|
||||
|
||||
### Kerberos support {#kafka-kerberos-support}
|
||||
|
||||
To deal with Kerberos-aware Kafka, add `security_protocol` child element with `sasl_plaintext` value. It is enough if Kerberos ticket-granting ticket is obtained and cached by OS facilities.
|
||||
ClickHouse is able to maintain Kerberos credentials using a keytab file. Consider `sasl_kerberos_service_name`, `sasl_kerberos_keytab`, `sasl_kerberos_principal` and `sasl.kerberos.kinit.cmd` child elements.
|
||||
|
||||
Example:
|
||||
|
||||
``` xml
|
||||
<!-- Kerberos-aware Kafka -->
|
||||
<kafka>
|
||||
<security_protocol>SASL_PLAINTEXT</security_protocol>
|
||||
<sasl_kerberos_keytab>/home/kafkauser/kafkauser.keytab</sasl_kerberos_keytab>
|
||||
<sasl_kerberos_principal>kafkauser/kafkahost@EXAMPLE.COM</sasl_kerberos_principal>
|
||||
</kafka>
|
||||
```
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
|
||||
- `_topic` — Kafka topic.
|
||||
|
@ -38,7 +38,7 @@ toc_title: Adopters
|
||||
| <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
|
||||
| <a href="https://www.diva-e.com" class="favicon">Diva-e</a> | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
|
||||
| <a href="https://www.ecwid.com/" class="favicon">Ecwid</a> | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) |
|
||||
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | TBA | — | — | [Webinar, Sep 2020](https://altinity.com/webinarspage/2020/09/08/migrating-from-druid-to-next-gen-olap-on-clickhouse-ebays-experience) |
|
||||
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) |
|
||||
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
|
||||
| <a href="https://fastnetmon.com/" class="favicon">FastNetMon</a> | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) |
|
||||
| <a href="https://www.flipkart.com/" class="favicon">Flipkart</a> | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
|
||||
|
@ -3,7 +3,7 @@ machine_translated: true
|
||||
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
|
||||
---
|
||||
|
||||
# 在运营商 {#select-in-operators}
|
||||
# IN 操作符 {#select-in-operators}
|
||||
|
||||
该 `IN`, `NOT IN`, `GLOBAL IN`,和 `GLOBAL NOT IN` 运算符是单独复盖的,因为它们的功能相当丰富。
|
||||
|
||||
|
@ -3,7 +3,7 @@ set(CLICKHOUSE_SERVER_SOURCES
|
||||
Server.cpp
|
||||
)
|
||||
|
||||
if (OS_LINUX AND ARCH_AMD64)
|
||||
if (OS_LINUX)
|
||||
set (LINK_CONFIG_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_server_configs> -Wl,${NO_WHOLE_ARCHIVE}")
|
||||
endif ()
|
||||
|
||||
|
@ -338,6 +338,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
if (config().getBool("mlock_executable", false))
|
||||
{
|
||||
if (hasLinuxCapability(CAP_IPC_LOCK))
|
||||
{
|
||||
try
|
||||
{
|
||||
/// Get the memory area with (current) code segment.
|
||||
/// It's better to lock only the code segment instead of calling "mlockall",
|
||||
@ -350,6 +352,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
else
|
||||
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_WARNING(log, "Cannot mlock: {}", getCurrentExceptionMessage(false));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_IPC_LOCK capability, binary mlock will be disabled."
|
||||
|
10
release
10
release
@ -95,9 +95,9 @@ then
|
||||
exit 3
|
||||
fi
|
||||
|
||||
export DEB_CC=${DEB_CC=clang-6.0}
|
||||
export DEB_CXX=${DEB_CXX=clang++-6.0}
|
||||
EXTRAPACKAGES="$EXTRAPACKAGES clang-6.0 lld-6.0"
|
||||
export DEB_CC=${DEB_CC=clang-10}
|
||||
export DEB_CXX=${DEB_CXX=clang++-10}
|
||||
EXTRAPACKAGES="$EXTRAPACKAGES clang-10 lld-10"
|
||||
elif [[ $BUILD_TYPE == 'valgrind' ]]; then
|
||||
MALLOC_OPTS="-DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0"
|
||||
VERSION_POSTFIX+="+valgrind"
|
||||
@ -118,8 +118,8 @@ echo -e "\nCurrent version is $VERSION_STRING"
|
||||
if [ -z "$NO_BUILD" ] ; then
|
||||
gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
|
||||
if [ -z "$USE_PBUILDER" ] ; then
|
||||
DEB_CC=${DEB_CC:=`which gcc-9 gcc-8 gcc | head -n1`}
|
||||
DEB_CXX=${DEB_CXX:=`which g++-9 g++-8 g++ | head -n1`}
|
||||
DEB_CC=${DEB_CC:=`which gcc-10 gcc-9 gcc | head -n1`}
|
||||
DEB_CXX=${DEB_CXX:=`which gcc-10 g++-9 g++ | head -n1`}
|
||||
# Build (only binary packages).
|
||||
debuild --preserve-env -e PATH \
|
||||
-e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \
|
||||
|
@ -192,7 +192,7 @@ namespace
|
||||
}
|
||||
|
||||
|
||||
std::vector<AccessEntityPtr> parseUsers(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||
std::vector<AccessEntityPtr> parseUsers(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
config.keys("users", user_names);
|
||||
@ -200,16 +200,8 @@ namespace
|
||||
std::vector<AccessEntityPtr> users;
|
||||
users.reserve(user_names.size());
|
||||
for (const auto & user_name : user_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
users.push_back(parseUser(config, user_name));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse user " + backQuote(user_name));
|
||||
}
|
||||
}
|
||||
|
||||
return users;
|
||||
}
|
||||
|
||||
@ -256,12 +248,11 @@ namespace
|
||||
}
|
||||
|
||||
quota->to_roles.add(user_ids);
|
||||
|
||||
return quota;
|
||||
}
|
||||
|
||||
|
||||
std::vector<AccessEntityPtr> parseQuotas(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||
std::vector<AccessEntityPtr> parseQuotas(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
config.keys("users", user_names);
|
||||
@ -277,30 +268,22 @@ namespace
|
||||
std::vector<AccessEntityPtr> quotas;
|
||||
quotas.reserve(quota_names.size());
|
||||
for (const auto & quota_name : quota_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
auto it = quota_to_user_ids.find(quota_name);
|
||||
const std::vector<UUID> & quota_users = (it != quota_to_user_ids.end()) ? std::move(it->second) : std::vector<UUID>{};
|
||||
quotas.push_back(parseQuota(config, quota_name, quota_users));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse quota " + backQuote(quota_name));
|
||||
}
|
||||
}
|
||||
return quotas;
|
||||
}
|
||||
|
||||
|
||||
std::vector<AccessEntityPtr> parseRowPolicies(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||
std::vector<AccessEntityPtr> parseRowPolicies(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
std::map<std::pair<String /* database */, String /* table */>, std::unordered_map<String /* user */, String /* filter */>> all_filters_map;
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
|
||||
try
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
config.keys("users", user_names);
|
||||
|
||||
for (const String & user_name : user_names)
|
||||
{
|
||||
const String databases_config = "users." + user_name + ".databases";
|
||||
@ -343,11 +326,6 @@ namespace
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse row policies");
|
||||
}
|
||||
|
||||
std::vector<AccessEntityPtr> policies;
|
||||
for (auto & [database_and_table_name, user_to_filters] : all_filters_map)
|
||||
@ -450,23 +428,14 @@ namespace
|
||||
|
||||
std::vector<AccessEntityPtr> parseSettingsProfiles(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::function<void(const std::string_view &)> & check_setting_name_function,
|
||||
Poco::Logger * log)
|
||||
const std::function<void(const std::string_view &)> & check_setting_name_function)
|
||||
{
|
||||
std::vector<AccessEntityPtr> profiles;
|
||||
Poco::Util::AbstractConfiguration::Keys profile_names;
|
||||
config.keys("profiles", profile_names);
|
||||
for (const auto & profile_name : profile_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
profiles.push_back(parseSettingsProfile(config, profile_name, check_setting_name_function));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse profile " + backQuote(profile_name));
|
||||
}
|
||||
}
|
||||
|
||||
return profiles;
|
||||
}
|
||||
}
|
||||
@ -520,13 +489,13 @@ void UsersConfigAccessStorage::setConfig(const Poco::Util::AbstractConfiguration
|
||||
void UsersConfigAccessStorage::parseFromConfig(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
std::vector<std::pair<UUID, AccessEntityPtr>> all_entities;
|
||||
for (const auto & entity : parseUsers(config, getLogger()))
|
||||
for (const auto & entity : parseUsers(config))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
for (const auto & entity : parseQuotas(config, getLogger()))
|
||||
for (const auto & entity : parseQuotas(config))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
for (const auto & entity : parseRowPolicies(config, getLogger()))
|
||||
for (const auto & entity : parseRowPolicies(config))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
for (const auto & entity : parseSettingsProfiles(config, check_setting_name_function, getLogger()))
|
||||
for (const auto & entity : parseSettingsProfiles(config, check_setting_name_function))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
memory_storage.setAll(all_entities);
|
||||
}
|
||||
|
@ -138,6 +138,7 @@ void ConfigReloader::reloadIfNewer(bool force, bool throw_on_error, bool fallbac
|
||||
if (throw_on_error)
|
||||
throw;
|
||||
tryLogCurrentException(log, "Error updating configuration from '" + path + "' config.");
|
||||
return;
|
||||
}
|
||||
|
||||
LOG_DEBUG(log, "Loaded config '{}', performed update on configuration", path);
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/randomSeed.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <common/getThreadId.h>
|
||||
#include <common/types.h>
|
||||
|
||||
|
||||
@ -19,7 +20,7 @@ namespace DB
|
||||
DB::UInt64 randomSeed()
|
||||
{
|
||||
struct timespec times;
|
||||
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, ×))
|
||||
if (clock_gettime(CLOCK_MONOTONIC, ×))
|
||||
DB::throwFromErrno("Cannot clock_gettime.", DB::ErrorCodes::CANNOT_CLOCK_GETTIME);
|
||||
|
||||
/// Not cryptographically secure as time, pid and stack address can be predictable.
|
||||
@ -27,7 +28,7 @@ DB::UInt64 randomSeed()
|
||||
SipHash hash;
|
||||
hash.update(times.tv_nsec);
|
||||
hash.update(times.tv_sec);
|
||||
hash.update(getpid());
|
||||
hash.update(getThreadId());
|
||||
hash.update(×);
|
||||
return hash.get64();
|
||||
}
|
||||
|
@ -926,7 +926,7 @@ void CacheDictionary::update(UpdateUnitPtr & update_unit_ptr) const
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
update_unit_ptr->getPresentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callPresentIdHandler(id, cell_idx);
|
||||
/// mark corresponding id as found
|
||||
remaining_ids[id] = 1;
|
||||
}
|
||||
@ -988,9 +988,9 @@ void CacheDictionary::update(UpdateUnitPtr & update_unit_ptr) const
|
||||
if (was_default)
|
||||
cell.setDefault();
|
||||
if (was_default)
|
||||
update_unit_ptr->getAbsentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callAbsentIdHandler(id, cell_idx);
|
||||
else
|
||||
update_unit_ptr->getPresentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callPresentIdHandler(id, cell_idx);
|
||||
continue;
|
||||
}
|
||||
/// We don't have expired data for that `id` so all we can do is to rethrow `last_exception`.
|
||||
@ -1022,7 +1022,7 @@ void CacheDictionary::update(UpdateUnitPtr & update_unit_ptr) const
|
||||
setDefaultAttributeValue(attribute, cell_idx);
|
||||
|
||||
/// inform caller that the cell has not been found
|
||||
update_unit_ptr->getAbsentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callAbsentIdHandler(id, cell_idx);
|
||||
}
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedMiss, not_found_num);
|
||||
|
@ -399,16 +399,18 @@ private:
|
||||
absent_id_handler([](Key, size_t){}){}
|
||||
|
||||
|
||||
PresentIdHandler getPresentIdHandler()
|
||||
void callPresentIdHandler(Key key, size_t cell_idx)
|
||||
{
|
||||
std::lock_guard lock(callback_mutex);
|
||||
return can_use_callback ? present_id_handler : PresentIdHandler{};
|
||||
if (can_use_callback)
|
||||
present_id_handler(key, cell_idx);
|
||||
}
|
||||
|
||||
AbsentIdHandler getAbsentIdHandler()
|
||||
void callAbsentIdHandler(Key key, size_t cell_idx)
|
||||
{
|
||||
std::lock_guard lock(callback_mutex);
|
||||
return can_use_callback ? absent_id_handler : AbsentIdHandler{};
|
||||
if (can_use_callback)
|
||||
absent_id_handler(key, cell_idx);
|
||||
}
|
||||
|
||||
std::vector<Key> requested_ids;
|
||||
|
@ -148,7 +148,9 @@ void CacheDictionary::getItemsNumberImpl(
|
||||
std::begin(cache_expired_ids), std::end(cache_expired_ids),
|
||||
std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
|
||||
|
||||
auto on_cell_updated = [&] (const auto id, const auto cell_idx)
|
||||
auto on_cell_updated =
|
||||
[&attribute_array, &cache_not_found_ids, &cache_expired_ids, &out]
|
||||
(const auto id, const auto cell_idx)
|
||||
{
|
||||
const auto attribute_value = attribute_array[cell_idx];
|
||||
|
||||
|
@ -80,7 +80,7 @@ public:
|
||||
DataTypePtr getReturnType(const ColumnsWithTypeAndName &) const override { return {}; } // Not used
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return true; }
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
|
||||
|
||||
bool isVariadic() const override { return true; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
|
@ -401,7 +401,7 @@ void ProcessList::killAllQueries()
|
||||
|
||||
QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_events, bool get_settings) const
|
||||
{
|
||||
QueryStatusInfo res;
|
||||
QueryStatusInfo res{};
|
||||
|
||||
res.query = query;
|
||||
res.client_info = client_info;
|
||||
|
8
tests/config/README.md
Normal file
8
tests/config/README.md
Normal file
@ -0,0 +1,8 @@
|
||||
# ClickHouse configs for test environment
|
||||
|
||||
## How to use
|
||||
CI use these configs in all checks installing them with `install.sh` script. If you want to run all tests from `tests/queries/0_stateless` and `test/queries/1_stateful` on your local machine you have to set up configs from this directory for your `clickhouse-server`. The most simple way is to install them using `install.sh` script. Other option is just copy files into your clickhouse config directory.
|
||||
|
||||
## How to add new config
|
||||
|
||||
Just place file `.xml` with new config into appropriate directory and add `ln` command into `install.sh` script. After that CI will use this config in all tests runs.
|
54
tests/config/install.sh
Executable file
54
tests/config/install.sh
Executable file
@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
|
||||
# script allows to install configs for clickhouse server and clients required
|
||||
# for testing (stateless and stateful tests)
|
||||
|
||||
set -x -e
|
||||
|
||||
DEST_SERVER_PATH="${1:-/etc/clickhouse-server}"
|
||||
DEST_CLIENT_PATH="${2:-/etc/clickhouse-client}"
|
||||
SRC_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
|
||||
|
||||
echo "Going to install test configs from $SRC_PATH into $DEST_SERVER_PATH"
|
||||
|
||||
mkdir -p $DEST_SERVER_PATH/config.d/
|
||||
mkdir -p $DEST_SERVER_PATH/users.d/
|
||||
mkdir -p $DEST_CLIENT_PATH
|
||||
|
||||
ln -s $SRC_PATH/config.d/zookeeper.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/part_log.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/metric_log.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/disks.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/clusters.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/graphite.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/
|
||||
ln -s $SRC_PATH/users.d/readonly.xml $DEST_SERVER_PATH/users.d/
|
||||
ln -s $SRC_PATH/users.d/access_management.xml $DEST_SERVER_PATH/users.d/
|
||||
|
||||
ln -s $SRC_PATH/ints_dictionary.xml $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/strings_dictionary.xml $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/decimals_dictionary.xml $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/executable_dictionary.xml $DEST_SERVER_PATH/
|
||||
|
||||
ln -s $SRC_PATH/server.key $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/server.crt $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/dhparam.pem $DEST_SERVER_PATH/
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
$SRC_PATH/config.d/query_masking_rules.xml $DEST_SERVER_PATH/config.d/
|
||||
|
||||
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
|
||||
ln -s $SRC_PATH/config.d/polymorphic_parts.xml $DEST_SERVER_PATH/config.d/
|
||||
fi
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s $SRC_PATH/config.d/database_atomic_configd.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/users.d/database_atomic_usersd.xml $DEST_SERVER_PATH/users.d/
|
||||
fi
|
||||
|
||||
ln -sf $SRC_PATH/client_config.xml $DEST_CLIENT_PATH/config.xml
|
@ -45,6 +45,7 @@ def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
|
||||
f.write("=".join([var, value]) + "\n")
|
||||
return full_path
|
||||
|
||||
|
||||
def subprocess_check_call(args):
|
||||
# Uncomment for debugging
|
||||
# print('run:', ' ' . join(args))
|
||||
@ -124,7 +125,6 @@ class ClickHouseCluster:
|
||||
self.base_zookeeper_cmd = None
|
||||
self.base_mysql_cmd = []
|
||||
self.base_kafka_cmd = []
|
||||
self.base_kerberized_kafka_cmd = []
|
||||
self.base_rabbitmq_cmd = []
|
||||
self.base_cassandra_cmd = []
|
||||
self.pre_zookeeper_commands = []
|
||||
@ -133,7 +133,6 @@ class ClickHouseCluster:
|
||||
self.with_mysql = False
|
||||
self.with_postgres = False
|
||||
self.with_kafka = False
|
||||
self.with_kerberized_kafka = False
|
||||
self.with_rabbitmq = False
|
||||
self.with_odbc_drivers = False
|
||||
self.with_hdfs = False
|
||||
@ -170,7 +169,7 @@ class ClickHouseCluster:
|
||||
|
||||
def add_instance(self, name, base_config_dir=None, main_configs=None, user_configs=None, dictionaries=None,
|
||||
macros=None,
|
||||
with_zookeeper=False, with_mysql=False, with_kafka=False, with_kerberized_kafka=False, with_rabbitmq=False,
|
||||
with_zookeeper=False, with_mysql=False, with_kafka=False, with_rabbitmq=False,
|
||||
clickhouse_path_dir=None,
|
||||
with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False,
|
||||
with_redis=False, with_minio=False, with_cassandra=False,
|
||||
@ -208,7 +207,6 @@ class ClickHouseCluster:
|
||||
zookeeper_config_path=self.zookeeper_config_path,
|
||||
with_mysql=with_mysql,
|
||||
with_kafka=with_kafka,
|
||||
with_kerberized_kafka=with_kerberized_kafka,
|
||||
with_rabbitmq=with_rabbitmq,
|
||||
with_mongo=with_mongo,
|
||||
with_redis=with_redis,
|
||||
@ -292,13 +290,6 @@ class ClickHouseCluster:
|
||||
p.join(docker_compose_yml_dir, 'docker_compose_kafka.yml')]
|
||||
cmds.append(self.base_kafka_cmd)
|
||||
|
||||
if with_kerberized_kafka and not self.with_kerberized_kafka:
|
||||
self.with_kerberized_kafka = True
|
||||
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_kafka.yml')])
|
||||
self.base_kerberized_kafka_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name',
|
||||
self.project_name, '--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_kafka.yml')]
|
||||
cmds.append(self.base_kerberized_kafka_cmd)
|
||||
|
||||
if with_rabbitmq and not self.with_rabbitmq:
|
||||
self.with_rabbitmq = True
|
||||
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_rabbitmq.yml')])
|
||||
@ -495,8 +486,8 @@ class ClickHouseCluster:
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
try:
|
||||
connection.database_names()
|
||||
print "Connected to Mongo dbs:", connection.database_names()
|
||||
connection.list_database_names()
|
||||
print "Connected to Mongo dbs:", connection.list_database_names()
|
||||
return
|
||||
except Exception as ex:
|
||||
print "Can't connect to Mongo " + str(ex)
|
||||
@ -617,11 +608,6 @@ class ClickHouseCluster:
|
||||
self.kafka_docker_id = self.get_instance_docker_id('kafka1')
|
||||
self.wait_schema_registry_to_start(120)
|
||||
|
||||
if self.with_kerberized_kafka and self.base_kerberized_kafka_cmd:
|
||||
env = os.environ.copy()
|
||||
env['KERBERIZED_KAFKA_DIR'] = instance.path + '/'
|
||||
subprocess.check_call(self.base_kerberized_kafka_cmd + common_opts + ['--renew-anon-volumes'], env=env)
|
||||
self.kerberized_kafka_docker_id = self.get_instance_docker_id('kerberized_kafka1')
|
||||
if self.with_rabbitmq and self.base_rabbitmq_cmd:
|
||||
subprocess_check_call(self.base_rabbitmq_cmd + common_opts + ['--renew-anon-volumes'])
|
||||
self.rabbitmq_docker_id = self.get_instance_docker_id('rabbitmq1')
|
||||
@ -802,12 +788,9 @@ services:
|
||||
- {instance_config_dir}:/etc/clickhouse-server/
|
||||
- {db_dir}:/var/lib/clickhouse/
|
||||
- {logs_dir}:/var/log/clickhouse-server/
|
||||
- /etc/passwd:/etc/passwd:ro
|
||||
{binary_volume}
|
||||
{odbc_bridge_volume}
|
||||
{odbc_ini_path}
|
||||
{keytab_path}
|
||||
{krb5_conf}
|
||||
entrypoint: {entrypoint_cmd}
|
||||
tmpfs: {tmpfs}
|
||||
cap_add:
|
||||
@ -837,7 +820,7 @@ class ClickHouseInstance:
|
||||
def __init__(
|
||||
self, cluster, base_path, name, base_config_dir, custom_main_configs, custom_user_configs,
|
||||
custom_dictionaries,
|
||||
macros, with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_kerberized_kafka, with_rabbitmq, with_mongo,
|
||||
macros, with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_rabbitmq, with_mongo,
|
||||
with_redis, with_minio,
|
||||
with_cassandra, server_bin_path, odbc_bridge_bin_path, clickhouse_path_dir, with_odbc_drivers,
|
||||
hostname=None, env_variables=None,
|
||||
@ -856,7 +839,6 @@ class ClickHouseInstance:
|
||||
self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs]
|
||||
self.custom_dictionaries_paths = [p.abspath(p.join(base_path, c)) for c in custom_dictionaries]
|
||||
self.clickhouse_path_dir = p.abspath(p.join(base_path, clickhouse_path_dir)) if clickhouse_path_dir else None
|
||||
self.kerberos_secrets_dir = p.abspath(p.join(base_path, 'secrets'))
|
||||
self.macros = macros if macros is not None else {}
|
||||
self.with_zookeeper = with_zookeeper
|
||||
self.zookeeper_config_path = zookeeper_config_path
|
||||
@ -866,7 +848,6 @@ class ClickHouseInstance:
|
||||
|
||||
self.with_mysql = with_mysql
|
||||
self.with_kafka = with_kafka
|
||||
self.with_kerberized_kafka = with_kerberized_kafka
|
||||
self.with_rabbitmq = with_rabbitmq
|
||||
self.with_mongo = with_mongo
|
||||
self.with_redis = with_redis
|
||||
@ -882,13 +863,6 @@ class ClickHouseInstance:
|
||||
else:
|
||||
self.odbc_ini_path = ""
|
||||
|
||||
if with_kerberized_kafka:
|
||||
self.keytab_path = '- ' + os.path.dirname(self.docker_compose_path) + "/secrets:/tmp/keytab"
|
||||
self.krb5_conf = '- ' + os.path.dirname(self.docker_compose_path) + "/secrets/krb.conf:/etc/krb5.conf:ro"
|
||||
else:
|
||||
self.keytab_path = ""
|
||||
self.krb5_conf = ""
|
||||
|
||||
self.docker_client = None
|
||||
self.ip_address = None
|
||||
self.client = None
|
||||
@ -1218,9 +1192,6 @@ class ClickHouseInstance:
|
||||
if self.with_zookeeper:
|
||||
shutil.copy(self.zookeeper_config_path, conf_d_dir)
|
||||
|
||||
if self.with_kerberized_kafka:
|
||||
shutil.copytree(self.kerberos_secrets_dir, p.abspath(p.join(self.path, 'secrets')))
|
||||
|
||||
# Copy config.d configs
|
||||
print "Copy custom test config files {} to {}".format(self.custom_main_config_paths, self.config_d_dir)
|
||||
for path in self.custom_main_config_paths:
|
||||
@ -1256,9 +1227,6 @@ class ClickHouseInstance:
|
||||
depends_on.append("kafka1")
|
||||
depends_on.append("schema-registry")
|
||||
|
||||
if self.with_kerberized_kafka:
|
||||
depends_on.append("kerberized_kafka1")
|
||||
|
||||
if self.with_rabbitmq:
|
||||
depends_on.append("rabbitmq1")
|
||||
|
||||
@ -1322,8 +1290,6 @@ class ClickHouseInstance:
|
||||
user=os.getuid(),
|
||||
env_file=env_file,
|
||||
odbc_ini_path=odbc_ini_path,
|
||||
keytab_path=self.keytab_path,
|
||||
krb5_conf=self.krb5_conf,
|
||||
entrypoint_cmd=entrypoint_cmd,
|
||||
networks=networks,
|
||||
app_net=app_net,
|
||||
|
@ -333,16 +333,16 @@ class _SourceExecutableBase(ExternalSource):
|
||||
user='root')
|
||||
|
||||
|
||||
class SourceExecutableCache(_SourceExecutableBase):
|
||||
class SourceExecutableHashed(_SourceExecutableBase):
|
||||
|
||||
def _get_cmd(self, path):
|
||||
return "cat {}".format(path)
|
||||
|
||||
def compatible_with_layout(self, layout):
|
||||
return 'cache' not in layout.name
|
||||
return 'hashed' in layout.name
|
||||
|
||||
|
||||
class SourceExecutableHashed(_SourceExecutableBase):
|
||||
class SourceExecutableCache(_SourceExecutableBase):
|
||||
|
||||
def _get_cmd(self, path):
|
||||
return "cat - >/dev/null;cat {}".format(path)
|
||||
|
@ -60,3 +60,19 @@ def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_tim
|
||||
if expectation_tsv != val:
|
||||
raise AssertionError("'{}' != '{}'\n{}".format(expectation_tsv, val, '\n'.join(
|
||||
expectation_tsv.diff(val, n1="expectation", n2="query"))))
|
||||
|
||||
def assert_logs_contain(instance, substring):
|
||||
if not instance.contains_in_log(substring):
|
||||
raise AssertionError("'{}' not found in logs".format(substring))
|
||||
|
||||
def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_time=0.5):
|
||||
for i in xrange(retry_count):
|
||||
try:
|
||||
if instance.contains_in_log(substring):
|
||||
break
|
||||
time.sleep(sleep_time)
|
||||
except Exception as ex:
|
||||
print "contains_in_log_with_retry retry {} exception {}".format(i + 1, ex)
|
||||
time.sleep(sleep_time)
|
||||
else:
|
||||
raise AssertionError("'{}' not found in logs".format(substring))
|
||||
|
@ -156,8 +156,6 @@ if __name__ == "__main__":
|
||||
env_tags += "-e {}={} ".format("DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", tag)
|
||||
elif image == "yandex/clickhouse-integration-test":
|
||||
env_tags += "-e {}={}".format("DOCKER_BASE_TAG", tag)
|
||||
elif image == "yandex/clickhouse-kerberos-kdc":
|
||||
env_tags += "-e {}={}".format("DOCKER_KERBEROS_KDC_TAG", tag)
|
||||
else:
|
||||
logging.info("Unknown image {}".format(image))
|
||||
|
||||
|
@ -1,3 +0,0 @@
|
||||
<yandex>
|
||||
<text_log/>
|
||||
</yandex>
|
@ -6,13 +6,5 @@
|
||||
<custom_c>Float64_-43.25e-1</custom_c>
|
||||
<custom_d>'some text'</custom_d>
|
||||
</default>
|
||||
|
||||
<profile_with_unknown_setting>
|
||||
<x>1</x>
|
||||
</profile_with_unknown_setting>
|
||||
|
||||
<profile_illformed_setting>
|
||||
<custom_f>1</custom_f>
|
||||
</profile_illformed_setting>
|
||||
</profiles>
|
||||
</yandex>
|
@ -0,0 +1,7 @@
|
||||
<yandex>
|
||||
<profiles>
|
||||
<default>
|
||||
<custom_f>1</custom_f>
|
||||
</default>
|
||||
</profiles>
|
||||
</yandex>
|
@ -1,9 +1,10 @@
|
||||
import pytest
|
||||
import os
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
node = cluster.add_instance('node', main_configs=["configs/config.d/text_log.xml"],
|
||||
user_configs=["configs/users.d/custom_settings.xml"])
|
||||
node = cluster.add_instance('node')
|
||||
|
||||
|
||||
@pytest.fixture(scope="module", autouse=True)
|
||||
@ -16,28 +17,17 @@ def started_cluster():
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def test():
|
||||
def test_custom_settings():
|
||||
node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/custom_settings.xml"), '/etc/clickhouse-server/users.d/z.xml')
|
||||
node.query("SYSTEM RELOAD CONFIG")
|
||||
|
||||
assert node.query("SELECT getSetting('custom_a')") == "-5\n"
|
||||
assert node.query("SELECT getSetting('custom_b')") == "10000000000\n"
|
||||
assert node.query("SELECT getSetting('custom_c')") == "-4.325\n"
|
||||
assert node.query("SELECT getSetting('custom_d')") == "some text\n"
|
||||
|
||||
assert "custom_a = -5, custom_b = 10000000000, custom_c = -4.325, custom_d = \\'some text\\'" \
|
||||
in node.query("SHOW CREATE SETTINGS PROFILE default")
|
||||
|
||||
assert "no settings profile" in node.query_and_get_error(
|
||||
"SHOW CREATE SETTINGS PROFILE profile_with_unknown_setting")
|
||||
assert "no settings profile" in node.query_and_get_error("SHOW CREATE SETTINGS PROFILE profile_illformed_setting")
|
||||
|
||||
|
||||
def test_invalid_settings():
|
||||
node.query("SYSTEM RELOAD CONFIG")
|
||||
node.query("SYSTEM FLUSH LOGS")
|
||||
|
||||
assert node.query("SELECT COUNT() FROM system.text_log WHERE"
|
||||
" message LIKE '%Could not parse profile `profile_illformed_setting`%'"
|
||||
" AND message LIKE '%Couldn\\'t restore Field from dump%'") == "1\n"
|
||||
|
||||
assert node.query("SELECT COUNT() FROM system.text_log WHERE"
|
||||
" message LIKE '%Could not parse profile `profile_with_unknown_setting`%'"
|
||||
" AND message LIKE '%Setting x is neither a builtin setting nor started with the prefix \\'custom_\\'%'") == "1\n"
|
||||
def test_illformed_setting():
|
||||
node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/illformed_setting.xml"), '/etc/clickhouse-server/users.d/z.xml')
|
||||
error_message = "Couldn't restore Field from dump: 1"
|
||||
assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG")
|
||||
|
@ -1,346 +0,0 @@
|
||||
import math
|
||||
import os
|
||||
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceMongo, SourceMongoURI, SourceHTTP, SourceHTTPS, SourceCassandra
|
||||
from helpers.external_sources import SourceMySQL, SourceClickHouse, SourceFile, SourceExecutableCache, \
|
||||
SourceExecutableHashed
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
dict_configs_path = os.path.join(SCRIPT_DIR, 'configs/dictionaries')
|
||||
|
||||
FIELDS = {
|
||||
"simple": [
|
||||
Field("KeyField", 'UInt64', is_key=True, default_value_for_get=9999999),
|
||||
Field("UInt8_", 'UInt8', default_value_for_get=55),
|
||||
Field("UInt16_", 'UInt16', default_value_for_get=66),
|
||||
Field("UInt32_", 'UInt32', default_value_for_get=77),
|
||||
Field("UInt64_", 'UInt64', default_value_for_get=88),
|
||||
Field("Int8_", 'Int8', default_value_for_get=-55),
|
||||
Field("Int16_", 'Int16', default_value_for_get=-66),
|
||||
Field("Int32_", 'Int32', default_value_for_get=-77),
|
||||
Field("Int64_", 'Int64', default_value_for_get=-88),
|
||||
Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
|
||||
Field("Date_", 'Date', default_value_for_get='2018-12-30'),
|
||||
Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
|
||||
Field("String_", 'String', default_value_for_get='hi'),
|
||||
Field("Float32_", 'Float32', default_value_for_get=555.11),
|
||||
Field("Float64_", 'Float64', default_value_for_get=777.11),
|
||||
Field("ParentKeyField", "UInt64", default_value_for_get=444, hierarchical=True)
|
||||
],
|
||||
"complex": [
|
||||
Field("KeyField1", 'UInt64', is_key=True, default_value_for_get=9999999),
|
||||
Field("KeyField2", 'String', is_key=True, default_value_for_get='xxxxxxxxx'),
|
||||
Field("UInt8_", 'UInt8', default_value_for_get=55),
|
||||
Field("UInt16_", 'UInt16', default_value_for_get=66),
|
||||
Field("UInt32_", 'UInt32', default_value_for_get=77),
|
||||
Field("UInt64_", 'UInt64', default_value_for_get=88),
|
||||
Field("Int8_", 'Int8', default_value_for_get=-55),
|
||||
Field("Int16_", 'Int16', default_value_for_get=-66),
|
||||
Field("Int32_", 'Int32', default_value_for_get=-77),
|
||||
Field("Int64_", 'Int64', default_value_for_get=-88),
|
||||
Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
|
||||
Field("Date_", 'Date', default_value_for_get='2018-12-30'),
|
||||
Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
|
||||
Field("String_", 'String', default_value_for_get='hi'),
|
||||
Field("Float32_", 'Float32', default_value_for_get=555.11),
|
||||
Field("Float64_", 'Float64', default_value_for_get=777.11),
|
||||
],
|
||||
"ranged": [
|
||||
Field("KeyField1", 'UInt64', is_key=True),
|
||||
Field("KeyField2", 'Date', is_range_key=True),
|
||||
Field("StartDate", 'Date', range_hash_type='min'),
|
||||
Field("EndDate", 'Date', range_hash_type='max'),
|
||||
Field("UInt8_", 'UInt8', default_value_for_get=55),
|
||||
Field("UInt16_", 'UInt16', default_value_for_get=66),
|
||||
Field("UInt32_", 'UInt32', default_value_for_get=77),
|
||||
Field("UInt64_", 'UInt64', default_value_for_get=88),
|
||||
Field("Int8_", 'Int8', default_value_for_get=-55),
|
||||
Field("Int16_", 'Int16', default_value_for_get=-66),
|
||||
Field("Int32_", 'Int32', default_value_for_get=-77),
|
||||
Field("Int64_", 'Int64', default_value_for_get=-88),
|
||||
Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
|
||||
Field("Date_", 'Date', default_value_for_get='2018-12-30'),
|
||||
Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
|
||||
Field("String_", 'String', default_value_for_get='hi'),
|
||||
Field("Float32_", 'Float32', default_value_for_get=555.11),
|
||||
Field("Float64_", 'Float64', default_value_for_get=777.11),
|
||||
]
|
||||
}
|
||||
|
||||
VALUES = {
|
||||
"simple": [
|
||||
[1, 22, 333, 4444, 55555, -6, -77,
|
||||
-888, -999, '550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25', 'hello', 22.543, 3332154213.4, 0],
|
||||
[2, 3, 4, 5, 6, -7, -8,
|
||||
-9, -10, '550e8400-e29b-41d4-a716-446655440002',
|
||||
'1978-06-28', '1986-02-28 23:42:25', 'hello', 21.543, 3222154213.4, 1]
|
||||
],
|
||||
"complex": [
|
||||
[1, 'world', 22, 333, 4444, 55555, -6,
|
||||
-77, -888, -999, '550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25',
|
||||
'hello', 22.543, 3332154213.4],
|
||||
[2, 'qwerty2', 52, 2345, 6544, 9191991, -2,
|
||||
-717, -81818, -92929, '550e8400-e29b-41d4-a716-446655440007',
|
||||
'1975-09-28', '2000-02-28 23:33:24',
|
||||
'my', 255.543, 3332221.44]
|
||||
|
||||
],
|
||||
"ranged": [
|
||||
[1, '2019-02-10', '2019-02-01', '2019-02-28',
|
||||
22, 333, 4444, 55555, -6, -77, -888, -999,
|
||||
'550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25', 'hello',
|
||||
22.543, 3332154213.4],
|
||||
[2, '2019-04-10', '2019-04-01', '2019-04-28',
|
||||
11, 3223, 41444, 52515, -65, -747, -8388, -9099,
|
||||
'550e8400-e29b-41d4-a716-446655440004',
|
||||
'1973-06-29', '2002-02-28 23:23:25', '!!!!',
|
||||
32.543, 3332543.4]
|
||||
]
|
||||
}
|
||||
|
||||
LAYOUTS = [
|
||||
Layout("flat"),
|
||||
Layout("hashed"),
|
||||
Layout("cache"),
|
||||
Layout("complex_key_hashed"),
|
||||
Layout("complex_key_cache"),
|
||||
Layout("range_hashed"),
|
||||
Layout("direct"),
|
||||
Layout("complex_key_direct")
|
||||
]
|
||||
|
||||
SOURCES = [
|
||||
SourceCassandra("Cassandra", "localhost", "9043", "cassandra1", "9042", "", ""),
|
||||
SourceMongo("MongoDB", "localhost", "27018", "mongo1", "27017", "root", "clickhouse"),
|
||||
SourceMongoURI("MongoDB_URI", "localhost", "27018", "mongo1", "27017", "root", "clickhouse"),
|
||||
SourceMySQL("MySQL", "localhost", "3308", "mysql1", "3306", "root", "clickhouse"),
|
||||
SourceClickHouse("RemoteClickHouse", "localhost", "9000", "clickhouse1", "9000", "default", ""),
|
||||
SourceClickHouse("LocalClickHouse", "localhost", "9000", "node", "9000", "default", ""),
|
||||
SourceFile("File", "localhost", "9000", "node", "9000", "", ""),
|
||||
SourceExecutableHashed("ExecutableHashed", "localhost", "9000", "node", "9000", "", ""),
|
||||
SourceExecutableCache("ExecutableCache", "localhost", "9000", "node", "9000", "", ""),
|
||||
SourceHTTP("SourceHTTP", "localhost", "9000", "clickhouse1", "9000", "", ""),
|
||||
SourceHTTPS("SourceHTTPS", "localhost", "9000", "clickhouse1", "9000", "", ""),
|
||||
]
|
||||
|
||||
DICTIONARIES = []
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
|
||||
|
||||
def get_dict(source, layout, fields, suffix_name=''):
|
||||
global dict_configs_path
|
||||
|
||||
structure = DictionaryStructure(layout, fields)
|
||||
dict_name = source.name + "_" + layout.name + '_' + suffix_name
|
||||
dict_path = os.path.join(dict_configs_path, dict_name + '.xml')
|
||||
dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name, fields)
|
||||
dictionary.generate_config()
|
||||
return dictionary
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global DICTIONARIES
|
||||
global cluster
|
||||
global node
|
||||
global dict_configs_path
|
||||
|
||||
for f in os.listdir(dict_configs_path):
|
||||
os.remove(os.path.join(dict_configs_path, f))
|
||||
|
||||
for layout in LAYOUTS:
|
||||
for source in SOURCES:
|
||||
if source.compatible_with_layout(layout):
|
||||
DICTIONARIES.append(get_dict(source, layout, FIELDS[layout.layout_type]))
|
||||
else:
|
||||
print "Source", source.name, "incompatible with layout", layout.name
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
cluster.add_instance('clickhouse1', main_configs=main_configs)
|
||||
|
||||
dictionaries = []
|
||||
for fname in os.listdir(dict_configs_path):
|
||||
dictionaries.append(os.path.join(dict_configs_path, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mysql=True,
|
||||
with_mongo=True, with_redis=True, with_cassandra=True)
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
for dictionary in DICTIONARIES:
|
||||
print "Preparing", dictionary.name
|
||||
dictionary.prepare_source(cluster)
|
||||
print "Prepared"
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
|
||||
def get_dictionaries(fold, total_folds, all_dicts):
|
||||
chunk_len = int(math.ceil(len(all_dicts) / float(total_folds)))
|
||||
if chunk_len * fold >= len(all_dicts):
|
||||
return []
|
||||
return all_dicts[fold * chunk_len: (fold + 1) * chunk_len]
|
||||
|
||||
|
||||
def remove_mysql_dicts():
|
||||
"""
|
||||
We have false-positive race condition in our openSSL version.
|
||||
MySQL dictionary use OpenSSL, so to prevent known failure we
|
||||
disable tests for these dictionaries.
|
||||
|
||||
Read of size 8 at 0x7b3c00005dd0 by thread T61 (mutexes: write M1010349240585225536):
|
||||
#0 EVP_CIPHER_mode <null> (clickhouse+0x13b2223b)
|
||||
#1 do_ssl3_write <null> (clickhouse+0x13a137bc)
|
||||
#2 ssl3_write_bytes <null> (clickhouse+0x13a12387)
|
||||
#3 ssl3_write <null> (clickhouse+0x139db0e6)
|
||||
#4 ssl_write_internal <null> (clickhouse+0x139eddce)
|
||||
#5 SSL_write <null> (clickhouse+0x139edf20)
|
||||
#6 ma_tls_write <null> (clickhouse+0x139c7557)
|
||||
#7 ma_pvio_tls_write <null> (clickhouse+0x139a8f59)
|
||||
#8 ma_pvio_write <null> (clickhouse+0x139a8488)
|
||||
#9 ma_net_real_write <null> (clickhouse+0x139a4e2c)
|
||||
#10 ma_net_write_command <null> (clickhouse+0x139a546d)
|
||||
#11 mthd_my_send_cmd <null> (clickhouse+0x13992546)
|
||||
#12 mysql_close_slow_part <null> (clickhouse+0x13999afd)
|
||||
#13 mysql_close <null> (clickhouse+0x13999071)
|
||||
#14 mysqlxx::Connection::~Connection() <null> (clickhouse+0x1370f814)
|
||||
#15 mysqlxx::Pool::~Pool() <null> (clickhouse+0x13715a7b)
|
||||
|
||||
TODO remove this when open ssl will be fixed or thread sanitizer will be suppressed
|
||||
"""
|
||||
|
||||
# global DICTIONARIES
|
||||
# DICTIONARIES = [d for d in DICTIONARIES if not d.name.startswith("MySQL")]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("fold", list(range(10)))
|
||||
def test_simple_dictionaries(started_cluster, fold):
|
||||
if node.is_built_with_thread_sanitizer():
|
||||
remove_mysql_dicts()
|
||||
|
||||
fields = FIELDS["simple"]
|
||||
values = VALUES["simple"]
|
||||
data = [Row(fields, vals) for vals in values]
|
||||
|
||||
all_simple_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "simple"]
|
||||
simple_dicts = get_dictionaries(fold, 10, all_simple_dicts)
|
||||
|
||||
print "Length of dicts:", len(simple_dicts)
|
||||
for dct in simple_dicts:
|
||||
dct.load_data(data)
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
|
||||
queries_with_answers = []
|
||||
for dct in simple_dicts:
|
||||
for row in data:
|
||||
for field in fields:
|
||||
if not field.is_key:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query in dct.get_select_has_queries(field, row):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query in dct.get_select_get_or_default_queries(field, row):
|
||||
queries_with_answers.append((query, field.default_value_for_get))
|
||||
for query in dct.get_hierarchical_queries(data[0]):
|
||||
queries_with_answers.append((query, [1]))
|
||||
|
||||
for query in dct.get_hierarchical_queries(data[1]):
|
||||
queries_with_answers.append((query, [2, 1]))
|
||||
|
||||
for query in dct.get_is_in_queries(data[0], data[1]):
|
||||
queries_with_answers.append((query, 0))
|
||||
|
||||
for query in dct.get_is_in_queries(data[1], data[0]):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
print query
|
||||
if isinstance(answer, list):
|
||||
answer = str(answer).replace(' ', '')
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("fold", list(range(10)))
|
||||
def test_complex_dictionaries(started_cluster, fold):
|
||||
if node.is_built_with_thread_sanitizer():
|
||||
remove_mysql_dicts()
|
||||
|
||||
fields = FIELDS["complex"]
|
||||
values = VALUES["complex"]
|
||||
data = [Row(fields, vals) for vals in values]
|
||||
|
||||
all_complex_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "complex"]
|
||||
complex_dicts = get_dictionaries(fold, 10, all_complex_dicts)
|
||||
|
||||
for dct in complex_dicts:
|
||||
dct.load_data(data)
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
|
||||
queries_with_answers = []
|
||||
for dct in complex_dicts:
|
||||
for row in data:
|
||||
for field in fields:
|
||||
if not field.is_key:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query in dct.get_select_has_queries(field, row):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query in dct.get_select_get_or_default_queries(field, row):
|
||||
queries_with_answers.append((query, field.default_value_for_get))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
print query
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
|
||||
@pytest.mark.parametrize("fold", list(range(10)))
|
||||
def test_ranged_dictionaries(started_cluster, fold):
|
||||
if node.is_built_with_thread_sanitizer():
|
||||
remove_mysql_dicts()
|
||||
|
||||
fields = FIELDS["ranged"]
|
||||
values = VALUES["ranged"]
|
||||
data = [Row(fields, vals) for vals in values]
|
||||
|
||||
all_ranged_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "ranged"]
|
||||
ranged_dicts = get_dictionaries(fold, 10, all_ranged_dicts)
|
||||
|
||||
for dct in ranged_dicts:
|
||||
dct.load_data(data)
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
|
||||
queries_with_answers = []
|
||||
for dct in ranged_dicts:
|
||||
for row in data:
|
||||
for field in fields:
|
||||
if not field.is_key and not field.is_range:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
print query
|
||||
assert node.query(query) == str(answer) + '\n'
|
@ -0,0 +1,239 @@
|
||||
import os
|
||||
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
|
||||
KEY_FIELDS = {
|
||||
"simple": [
|
||||
Field("KeyField", 'UInt64', is_key=True, default_value_for_get=9999999)
|
||||
],
|
||||
"complex": [
|
||||
Field("KeyField1", 'UInt64', is_key=True, default_value_for_get=9999999),
|
||||
Field("KeyField2", 'String', is_key=True, default_value_for_get='xxxxxxxxx')
|
||||
],
|
||||
"ranged": [
|
||||
Field("KeyField1", 'UInt64', is_key=True),
|
||||
Field("KeyField2", 'Date', is_range_key=True)
|
||||
]
|
||||
}
|
||||
|
||||
START_FIELDS = {
|
||||
"simple": [],
|
||||
"complex": [],
|
||||
"ranged" : [
|
||||
Field("StartDate", 'Date', range_hash_type='min'),
|
||||
Field("EndDate", 'Date', range_hash_type='max')
|
||||
]
|
||||
}
|
||||
|
||||
MIDDLE_FIELDS = [
|
||||
Field("UInt8_", 'UInt8', default_value_for_get=55),
|
||||
Field("UInt16_", 'UInt16', default_value_for_get=66),
|
||||
Field("UInt32_", 'UInt32', default_value_for_get=77),
|
||||
Field("UInt64_", 'UInt64', default_value_for_get=88),
|
||||
Field("Int8_", 'Int8', default_value_for_get=-55),
|
||||
Field("Int16_", 'Int16', default_value_for_get=-66),
|
||||
Field("Int32_", 'Int32', default_value_for_get=-77),
|
||||
Field("Int64_", 'Int64', default_value_for_get=-88),
|
||||
Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
|
||||
Field("Date_", 'Date', default_value_for_get='2018-12-30'),
|
||||
Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
|
||||
Field("String_", 'String', default_value_for_get='hi'),
|
||||
Field("Float32_", 'Float32', default_value_for_get=555.11),
|
||||
Field("Float64_", 'Float64', default_value_for_get=777.11),
|
||||
]
|
||||
|
||||
END_FIELDS = {
|
||||
"simple" : [
|
||||
Field("ParentKeyField", "UInt64", default_value_for_get=444, hierarchical=True)
|
||||
],
|
||||
"complex" : [],
|
||||
"ranged" : []
|
||||
}
|
||||
|
||||
LAYOUTS_SIMPLE = ["flat", "hashed", "cache", "direct"]
|
||||
LAYOUTS_COMPLEX = ["complex_key_hashed", "complex_key_cache", "complex_key_direct"]
|
||||
LAYOUTS_RANGED = ["range_hashed"]
|
||||
|
||||
VALUES = {
|
||||
"simple": [
|
||||
[1, 22, 333, 4444, 55555, -6, -77,
|
||||
-888, -999, '550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25', 'hello', 22.543, 3332154213.4, 0],
|
||||
[2, 3, 4, 5, 6, -7, -8,
|
||||
-9, -10, '550e8400-e29b-41d4-a716-446655440002',
|
||||
'1978-06-28', '1986-02-28 23:42:25', 'hello', 21.543, 3222154213.4, 1]
|
||||
],
|
||||
"complex": [
|
||||
[1, 'world', 22, 333, 4444, 55555, -6,
|
||||
-77, -888, -999, '550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25',
|
||||
'hello', 22.543, 3332154213.4],
|
||||
[2, 'qwerty2', 52, 2345, 6544, 9191991, -2,
|
||||
-717, -81818, -92929, '550e8400-e29b-41d4-a716-446655440007',
|
||||
'1975-09-28', '2000-02-28 23:33:24',
|
||||
'my', 255.543, 3332221.44]
|
||||
],
|
||||
"ranged": [
|
||||
[1, '2019-02-10', '2019-02-01', '2019-02-28',
|
||||
22, 333, 4444, 55555, -6, -77, -888, -999,
|
||||
'550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25', 'hello',
|
||||
22.543, 3332154213.4],
|
||||
[2, '2019-04-10', '2019-04-01', '2019-04-28',
|
||||
11, 3223, 41444, 52515, -65, -747, -8388, -9099,
|
||||
'550e8400-e29b-41d4-a716-446655440004',
|
||||
'1973-06-29', '2002-02-28 23:23:25', '!!!!',
|
||||
32.543, 3332543.4]
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
DICT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'configs/dictionaries')
|
||||
|
||||
def get_dict(source, layout, fields, suffix_name=''):
|
||||
global DICT_CONFIG_PATH
|
||||
structure = DictionaryStructure(layout, fields)
|
||||
dict_name = source.name + "_" + layout.name + '_' + suffix_name
|
||||
dict_path = os.path.join(DICT_CONFIG_PATH, dict_name + '.xml')
|
||||
dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name, fields)
|
||||
dictionary.generate_config()
|
||||
return dictionary
|
||||
|
||||
class SimpleLayoutTester:
|
||||
def __init__(self):
|
||||
self.fields = KEY_FIELDS["simple"] + START_FIELDS["simple"] + MIDDLE_FIELDS + END_FIELDS["simple"]
|
||||
self.values = VALUES["simple"]
|
||||
self.data = [Row(self.fields, vals) for vals in self.values]
|
||||
self.layout_to_dictionary = dict()
|
||||
|
||||
def create_dictionaries(self, source_):
|
||||
for layout in LAYOUTS_SIMPLE:
|
||||
if source_.compatible_with_layout(Layout(layout)):
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
queries_with_answers = []
|
||||
|
||||
for row in self.data:
|
||||
for field in self.fields:
|
||||
if not field.is_key:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query in dct.get_select_has_queries(field, row):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query in dct.get_select_get_or_default_queries(field, row):
|
||||
queries_with_answers.append((query, field.default_value_for_get))
|
||||
|
||||
for query in dct.get_hierarchical_queries(self.data[0]):
|
||||
queries_with_answers.append((query, [1]))
|
||||
|
||||
for query in dct.get_hierarchical_queries(self.data[1]):
|
||||
queries_with_answers.append((query, [2, 1]))
|
||||
|
||||
for query in dct.get_is_in_queries(self.data[0], self.data[1]):
|
||||
queries_with_answers.append((query, 0))
|
||||
|
||||
for query in dct.get_is_in_queries(self.data[1], self.data[0]):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
# print query
|
||||
if isinstance(answer, list):
|
||||
answer = str(answer).replace(' ', '')
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
|
||||
class ComplexLayoutTester:
|
||||
def __init__(self):
|
||||
self.fields = KEY_FIELDS["complex"] + START_FIELDS["complex"] + MIDDLE_FIELDS + END_FIELDS["complex"]
|
||||
self.values = VALUES["complex"]
|
||||
self.data = [Row(self.fields, vals) for vals in self.values]
|
||||
self.layout_to_dictionary = dict()
|
||||
|
||||
def create_dictionaries(self, source_):
|
||||
for layout in LAYOUTS_COMPLEX:
|
||||
if source_.compatible_with_layout(Layout(layout)):
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
queries_with_answers = []
|
||||
|
||||
for row in self.data:
|
||||
for field in self.fields:
|
||||
if not field.is_key:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query in dct.get_select_has_queries(field, row):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query in dct.get_select_get_or_default_queries(field, row):
|
||||
queries_with_answers.append((query, field.default_value_for_get))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
# print query
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
|
||||
class RangedLayoutTester:
|
||||
def __init__(self):
|
||||
self.fields = KEY_FIELDS["ranged"] + START_FIELDS["ranged"] + MIDDLE_FIELDS + END_FIELDS["ranged"]
|
||||
self.values = VALUES["ranged"]
|
||||
self.data = [Row(self.fields, vals) for vals in self.values]
|
||||
self.layout_to_dictionary = dict()
|
||||
|
||||
def create_dictionaries(self, source_):
|
||||
for layout in LAYOUTS_RANGED:
|
||||
if source_.compatible_with_layout(Layout(layout)):
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
|
||||
queries_with_answers = []
|
||||
for row in self.data:
|
||||
for field in self.fields:
|
||||
if not field.is_key and not field.is_range:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
# print query
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceCassandra
|
||||
|
||||
SOURCE = SourceCassandra("Cassandra", "localhost", "9043", "cassandra1", "9042", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# Since that all .xml configs were created
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_cassandra=True)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceClickHouse

# Dictionary source served by the same instance that hosts the dictionaries.
SOURCE = SourceClickHouse("LocalClickHouse", "localhost", "9000", "node", "9000", "default", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs for every layout and assemble the test cluster."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,84 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceClickHouse

# Dictionary source living on a *separate* ClickHouse instance ('clickhouse1').
SOURCE = SourceClickHouse("RemoteClickHouse", "localhost", "9000", "clickhouse1", "9000", "default", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs and build a two-node cluster (source + consumer)."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]

    # 'clickhouse1' serves as the remote dictionary source; 'node' consumes it.
    cluster.add_instance('clickhouse1', main_configs=main_configs)
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


# BUGFIX: the original used set("cache") / set("complex_key_cache"), which are sets of
# *characters* ({'c','a','h','e'}, ...), so nothing was excluded and the unsupported
# cache layouts still ran. Use one-element set literals; sorted() keeps parametrize
# IDs deterministic across runs.
@pytest.mark.parametrize("layout_name", sorted(set(LAYOUTS_SIMPLE) - {'cache'}))
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", sorted(set(LAYOUTS_COMPLEX) - {'complex_key_cache'}))
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,78 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceExecutableCache

SOURCE = SourceExecutableCache("ExecutableCache", "localhost", "9000", "node", "9000", "", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs for every layout and assemble the test cluster."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


# An executable-cache source only makes sense with cache layouts, so no ranged tests here.
@pytest.mark.parametrize("layout_name", ['cache'])
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", ['complex_key_cache'])
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceExecutableHashed

SOURCE = SourceExecutableHashed("ExecutableHashed", "localhost", "9000", "node", "9000", "", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs for every layout and assemble the test cluster."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


# An executable-hashed source is exercised only with the hashed layouts.
@pytest.mark.parametrize("layout_name", ['hashed'])
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", ['complex_key_hashed'])
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceFile

SOURCE = SourceFile("File", "localhost", "9000", "node", "9000", "", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs for every layout and assemble the test cluster."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


# The file source does not support cache/direct layouts, so they are excluded.
@pytest.mark.parametrize("layout_name", set(LAYOUTS_SIMPLE) - {'cache', 'direct'})
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", list(set(LAYOUTS_COMPLEX) - {'complex_key_cache', 'complex_key_direct'}))
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,84 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceHTTP

# The HTTP dictionary source is served from a second instance, 'clickhouse1'.
SOURCE = SourceHTTP("SourceHTTP", "localhost", "9000", "clickhouse1", "9000", "", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs and build a two-node cluster (HTTP source + consumer)."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]

    cluster.add_instance('clickhouse1', main_configs=main_configs)
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,84 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceHTTPS

# The HTTPS dictionary source is served from a second instance, 'clickhouse1'.
SOURCE = SourceHTTPS("SourceHTTPS", "localhost", "9000", "clickhouse1", "9000", "", "")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs and build a two-node cluster (HTTPS source + consumer)."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]

    cluster.add_instance('clickhouse1', main_configs=main_configs)
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceMongo

SOURCE = SourceMongo("MongoDB", "localhost", "27018", "mongo1", "27017", "root", "clickhouse")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs and assemble a cluster with a MongoDB side-container."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mongo=True)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,75 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceMongoURI

SOURCE = SourceMongoURI("MongoDB_URI", "localhost", "27018", "mongo1", "27017", "root", "clickhouse")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs and assemble a cluster with a MongoDB side-container."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mongo=True)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


# Only the 'flat' layout is exercised here — see comment in SourceMongoURI.
@pytest.mark.parametrize("layout_name", ["flat"])
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
import math
import pytest

from .common import *

from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceMySQL

SOURCE = SourceMySQL("MySQL", "localhost", "3308", "mysql1", "3306", "root", "clickhouse")

cluster = None
node = None
simple_tester = None
complex_tester = None
ranged_tester = None


def setup_module(module):
    """Generate dictionary configs and assemble a cluster with a MySQL side-container."""
    global cluster, node, simple_tester, complex_tester, ranged_tester

    # Start from a clean slate of generated dictionary configs.
    for leftover in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, leftover))

    simple_tester = SimpleLayoutTester()
    simple_tester.create_dictionaries(SOURCE)

    complex_tester = ComplexLayoutTester()
    complex_tester.create_dictionaries(SOURCE)

    ranged_tester = RangedLayoutTester()
    ranged_tester.create_dictionaries(SOURCE)

    # All generated .xml dictionary configs now exist on disk; wire them in.
    cluster = ClickHouseCluster(__file__)
    main_configs = [os.path.join('configs', 'disable_ssl_verification.xml')]
    dictionaries = [os.path.join(DICT_CONFIG_PATH, fname) for fname in os.listdir(DICT_CONFIG_PATH)]
    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mysql=True)


def teardown_module(module):
    global DICT_CONFIG_PATH
    # Remove every generated dictionary config so the next test module starts clean.
    for fname in os.listdir(DICT_CONFIG_PATH):
        os.remove(os.path.join(DICT_CONFIG_PATH, fname))


@pytest.fixture(scope="module")
def started_cluster():
    """Module-wide cluster lifecycle: start, load each tester's data, always shut down."""
    try:
        cluster.start()
        simple_tester.prepare(cluster)
        complex_tester.prepare(cluster)
        ranged_tester.prepare(cluster)
        yield cluster
    finally:
        cluster.shutdown()


@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
def test_simple(started_cluster, layout_name):
    simple_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
def test_complex(started_cluster, layout_name):
    complex_tester.execute(layout_name, node)


@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
def test_ranged(started_cluster, layout_name):
    ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0"?>
<yandex>
    <!-- User-profile override dropped into users.d; replace="replace" swaps the whole <profiles> section. -->
    <profiles replace="replace">
        <default>
            <max_memory_usage>20000000000</max_memory_usage>
            <load_balancing>nearest_hostname</load_balancing>
        </default>
    </profiles>
</yandex>
|
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0"?>
<yandex>
    <!-- Baseline user-profile values restored before each test; replace="replace" swaps the whole <profiles> section. -->
    <profiles replace="replace">
        <default>
            <max_memory_usage>10000000000</max_memory_usage>
            <load_balancing>first_or_random</load_balancing>
        </default>
    </profiles>
</yandex>
|
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0"?>
<yandex>
    <!-- Deliberately invalid: 'a' is not a valid load_balancing enum value, so the reload must fail. -->
    <profiles replace="replace">
        <default>
            <max_memory_usage>20000000000</max_memory_usage>
            <load_balancing>a</load_balancing>
        </default>
    </profiles>
</yandex>
|
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0"?>
<yandex>
    <!-- Deliberately invalid: 'a' is not parseable as an integer for max_memory_usage, so the reload must fail. -->
    <profiles replace="replace">
        <default>
            <max_memory_usage>a</max_memory_usage>
            <load_balancing>nearest_hostname</load_balancing>
        </default>
    </profiles>
</yandex>
|
@ -0,0 +1,8 @@
|
||||
<?xml version="1.0"?>
<yandex>
    <!-- Deliberately invalid: 'xyz' is not a known setting name, so the reload must fail. -->
    <profiles replace="replace">
        <default>
            <xyz>8</xyz>
        </default>
    </profiles>
</yandex>
|
@ -0,0 +1,90 @@
|
||||
import pytest
import os
import time
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_retry

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', user_configs=["configs/normal_settings.xml"])


@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    """Start the cluster once for the module and always shut it down."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


@pytest.fixture(autouse=True)
def reset_to_normal_settings_after_test():
    """Before every test, restore the baseline user settings and force a config reload."""
    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/normal_settings.xml"), '/etc/clickhouse-server/users.d/z.xml')
    node.query("SYSTEM RELOAD CONFIG")
    yield


def test_force_reload():
    """SYSTEM RELOAD CONFIG picks up new user-setting values immediately."""
    assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n"

    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), '/etc/clickhouse-server/users.d/z.xml')
    node.query("SYSTEM RELOAD CONFIG")

    assert node.query("SELECT getSetting('max_memory_usage')") == "20000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "nearest_hostname\n"


def test_reload_on_timeout():
    """Config changes are picked up by the periodic reload timer without an explicit reload."""
    assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n"

    time.sleep(1)  # The modification time of the 'z.xml' file should be different,
    # because config files are reload by timer only when the modification time is changed.
    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), '/etc/clickhouse-server/users.d/z.xml')

    assert_eq_with_retry(node, "SELECT getSetting('max_memory_usage')", "20000000000")
    assert_eq_with_retry(node, "SELECT getSetting('load_balancing')", "nearest_hostname")


def test_unknown_setting_force_reload():
    """An unknown setting name makes SYSTEM RELOAD CONFIG fail and keeps the old values."""
    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unknown_setting.xml"), '/etc/clickhouse-server/users.d/z.xml')

    error_message = "Setting xyz is neither a builtin setting nor started with the prefix 'custom_' registered for user-defined settings"
    assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG")

    assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n"


def test_unknown_setting_reload_on_timeout():
    """An unknown setting is logged on timed reload and the old values are kept."""
    time.sleep(1)  # The modification time of the 'z.xml' file should be different,
    # because config files are reload by timer only when the modification time is changed.
    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unknown_setting.xml"), '/etc/clickhouse-server/users.d/z.xml')

    error_message = "Setting xyz is neither a builtin setting nor started with the prefix 'custom_' registered for user-defined settings"
    assert_logs_contain_with_retry(node, error_message)

    assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n"


def test_unexpected_setting_int():
    """A non-integer value for an int setting makes the reload fail and keeps the old values."""
    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unexpected_setting_int.xml"), '/etc/clickhouse-server/users.d/z.xml')
    error_message = "Cannot parse"
    assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG")

    assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n"


def test_unexpected_setting_enum():
    """An invalid enum value for load_balancing makes the reload fail and keeps the old values."""
    # BUGFIX: originally copy-pasted 'unexpected_setting_int.xml' here, so this test
    # duplicated test_unexpected_setting_int and never exercised the enum config.
    node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unexpected_setting_enum.xml"), '/etc/clickhouse-server/users.d/z.xml')
    error_message = "Cannot parse"
    assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG")

    assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n"
    assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n"
|
@ -1,26 +0,0 @@
|
||||
<yandex>
|
||||
<kafka>
|
||||
<auto_offset_reset>earliest</auto_offset_reset>
|
||||
<!-- Debugging of possible issues, like:
|
||||
- https://github.com/edenhill/librdkafka/issues/2077
|
||||
- https://github.com/edenhill/librdkafka/issues/1778
|
||||
- #5615
|
||||
|
||||
XXX: for now this messages will appears in stderr.
|
||||
-->
|
||||
<security_protocol>SASL_PLAINTEXT</security_protocol>
|
||||
<sasl_mechanism>GSSAPI</sasl_mechanism>
|
||||
<sasl_kerberos_service_name>kafka</sasl_kerberos_service_name>
|
||||
<sasl_kerberos_keytab>/tmp/keytab/clickhouse.keytab</sasl_kerberos_keytab>
|
||||
<sasl_kerberos_principal>kafkauser/instance@TEST.CLICKHOUSE.TECH</sasl_kerberos_principal>
|
||||
<debug>security</debug>
|
||||
<api_version_request>false</api_version_request>
|
||||
</kafka>
|
||||
|
||||
<kafka_consumer_hang>
|
||||
<!-- default: 3000 -->
|
||||
<heartbeat_interval_ms>300</heartbeat_interval_ms>
|
||||
<!-- default: 10000 -->
|
||||
<session_timeout_ms>6000</session_timeout_ms>
|
||||
</kafka_consumer_hang>
|
||||
</yandex>
|
@ -1,11 +0,0 @@
|
||||
<yandex>
|
||||
<logger>
|
||||
<level>trace</level>
|
||||
<log>/var/log/clickhouse-server/log.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
|
||||
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
|
||||
</logger>
|
||||
</yandex>
|
@ -1,132 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
set -x # trace
|
||||
|
||||
: "${REALM:=TEST.CLICKHOUSE.TECH}"
|
||||
: "${DOMAIN_REALM:=test.clickhouse.tech}"
|
||||
: "${KERB_MASTER_KEY:=masterkey}"
|
||||
: "${KERB_ADMIN_USER:=admin}"
|
||||
: "${KERB_ADMIN_PASS:=admin}"
|
||||
|
||||
create_config() {
|
||||
: "${KDC_ADDRESS:=$(hostname -f)}"
|
||||
|
||||
cat>/etc/krb5.conf<<EOF
|
||||
[logging]
|
||||
default = FILE:/var/log/kerberos/krb5libs.log
|
||||
kdc = FILE:/var/log/kerberos/krb5kdc.log
|
||||
admin_server = FILE:/var/log/kerberos/kadmind.log
|
||||
|
||||
[libdefaults]
|
||||
default_realm = $REALM
|
||||
dns_lookup_realm = false
|
||||
dns_lookup_kdc = false
|
||||
ticket_lifetime = 15s
|
||||
renew_lifetime = 15s
|
||||
forwardable = true
|
||||
# WARNING: We use weaker key types to simplify testing as stronger key types
|
||||
# require the enhanced security JCE policy file to be installed. You should
|
||||
# NOT run with this configuration in production or any real environment. You
|
||||
# have been warned.
|
||||
default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
|
||||
default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
|
||||
permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
|
||||
|
||||
[realms]
|
||||
$REALM = {
|
||||
kdc = $KDC_ADDRESS
|
||||
admin_server = $KDC_ADDRESS
|
||||
}
|
||||
|
||||
[domain_realm]
|
||||
.$DOMAIN_REALM = $REALM
|
||||
$DOMAIN_REALM = $REALM
|
||||
EOF
|
||||
|
||||
cat>/var/kerberos/krb5kdc/kdc.conf<<EOF
|
||||
[kdcdefaults]
|
||||
kdc_ports = 88
|
||||
kdc_tcp_ports = 88
|
||||
|
||||
[realms]
|
||||
$REALM = {
|
||||
acl_file = /var/kerberos/krb5kdc/kadm5.acl
|
||||
dict_file = /usr/share/dict/words
|
||||
admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
|
||||
# WARNING: We use weaker key types to simplify testing as stronger key types
|
||||
# require the enhanced security JCE policy file to be installed. You should
|
||||
# NOT run with this configuration in production or any real environment. You
|
||||
# have been warned.
|
||||
master_key_type = des3-hmac-sha1
|
||||
supported_enctypes = arcfour-hmac:normal des3-hmac-sha1:normal des-cbc-crc:normal des:normal des:v4 des:norealm des:onlyrealm des:afs3
|
||||
default_principal_flags = +preauth
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
create_db() {
|
||||
/usr/sbin/kdb5_util -P $KERB_MASTER_KEY -r $REALM create -s
|
||||
}
|
||||
|
||||
start_kdc() {
|
||||
mkdir -p /var/log/kerberos
|
||||
|
||||
/etc/rc.d/init.d/krb5kdc start
|
||||
/etc/rc.d/init.d/kadmin start
|
||||
|
||||
chkconfig krb5kdc on
|
||||
chkconfig kadmin on
|
||||
}
|
||||
|
||||
restart_kdc() {
|
||||
/etc/rc.d/init.d/krb5kdc restart
|
||||
/etc/rc.d/init.d/kadmin restart
|
||||
}
|
||||
|
||||
create_admin_user() {
|
||||
kadmin.local -q "addprinc -pw $KERB_ADMIN_PASS $KERB_ADMIN_USER/admin"
|
||||
echo "*/admin@$REALM *" > /var/kerberos/krb5kdc/kadm5.acl
|
||||
}
|
||||
|
||||
create_keytabs() {
|
||||
|
||||
kadmin.local -q "addprinc -randkey zookeeper/kafka_kerberized_zookeeper@${REALM}"
|
||||
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/kafka_kerberized_zookeeper.keytab zookeeper/kafka_kerberized_zookeeper@${REALM}"
|
||||
|
||||
kadmin.local -q "addprinc -randkey kafka/kerberized_kafka1@${REALM}"
|
||||
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/kerberized_kafka.keytab kafka/kerberized_kafka1@${REALM}"
|
||||
|
||||
kadmin.local -q "addprinc -randkey zkclient@${REALM}"
|
||||
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/zkclient.keytab zkclient@${REALM}"
|
||||
|
||||
|
||||
kadmin.local -q "addprinc -randkey kafkauser/instance@${REALM}"
|
||||
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab kafkauser/instance@${REALM}"
|
||||
|
||||
chmod g+r /tmp/keytab/clickhouse.keytab
|
||||
|
||||
}
|
||||
|
||||
main() {
|
||||
|
||||
if [ ! -f /kerberos_initialized ]; then
|
||||
create_config
|
||||
create_db
|
||||
create_admin_user
|
||||
start_kdc
|
||||
|
||||
touch /kerberos_initialized
|
||||
fi
|
||||
|
||||
if [ ! -f /var/kerberos/krb5kdc/principal ]; then
|
||||
while true; do sleep 1000; done
|
||||
else
|
||||
start_kdc
|
||||
create_keytabs
|
||||
tail -F /var/log/kerberos/krb5kdc.log
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
[[ "$0" == "${BASH_SOURCE[0]}" ]] && main "$@"
|
@ -1,14 +0,0 @@
|
||||
KafkaServer {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=true
|
||||
keyTab="/etc/kafka/secrets/kerberized_kafka.keytab"
|
||||
principal="kafka/kerberized_kafka1@TEST.CLICKHOUSE.TECH";
|
||||
};
|
||||
Client {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=true
|
||||
keyTab="/etc/kafka/secrets/zkclient.keytab"
|
||||
principal="zkclient@TEST.CLICKHOUSE.TECH";
|
||||
};
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user