Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit 987a46faee: Merge branch 'master' into run_func_tests_in_parallel

.gitmodules (vendored): 1 change
@@ -186,4 +186,3 @@
[submodule "contrib/cyrus-sasl"]
path = contrib/cyrus-sasl
url = https://github.com/cyrusimap/cyrus-sasl
branch = cyrus-sasl-2.1
@@ -14,6 +14,8 @@ TRIES=3
AMD64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
AARCH64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"

# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.

FASTER_DOWNLOAD=wget
if command -v axel >/dev/null; then
FASTER_DOWNLOAD=axel
@@ -36,14 +38,6 @@ if [[ ! -f clickhouse ]]; then
$FASTER_DOWNLOAD "$AMD64_BIN_URL"
elif [[ $CPU == aarch64 ]]; then
$FASTER_DOWNLOAD "$AARCH64_BIN_URL"

# Download configs. ARM version has no embedded configs.
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
mkdir config.d
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/access_control.xml -O config.d/access_control.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
else
echo "Unsupported CPU type: $CPU"
exit 1
@@ -60,10 +54,12 @@ if [[ ! -d data ]]; then
if [[ ! -f $DATASET ]]; then
$FASTER_DOWNLOAD "https://clickhouse-datasets.s3.yandex.net/hits/partitions/$DATASET"
fi

tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET
fi

uptime

echo "Starting clickhouse-server"

./clickhouse server > server.log 2>&1 &
@@ -105,9 +101,12 @@ echo
echo "Benchmark complete. System info:"
echo

echo '----Version and build id--------'
./clickhouse local --query "SELECT version(), buildId()"
echo '----Version, build id-----------'
./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
echo '----CPU-------------------------'
cat /proc/cpuinfo | grep -i -F 'model name' | uniq
lscpu
echo '----Block Devices---------------'
lsblk
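The benchmark script above picks a prebuilt binary by CPU architecture and prefers `axel` over `wget` when it is installed. A minimal Python sketch of the same selection logic, reusing the URLs from the script; the helper names are illustrative only and are not part of the repository:

``` python
import platform
import shutil

# URLs taken from the benchmark script above.
AMD64_BIN_URL = "https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
AARCH64_BIN_URL = "https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"

def pick_binary_url() -> str:
    """Return the download URL for the current CPU, mirroring the shell logic."""
    cpu = platform.machine()
    if cpu == "x86_64":
        return AMD64_BIN_URL
    if cpu == "aarch64":
        return AARCH64_BIN_URL
    raise RuntimeError(f"Unsupported CPU type: {cpu}")

def pick_downloader() -> str:
    """Prefer 'axel' and fall back to 'wget', like the FASTER_DOWNLOAD variable."""
    return "axel" if shutil.which("axel") else "wget"
```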
@@ -14,10 +14,10 @@ if (NOT ENABLE_RDKAFKA)
return()
endif()

if (NOT ARCH_ARM)
if (NOT ARCH_ARM AND USE_LIBGSASL)
option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka instead of the bundled" ${NOT_UNBUNDLED})
elseif(USE_INTERNAL_RDKAFKA_LIBRARY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal librdkafka with ARCH_ARM=${ARCH_ARM}")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal librdkafka with ARCH_ARM=${ARCH_ARM} AND USE_LIBGSASL=${USE_LIBGSASL}")
endif ()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cppkafka/CMakeLists.txt")
contrib/cyrus-sasl (vendored): 2 changes
@@ -1 +1 @@
Subproject commit 9995bf9d8e14f58934d9313ac64f13780d6dd3c9
Subproject commit 6054630889fd1cd8d0659573d69badcee1e23a00

contrib/protobuf (vendored): 2 changes
@@ -1 +1 @@
Subproject commit d6a10dd3db55d8f7f9e464db9151874cde1f79ec
Subproject commit 445d1ae73a450b1e94622e7040989aa2048402e3
@@ -11,3 +11,7 @@ else ()
endif ()

add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}")

# We don't want to stop compilation on warnings in protobuf's headers.
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake
set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES ${protobuf_SOURCE_DIR}/src)
debian/rules (vendored): 4 changes
@@ -36,8 +36,8 @@ endif

CMAKE_FLAGS += -DENABLE_UTILS=0

DEB_CC ?= $(shell which gcc-9 gcc-8 gcc | head -n1)
DEB_CXX ?= $(shell which g++-9 g++-8 g++ | head -n1)
DEB_CC ?= $(shell which gcc-10 gcc-9 gcc | head -n1)
DEB_CXX ?= $(shell which g++-10 g++-9 g++ | head -n1)

ifdef DEB_CXX
DEB_BUILD_GNU_TYPE := $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
@@ -133,10 +133,6 @@
"name": "yandex/clickhouse-postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
"name": "yandex/clickhouse-kerberos-kdc",
"dependent": []
},
"docker/test/base": {
"name": "yandex/clickhouse-test-base",
"dependent": [
@@ -89,7 +89,8 @@ EOT
fi

if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG &
# Listen only on localhost until the initialization is done
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
pid="$!"

# check if clickhouse is ready to accept connections
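The entrypoint change above starts the server bound to 127.0.0.1 while the init scripts run and then checks that it is ready to accept connections. A rough Python sketch of such a readiness probe, assuming the default native port 9000 on localhost; the function name and timeout are illustrative and not taken from the image:

``` python
import socket
import time

def wait_for_clickhouse(host: str = "127.0.0.1", port: int = 9000, timeout: float = 30.0) -> bool:
    """Poll a TCP port until the server accepts connections or the timeout expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1.0):
                return True  # the server is listening
        except OSError:
            time.sleep(0.5)  # not up yet, retry
    return False
```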
@@ -97,7 +97,7 @@ ccache --zero-stats ||:
mkdir build
cd build
cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt
ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt
@ -111,35 +111,10 @@ ln -s /test_output /var/log/clickhouse-server
|
||||
cp "$CLICKHOUSE_DIR/programs/server/config.xml" /etc/clickhouse-server/
|
||||
cp "$CLICKHOUSE_DIR/programs/server/users.xml" /etc/clickhouse-server/
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
# Keep original query_masking_rules.xml
|
||||
ln -s --backup=simple --suffix=_original.xml /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
# install tests config
|
||||
$CLICKHOUSE_DIR/tests/config/install.sh
|
||||
# doesn't support SSL
|
||||
rm -f /etc/clickhouse-server/config.d/secure_ports.xml
|
||||
|
||||
# Kill the server in case we are running locally and not in docker
|
||||
kill_clickhouse
|
||||
@@ -216,7 +191,7 @@ TESTS_TO_SKIP=(
01460_DistributedFilesToInsert
)

clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt

# substr is to remove semicolon after test name
@@ -234,7 +209,7 @@ then
kill_clickhouse

# Clean the data so that there is no interference from the previous test run.
rm -rvf /var/lib/clickhouse ||:
rm -rf /var/lib/clickhouse ||:
mkdir /var/lib/clickhouse

clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
@@ -48,7 +48,7 @@ function configure
cp -av "$repo_dir"/programs/server/config* db
cp -av "$repo_dir"/programs/server/user* db
# TODO figure out which ones are needed
cp -av "$repo_dir"/tests/config/listen.xml db/config.d
cp -av "$repo_dir"/tests/config/config.d/listen.xml db/config.d
cp -av "$script_dir"/query-fuzzer-tweaks-users.xml db/users.d
}
@@ -16,8 +16,7 @@ RUN apt-get update \
odbc-postgresql \
sqlite3 \
curl \
tar \
krb5-user
tar
RUN rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
@@ -1,15 +0,0 @@
# docker build -t yandex/clickhouse-kerberos-kdc .

FROM centos:6.6
# old OS to make it faster and smaller

RUN yum install -y krb5-server krb5-libs krb5-auth-dialog krb5-workstation

EXPOSE 88 749

RUN touch /config.sh
# should be overwritten e.g. via docker_compose volumes
# volumes: /some_path/my_kerberos_config.sh:/config.sh:ro

ENTRYPOINT ["/bin/bash", "/config.sh"]
@ -1,59 +0,0 @@
|
||||
version: '2.3'
|
||||
|
||||
services:
|
||||
kafka_kerberized_zookeeper:
|
||||
image: confluentinc/cp-zookeeper:5.2.0
|
||||
# restart: always
|
||||
hostname: kafka_kerberized_zookeeper
|
||||
environment:
|
||||
ZOOKEEPER_SERVER_ID: 1
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_SERVERS: "kafka_kerberized_zookeeper:2888:3888"
|
||||
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/zookeeper_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dsun.security.krb5.debug=true"
|
||||
volumes:
|
||||
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
|
||||
- /dev/urandom:/dev/random
|
||||
depends_on:
|
||||
- kafka_kerberos
|
||||
security_opt:
|
||||
- label:disable
|
||||
|
||||
kerberized_kafka1:
|
||||
image: confluentinc/cp-kafka:5.2.0
|
||||
# restart: always
|
||||
hostname: kerberized_kafka1
|
||||
ports:
|
||||
- "9092:9092"
|
||||
- "9093:9093"
|
||||
environment:
|
||||
KAFKA_LISTENERS: OUTSIDE://:19092,UNSECURED_OUTSIDE://:19093,UNSECURED_INSIDE://:9093
|
||||
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://kerberized_kafka1:19092,UNSECURED_OUTSIDE://kerberized_kafka1:19093,UNSECURED_INSIDE://localhost:9093
|
||||
# KAFKA_LISTENERS: INSIDE://kerberized_kafka1:9092,OUTSIDE://kerberized_kafka1:19092
|
||||
# KAFKA_ADVERTISED_LISTENERS: INSIDE://localhost:9092,OUTSIDE://kerberized_kafka1:19092
|
||||
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
|
||||
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
|
||||
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: OUTSIDE:SASL_PLAINTEXT,UNSECURED_OUTSIDE:PLAINTEXT,UNSECURED_INSIDE:PLAINTEXT,
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: OUTSIDE
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: "kafka_kerberized_zookeeper:2181"
|
||||
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/broker_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/krb.conf -Dsun.security.krb5.debug=true"
|
||||
volumes:
|
||||
- ${KERBERIZED_KAFKA_DIR}/secrets:/etc/kafka/secrets
|
||||
- /dev/urandom:/dev/random
|
||||
depends_on:
|
||||
- kafka_kerberized_zookeeper
|
||||
- kafka_kerberos
|
||||
security_opt:
|
||||
- label:disable
|
||||
|
||||
kafka_kerberos:
|
||||
image: yandex/clickhouse-kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG}
|
||||
hostname: kafka_kerberos
|
||||
volumes:
|
||||
- ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab
|
||||
- ${KERBERIZED_KAFKA_DIR}/../../kerberos_image_config.sh:/config.sh
|
||||
- /dev/urandom:/dev/random
|
||||
ports: [88, 749]
|
@@ -27,7 +27,6 @@ export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest}
export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
export DOCKER_KERBEROS_KDC_TAG=${DOCKER_KERBEROS_KDC_TAG:=latest}

cd /ClickHouse/tests/integration
exec "$@"
@@ -24,10 +24,11 @@ def tsv_escape(s):
parser = argparse.ArgumentParser(description='Run performance test.')
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.")
parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.")
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
@@ -188,10 +189,20 @@ for t in threads:
for t in threads:
t.join()

# Run the queries in randomized order, but preserve their indexes as specified
# in the test XML. To avoid using too much time, limit the number of queries
# we run per test.
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries or len(test_queries)))
queries_to_run = range(0, len(test_queries))

if args.max_queries:
# If specified, test a limited number of queries chosen at random.
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries))

if args.queries_to_run:
# Run the specified queries, with some sanity check.
for i in args.queries_to_run:
if i < 0 or i >= len(test_queries):
print(f'There is no query no. "{i}" in this test, only [{0}-{len(test_queries) - 1}] are present')
exit(1)

queries_to_run = args.queries_to_run

# Run test queries.
for query_index in queries_to_run:
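The new perf.py logic above defaults to running every query in order, samples a random subset when --max-queries is given, and validates explicit indexes passed via --queries-to-run. A standalone Python sketch of that selection, with hypothetical names standing in for the script's test_queries and parsed arguments:

``` python
import random
from typing import Optional, Sequence

def select_query_indexes(num_queries: int,
                         max_queries: Optional[int] = None,
                         explicit: Optional[Sequence[int]] = None) -> Sequence[int]:
    """Mirror the selection above: explicit list wins, then a random cap, then all queries."""
    if explicit:
        for i in explicit:
            if i < 0 or i >= num_queries:
                raise ValueError(f'There is no query no. "{i}" in this test, only [0-{num_queries - 1}] are present')
        return list(explicit)
    if max_queries:
        return random.sample(range(num_queries), min(num_queries, max_queries))
    return list(range(num_queries))

# Example: pick at most 5 of 20 queries at random.
print(select_query_indexes(20, max_queries=5))
```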
@ -8,26 +8,8 @@ dpkg -i package_folder/clickhouse-server_*.deb
|
||||
dpkg -i package_folder/clickhouse-client_*.deb
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/
|
||||
fi
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
function start()
|
||||
{
|
||||
|
@ -48,28 +48,8 @@ mkdir -p /var/lib/clickhouse
|
||||
mkdir -p /var/log/clickhouse-server
|
||||
chmod 777 -R /var/log/clickhouse-server/
|
||||
|
||||
# Temporary way to keep CI green while moving dictionaries to a separate directory
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
chmod 777 -R /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
|
||||
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load those if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
function start()
|
||||
{
|
||||
|
@@ -21,9 +21,7 @@ RUN apt-get update -y \
telnet \
tree \
unixodbc \
wget \
zookeeper \
zookeeperd
wget

RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
@ -8,48 +8,9 @@ dpkg -i package_folder/clickhouse-server_*.deb
|
||||
dpkg -i package_folder/clickhouse-client_*.deb
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
|
||||
fi
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/
|
||||
fi
|
||||
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
service zookeeper start
|
||||
sleep 5
|
||||
service clickhouse-server start && sleep 5
|
||||
|
||||
if cat /usr/bin/clickhouse-test | grep -q -- "--use-skip-list"; then
|
||||
|
@@ -66,9 +66,7 @@ RUN apt-get --allow-unauthenticated update -y \
unixodbc \
unixodbc-dev \
wget \
zlib1g-dev \
zookeeper \
zookeeperd
zlib1g-dev

RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
@ -8,48 +8,9 @@ dpkg -i package_folder/clickhouse-server_*.deb
|
||||
dpkg -i package_folder/clickhouse-client_*.deb
|
||||
dpkg -i package_folder/clickhouse-test_*.deb
|
||||
|
||||
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
|
||||
fi
|
||||
if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/
|
||||
fi
|
||||
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
service zookeeper start
|
||||
sleep 5
|
||||
service clickhouse-server start && sleep 5
|
||||
|
||||
if cat /usr/bin/clickhouse-test | grep -q -- "--use-skip-list"; then
|
||||
|
@@ -11,8 +11,6 @@ RUN apt-get update -y \
tzdata \
fakeroot \
debhelper \
zookeeper \
zookeeperd \
expect \
python \
python-lxml \
@ -39,41 +39,8 @@ mkdir -p /var/log/clickhouse-server
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
chmod 777 -R /var/log/clickhouse-server/
|
||||
|
||||
# Temporary way to keep CI green while moving dictionaries to a separate directory
mkdir -p /etc/clickhouse-server/dict_examples
|
||||
chmod 777 -R /etc/clickhouse-server/dict_examples
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
|
||||
|
||||
ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
|
||||
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/executable_dictionary.xml /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
|
||||
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
|
||||
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
|
||||
ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
/usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
|
||||
|
||||
service zookeeper start
|
||||
sleep 5
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
start_clickhouse
|
||||
|
||||
|
@@ -39,9 +39,8 @@ function start()
done
}

ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
# install test configs
/usr/share/clickhouse-test/config/install.sh

echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
@@ -29,7 +29,7 @@ def get_options(i):
if 0 < i:
options += " --order=random"
if i % 2 == 1:
options += " --atomic-db-engine"
options += " --db-engine=Ordinary"
return options
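In the stress-test driver above, get_options builds the extra clickhouse-test flags for worker i; the change swaps the --atomic-db-engine flag for --db-engine=Ordinary on odd-numbered workers. A self-contained Python sketch of the resulting helper, assembled from the diff, so treat it as an approximation of the surrounding function:

``` python
def get_options(i: int) -> str:
    """Build per-worker flags: randomize order for all but the first worker,
    and force the Ordinary database engine on odd-numbered workers."""
    options = ""
    if 0 < i:
        options += " --order=random"
    if i % 2 == 1:
        options += " --db-engine=Ordinary"
    return options

# Example: flags for the first four workers.
for worker in range(4):
    print(worker, repr(get_options(worker)))
```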
@@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN pip3 install urllib3 testflows==1.6.42 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.48 docker-compose docker dicttoxml kazoo tzlocal

ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce
@@ -165,22 +165,6 @@ Similar to GraphiteMergeTree, the Kafka engine supports extended configuration u

For a list of possible configuration options, see the [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). Use the underscore (`_`) instead of a dot in the ClickHouse configuration. For example, `check.crcs=true` will be `<check_crcs>true</check_crcs>`.

### Kerberos support {#kafka-kerberos-support}

To work with Kerberos-aware Kafka, add a `security_protocol` child element with the value `sasl_plaintext`. This is enough if a Kerberos ticket-granting ticket is obtained and cached by OS facilities.
ClickHouse can maintain Kerberos credentials using a keytab file. Consider the `sasl_kerberos_service_name`, `sasl_kerberos_keytab`, `sasl_kerberos_principal` and `sasl_kerberos_kinit_cmd` child elements.

Example:

``` xml
  <!-- Kerberos-aware Kafka -->
  <kafka>
    <security_protocol>SASL_PLAINTEXT</security_protocol>
    <sasl_kerberos_keytab>/home/kafkauser/kafkauser.keytab</sasl_kerberos_keytab>
    <sasl_kerberos_principal>kafkauser/kafkahost@EXAMPLE.COM</sasl_kerberos_principal>
  </kafka>
```

## Virtual Columns {#virtual-columns}

- `_topic` — Kafka topic.
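The rule stated above (librdkafka option names keep their values, but dots become underscores in the ClickHouse `<kafka>` configuration) is mechanical, so here is a small hedged Python sketch that generates such XML children from a plain dict; the option names in the example are real librdkafka settings, but the helper itself is only an illustration:

``` python
from xml.sax.saxutils import escape

def librdkafka_options_to_xml(options: dict) -> str:
    """Render librdkafka options as <kafka> child elements, replacing '.' with '_' in names."""
    lines = ["<kafka>"]
    for name, value in options.items():
        tag = name.replace(".", "_")
        lines.append(f"  <{tag}>{escape(str(value))}</{tag}>")
    lines.append("</kafka>")
    return "\n".join(lines)

print(librdkafka_options_to_xml({
    "check.crcs": "true",
    "security.protocol": "sasl_plaintext",
    "sasl.kerberos.keytab": "/home/kafkauser/kafkauser.keytab",
}))
```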
@@ -38,7 +38,7 @@ toc_title: Adopters
| <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
| <a href="https://www.diva-e.com" class="favicon">Diva-e</a> | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
| <a href="https://www.ecwid.com/" class="favicon">Ecwid</a> | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) |
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | TBA | — | — | [Webinar, Sep 2020](https://altinity.com/webinarspage/2020/09/08/migrating-from-druid-to-next-gen-olap-on-clickhouse-ebays-experience) |
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) |
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://fastnetmon.com/" class="favicon">FastNetMon</a> | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) |
| <a href="https://www.flipkart.com/" class="favicon">Flipkart</a> | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
@@ -3,7 +3,7 @@ machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# 在运营商 {#select-in-operators}
# IN 操作符 {#select-in-operators}

该 `IN`, `NOT IN`, `GLOBAL IN`,和 `GLOBAL NOT IN` 运算符是单独复盖的,因为它们的功能相当丰富。
@@ -1477,7 +1477,9 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
{
auto create_query_push_ast = rewriteCreateQueryStorage(task_shard.current_pull_table_create_query,
task_table.table_push, task_table.engine_push_ast);
create_query_push_ast->as<ASTCreateQuery &>().if_not_exists = true;
auto & create = create_query_push_ast->as<ASTCreateQuery &>();
create.if_not_exists = true;
InterpreterCreateQuery::prepareOnClusterQuery(create, context, task_table.cluster_push_name);
String query = queryToString(create_query_push_ast);

LOG_DEBUG(log, "Create destination tables. Query: {}", query);
@@ -215,31 +215,20 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
return primary_key_columns;
}

String extractReplicatedTableZookeeperPath(const ASTPtr & storage_ast)
bool isReplicatedTableEngine(const ASTPtr & storage_ast)
{
String storage_str = queryToString(storage_ast);

const auto & storage = storage_ast->as<ASTStorage &>();
const auto & engine = storage.engine->as<ASTFunction &>();

if (!endsWith(engine.name, "MergeTree"))
{
String storage_str = queryToString(storage_ast);
throw Exception(
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
ErrorCodes::BAD_ARGUMENTS);
}

if (!startsWith(engine.name, "Replicated"))
{
return "";
}

auto replicated_table_arguments = engine.arguments->children;

auto zk_table_path_ast = replicated_table_arguments[0]->as<ASTLiteral &>();
auto zk_table_path_string = zk_table_path_ast.value.safeGet<String>();

return zk_table_path_string;
return startsWith(engine.name, "Replicated");
}

ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std::string & local_hostname, UInt8 random)
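The copier change above replaces extractReplicatedTableZookeeperPath with a simple isReplicatedTableEngine check: the engine must be some *MergeTree, and it is replicated exactly when its name starts with "Replicated". A tiny Python sketch of the same predicate over an engine name string (the real code inspects the parsed AST; this is illustration only):

``` python
def is_replicated_table_engine(engine_name: str) -> bool:
    """True for Replicated*MergeTree engines; reject anything that is not *MergeTree."""
    if not engine_name.endswith("MergeTree"):
        raise ValueError(f"Unsupported engine '{engine_name}', only *MergeTree engines are supported")
    return engine_name.startswith("Replicated")

assert is_replicated_table_engine("ReplicatedMergeTree")
assert not is_replicated_table_engine("SummingMergeTree")
```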
@@ -200,7 +200,7 @@ ASTPtr extractOrderBy(const ASTPtr & storage_ast);

Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast);

String extractReplicatedTableZookeeperPath(const ASTPtr & storage_ast);
bool isReplicatedTableEngine(const ASTPtr & storage_ast);

ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std::string & local_hostname, UInt8 random);
@ -48,7 +48,7 @@ struct TaskTable
|
||||
String getCertainPartitionPieceTaskStatusPath(const String & partition_name, const size_t piece_number) const;
|
||||
|
||||
|
||||
bool isReplicatedTable() const { return engine_push_zk_path != ""; }
|
||||
bool isReplicatedTable() const { return is_replicated_table; }
|
||||
|
||||
/// Partitions will be split into number-of-splits pieces.
|
||||
/// Each piece will be copied independently. (10 by default)
|
||||
@ -78,6 +78,7 @@ struct TaskTable
|
||||
|
||||
/// First argument of Replicated...MergeTree()
|
||||
String engine_push_zk_path;
|
||||
bool is_replicated_table;
|
||||
|
||||
ASTPtr rewriteReplicatedCreateQueryToPlain();
|
||||
|
||||
@ -269,7 +270,7 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf
|
||||
engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
|
||||
engine_push_partition_key_ast = extractPartitionKey(engine_push_ast);
|
||||
primary_key_comma_separated = Nested::createCommaSeparatedStringFrom(extractPrimaryKeyColumnNames(engine_push_ast));
|
||||
engine_push_zk_path = extractReplicatedTableZookeeperPath(engine_push_ast);
|
||||
is_replicated_table = isReplicatedTableEngine(engine_push_ast);
|
||||
}
|
||||
|
||||
sharding_key_str = config.getString(table_prefix + "sharding_key");
|
||||
@ -372,15 +373,18 @@ inline ASTPtr TaskTable::rewriteReplicatedCreateQueryToPlain()
|
||||
auto & new_storage_ast = prev_engine_push_ast->as<ASTStorage &>();
|
||||
auto & new_engine_ast = new_storage_ast.engine->as<ASTFunction &>();
|
||||
|
||||
auto & replicated_table_arguments = new_engine_ast.arguments->children;
|
||||
|
||||
/// Delete first two arguments of Replicated...MergeTree() table.
|
||||
replicated_table_arguments.erase(replicated_table_arguments.begin());
|
||||
replicated_table_arguments.erase(replicated_table_arguments.begin());
|
||||
|
||||
/// Remove replicated from name
|
||||
/// Remove "Replicated" from name
|
||||
new_engine_ast.name = new_engine_ast.name.substr(10);
|
||||
|
||||
if (new_engine_ast.arguments)
|
||||
{
|
||||
auto & replicated_table_arguments = new_engine_ast.arguments->children;
|
||||
|
||||
/// Delete first two arguments of Replicated...MergeTree() table.
|
||||
replicated_table_arguments.erase(replicated_table_arguments.begin());
|
||||
replicated_table_arguments.erase(replicated_table_arguments.begin());
|
||||
}
|
||||
|
||||
return new_storage_ast.clone();
|
||||
}
|
||||
|
||||
|
@@ -3,7 +3,7 @@ set(CLICKHOUSE_SERVER_SOURCES
Server.cpp
)

if (OS_LINUX AND ARCH_AMD64)
if (OS_LINUX)
set (LINK_CONFIG_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_server_configs> -Wl,${NO_WHOLE_ARCHIVE}")
endif ()
@@ -339,16 +339,23 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
if (hasLinuxCapability(CAP_IPC_LOCK))
{
/// Get the memory area with (current) code segment.
/// It's better to lock only the code segment instead of calling "mlockall",
/// because otherwise debug info will be also locked in memory, and it can be huge.
auto [addr, len] = getMappedArea(reinterpret_cast<void *>(mainEntryClickHouseServer));
try
{
/// Get the memory area with (current) code segment.
/// It's better to lock only the code segment instead of calling "mlockall",
/// because otherwise debug info will be also locked in memory, and it can be huge.
auto [addr, len] = getMappedArea(reinterpret_cast<void *>(mainEntryClickHouseServer));

LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlock(addr, len))
LOG_WARNING(log, "Failed mlock: {}", errnoToString(ErrorCodes::SYSTEM_ERROR));
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len));
LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlock(addr, len))
LOG_WARNING(log, "Failed mlock: {}", errnoToString(ErrorCodes::SYSTEM_ERROR));
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len));
}
catch (...)
{
LOG_WARNING(log, "Cannot mlock: {}", getCurrentExceptionMessage(false));
}
}
else
{
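The comments above explain why only the code segment is locked: mlockall would also pin debug info, which can be huge. As a rough, hedged illustration of the same idea outside ClickHouse, this Python sketch finds the executable r-xp mapping of the current process in /proc/self/maps and locks it via libc mlock (Linux-only; the helper and its error handling are illustrative and not the server's actual implementation, and locking still requires CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK):

``` python
import ctypes
import os

libc = ctypes.CDLL("libc.so.6", use_errno=True)
libc.mlock.argtypes = [ctypes.c_void_p, ctypes.c_size_t]

def mlock_code_segment() -> int:
    """Lock the executable mapping of the current binary; return the locked size in bytes."""
    exe = os.readlink("/proc/self/exe")
    with open("/proc/self/maps") as maps:
        for line in maps:
            fields = line.split()
            # fields: address, perms, offset, dev, inode, pathname
            if len(fields) >= 6 and fields[1].startswith("r-x") and fields[5] == exe:
                start, end = (int(x, 16) for x in fields[0].split("-"))
                if libc.mlock(start, end - start) != 0:
                    raise OSError(ctypes.get_errno(), "mlock failed")
                return end - start
    raise RuntimeError("executable mapping not found")
```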
release: 10 changes
@@ -95,9 +95,9 @@ then
exit 3
fi

export DEB_CC=${DEB_CC=clang-6.0}
export DEB_CXX=${DEB_CXX=clang++-6.0}
EXTRAPACKAGES="$EXTRAPACKAGES clang-6.0 lld-6.0"
export DEB_CC=${DEB_CC=clang-10}
export DEB_CXX=${DEB_CXX=clang++-10}
EXTRAPACKAGES="$EXTRAPACKAGES clang-10 lld-10"
elif [[ $BUILD_TYPE == 'valgrind' ]]; then
MALLOC_OPTS="-DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0"
VERSION_POSTFIX+="+valgrind"
@@ -118,8 +118,8 @@ echo -e "\nCurrent version is $VERSION_STRING"
if [ -z "$NO_BUILD" ] ; then
gen_changelog "$VERSION_STRING" "" "$AUTHOR" ""
if [ -z "$USE_PBUILDER" ] ; then
DEB_CC=${DEB_CC:=`which gcc-9 gcc-8 gcc | head -n1`}
DEB_CXX=${DEB_CXX:=`which g++-9 g++-8 g++ | head -n1`}
DEB_CC=${DEB_CC:=`which gcc-10 gcc-9 gcc | head -n1`}
DEB_CXX=${DEB_CXX:=`which g++-10 g++-9 g++ | head -n1`}
# Build (only binary packages).
debuild --preserve-env -e PATH \
-e DEB_CC=$DEB_CC -e DEB_CXX=$DEB_CXX -e CMAKE_FLAGS="$CMAKE_FLAGS" \
@ -192,7 +192,7 @@ namespace
|
||||
}
|
||||
|
||||
|
||||
std::vector<AccessEntityPtr> parseUsers(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||
std::vector<AccessEntityPtr> parseUsers(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
config.keys("users", user_names);
|
||||
@ -200,16 +200,8 @@ namespace
|
||||
std::vector<AccessEntityPtr> users;
|
||||
users.reserve(user_names.size());
|
||||
for (const auto & user_name : user_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
users.push_back(parseUser(config, user_name));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse user " + backQuote(user_name));
|
||||
}
|
||||
}
|
||||
users.push_back(parseUser(config, user_name));
|
||||
|
||||
return users;
|
||||
}
|
||||
|
||||
@ -256,12 +248,11 @@ namespace
|
||||
}
|
||||
|
||||
quota->to_roles.add(user_ids);
|
||||
|
||||
return quota;
|
||||
}
|
||||
|
||||
|
||||
std::vector<AccessEntityPtr> parseQuotas(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||
std::vector<AccessEntityPtr> parseQuotas(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
config.keys("users", user_names);
|
||||
@ -278,76 +269,63 @@ namespace
|
||||
quotas.reserve(quota_names.size());
|
||||
for (const auto & quota_name : quota_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
auto it = quota_to_user_ids.find(quota_name);
|
||||
const std::vector<UUID> & quota_users = (it != quota_to_user_ids.end()) ? std::move(it->second) : std::vector<UUID>{};
|
||||
quotas.push_back(parseQuota(config, quota_name, quota_users));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse quota " + backQuote(quota_name));
|
||||
}
|
||||
auto it = quota_to_user_ids.find(quota_name);
|
||||
const std::vector<UUID> & quota_users = (it != quota_to_user_ids.end()) ? std::move(it->second) : std::vector<UUID>{};
|
||||
quotas.push_back(parseQuota(config, quota_name, quota_users));
|
||||
}
|
||||
return quotas;
|
||||
}
|
||||
|
||||
|
||||
std::vector<AccessEntityPtr> parseRowPolicies(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||
std::vector<AccessEntityPtr> parseRowPolicies(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
std::map<std::pair<String /* database */, String /* table */>, std::unordered_map<String /* user */, String /* filter */>> all_filters_map;
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys user_names;
|
||||
config.keys("users", user_names);
|
||||
|
||||
try
|
||||
for (const String & user_name : user_names)
|
||||
{
|
||||
config.keys("users", user_names);
|
||||
for (const String & user_name : user_names)
|
||||
const String databases_config = "users." + user_name + ".databases";
|
||||
if (config.has(databases_config))
|
||||
{
|
||||
const String databases_config = "users." + user_name + ".databases";
|
||||
if (config.has(databases_config))
|
||||
Poco::Util::AbstractConfiguration::Keys database_keys;
|
||||
config.keys(databases_config, database_keys);
|
||||
|
||||
/// Read tables within databases
|
||||
for (const String & database_key : database_keys)
|
||||
{
|
||||
Poco::Util::AbstractConfiguration::Keys database_keys;
|
||||
config.keys(databases_config, database_keys);
|
||||
const String database_config = databases_config + "." + database_key;
|
||||
|
||||
/// Read tables within databases
|
||||
for (const String & database_key : database_keys)
|
||||
String database_name;
|
||||
if (((database_key == "database") || (database_key.starts_with("database["))) && config.has(database_config + "[@name]"))
|
||||
database_name = config.getString(database_config + "[@name]");
|
||||
else if (size_t bracket_pos = database_key.find('['); bracket_pos != std::string::npos)
|
||||
database_name = database_key.substr(0, bracket_pos);
|
||||
else
|
||||
database_name = database_key;
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys table_keys;
|
||||
config.keys(database_config, table_keys);
|
||||
|
||||
/// Read table properties
|
||||
for (const String & table_key : table_keys)
|
||||
{
|
||||
const String database_config = databases_config + "." + database_key;
|
||||
|
||||
String database_name;
|
||||
if (((database_key == "database") || (database_key.starts_with("database["))) && config.has(database_config + "[@name]"))
|
||||
database_name = config.getString(database_config + "[@name]");
|
||||
else if (size_t bracket_pos = database_key.find('['); bracket_pos != std::string::npos)
|
||||
database_name = database_key.substr(0, bracket_pos);
|
||||
String table_config = database_config + "." + table_key;
|
||||
String table_name;
|
||||
if (((table_key == "table") || (table_key.starts_with("table["))) && config.has(table_config + "[@name]"))
|
||||
table_name = config.getString(table_config + "[@name]");
|
||||
else if (size_t bracket_pos = table_key.find('['); bracket_pos != std::string::npos)
|
||||
table_name = table_key.substr(0, bracket_pos);
|
||||
else
|
||||
database_name = database_key;
|
||||
table_name = table_key;
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys table_keys;
|
||||
config.keys(database_config, table_keys);
|
||||
|
||||
/// Read table properties
|
||||
for (const String & table_key : table_keys)
|
||||
{
|
||||
String table_config = database_config + "." + table_key;
|
||||
String table_name;
|
||||
if (((table_key == "table") || (table_key.starts_with("table["))) && config.has(table_config + "[@name]"))
|
||||
table_name = config.getString(table_config + "[@name]");
|
||||
else if (size_t bracket_pos = table_key.find('['); bracket_pos != std::string::npos)
|
||||
table_name = table_key.substr(0, bracket_pos);
|
||||
else
|
||||
table_name = table_key;
|
||||
|
||||
String filter_config = table_config + ".filter";
|
||||
all_filters_map[{database_name, table_name}][user_name] = config.getString(filter_config);
|
||||
}
|
||||
String filter_config = table_config + ".filter";
|
||||
all_filters_map[{database_name, table_name}][user_name] = config.getString(filter_config);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse row policies");
|
||||
}
|
||||
|
||||
std::vector<AccessEntityPtr> policies;
|
||||
for (auto & [database_and_table_name, user_to_filters] : all_filters_map)
|
||||
@ -450,23 +428,14 @@ namespace
|
||||
|
||||
std::vector<AccessEntityPtr> parseSettingsProfiles(
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::function<void(const std::string_view &)> & check_setting_name_function,
|
||||
Poco::Logger * log)
|
||||
const std::function<void(const std::string_view &)> & check_setting_name_function)
|
||||
{
|
||||
std::vector<AccessEntityPtr> profiles;
|
||||
Poco::Util::AbstractConfiguration::Keys profile_names;
|
||||
config.keys("profiles", profile_names);
|
||||
for (const auto & profile_name : profile_names)
|
||||
{
|
||||
try
|
||||
{
|
||||
profiles.push_back(parseSettingsProfile(config, profile_name, check_setting_name_function));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(log, "Could not parse profile " + backQuote(profile_name));
|
||||
}
|
||||
}
|
||||
profiles.push_back(parseSettingsProfile(config, profile_name, check_setting_name_function));
|
||||
|
||||
return profiles;
|
||||
}
|
||||
}
|
||||
@ -520,13 +489,13 @@ void UsersConfigAccessStorage::setConfig(const Poco::Util::AbstractConfiguration
|
||||
void UsersConfigAccessStorage::parseFromConfig(const Poco::Util::AbstractConfiguration & config)
|
||||
{
|
||||
std::vector<std::pair<UUID, AccessEntityPtr>> all_entities;
|
||||
for (const auto & entity : parseUsers(config, getLogger()))
|
||||
for (const auto & entity : parseUsers(config))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
for (const auto & entity : parseQuotas(config, getLogger()))
|
||||
for (const auto & entity : parseQuotas(config))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
for (const auto & entity : parseRowPolicies(config, getLogger()))
|
||||
for (const auto & entity : parseRowPolicies(config))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
for (const auto & entity : parseSettingsProfiles(config, check_setting_name_function, getLogger()))
|
||||
for (const auto & entity : parseSettingsProfiles(config, check_setting_name_function))
|
||||
all_entities.emplace_back(generateID(*entity), entity);
|
||||
memory_storage.setAll(all_entities);
|
||||
}
|
||||
|
@@ -138,6 +138,7 @@ void ConfigReloader::reloadIfNewer(bool force, bool throw_on_error, bool fallbac
if (throw_on_error)
throw;
tryLogCurrentException(log, "Error updating configuration from '" + path + "' config.");
return;
}

LOG_DEBUG(log, "Loaded config '{}', performed update on configuration", path);
@@ -41,11 +41,6 @@ void FileChecker::setEmpty(const String & full_file_path)
map[fileName(full_file_path)] = 0;
}

FileChecker::Map FileChecker::getFileSizes() const
{
return map;
}

CheckResults FileChecker::check() const
{
// Read the files again every time you call `check` - so as not to violate the constancy.
@@ -27,12 +27,10 @@ public:
/// The purpose of this function is to rollback a group of unfinished writes.
void repair();

private:
/// File name -> size.
using Map = std::map<String, UInt64>;

Map getFileSizes() const;

private:
void initialize();
void updateImpl(const String & file_path);
void load(Map & local_map, const String & path) const;
@ -23,18 +23,15 @@ Macros::Macros(const Poco::Util::AbstractConfiguration & config, const String &
|
||||
}
|
||||
|
||||
String Macros::expand(const String & s,
|
||||
size_t level,
|
||||
const String & database_name,
|
||||
const String & table_name,
|
||||
const UUID & uuid) const
|
||||
MacroExpansionInfo & info) const
|
||||
{
|
||||
if (s.find('{') == String::npos)
|
||||
return s;
|
||||
|
||||
if (level && s.size() > 65536)
|
||||
if (info.level && s.size() > 65536)
|
||||
throw Exception("Too long string while expanding macros", ErrorCodes::SYNTAX_ERROR);
|
||||
|
||||
if (level >= 10)
|
||||
if (info.level >= 10)
|
||||
throw Exception("Too deep recursion while expanding macros: '" + s + "'", ErrorCodes::SYNTAX_ERROR);
|
||||
|
||||
String res;
|
||||
@ -64,17 +61,28 @@ String Macros::expand(const String & s,
|
||||
/// Prefer explicit macros over implicit.
|
||||
if (it != macros.end())
|
||||
res += it->second;
|
||||
else if (macro_name == "database" && !database_name.empty())
|
||||
res += database_name;
|
||||
else if (macro_name == "table" && !table_name.empty())
|
||||
res += table_name;
|
||||
else if (macro_name == "database" && !info.database_name.empty())
|
||||
res += info.database_name;
|
||||
else if (macro_name == "table" && !info.table_name.empty())
|
||||
res += info.table_name;
|
||||
else if (macro_name == "uuid")
|
||||
{
|
||||
if (uuid == UUIDHelpers::Nil)
|
||||
if (info.uuid == UUIDHelpers::Nil)
|
||||
throw Exception("Macro 'uuid' and empty arguments of ReplicatedMergeTree "
|
||||
"are supported only for ON CLUSTER queries with Atomic database engine",
|
||||
ErrorCodes::SYNTAX_ERROR);
|
||||
res += toString(uuid);
|
||||
/// For ON CLUSTER queries we don't want to require all macros definitions in initiator's config.
|
||||
/// However, the initiator must check that for a cross-replication cluster the zookeeper_path does not contain the {uuid} macro.
|
||||
/// It becomes impossible to check if {uuid} is contained inside some unknown macro.
|
||||
if (info.level)
|
||||
throw Exception("Macro 'uuid' should not be inside another macro", ErrorCodes::SYNTAX_ERROR);
|
||||
res += toString(info.uuid);
|
||||
info.expanded_uuid = true;
|
||||
}
|
||||
else if (info.ignore_unknown)
|
||||
{
|
||||
res += macro_name;
|
||||
info.has_unknown = true;
|
||||
}
|
||||
else
|
||||
throw Exception("No macro '" + macro_name +
|
||||
@ -84,7 +92,8 @@ String Macros::expand(const String & s,
|
||||
pos = end + 1;
|
||||
}
|
||||
|
||||
return expand(res, level + 1, database_name, table_name);
|
||||
++info.level;
|
||||
return expand(res, info);
|
||||
}
|
||||
|
||||
String Macros::getValue(const String & key) const
|
||||
@ -94,9 +103,20 @@ String Macros::getValue(const String & key) const
|
||||
throw Exception("No macro " + key + " in config", ErrorCodes::SYNTAX_ERROR);
|
||||
}
|
||||
|
||||
|
||||
String Macros::expand(const String & s) const
|
||||
{
|
||||
MacroExpansionInfo info;
|
||||
return expand(s, info);
|
||||
}
|
||||
|
||||
String Macros::expand(const String & s, const StorageID & table_id, bool allow_uuid) const
|
||||
{
|
||||
return expand(s, 0, table_id.database_name, table_id.table_name, allow_uuid ? table_id.uuid : UUIDHelpers::Nil);
|
||||
MacroExpansionInfo info;
|
||||
info.database_name = table_id.database_name;
|
||||
info.table_name = table_id.table_name;
|
||||
info.uuid = allow_uuid ? table_id.uuid : UUIDHelpers::Nil;
|
||||
return expand(s, info);
|
||||
}
|
||||
|
||||
Names Macros::expand(const Names & source_names, size_t level) const
|
||||
@ -104,8 +124,12 @@ Names Macros::expand(const Names & source_names, size_t level) const
|
||||
Names result_names;
|
||||
result_names.reserve(source_names.size());
|
||||
|
||||
MacroExpansionInfo info;
|
||||
for (const String & name : source_names)
|
||||
result_names.push_back(expand(name, level));
|
||||
{
|
||||
info.level = level;
|
||||
result_names.push_back(expand(name, info));
|
||||
}
|
||||
|
||||
return result_names;
|
||||
}
|
||||
|
@ -27,15 +27,28 @@ public:
|
||||
Macros() = default;
|
||||
Macros(const Poco::Util::AbstractConfiguration & config, const String & key);
|
||||
|
||||
struct MacroExpansionInfo
|
||||
{
|
||||
/// Settings
|
||||
String database_name;
|
||||
String table_name;
|
||||
UUID uuid = UUIDHelpers::Nil;
|
||||
bool ignore_unknown = false;
|
||||
|
||||
/// Information about macro expansion
|
||||
size_t level = 0;
|
||||
bool expanded_uuid = false;
|
||||
bool has_unknown = false;
|
||||
};
|
||||
|
||||
/** Replace the substring of the form {macro_name} with the value for macro_name, obtained from the config file.
|
||||
* If {database} and {table} macros aren't defined explicitly, expand them as database_name and table_name respectively.
|
||||
* level - the level of recursion.
|
||||
*/
|
||||
String expand(const String & s,
|
||||
size_t level = 0,
|
||||
const String & database_name = "",
|
||||
const String & table_name = "",
|
||||
const UUID & uuid = UUIDHelpers::Nil) const;
|
||||
MacroExpansionInfo & info) const;
|
||||
|
||||
String expand(const String & s) const;
|
||||
|
||||
String expand(const String & s, const StorageID & table_id, bool allow_uuid) const;
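To make the new in/out struct concrete, here is a minimal self-contained sketch of the same expansion pattern. The names (ExpansionInfo, expandMacros) are simplified placeholders, not the actual ClickHouse classes: explicit macros take priority, {database}/{table} fall back to caller-supplied values, and unknown macros either raise an error or are recorded in the output flags.

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

struct ExpansionInfo
{
    /// Settings supplied by the caller.
    std::string database_name;
    std::string table_name;
    bool ignore_unknown = false;

    /// Information filled in during expansion.
    bool has_unknown = false;
};

std::string expandMacros(const std::string & s, const std::map<std::string, std::string> & macros, ExpansionInfo & info)
{
    std::string res;
    size_t pos = 0;
    while (pos < s.size())
    {
        size_t begin = s.find('{', pos);
        if (begin == std::string::npos)
        {
            res += s.substr(pos);
            break;
        }
        size_t end = s.find('}', begin);
        if (end == std::string::npos)
            throw std::runtime_error("Unbalanced '{' in: " + s);

        res += s.substr(pos, begin - pos);
        std::string name = s.substr(begin + 1, end - begin - 1);

        if (auto it = macros.find(name); it != macros.end())
            res += it->second;                 /// Explicit macros win.
        else if (name == "database" && !info.database_name.empty())
            res += info.database_name;         /// Implicit fallback.
        else if (name == "table" && !info.table_name.empty())
            res += info.table_name;
        else if (info.ignore_unknown)
        {
            res += name;                       /// Keep the name, but remember that we saw it.
            info.has_unknown = true;
        }
        else
            throw std::runtime_error("No macro '" + name + "'");

        pos = end + 1;
    }
    return res;
}

int main()
{
    ExpansionInfo info;
    info.database_name = "db";
    info.table_name = "hits";
    info.ignore_unknown = true;

    std::string path = expandMacros("/clickhouse/tables/{shard}/{database}/{table}", {{"shard", "01"}}, info);
    std::cout << path << " has_unknown=" << info.has_unknown << '\n';   /// /clickhouse/tables/01/db/hits has_unknown=0
}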
|
||||
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/randomSeed.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <common/getThreadId.h>
|
||||
#include <common/types.h>
|
||||
|
||||
|
||||
@ -19,7 +20,7 @@ namespace DB
|
||||
DB::UInt64 randomSeed()
|
||||
{
|
||||
struct timespec times;
|
||||
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &times))
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &times))
|
||||
DB::throwFromErrno("Cannot clock_gettime.", DB::ErrorCodes::CANNOT_CLOCK_GETTIME);
|
||||
|
||||
/// Not cryptographically secure as time, pid and stack address can be predictable.
|
||||
@ -27,7 +28,7 @@ DB::UInt64 randomSeed()
|
||||
SipHash hash;
|
||||
hash.update(times.tv_nsec);
|
||||
hash.update(times.tv_sec);
|
||||
hash.update(getpid());
|
||||
hash.update(getThreadId());
|
||||
hash.update(&times);
|
||||
return hash.get64();
|
||||
}
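The hunks above switch the clock source to CLOCK_MONOTONIC and add the thread id into the hash; the general scheme is to mix a timestamp with process/thread identity through a hash before using it as a seed. A rough self-contained sketch of that scheme follows; it substitutes a splitmix-style combiner and std::hash for ClickHouse's SipHash and getThreadId, so treat it as an illustration only, not the real function.

#include <cstdint>
#include <ctime>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <thread>
#include <unistd.h>

static uint64_t mix(uint64_t h, uint64_t v)
{
    /// Simple splitmix/hash_combine-style mixing step.
    h ^= v + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
    return h;
}

uint64_t randomSeedSketch()
{
    timespec times{};
    if (clock_gettime(CLOCK_MONOTONIC, &times))
        throw std::runtime_error("Cannot clock_gettime");

    uint64_t seed = 0;
    seed = mix(seed, static_cast<uint64_t>(times.tv_nsec));
    seed = mix(seed, static_cast<uint64_t>(times.tv_sec));
    seed = mix(seed, static_cast<uint64_t>(getpid()));
    seed = mix(seed, std::hash<std::thread::id>{}(std::this_thread::get_id()));
    return seed;
}

int main()
{
    std::cout << randomSeedSketch() << '\n';
}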
|
||||
|
@ -350,7 +350,7 @@ class IColumn;
|
||||
M(UInt64, max_live_view_insert_blocks_before_refresh, 64, "Limit maximum number of inserted blocks after which mergeable blocks are dropped and query is re-executed.", 0) \
|
||||
M(UInt64, min_free_disk_space_for_temporary_data, 0, "The minimum disk space to keep while writing temporary data used in external sorting and aggregation.", 0) \
|
||||
\
|
||||
M(DefaultDatabaseEngine, default_database_engine, DefaultDatabaseEngine::Ordinary, "Default database engine.", 0) \
|
||||
M(DefaultDatabaseEngine, default_database_engine, DefaultDatabaseEngine::Atomic, "Default database engine.", 0) \
|
||||
M(Bool, show_table_uuid_in_table_create_query_if_not_nil, false, "For tables in databases with Engine=Atomic show UUID of the table in its CREATE query.", 0) \
|
||||
M(Bool, enable_scalar_subquery_optimization, true, "If it is set to true, prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once.", 0) \
|
||||
M(Bool, optimize_trivial_count_query, true, "Process trivial 'SELECT count() FROM table' query from metadata.", 0) \
|
||||
|
@ -926,7 +926,7 @@ void CacheDictionary::update(UpdateUnitPtr & update_unit_ptr) const
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
update_unit_ptr->getPresentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callPresentIdHandler(id, cell_idx);
|
||||
/// mark corresponding id as found
|
||||
remaining_ids[id] = 1;
|
||||
}
|
||||
@ -988,9 +988,9 @@ void CacheDictionary::update(UpdateUnitPtr & update_unit_ptr) const
|
||||
if (was_default)
|
||||
cell.setDefault();
|
||||
if (was_default)
|
||||
update_unit_ptr->getAbsentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callAbsentIdHandler(id, cell_idx);
|
||||
else
|
||||
update_unit_ptr->getPresentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callPresentIdHandler(id, cell_idx);
|
||||
continue;
|
||||
}
|
||||
/// We don't have expired data for that `id` so all we can do is to rethrow `last_exception`.
|
||||
@ -1022,7 +1022,7 @@ void CacheDictionary::update(UpdateUnitPtr & update_unit_ptr) const
|
||||
setDefaultAttributeValue(attribute, cell_idx);
|
||||
|
||||
/// inform caller that the cell has not been found
|
||||
update_unit_ptr->getAbsentIdHandler()(id, cell_idx);
|
||||
update_unit_ptr->callAbsentIdHandler(id, cell_idx);
|
||||
}
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedMiss, not_found_num);
|
||||
|
@ -399,16 +399,18 @@ private:
|
||||
absent_id_handler([](Key, size_t){}){}
|
||||
|
||||
|
||||
PresentIdHandler getPresentIdHandler()
|
||||
void callPresentIdHandler(Key key, size_t cell_idx)
|
||||
{
|
||||
std::lock_guard lock(callback_mutex);
|
||||
return can_use_callback ? present_id_handler : PresentIdHandler{};
|
||||
if (can_use_callback)
|
||||
present_id_handler(key, cell_idx);
|
||||
}
|
||||
|
||||
AbsentIdHandler getAbsentIdHandler()
|
||||
void callAbsentIdHandler(Key key, size_t cell_idx)
|
||||
{
|
||||
std::lock_guard lock(callback_mutex);
|
||||
return can_use_callback ? absent_id_handler : AbsentIdHandler{};
|
||||
if (can_use_callback)
|
||||
absent_id_handler(key, cell_idx);
|
||||
}
|
||||
|
||||
std::vector<Key> requested_ids;
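The hunk above replaces "return the handler, then call it later" with "call the handler while holding the callback mutex". A compact sketch of why that matters, using a hypothetical UpdateUnitSketch type rather than the real CacheDictionary code: the enabled-check and the invocation happen atomically under the lock, so a callback that has just been disabled can no longer be invoked.

#include <functional>
#include <iostream>
#include <mutex>

class UpdateUnitSketch
{
public:
    explicit UpdateUnitSketch(std::function<void(int)> handler_) : handler(std::move(handler_)) {}

    /// The owner calls this when the callback's targets are about to go away.
    void disableCallbacks()
    {
        std::lock_guard lock(mutex);
        can_use_callback = false;
    }

    /// Call (rather than return) the handler, so the check and the call are one atomic step.
    void callHandler(int id)
    {
        std::lock_guard lock(mutex);
        if (can_use_callback)
            handler(id);
    }

private:
    std::mutex mutex;
    bool can_use_callback = true;
    std::function<void(int)> handler;
};

int main()
{
    UpdateUnitSketch unit([](int id) { std::cout << "updated id " << id << '\n'; });
    unit.callHandler(42);     /// Invoked: callbacks are still enabled.
    unit.disableCallbacks();
    unit.callHandler(43);     /// Silently skipped: would have raced in the "return the handler" scheme.
}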
|
||||
|
@ -148,7 +148,9 @@ void CacheDictionary::getItemsNumberImpl(
|
||||
std::begin(cache_expired_ids), std::end(cache_expired_ids),
|
||||
std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
|
||||
|
||||
auto on_cell_updated = [&] (const auto id, const auto cell_idx)
|
||||
auto on_cell_updated =
|
||||
[&attribute_array, &cache_not_found_ids, &cache_expired_ids, &out]
|
||||
(const auto id, const auto cell_idx)
|
||||
{
|
||||
const auto attribute_value = attribute_array[cell_idx];
|
||||
|
||||
|
@ -80,7 +80,7 @@ public:
|
||||
DataTypePtr getReturnType(const ColumnsWithTypeAndName &) const override { return {}; } // Not used
|
||||
|
||||
bool useDefaultImplementationForNulls() const override { return false; }
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return true; }
|
||||
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
|
||||
|
||||
bool isVariadic() const override { return true; }
|
||||
size_t getNumberOfArguments() const override { return 0; }
|
||||
|
@ -623,4 +623,21 @@ const std::string & Cluster::ShardInfo::pathForInsert(bool prefer_localhost_repl
|
||||
return dir_name_for_internal_replication_with_local;
|
||||
}
|
||||
|
||||
bool Cluster::maybeCrossReplication() const
|
||||
{
|
||||
/// Cluster can be used for cross-replication if some replicas have different default database names,
|
||||
/// so one clickhouse-server instance can contain multiple replicas.
|
||||
|
||||
if (addresses_with_failover.empty())
|
||||
return false;
|
||||
|
||||
const String & database_name = addresses_with_failover.front().front().default_database;
|
||||
for (const auto & shard : addresses_with_failover)
|
||||
for (const auto & replica : shard)
|
||||
if (replica.default_database != database_name)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
}
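A standalone sketch of the heuristic implemented above, over hypothetical simplified types (Replica/Shard instead of the real Cluster internals): the cluster is treated as possibly cross-replicated as soon as two replicas declare different default databases.

#include <iostream>
#include <string>
#include <vector>

struct Replica { std::string host; std::string default_database; };
using Shard = std::vector<Replica>;

bool maybeCrossReplication(const std::vector<Shard> & shards)
{
    if (shards.empty() || shards.front().empty())
        return false;

    const std::string & first_db = shards.front().front().default_database;
    for (const auto & shard : shards)
        for (const auto & replica : shard)
            if (replica.default_database != first_db)
                return true;   /// Different default DBs: one server may host several replicas.
    return false;
}

int main()
{
    std::vector<Shard> cluster = {
        {{"host1", "shard_0"}, {"host2", "shard_0"}},
        {{"host2", "shard_1"}, {"host1", "shard_1"}},
    };
    std::cout << std::boolalpha << maybeCrossReplication(cluster) << '\n';   /// true
}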
|
||||
|
@ -193,6 +193,10 @@ public:
|
||||
/// Get a new Cluster that contains all servers (all shards with all replicas) from existing cluster as independent shards.
|
||||
std::unique_ptr<Cluster> getClusterWithReplicasAsShards(const Settings & settings) const;
|
||||
|
||||
/// Returns false if the cluster configuration doesn't allow using it for cross-replication.
|
||||
/// NOTE: true does not mean that it's actually a cross-replication cluster.
|
||||
bool maybeCrossReplication() const;
|
||||
|
||||
private:
|
||||
using SlotToShard = std::vector<UInt64>;
|
||||
SlotToShard slot_to_shard;
|
||||
|
@ -525,7 +525,7 @@ static bool allowDictJoin(StoragePtr joined_storage, const Context & context, St
|
||||
if (!dict)
|
||||
return false;
|
||||
|
||||
dict_name = dict->dictionaryName();
|
||||
dict_name = dict->resolvedDictionaryName();
|
||||
auto dictionary = context.getExternalDictionariesLoader().getDictionary(dict_name);
|
||||
if (!dictionary)
|
||||
return false;
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <Common/escapeForFileName.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
#include <Core/Defines.h>
|
||||
#include <Core/Settings.h>
|
||||
@ -853,17 +854,60 @@ BlockIO InterpreterCreateQuery::createDictionary(ASTCreateQuery & create)
|
||||
return {};
|
||||
}
|
||||
|
||||
void InterpreterCreateQuery::prepareOnClusterQuery(ASTCreateQuery & create, const Context & context, const String & cluster_name)
|
||||
{
|
||||
if (create.attach)
|
||||
return;
|
||||
|
||||
/// For CREATE query generate UUID on initiator, so it will be the same on all hosts.
|
||||
/// It will be ignored if database does not support UUIDs.
|
||||
if (create.uuid == UUIDHelpers::Nil)
|
||||
create.uuid = UUIDHelpers::generateV4();
|
||||
|
||||
/// For cross-replication cluster we cannot use UUID in replica path.
|
||||
String cluster_name_expanded = context.getMacros()->expand(cluster_name);
|
||||
ClusterPtr cluster = context.getCluster(cluster_name_expanded);
|
||||
|
||||
if (cluster->maybeCrossReplication())
|
||||
{
|
||||
/// Check that {uuid} macro is not used in zookeeper_path for ReplicatedMergeTree.
|
||||
/// Otherwise replicas will generate different paths.
|
||||
if (!create.storage)
|
||||
return;
|
||||
if (!create.storage->engine)
|
||||
return;
|
||||
if (!startsWith(create.storage->engine->name, "Replicated"))
|
||||
return;
|
||||
|
||||
bool has_explicit_zk_path_arg = create.storage->engine->arguments &&
|
||||
create.storage->engine->arguments->children.size() >= 2 &&
|
||||
create.storage->engine->arguments->children[0]->as<ASTLiteral>() &&
|
||||
create.storage->engine->arguments->children[0]->as<ASTLiteral>()->value.getType() == Field::Types::String;
|
||||
|
||||
if (has_explicit_zk_path_arg)
|
||||
{
|
||||
String zk_path = create.storage->engine->arguments->children[0]->as<ASTLiteral>()->value.get<String>();
|
||||
Macros::MacroExpansionInfo info;
|
||||
info.uuid = create.uuid;
|
||||
info.ignore_unknown = true;
|
||||
context.getMacros()->expand(zk_path, info);
|
||||
if (!info.expanded_uuid)
|
||||
return;
|
||||
}
|
||||
|
||||
throw Exception("Seems like cluster is configured for cross-replication, "
|
||||
"but zookeeper_path for ReplicatedMergeTree is not specified or contains {uuid} macro. "
|
||||
"It's not supported for cross replication, because tables must have different UUIDs. "
|
||||
"Please specify unique zookeeper_path explicitly.", ErrorCodes::INCORRECT_QUERY);
|
||||
}
|
||||
}
|
||||
|
||||
BlockIO InterpreterCreateQuery::execute()
|
||||
{
|
||||
auto & create = query_ptr->as<ASTCreateQuery &>();
|
||||
if (!create.cluster.empty())
|
||||
{
|
||||
/// Allows to execute ON CLUSTER queries during version upgrade
|
||||
bool force_backward_compatibility = !context.getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil;
|
||||
/// For CREATE query generate UUID on initiator, so it will be the same on all hosts.
|
||||
/// It will be ignored if database does not support UUIDs.
|
||||
if (!force_backward_compatibility && !create.attach && create.uuid == UUIDHelpers::Nil)
|
||||
create.uuid = UUIDHelpers::generateV4();
|
||||
prepareOnClusterQuery(create, context, create.cluster);
|
||||
return executeDDLQueryOnCluster(query_ptr, context, getRequiredAccess());
|
||||
}
|
||||
|
||||
|
@ -55,6 +55,8 @@ public:
|
||||
static ColumnsDescription getColumnsDescription(const ASTExpressionList & columns, const Context & context, bool sanity_check_compression_codecs);
|
||||
static ConstraintsDescription getConstraintsDescription(const ASTExpressionList * constraints);
|
||||
|
||||
static void prepareOnClusterQuery(ASTCreateQuery & create, const Context & context, const String & cluster_name);
|
||||
|
||||
private:
|
||||
struct TableProperties
|
||||
{
|
||||
|
@ -401,7 +401,7 @@ void ProcessList::killAllQueries()
|
||||
|
||||
QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_events, bool get_settings) const
|
||||
{
|
||||
QueryStatusInfo res;
|
||||
QueryStatusInfo res{};
|
||||
|
||||
res.query = query;
|
||||
res.client_info = client_info;
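The change from `QueryStatusInfo res;` to `QueryStatusInfo res{};` switches default-initialization to value-initialization. A tiny illustration with a hypothetical struct (any scalar member without a default member initializer behaves the same way):

#include <iostream>

struct Info
{
    int elapsed_ms;     /// No default member initializer.
    bool is_cancelled;
};

int main()
{
    Info a{};   /// Value-initialized: elapsed_ms == 0, is_cancelled == false.
    std::cout << a.elapsed_ms << ' ' << a.is_cancelled << '\n';

    /// `Info b;` would leave both members indeterminate; reading them before assignment is
    /// undefined behaviour, which is exactly the kind of bug the `res{}` form avoids.
}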
|
||||
|
@ -92,6 +92,12 @@ String StorageDictionary::generateNamesAndTypesDescription(const NamesAndTypesLi
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
String StorageDictionary::resolvedDictionaryName() const
|
||||
{
|
||||
if (location == Location::SameDatabaseAndNameAsDictionary)
|
||||
return dictionary_name;
|
||||
return DatabaseCatalog::instance().resolveDictionaryName(dictionary_name);
|
||||
}
|
||||
|
||||
StorageDictionary::StorageDictionary(
|
||||
const StorageID & table_id_,
|
||||
@ -132,7 +138,7 @@ Pipe StorageDictionary::read(
|
||||
const size_t max_block_size,
|
||||
const unsigned /*threads*/)
|
||||
{
|
||||
auto dictionary = context.getExternalDictionariesLoader().getDictionary(dictionary_name);
|
||||
auto dictionary = context.getExternalDictionariesLoader().getDictionary(resolvedDictionaryName());
|
||||
auto stream = dictionary->getBlockInputStream(column_names, max_block_size);
|
||||
/// TODO: update dictionary interface for processors.
|
||||
return Pipe(std::make_shared<SourceFromInputStream>(stream));
|
||||
@ -152,7 +158,8 @@ void registerStorageDictionary(StorageFactory & factory)
|
||||
|
||||
if (!args.attach)
|
||||
{
|
||||
const auto & dictionary = args.context.getExternalDictionariesLoader().getDictionary(dictionary_name);
|
||||
auto resolved = DatabaseCatalog::instance().resolveDictionaryName(dictionary_name);
|
||||
const auto & dictionary = args.context.getExternalDictionariesLoader().getDictionary(resolved);
|
||||
const DictionaryStructure & dictionary_structure = dictionary->getStructure();
|
||||
checkNamesAndTypesCompatibleWithDictionary(dictionary_name, args.columns, dictionary_structure);
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ public:
|
||||
static String generateNamesAndTypesDescription(const NamesAndTypesList & list);
|
||||
|
||||
const String & dictionaryName() const { return dictionary_name; }
|
||||
String resolvedDictionaryName() const;
|
||||
|
||||
/// Specifies where the table is located relative to the dictionary.
|
||||
enum class Location
|
||||
|
@ -52,7 +52,6 @@ namespace ErrorCodes
|
||||
extern const int UNKNOWN_IDENTIFIER;
|
||||
extern const int INCORRECT_FILE_NAME;
|
||||
extern const int FILE_DOESNT_EXIST;
|
||||
extern const int TIMEOUT_EXCEEDED;
|
||||
}
|
||||
|
||||
namespace
|
||||
@ -200,17 +199,6 @@ StorageFile::StorageFile(CommonArguments args)
|
||||
setInMemoryMetadata(storage_metadata);
|
||||
}
|
||||
|
||||
|
||||
static std::chrono::seconds getLockTimeout(const Context & context)
|
||||
{
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
|
||||
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
|
||||
lock_timeout = settings.max_execution_time.totalSeconds();
|
||||
return std::chrono::seconds{lock_timeout};
|
||||
}
|
||||
|
||||
|
||||
class StorageFileSource : public SourceWithProgress
|
||||
{
|
||||
public:
|
||||
@ -257,9 +245,7 @@ public:
|
||||
{
|
||||
if (storage->use_table_fd)
|
||||
{
|
||||
unique_lock = std::unique_lock(storage->rwlock, getLockTimeout(context));
|
||||
if (!unique_lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
unique_lock = std::unique_lock(storage->rwlock);
|
||||
|
||||
/// We could use common ReadBuffer and WriteBuffer in storage to leverage cache
|
||||
/// and add ability to seek unseekable files, but cache sync isn't supported.
|
||||
@ -278,9 +264,7 @@ public:
|
||||
}
|
||||
else
|
||||
{
|
||||
shared_lock = std::shared_lock(storage->rwlock, getLockTimeout(context));
|
||||
if (!shared_lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
shared_lock = std::shared_lock(storage->rwlock);
|
||||
}
|
||||
}
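These hunks are where the two sides diverge on lock acquisition: one side takes `rwlock` with a timeout derived from the query settings and handles failure explicitly, the other blocks unconditionally. A self-contained sketch of the timed variant, with the timeout derivation reduced to two plain integers instead of the real Settings object:

#include <chrono>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <shared_mutex>
#include <stdexcept>

std::shared_timed_mutex rwlock;

/// Same shape as the getLockTimeout() helper in the hunks: never wait longer than the query may run.
std::chrono::seconds getLockTimeout(int64_t lock_acquire_timeout_sec, int64_t max_execution_time_sec)
{
    int64_t timeout = lock_acquire_timeout_sec;
    if (max_execution_time_sec != 0 && max_execution_time_sec < timeout)
        timeout = max_execution_time_sec;
    return std::chrono::seconds{timeout};
}

void readSomething()
{
    std::shared_lock lock(rwlock, getLockTimeout(120, 10));
    if (!lock)  /// owns_lock() == false: the timeout expired instead of blocking forever.
        throw std::runtime_error("Lock timeout exceeded");
    std::cout << "reading under a shared lock\n";
}

void writeSomething()
{
    std::unique_lock lock(rwlock, getLockTimeout(120, 10));
    if (!lock)
        throw std::runtime_error("Lock timeout exceeded");
    std::cout << "writing under an exclusive lock\n";
}

int main()
{
    readSomething();
    writeSomething();
}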
|
||||
|
||||
@ -389,8 +373,8 @@ private:
|
||||
|
||||
bool finished_generate = false;
|
||||
|
||||
std::shared_lock<std::shared_timed_mutex> shared_lock;
|
||||
std::unique_lock<std::shared_timed_mutex> unique_lock;
|
||||
std::shared_lock<std::shared_mutex> shared_lock;
|
||||
std::unique_lock<std::shared_mutex> unique_lock;
|
||||
};
|
||||
|
||||
|
||||
@ -433,7 +417,7 @@ Pipe StorageFile::read(
|
||||
|
||||
for (size_t i = 0; i < num_streams; ++i)
|
||||
pipes.emplace_back(std::make_shared<StorageFileSource>(
|
||||
this_ptr, metadata_snapshot, context, max_block_size, files_info, metadata_snapshot->getColumns().getDefaults()));
|
||||
this_ptr, metadata_snapshot, context, max_block_size, files_info, metadata_snapshot->getColumns().getDefaults()));
|
||||
|
||||
return Pipe::unitePipes(std::move(pipes));
|
||||
}
|
||||
@ -445,16 +429,12 @@ public:
|
||||
explicit StorageFileBlockOutputStream(
|
||||
StorageFile & storage_,
|
||||
const StorageMetadataPtr & metadata_snapshot_,
|
||||
std::unique_lock<std::shared_timed_mutex> && lock_,
|
||||
const CompressionMethod compression_method,
|
||||
const Context & context)
|
||||
: storage(storage_)
|
||||
, metadata_snapshot(metadata_snapshot_)
|
||||
, lock(std::move(lock_))
|
||||
, lock(storage.rwlock)
|
||||
{
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
std::unique_ptr<WriteBufferFromFileDescriptor> naked_buffer = nullptr;
|
||||
if (storage.use_table_fd)
|
||||
{
|
||||
@ -508,7 +488,7 @@ public:
|
||||
private:
|
||||
StorageFile & storage;
|
||||
StorageMetadataPtr metadata_snapshot;
|
||||
std::unique_lock<std::shared_timed_mutex> lock;
|
||||
std::unique_lock<std::shared_mutex> lock;
|
||||
std::unique_ptr<WriteBuffer> write_buf;
|
||||
BlockOutputStreamPtr writer;
|
||||
bool prefix_written{false};
|
||||
@ -526,7 +506,7 @@ BlockOutputStreamPtr StorageFile::write(
|
||||
if (!paths.empty())
|
||||
path = paths[0];
|
||||
|
||||
return std::make_shared<StorageFileBlockOutputStream>(*this, metadata_snapshot, std::unique_lock{rwlock, getLockTimeout(context)},
|
||||
return std::make_shared<StorageFileBlockOutputStream>(*this, metadata_snapshot,
|
||||
chooseCompressionMethod(path, compression_method), context);
|
||||
}
|
||||
|
||||
@ -549,6 +529,8 @@ void StorageFile::rename(const String & new_path_to_table_data, const StorageID
|
||||
if (path_new == paths[0])
|
||||
return;
|
||||
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
Poco::File(Poco::Path(path_new).parent()).createDirectories();
|
||||
Poco::File(paths[0]).renameTo(path_new);
|
||||
|
||||
@ -565,6 +547,8 @@ void StorageFile::truncate(
|
||||
if (paths.size() != 1)
|
||||
throw Exception("Can't truncate table '" + getStorageID().getNameForLogs() + "' in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);
|
||||
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
if (use_table_fd)
|
||||
{
|
||||
if (0 != ::ftruncate(table_fd, 0))
|
||||
|
@ -89,7 +89,7 @@ private:
|
||||
std::atomic<bool> table_fd_was_used{false}; /// To detect repeating reads from stdin
|
||||
off_t table_fd_init_offset = -1; /// Initial position of fd, used for repeating reads
|
||||
|
||||
mutable std::shared_timed_mutex rwlock;
|
||||
mutable std::shared_mutex rwlock;
|
||||
|
||||
Poco::Logger * log = &Poco::Logger::get("StorageFile");
|
||||
};
|
||||
|
@ -39,7 +39,6 @@ namespace DB
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int TIMEOUT_EXCEEDED;
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int DUPLICATE_COLUMN;
|
||||
extern const int SIZES_OF_MARKS_FILES_ARE_INCONSISTENT;
|
||||
@ -51,6 +50,7 @@ namespace ErrorCodes
|
||||
class LogSource final : public SourceWithProgress
|
||||
{
|
||||
public:
|
||||
|
||||
static Block getHeader(const NamesAndTypesList & columns)
|
||||
{
|
||||
Block res;
|
||||
@ -116,16 +116,13 @@ private:
|
||||
class LogBlockOutputStream final : public IBlockOutputStream
|
||||
{
|
||||
public:
|
||||
explicit LogBlockOutputStream(
|
||||
StorageLog & storage_, const StorageMetadataPtr & metadata_snapshot_, std::unique_lock<std::shared_timed_mutex> && lock_)
|
||||
explicit LogBlockOutputStream(StorageLog & storage_, const StorageMetadataPtr & metadata_snapshot_)
|
||||
: storage(storage_)
|
||||
, metadata_snapshot(metadata_snapshot_)
|
||||
, lock(std::move(lock_))
|
||||
, lock(storage.rwlock)
|
||||
, marks_stream(
|
||||
storage.disk->writeFile(storage.marks_file_path, 4096, WriteMode::Rewrite))
|
||||
{
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
}
|
||||
|
||||
~LogBlockOutputStream() override
|
||||
@ -152,7 +149,7 @@ public:
|
||||
private:
|
||||
StorageLog & storage;
|
||||
StorageMetadataPtr metadata_snapshot;
|
||||
std::unique_lock<std::shared_timed_mutex> lock;
|
||||
std::unique_lock<std::shared_mutex> lock;
|
||||
bool done = false;
|
||||
|
||||
struct Stream
|
||||
@ -510,11 +507,9 @@ void StorageLog::addFiles(const String & column_name, const IDataType & type)
|
||||
}
|
||||
|
||||
|
||||
void StorageLog::loadMarks(std::chrono::seconds lock_timeout)
|
||||
void StorageLog::loadMarks()
|
||||
{
|
||||
std::unique_lock lock(rwlock, lock_timeout);
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
if (loaded_marks)
|
||||
return;
|
||||
@ -557,6 +552,8 @@ void StorageLog::rename(const String & new_path_to_table_data, const StorageID &
|
||||
{
|
||||
assert(table_path != new_path_to_table_data);
|
||||
{
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
disk->moveDirectory(table_path, new_path_to_table_data);
|
||||
|
||||
table_path = new_path_to_table_data;
|
||||
@ -572,6 +569,8 @@ void StorageLog::rename(const String & new_path_to_table_data, const StorageID &
|
||||
|
||||
void StorageLog::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &)
|
||||
{
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
files.clear();
|
||||
file_count = 0;
|
||||
loaded_marks = false;
|
||||
@ -611,17 +610,6 @@ const StorageLog::Marks & StorageLog::getMarksWithRealRowCount(const StorageMeta
|
||||
return it->second.marks;
|
||||
}
|
||||
|
||||
|
||||
static std::chrono::seconds getLockTimeout(const Context & context)
|
||||
{
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
|
||||
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
|
||||
lock_timeout = settings.max_execution_time.totalSeconds();
|
||||
return std::chrono::seconds{lock_timeout};
|
||||
}
|
||||
|
||||
|
||||
Pipe StorageLog::read(
|
||||
const Names & column_names,
|
||||
const StorageMetadataPtr & metadata_snapshot,
|
||||
@ -632,15 +620,11 @@ Pipe StorageLog::read(
|
||||
unsigned num_streams)
|
||||
{
|
||||
metadata_snapshot->check(column_names, getVirtuals(), getStorageID());
|
||||
|
||||
auto lock_timeout = getLockTimeout(context);
|
||||
loadMarks(lock_timeout);
|
||||
loadMarks();
|
||||
|
||||
NamesAndTypesList all_columns = Nested::collect(metadata_snapshot->getColumns().getAllPhysical().addTypes(column_names));
|
||||
|
||||
std::shared_lock lock(rwlock, lock_timeout);
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
Pipes pipes;
|
||||
|
||||
@ -669,28 +653,18 @@ Pipe StorageLog::read(
|
||||
max_read_buffer_size));
|
||||
}
|
||||
|
||||
/// No need to hold the lock while reading, because we read a fixed range of data that does not change while more data is appended.
|
||||
return Pipe::unitePipes(std::move(pipes));
|
||||
}
|
||||
|
||||
BlockOutputStreamPtr StorageLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & context)
|
||||
BlockOutputStreamPtr StorageLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & /*context*/)
|
||||
{
|
||||
auto lock_timeout = getLockTimeout(context);
|
||||
loadMarks(lock_timeout);
|
||||
|
||||
std::unique_lock lock(rwlock, lock_timeout);
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
return std::make_shared<LogBlockOutputStream>(*this, metadata_snapshot, std::move(lock));
|
||||
loadMarks();
|
||||
return std::make_shared<LogBlockOutputStream>(*this, metadata_snapshot);
|
||||
}
|
||||
|
||||
CheckResults StorageLog::checkData(const ASTPtr & /* query */, const Context & context)
|
||||
CheckResults StorageLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
|
||||
{
|
||||
std::shared_lock lock(rwlock, getLockTimeout(context));
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
return file_checker.check();
|
||||
}
|
||||
|
||||
|
@ -83,7 +83,7 @@ private:
|
||||
DiskPtr disk;
|
||||
String table_path;
|
||||
|
||||
mutable std::shared_timed_mutex rwlock;
|
||||
mutable std::shared_mutex rwlock;
|
||||
|
||||
Files files;
|
||||
|
||||
@ -104,7 +104,7 @@ private:
|
||||
/// Read marks files if they are not already read.
|
||||
/// It is done lazily, so that with a large number of tables, the server starts quickly.
|
||||
/// Must not be called while `rwlock` is held for writing.
|
||||
void loadMarks(std::chrono::seconds lock_timeout);
|
||||
void loadMarks();
|
||||
|
||||
/** For normal columns, the number of rows in the block is specified in the marks.
|
||||
* For array columns and nested structures, there are more than one group of marks that correspond to different files
|
||||
|
@ -47,13 +47,13 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int INCORRECT_FILE_NAME;
|
||||
extern const int TIMEOUT_EXCEEDED;
|
||||
}
|
||||
|
||||
|
||||
class StripeLogSource final : public SourceWithProgress
|
||||
{
|
||||
public:
|
||||
|
||||
static Block getHeader(
|
||||
StorageStripeLog & storage,
|
||||
const StorageMetadataPtr & metadata_snapshot,
|
||||
@ -157,11 +157,10 @@ private:
|
||||
class StripeLogBlockOutputStream final : public IBlockOutputStream
|
||||
{
|
||||
public:
|
||||
explicit StripeLogBlockOutputStream(
|
||||
StorageStripeLog & storage_, const StorageMetadataPtr & metadata_snapshot_, std::unique_lock<std::shared_timed_mutex> && lock_)
|
||||
explicit StripeLogBlockOutputStream(StorageStripeLog & storage_, const StorageMetadataPtr & metadata_snapshot_)
|
||||
: storage(storage_)
|
||||
, metadata_snapshot(metadata_snapshot_)
|
||||
, lock(std::move(lock_))
|
||||
, lock(storage.rwlock)
|
||||
, data_out_file(storage.table_path + "data.bin")
|
||||
, data_out_compressed(storage.disk->writeFile(data_out_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append))
|
||||
, data_out(std::make_unique<CompressedWriteBuffer>(
|
||||
@ -171,8 +170,6 @@ public:
|
||||
, index_out(std::make_unique<CompressedWriteBuffer>(*index_out_compressed))
|
||||
, block_out(*data_out, 0, metadata_snapshot->getSampleBlock(), false, index_out.get(), storage.disk->getFileSize(data_out_file))
|
||||
{
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
}
|
||||
|
||||
~StripeLogBlockOutputStream() override
|
||||
@ -226,7 +223,7 @@ public:
|
||||
private:
|
||||
StorageStripeLog & storage;
|
||||
StorageMetadataPtr metadata_snapshot;
|
||||
std::unique_lock<std::shared_timed_mutex> lock;
|
||||
std::unique_lock<std::shared_mutex> lock;
|
||||
|
||||
String data_out_file;
|
||||
std::unique_ptr<WriteBuffer> data_out_compressed;
|
||||
@ -289,6 +286,8 @@ void StorageStripeLog::rename(const String & new_path_to_table_data, const Stora
|
||||
{
|
||||
assert(table_path != new_path_to_table_data);
|
||||
{
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
disk->moveDirectory(table_path, new_path_to_table_data);
|
||||
|
||||
table_path = new_path_to_table_data;
|
||||
@ -298,16 +297,6 @@ void StorageStripeLog::rename(const String & new_path_to_table_data, const Stora
|
||||
}
|
||||
|
||||
|
||||
static std::chrono::seconds getLockTimeout(const Context & context)
|
||||
{
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
|
||||
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
|
||||
lock_timeout = settings.max_execution_time.totalSeconds();
|
||||
return std::chrono::seconds{lock_timeout};
|
||||
}
|
||||
|
||||
|
||||
Pipe StorageStripeLog::read(
|
||||
const Names & column_names,
|
||||
const StorageMetadataPtr & metadata_snapshot,
|
||||
@ -317,9 +306,7 @@ Pipe StorageStripeLog::read(
|
||||
const size_t /*max_block_size*/,
|
||||
unsigned num_streams)
|
||||
{
|
||||
std::shared_lock lock(rwlock, getLockTimeout(context));
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
metadata_snapshot->check(column_names, getVirtuals(), getStorageID());
|
||||
|
||||
@ -358,28 +345,24 @@ Pipe StorageStripeLog::read(
|
||||
}
|
||||
|
||||
|
||||
BlockOutputStreamPtr StorageStripeLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & context)
|
||||
BlockOutputStreamPtr StorageStripeLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & /*context*/)
|
||||
{
|
||||
std::unique_lock lock(rwlock, getLockTimeout(context));
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
return std::make_shared<StripeLogBlockOutputStream>(*this, metadata_snapshot, std::move(lock));
|
||||
return std::make_shared<StripeLogBlockOutputStream>(*this, metadata_snapshot);
|
||||
}
|
||||
|
||||
|
||||
CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Context & context)
|
||||
CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
|
||||
{
|
||||
std::shared_lock lock(rwlock, getLockTimeout(context));
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
return file_checker.check();
|
||||
}
|
||||
|
||||
void StorageStripeLog::truncate(const ASTPtr &, const StorageMetadataPtr &, const Context &, TableExclusiveLockHolder &)
|
||||
{
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
disk->clearDirectory(table_path);
|
||||
|
||||
file_checker = FileChecker{disk, table_path + "sizes.json"};
|
||||
}
|
||||
|
||||
|
@ -67,7 +67,7 @@ private:
|
||||
size_t max_compress_block_size;
|
||||
|
||||
FileChecker file_checker;
|
||||
mutable std::shared_timed_mutex rwlock;
|
||||
mutable std::shared_mutex rwlock;
|
||||
|
||||
Poco::Logger * log;
|
||||
};
|
||||
|
@ -13,7 +13,6 @@
|
||||
|
||||
#include <IO/ReadBufferFromFileBase.h>
|
||||
#include <IO/WriteBufferFromFileBase.h>
|
||||
#include <IO/LimitReadBuffer.h>
|
||||
#include <Compression/CompressionFactory.h>
|
||||
#include <Compression/CompressedReadBuffer.h>
|
||||
#include <Compression/CompressedWriteBuffer.h>
|
||||
@ -47,7 +46,6 @@ namespace DB
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int TIMEOUT_EXCEEDED;
|
||||
extern const int DUPLICATE_COLUMN;
|
||||
extern const int INCORRECT_FILE_NAME;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
@ -57,6 +55,7 @@ namespace ErrorCodes
|
||||
class TinyLogSource final : public SourceWithProgress
|
||||
{
|
||||
public:
|
||||
|
||||
static Block getHeader(const NamesAndTypesList & columns)
|
||||
{
|
||||
Block res;
|
||||
@ -67,17 +66,10 @@ public:
|
||||
return Nested::flatten(res);
|
||||
}
|
||||
|
||||
TinyLogSource(
|
||||
size_t block_size_,
|
||||
const NamesAndTypesList & columns_,
|
||||
StorageTinyLog & storage_,
|
||||
size_t max_read_buffer_size_,
|
||||
FileChecker::Map file_sizes_)
|
||||
TinyLogSource(size_t block_size_, const NamesAndTypesList & columns_, StorageTinyLog & storage_, size_t max_read_buffer_size_)
|
||||
: SourceWithProgress(getHeader(columns_))
|
||||
, block_size(block_size_), columns(columns_), storage(storage_)
|
||||
, max_read_buffer_size(max_read_buffer_size_), file_sizes(std::move(file_sizes_))
|
||||
{
|
||||
}
|
||||
, block_size(block_size_), columns(columns_), storage(storage_), lock(storage_.rwlock)
|
||||
, max_read_buffer_size(max_read_buffer_size_) {}
|
||||
|
||||
String getName() const override { return "TinyLog"; }
|
||||
|
||||
@ -88,21 +80,19 @@ private:
|
||||
size_t block_size;
|
||||
NamesAndTypesList columns;
|
||||
StorageTinyLog & storage;
|
||||
std::shared_lock<std::shared_mutex> lock;
|
||||
bool is_finished = false;
|
||||
size_t max_read_buffer_size;
|
||||
FileChecker::Map file_sizes;
|
||||
|
||||
struct Stream
|
||||
{
|
||||
Stream(const DiskPtr & disk, const String & data_path, size_t max_read_buffer_size_, size_t file_size)
|
||||
Stream(const DiskPtr & disk, const String & data_path, size_t max_read_buffer_size_)
|
||||
: plain(disk->readFile(data_path, std::min(max_read_buffer_size_, disk->getFileSize(data_path)))),
|
||||
limited(std::make_unique<LimitReadBuffer>(*plain, file_size, false)),
|
||||
compressed(*plain)
|
||||
{
|
||||
}
|
||||
|
||||
std::unique_ptr<ReadBuffer> plain;
|
||||
std::unique_ptr<ReadBuffer> limited;
|
||||
CompressedReadBuffer compressed;
|
||||
};
|
||||
|
||||
@ -120,14 +110,9 @@ private:
|
||||
class TinyLogBlockOutputStream final : public IBlockOutputStream
|
||||
{
|
||||
public:
|
||||
explicit TinyLogBlockOutputStream(
|
||||
StorageTinyLog & storage_,
|
||||
const StorageMetadataPtr & metadata_snapshot_,
|
||||
std::unique_lock<std::shared_timed_mutex> && lock_)
|
||||
: storage(storage_), metadata_snapshot(metadata_snapshot_), lock(std::move(lock_))
|
||||
explicit TinyLogBlockOutputStream(StorageTinyLog & storage_, const StorageMetadataPtr & metadata_snapshot_)
|
||||
: storage(storage_), metadata_snapshot(metadata_snapshot_), lock(storage_.rwlock)
|
||||
{
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
}
|
||||
|
||||
~TinyLogBlockOutputStream() override
|
||||
@ -155,7 +140,7 @@ public:
|
||||
private:
|
||||
StorageTinyLog & storage;
|
||||
StorageMetadataPtr metadata_snapshot;
|
||||
std::unique_lock<std::shared_timed_mutex> lock;
|
||||
std::unique_lock<std::shared_mutex> lock;
|
||||
bool done = false;
|
||||
|
||||
struct Stream
|
||||
@ -246,17 +231,13 @@ void TinyLogSource::readData(const String & name, const IDataType & type, IColum
|
||||
String stream_name = IDataType::getFileNameForStream(name, path);
|
||||
|
||||
if (!streams.count(stream_name))
|
||||
{
|
||||
String file_path = storage.files[stream_name].data_file_path;
|
||||
streams[stream_name] = std::make_unique<Stream>(
|
||||
storage.disk, file_path, max_read_buffer_size, file_sizes[fileName(file_path)]);
|
||||
}
|
||||
streams[stream_name] = std::make_unique<Stream>(storage.disk, storage.files[stream_name].data_file_path, max_read_buffer_size);
|
||||
|
||||
return &streams[stream_name]->compressed;
|
||||
};
|
||||
|
||||
if (deserialize_states.count(name) == 0)
|
||||
type.deserializeBinaryBulkStatePrefix(settings, deserialize_states[name]);
|
||||
type.deserializeBinaryBulkStatePrefix(settings, deserialize_states[name]);
|
||||
|
||||
type.deserializeBinaryBulkWithMultipleStreams(column, limit, settings, deserialize_states[name]);
|
||||
}
|
||||
@ -429,6 +410,8 @@ void StorageTinyLog::rename(const String & new_path_to_table_data, const Storage
|
||||
{
|
||||
assert(table_path != new_path_to_table_data);
|
||||
{
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
disk->moveDirectory(table_path, new_path_to_table_data);
|
||||
|
||||
table_path = new_path_to_table_data;
|
||||
@ -441,16 +424,6 @@ void StorageTinyLog::rename(const String & new_path_to_table_data, const Storage
|
||||
}
|
||||
|
||||
|
||||
static std::chrono::seconds getLockTimeout(const Context & context)
|
||||
{
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
|
||||
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
|
||||
lock_timeout = settings.max_execution_time.totalSeconds();
|
||||
return std::chrono::seconds{lock_timeout};
|
||||
}
|
||||
|
||||
|
||||
Pipe StorageTinyLog::read(
|
||||
const Names & column_names,
|
||||
const StorageMetadataPtr & metadata_snapshot,
|
||||
@ -464,40 +437,28 @@ Pipe StorageTinyLog::read(
|
||||
|
||||
// When reading, we lock the entire storage, because we only have one file
|
||||
// per column and can't modify it concurrently.
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
|
||||
std::shared_lock lock{rwlock, getLockTimeout(context)};
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
/// No need to hold the lock while reading, because we read a fixed range of data that does not change while more data is appended.
|
||||
return Pipe(std::make_shared<TinyLogSource>(
|
||||
max_block_size,
|
||||
Nested::collect(metadata_snapshot->getColumns().getAllPhysical().addTypes(column_names)),
|
||||
*this,
|
||||
settings.max_read_buffer_size,
|
||||
file_checker.getFileSizes()));
|
||||
max_block_size, Nested::collect(metadata_snapshot->getColumns().getAllPhysical().addTypes(column_names)), *this, context.getSettingsRef().max_read_buffer_size));
|
||||
}
|
||||
|
||||
|
||||
BlockOutputStreamPtr StorageTinyLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & context)
|
||||
BlockOutputStreamPtr StorageTinyLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & /*context*/)
|
||||
{
|
||||
return std::make_shared<TinyLogBlockOutputStream>(*this, metadata_snapshot, std::unique_lock{rwlock, getLockTimeout(context)});
|
||||
return std::make_shared<TinyLogBlockOutputStream>(*this, metadata_snapshot);
|
||||
}
|
||||
|
||||
|
||||
CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context & context)
|
||||
CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
|
||||
{
|
||||
std::shared_lock lock(rwlock, getLockTimeout(context));
|
||||
if (!lock)
|
||||
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
|
||||
std::shared_lock<std::shared_mutex> lock(rwlock);
|
||||
return file_checker.check();
|
||||
}
|
||||
|
||||
void StorageTinyLog::truncate(
|
||||
const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &)
|
||||
{
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
|
||||
disk->clearDirectory(table_path);
|
||||
|
||||
files.clear();
|
||||
@ -507,6 +468,14 @@ void StorageTinyLog::truncate(
|
||||
addFiles(column.name, *column.type);
|
||||
}
|
||||
|
||||
void StorageTinyLog::drop()
|
||||
{
|
||||
std::unique_lock<std::shared_mutex> lock(rwlock);
|
||||
if (disk->exists(table_path))
|
||||
disk->removeRecursive(table_path);
|
||||
files.clear();
|
||||
}
|
||||
|
||||
|
||||
void registerStorageTinyLog(StorageFactory & factory)
|
||||
{
|
||||
|
@ -43,6 +43,8 @@ public:
|
||||
|
||||
void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &) override;
|
||||
|
||||
void drop() override;
|
||||
|
||||
protected:
|
||||
StorageTinyLog(
|
||||
DiskPtr disk_,
|
||||
@ -68,7 +70,7 @@ private:
|
||||
Files files;
|
||||
|
||||
FileChecker file_checker;
|
||||
mutable std::shared_timed_mutex rwlock;
|
||||
mutable std::shared_mutex rwlock;
|
||||
|
||||
Poco::Logger * log;
|
||||
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/evaluateConstantExpression.h>
|
||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
|
||||
@ -42,7 +43,7 @@ NamesAndTypesList StorageSystemZooKeeper::getNamesAndTypes()
|
||||
}
|
||||
|
||||
|
||||
static bool extractPathImpl(const IAST & elem, String & res)
|
||||
static bool extractPathImpl(const IAST & elem, String & res, const Context & context)
|
||||
{
|
||||
const auto * function = elem.as<ASTFunction>();
|
||||
if (!function)
|
||||
@ -51,7 +52,7 @@ static bool extractPathImpl(const IAST & elem, String & res)
|
||||
if (function->name == "and")
|
||||
{
|
||||
for (const auto & child : function->arguments->children)
|
||||
if (extractPathImpl(*child, res))
|
||||
if (extractPathImpl(*child, res, context))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
@ -60,23 +61,24 @@ static bool extractPathImpl(const IAST & elem, String & res)
|
||||
if (function->name == "equals")
|
||||
{
|
||||
const auto & args = function->arguments->as<ASTExpressionList &>();
|
||||
const IAST * value;
|
||||
ASTPtr value;
|
||||
|
||||
if (args.children.size() != 2)
|
||||
return false;
|
||||
|
||||
const ASTIdentifier * ident;
|
||||
if ((ident = args.children.at(0)->as<ASTIdentifier>()))
|
||||
value = args.children.at(1).get();
|
||||
value = args.children.at(1);
|
||||
else if ((ident = args.children.at(1)->as<ASTIdentifier>()))
|
||||
value = args.children.at(0).get();
|
||||
value = args.children.at(0);
|
||||
else
|
||||
return false;
|
||||
|
||||
if (ident->name != "path")
|
||||
return false;
|
||||
|
||||
const auto * literal = value->as<ASTLiteral>();
|
||||
auto evaluated = evaluateConstantExpressionAsLiteral(value, context);
|
||||
const auto * literal = evaluated->as<ASTLiteral>();
|
||||
if (!literal)
|
||||
return false;
|
||||
|
||||
@ -93,20 +95,20 @@ static bool extractPathImpl(const IAST & elem, String & res)
|
||||
|
||||
/** Retrieve from the query a condition of the form `path = 'path'`, from conjunctions in the WHERE clause.
|
||||
*/
|
||||
static String extractPath(const ASTPtr & query)
|
||||
static String extractPath(const ASTPtr & query, const Context & context)
|
||||
{
|
||||
const auto & select = query->as<ASTSelectQuery &>();
|
||||
if (!select.where())
|
||||
return "";
|
||||
|
||||
String res;
|
||||
return extractPathImpl(*select.where(), res) ? res : "";
|
||||
return extractPathImpl(*select.where(), res, context) ? res : "";
|
||||
}
|
||||
|
||||
|
||||
void StorageSystemZooKeeper::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const
|
||||
{
|
||||
String path = extractPath(query_info.query);
|
||||
String path = extractPath(query_info.query, context);
|
||||
if (path.empty())
|
||||
throw Exception("SELECT from system.zookeeper table must contain condition like path = 'path' in WHERE clause.", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
|
@ -107,9 +107,9 @@ def remove_control_characters(s):
|
||||
return s
|
||||
|
||||
def get_db_engine(args):
|
||||
if args.atomic_db_engine:
|
||||
return " ENGINE=Atomic"
|
||||
return ""
|
||||
if args.db_engine:
|
||||
return " ENGINE=" + args.db_engine
|
||||
return "" # Will use default engine
|
||||
|
||||
def run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file):
|
||||
|
||||
@ -456,7 +456,7 @@ class BuildFlags(object):
|
||||
DEBUG = 'debug-build'
|
||||
UNBUNDLED = 'unbundled-build'
|
||||
RELEASE = 'release-build'
|
||||
DATABASE_ATOMIC = 'database-atomic'
|
||||
DATABASE_ORDINARY = 'database-ordinary'
|
||||
POLYMORPHIC_PARTS = 'polymorphic-parts'
|
||||
|
||||
|
||||
@ -501,8 +501,8 @@ def collect_build_flags(client):
|
||||
(stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.settings WHERE name = 'default_database_engine'")
|
||||
|
||||
if clickhouse_proc.returncode == 0:
|
||||
if 'Atomic' in stdout:
|
||||
result.append(BuildFlags.DATABASE_ATOMIC)
|
||||
if 'Ordinary' in stdout:
|
||||
result.append(BuildFlags.DATABASE_ORDINARY)
|
||||
else:
|
||||
raise Exception("Cannot get inforamtion about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))
|
||||
|
||||
@ -820,7 +820,7 @@ if __name__ == '__main__':
|
||||
parser.add_argument('-r', '--server-check-retries', default=30, type=int, help='Num of tries to execute SELECT 1 before tests started')
|
||||
parser.add_argument('--skip-list-path', help="Path to skip-list file")
|
||||
parser.add_argument('--use-skip-list', action='store_true', default=False, help="Use skip list to skip tests if found")
|
||||
parser.add_argument('--atomic-db-engine', action='store_true', help='Create databases with Atomic engine by default')
|
||||
parser.add_argument('--db-engine', help='Database engine name')
|
||||
|
||||
parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
|
||||
parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
|
||||
|
8
tests/config/README.md
Normal file
@ -0,0 +1,8 @@
|
||||
# ClickHouse configs for test environment
|
||||
|
||||
## How to use
|
||||
CI uses these configs in all checks, installing them with the `install.sh` script. If you want to run all tests from `tests/queries/0_stateless` and `tests/queries/1_stateful` on your local machine, you have to set up the configs from this directory for your `clickhouse-server`. The simplest way is to install them using the `install.sh` script; another option is to just copy the files into your clickhouse config directory.
|
||||
|
||||
## How to add new config
|
||||
|
||||
Just place an `.xml` file with the new config into the appropriate directory and add an `ln` command to the `install.sh` script. After that, CI will use this config in all test runs.
|
@ -1,8 +0,0 @@
|
||||
<yandex>
|
||||
<profiles>
|
||||
<default>
|
||||
<default_database_engine>Atomic</default_database_engine>
|
||||
<show_table_uuid_in_table_create_query_if_not_nil>0</show_table_uuid_in_table_create_query_if_not_nil>
|
||||
</default>
|
||||
</profiles>
|
||||
</yandex>
|
54
tests/config/install.sh
Executable file
@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script installs the configs for clickhouse-server and clickhouse-client required
|
||||
# for testing (stateless and stateful tests)
|
||||
|
||||
set -x -e
|
||||
|
||||
DEST_SERVER_PATH="${1:-/etc/clickhouse-server}"
|
||||
DEST_CLIENT_PATH="${2:-/etc/clickhouse-client}"
|
||||
SRC_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
|
||||
|
||||
echo "Going to install test configs from $SRC_PATH into $DEST_SERVER_PATH"
|
||||
|
||||
mkdir -p $DEST_SERVER_PATH/config.d/
|
||||
mkdir -p $DEST_SERVER_PATH/users.d/
|
||||
mkdir -p $DEST_CLIENT_PATH
|
||||
|
||||
ln -s $SRC_PATH/config.d/zookeeper.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/part_log.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/metric_log.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/disks.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/clusters.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/graphite.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/
|
||||
ln -s $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/
|
||||
ln -s $SRC_PATH/users.d/readonly.xml $DEST_SERVER_PATH/users.d/
|
||||
ln -s $SRC_PATH/users.d/access_management.xml $DEST_SERVER_PATH/users.d/
|
||||
|
||||
ln -s $SRC_PATH/ints_dictionary.xml $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/strings_dictionary.xml $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/decimals_dictionary.xml $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/executable_dictionary.xml $DEST_SERVER_PATH/
|
||||
|
||||
ln -s $SRC_PATH/server.key $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/server.crt $DEST_SERVER_PATH/
|
||||
ln -s $SRC_PATH/dhparam.pem $DEST_SERVER_PATH/
|
||||
|
||||
# Retain any pre-existing config and allow ClickHouse to load it if required
|
||||
ln -s --backup=simple --suffix=_original.xml \
|
||||
$SRC_PATH/config.d/query_masking_rules.xml $DEST_SERVER_PATH/config.d/
|
||||
|
||||
if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
|
||||
ln -s $SRC_PATH/config.d/polymorphic_parts.xml $DEST_SERVER_PATH/config.d/
|
||||
fi
|
||||
if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then
|
||||
ln -s $SRC_PATH/users.d/database_ordinary.xml $DEST_SERVER_PATH/users.d/
|
||||
fi
|
||||
|
||||
ln -sf $SRC_PATH/client_config.xml $DEST_CLIENT_PATH/config.xml
|
7
tests/config/users.d/database_ordinary.xml
Normal file
@ -0,0 +1,7 @@
|
||||
<yandex>
|
||||
<profiles>
|
||||
<default>
|
||||
<default_database_engine>Ordinary</default_database_engine>
|
||||
</default>
|
||||
</profiles>
|
||||
</yandex>
|
@ -45,6 +45,7 @@ def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME):
|
||||
f.write("=".join([var, value]) + "\n")
|
||||
return full_path
|
||||
|
||||
|
||||
def subprocess_check_call(args):
|
||||
# Uncomment for debugging
|
||||
# print('run:', ' ' . join(args))
|
||||
@ -124,7 +125,6 @@ class ClickHouseCluster:
|
||||
self.base_zookeeper_cmd = None
|
||||
self.base_mysql_cmd = []
|
||||
self.base_kafka_cmd = []
|
||||
self.base_kerberized_kafka_cmd = []
|
||||
self.base_rabbitmq_cmd = []
|
||||
self.base_cassandra_cmd = []
|
||||
self.pre_zookeeper_commands = []
|
||||
@ -133,7 +133,6 @@ class ClickHouseCluster:
|
||||
self.with_mysql = False
|
||||
self.with_postgres = False
|
||||
self.with_kafka = False
|
||||
self.with_kerberized_kafka = False
|
||||
self.with_rabbitmq = False
|
||||
self.with_odbc_drivers = False
|
||||
self.with_hdfs = False
|
||||
@ -170,7 +169,7 @@ class ClickHouseCluster:
|
||||
|
||||
def add_instance(self, name, base_config_dir=None, main_configs=None, user_configs=None, dictionaries=None,
|
||||
macros=None,
|
||||
with_zookeeper=False, with_mysql=False, with_kafka=False, with_kerberized_kafka=False, with_rabbitmq=False,
|
||||
with_zookeeper=False, with_mysql=False, with_kafka=False, with_rabbitmq=False,
|
||||
clickhouse_path_dir=None,
|
||||
with_odbc_drivers=False, with_postgres=False, with_hdfs=False, with_mongo=False,
|
||||
with_redis=False, with_minio=False, with_cassandra=False,
|
||||
@ -208,7 +207,6 @@ class ClickHouseCluster:
|
||||
zookeeper_config_path=self.zookeeper_config_path,
|
||||
with_mysql=with_mysql,
|
||||
with_kafka=with_kafka,
|
||||
with_kerberized_kafka=with_kerberized_kafka,
|
||||
with_rabbitmq=with_rabbitmq,
|
||||
with_mongo=with_mongo,
|
||||
with_redis=with_redis,
|
||||
@ -292,13 +290,6 @@ class ClickHouseCluster:
|
||||
p.join(docker_compose_yml_dir, 'docker_compose_kafka.yml')]
|
||||
cmds.append(self.base_kafka_cmd)
|
||||
|
||||
if with_kerberized_kafka and not self.with_kerberized_kafka:
|
||||
self.with_kerberized_kafka = True
|
||||
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_kafka.yml')])
|
||||
self.base_kerberized_kafka_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name',
|
||||
self.project_name, '--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_kafka.yml')]
|
||||
cmds.append(self.base_kerberized_kafka_cmd)
|
||||
|
||||
if with_rabbitmq and not self.with_rabbitmq:
|
||||
self.with_rabbitmq = True
|
||||
self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_rabbitmq.yml')])
|
||||
@ -495,8 +486,8 @@ class ClickHouseCluster:
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
try:
|
||||
connection.database_names()
|
||||
print "Connected to Mongo dbs:", connection.database_names()
|
||||
connection.list_database_names()
|
||||
print "Connected to Mongo dbs:", connection.list_database_names()
|
||||
return
|
||||
except Exception as ex:
|
||||
print "Can't connect to Mongo " + str(ex)
@ -617,11 +608,6 @@ class ClickHouseCluster:
self.kafka_docker_id = self.get_instance_docker_id('kafka1')
self.wait_schema_registry_to_start(120)

if self.with_kerberized_kafka and self.base_kerberized_kafka_cmd:
env = os.environ.copy()
env['KERBERIZED_KAFKA_DIR'] = instance.path + '/'
subprocess.check_call(self.base_kerberized_kafka_cmd + common_opts + ['--renew-anon-volumes'], env=env)
self.kerberized_kafka_docker_id = self.get_instance_docker_id('kerberized_kafka1')
if self.with_rabbitmq and self.base_rabbitmq_cmd:
subprocess_check_call(self.base_rabbitmq_cmd + common_opts + ['--renew-anon-volumes'])
self.rabbitmq_docker_id = self.get_instance_docker_id('rabbitmq1')
@ -802,12 +788,9 @@ services:
- {instance_config_dir}:/etc/clickhouse-server/
- {db_dir}:/var/lib/clickhouse/
- {logs_dir}:/var/log/clickhouse-server/
- /etc/passwd:/etc/passwd:ro
{binary_volume}
{odbc_bridge_volume}
{odbc_ini_path}
{keytab_path}
{krb5_conf}
entrypoint: {entrypoint_cmd}
tmpfs: {tmpfs}
cap_add:
@ -837,7 +820,7 @@ class ClickHouseInstance:
def __init__(
self, cluster, base_path, name, base_config_dir, custom_main_configs, custom_user_configs,
custom_dictionaries,
macros, with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_kerberized_kafka, with_rabbitmq, with_mongo,
macros, with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, with_rabbitmq, with_mongo,
with_redis, with_minio,
with_cassandra, server_bin_path, odbc_bridge_bin_path, clickhouse_path_dir, with_odbc_drivers,
hostname=None, env_variables=None,
@ -856,7 +839,6 @@ class ClickHouseInstance:
self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs]
self.custom_dictionaries_paths = [p.abspath(p.join(base_path, c)) for c in custom_dictionaries]
self.clickhouse_path_dir = p.abspath(p.join(base_path, clickhouse_path_dir)) if clickhouse_path_dir else None
self.kerberos_secrets_dir = p.abspath(p.join(base_path, 'secrets'))
self.macros = macros if macros is not None else {}
self.with_zookeeper = with_zookeeper
self.zookeeper_config_path = zookeeper_config_path
@ -866,7 +848,6 @@ class ClickHouseInstance:

self.with_mysql = with_mysql
self.with_kafka = with_kafka
self.with_kerberized_kafka = with_kerberized_kafka
self.with_rabbitmq = with_rabbitmq
self.with_mongo = with_mongo
self.with_redis = with_redis
@ -882,13 +863,6 @@ class ClickHouseInstance:
else:
self.odbc_ini_path = ""

if with_kerberized_kafka:
self.keytab_path = '- ' + os.path.dirname(self.docker_compose_path) + "/secrets:/tmp/keytab"
self.krb5_conf = '- ' + os.path.dirname(self.docker_compose_path) + "/secrets/krb.conf:/etc/krb5.conf:ro"
else:
self.keytab_path = ""
self.krb5_conf = ""

self.docker_client = None
self.ip_address = None
self.client = None
@ -1218,9 +1192,6 @@ class ClickHouseInstance:
if self.with_zookeeper:
shutil.copy(self.zookeeper_config_path, conf_d_dir)

if self.with_kerberized_kafka:
shutil.copytree(self.kerberos_secrets_dir, p.abspath(p.join(self.path, 'secrets')))

# Copy config.d configs
print "Copy custom test config files {} to {}".format(self.custom_main_config_paths, self.config_d_dir)
for path in self.custom_main_config_paths:
@ -1256,9 +1227,6 @@ class ClickHouseInstance:
depends_on.append("kafka1")
depends_on.append("schema-registry")

if self.with_kerberized_kafka:
depends_on.append("kerberized_kafka1")

if self.with_rabbitmq:
depends_on.append("rabbitmq1")

@ -1322,8 +1290,6 @@ class ClickHouseInstance:
user=os.getuid(),
env_file=env_file,
odbc_ini_path=odbc_ini_path,
keytab_path=self.keytab_path,
krb5_conf=self.krb5_conf,
entrypoint_cmd=entrypoint_cmd,
networks=networks,
app_net=app_net,
@ -333,16 +333,16 @@ class _SourceExecutableBase(ExternalSource):
user='root')


class SourceExecutableCache(_SourceExecutableBase):
class SourceExecutableHashed(_SourceExecutableBase):

def _get_cmd(self, path):
return "cat {}".format(path)

def compatible_with_layout(self, layout):
return 'cache' not in layout.name
return 'hashed' in layout.name


class SourceExecutableHashed(_SourceExecutableBase):
class SourceExecutableCache(_SourceExecutableBase):

def _get_cmd(self, path):
return "cat - >/dev/null;cat {}".format(path)
@ -60,3 +60,19 @@ def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_tim
if expectation_tsv != val:
raise AssertionError("'{}' != '{}'\n{}".format(expectation_tsv, val, '\n'.join(
expectation_tsv.diff(val, n1="expectation", n2="query"))))

def assert_logs_contain(instance, substring):
if not instance.contains_in_log(substring):
raise AssertionError("'{}' not found in logs".format(substring))

def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_time=0.5):
for i in xrange(retry_count):
try:
if instance.contains_in_log(substring):
break
time.sleep(sleep_time)
except Exception as ex:
print "contains_in_log_with_retry retry {} exception {}".format(i + 1, ex)
time.sleep(sleep_time)
else:
raise AssertionError("'{}' not found in logs".format(substring))
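The new helper relies on Python's for/else: the else branch fires only when the loop never breaks, that is, when the substring never appears within retry_count attempts. A hypothetical call (the instance handle and log message are illustrative, not taken from any test):

# Wait up to ~30 seconds for the message to appear in the instance's server log.
assert_logs_contain_with_retry(node, 'Removing temporary directory', retry_count=60, sleep_time=0.5)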
@ -156,8 +156,6 @@ if __name__ == "__main__":
env_tags += "-e {}={} ".format("DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", tag)
elif image == "yandex/clickhouse-integration-test":
env_tags += "-e {}={}".format("DOCKER_BASE_TAG", tag)
elif image == "yandex/clickhouse-kerberos-kdc":
env_tags += "-e {}={}".format("DOCKER_KERBEROS_KDC_TAG", tag)
else:
logging.info("Unknown image {}".format(image))
@ -13,7 +13,7 @@ node1 = cluster.add_instance('node1', main_configs=["configs/config.d/zookeeper_
def start_cluster():
try:
cluster.start()
node1.query("CREATE DATABASE zktest ENGINE=Ordinary;")
node1.query("CREATE DATABASE zktest ENGINE=Ordinary;") # Different behaviour with Atomic
node1.query(
'''
CREATE TABLE zktest.atomic_drop_table (n UInt32)

@ -14,7 +14,7 @@ path_to_data = '/var/lib/clickhouse/'
def started_cluster():
try:
cluster.start()
q('CREATE DATABASE test ENGINE = Ordinary')
q('CREATE DATABASE test ENGINE = Ordinary') # Different path in shadow/ with Atomic

yield cluster
@ -17,6 +17,7 @@ node4 = cluster.add_instance('node4')
def started_cluster():
try:
cluster.start()

yield cluster
finally:
cluster.shutdown()
@ -141,22 +142,24 @@ def test_backup_from_old_version_config(started_cluster):


def test_backup_and_alter(started_cluster):
node4.query("CREATE TABLE backup_table(A Int64, B String, C Date) Engine = MergeTree order by tuple()")
node4.query("CREATE DATABASE test ENGINE=Ordinary") # Different path in shadow/ with Atomic

node4.query("INSERT INTO backup_table VALUES(2, '2', toDate('2019-10-01'))")
node4.query("CREATE TABLE test.backup_table(A Int64, B String, C Date) Engine = MergeTree order by tuple()")

node4.query("ALTER TABLE backup_table FREEZE PARTITION tuple();")
node4.query("INSERT INTO test.backup_table VALUES(2, '2', toDate('2019-10-01'))")

node4.query("ALTER TABLE backup_table DROP COLUMN C")
node4.query("ALTER TABLE test.backup_table FREEZE PARTITION tuple();")

node4.query("ALTER TABLE backup_table MODIFY COLUMN B UInt64")
node4.query("ALTER TABLE test.backup_table DROP COLUMN C")

node4.query("ALTER TABLE backup_table DROP PARTITION tuple()")
node4.query("ALTER TABLE test.backup_table MODIFY COLUMN B UInt64")

node4.query("ALTER TABLE test.backup_table DROP PARTITION tuple()")

node4.exec_in_container(['bash', '-c',
'cp -r /var/lib/clickhouse/shadow/1/data/default/backup_table/all_1_1_0/ /var/lib/clickhouse/data/default/backup_table/detached'])
'cp -r /var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/ /var/lib/clickhouse/data/test/backup_table/detached'])

node4.query("ALTER TABLE backup_table ATTACH PARTITION tuple()")
node4.query("ALTER TABLE test.backup_table ATTACH PARTITION tuple()")

assert node4.query("SELECT sum(A) FROM backup_table") == "2\n"
assert node4.query("SELECT B + 2 FROM backup_table") == "4\n"
assert node4.query("SELECT sum(A) FROM test.backup_table") == "2\n"
assert node4.query("SELECT B + 2 FROM test.backup_table") == "4\n"
@ -33,7 +33,7 @@
<enabled_partitions>3 4 5 6 1 2 0 </enabled_partitions>

<!-- Engine of destination tables -->
<engine>ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster{cluster}/{shard}/hits', '{replica}') PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16</engine>
<engine>ENGINE=ReplicatedMergeTree PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16</engine>

<!-- Which sarding key to use while copying -->
<sharding_key>d + 1</sharding_key>
@ -93,4 +93,4 @@
</cluster1>
</remote_servers>

</yandex>
</yandex>
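Dropping the explicit path from the engine definition means the destination tables rely on argument-less ReplicatedMergeTree, which takes its coordination path and replica name from the server settings default_replica_path ('/clickhouse/tables/{uuid}/{shard}' by default) and default_replica_name ('{replica}'), so each run gets a unique path. A hypothetical way to inspect the generated path from a test (the node handle and table name are illustrative):

# Not part of the suite: look up the auto-generated ZooKeeper path of a destination table.
zk_path = node.query("SELECT zookeeper_path FROM system.replicas WHERE table = 'hits'").strip()
assert zk_path.startswith('/clickhouse/tables/')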
@ -34,7 +34,7 @@

<!-- Engine of destination tables -->
<engine>ENGINE=
ReplicatedMergeTree('/clickhouse/tables/cluster{cluster}/{shard}/b', '{replica}')
ReplicatedMergeTree
PARTITION BY toMonday(date)
ORDER BY d
</engine>
@ -97,4 +97,4 @@
</cluster1>
</remote_servers>

</yandex>
</yandex>

@ -28,7 +28,7 @@

<!-- Engine of destination tables -->
<engine>ENGINE=
ReplicatedMergeTree('/clickhouse/tables/cluster{cluster}/{shard}/test_block_size', '{replica}')
ReplicatedMergeTree
ORDER BY d PARTITION BY partition
</engine>

@ -99,4 +99,4 @@
</shard_0_0>
</remote_servers>

</yandex>
</yandex>

@ -81,11 +81,11 @@ class Task1:
for cluster_num in ["0", "1"]:
ddl_check_query(instance, "DROP DATABASE IF EXISTS default ON CLUSTER cluster{}".format(cluster_num))
ddl_check_query(instance,
"CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{} ENGINE=Ordinary".format(
"CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{}".format(
cluster_num))

ddl_check_query(instance, "CREATE TABLE hits ON CLUSTER cluster0 (d UInt64, d1 UInt64 MATERIALIZED d+1) " +
"ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/hits', '{replica}') " +
"ENGINE=ReplicatedMergeTree " +
"PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16")
ddl_check_query(instance,
"CREATE TABLE hits_all ON CLUSTER cluster0 (d UInt64) ENGINE=Distributed(cluster0, default, hits, d)")
@ -110,10 +110,11 @@ class Task1:

class Task2:

def __init__(self, cluster):
def __init__(self, cluster, unique_zk_path):
self.cluster = cluster
self.zk_task_path = "/clickhouse-copier/task_month_to_week_partition"
self.copier_task_config = open(os.path.join(CURRENT_TEST_DIR, 'task_month_to_week_description.xml'), 'r').read()
self.unique_zk_path = unique_zk_path

def start(self):
instance = cluster.instances['s0_0_0']
@ -121,11 +122,13 @@ class Task2:
for cluster_num in ["0", "1"]:
ddl_check_query(instance, "DROP DATABASE IF EXISTS default ON CLUSTER cluster{}".format(cluster_num))
ddl_check_query(instance,
"CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{} ENGINE=Ordinary".format(
"CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{}".format(
cluster_num))

ddl_check_query(instance,
"CREATE TABLE a ON CLUSTER cluster0 (date Date, d UInt64, d1 UInt64 ALIAS d+1) ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/a', '{replica}', date, intHash64(d), (date, intHash64(d)), 8192)")
"CREATE TABLE a ON CLUSTER cluster0 (date Date, d UInt64, d1 UInt64 ALIAS d+1) "
"ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/" + self.unique_zk_path + "', "
"'{replica}', date, intHash64(d), (date, intHash64(d)), 8192)")
ddl_check_query(instance,
"CREATE TABLE a_all ON CLUSTER cluster0 (date Date, d UInt64) ENGINE=Distributed(cluster0, default, a, d)")
@ -169,7 +172,7 @@ class Task_test_block_size:

ddl_check_query(instance, """
CREATE TABLE test_block_size ON CLUSTER shard_0_0 (partition Date, d UInt64)
ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/test_block_size', '{replica}')
ENGINE=ReplicatedMergeTree
ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d)""", 2)

instance.query(
@ -332,17 +335,17 @@ def test_copy_with_recovering_after_move_faults(started_cluster, use_sample_offs

@pytest.mark.timeout(600)
def test_copy_month_to_week_partition(started_cluster):
execute_task(Task2(started_cluster), [])
execute_task(Task2(started_cluster, "test1"), [])


@pytest.mark.timeout(600)
def test_copy_month_to_week_partition_with_recovering(started_cluster):
execute_task(Task2(started_cluster), ['--copy-fault-probability', str(COPYING_FAIL_PROBABILITY)])
execute_task(Task2(started_cluster, "test2"), ['--copy-fault-probability', str(COPYING_FAIL_PROBABILITY)])


@pytest.mark.timeout(600)
def test_copy_month_to_week_partition_with_recovering_after_move_faults(started_cluster):
execute_task(Task2(started_cluster), ['--move-fault-probability', str(MOVING_FAIL_PROBABILITY)])
execute_task(Task2(started_cluster, "test3"), ['--move-fault-probability', str(MOVING_FAIL_PROBABILITY)])


def test_block_size(started_cluster):

@ -59,7 +59,7 @@ class TaskTrivial:

for node in [source, destination]:
node.query("DROP DATABASE IF EXISTS default")
node.query("CREATE DATABASE IF NOT EXISTS default ENGINE=Ordinary")
node.query("CREATE DATABASE IF NOT EXISTS default")

source.query("CREATE TABLE trivial (d UInt64, d1 UInt64 MATERIALIZED d+1) "
"ENGINE=ReplicatedMergeTree('/clickhouse/tables/source_trivial_cluster/1/trivial', '1') "
Some files were not shown because too many files have changed in this diff.