Merge branch 'master' into system-querylog-map

This commit is contained in:
sundy-li 2021-01-23 11:28:22 +08:00
commit 685210e02b
245 changed files with 3371 additions and 2404 deletions


@ -0,0 +1,19 @@
---
name: Sanitizer alert
about: Potential issue has been found by special code instrumentation
title: ''
labels: testing
assignees: ''
---
(you don't have to strictly follow this form)
**Describe the bug**
A link to the report
**How to reproduce**
Try to reproduce the report and copy the tables and queries involved.
**Error message and/or stacktrace**
You can find additional information in server logs.

.gitmodules vendored

@ -84,7 +84,7 @@
url = https://github.com/google/brotli.git
[submodule "contrib/h3"]
path = contrib/h3
url = https://github.com/uber/h3
url = https://github.com/ClickHouse-Extras/h3
[submodule "contrib/hyperscan"]
path = contrib/hyperscan
url = https://github.com/ClickHouse-Extras/hyperscan.git


@ -15,9 +15,13 @@ currently being supported with security updates:
| 20.4 | :x: |
| 20.5 | :x: |
| 20.6 | :x: |
| 20.7 | :white_check_mark: |
| 20.7 | :x: |
| 20.8 | :white_check_mark: |
| 20.9 | :white_check_mark: |
| 20.9 | :x: |
| 20.10 | :x: |
| 20.11 | :white_check_mark: |
| 20.12 | :white_check_mark: |
| 21.1 | :white_check_mark: |
## Reporting a Vulnerability


@ -780,7 +780,7 @@ public:
return lut[index].date + time_offset;
}
inline time_t addWeeks(time_t t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED time_t addWeeks(time_t t, Int64 delta) const
{
return addDays(t, delta * 7);
}
@ -812,7 +812,7 @@ public:
return lut[result_day].date + time_offset;
}
inline DayNum addMonths(DayNum d, Int64 delta) const
inline NO_SANITIZE_UNDEFINED DayNum addMonths(DayNum d, Int64 delta) const
{
const Values & values = lut[d];
@ -836,12 +836,12 @@ public:
}
}
inline time_t addQuarters(time_t t, Int64 delta) const
inline NO_SANITIZE_UNDEFINED time_t addQuarters(time_t t, Int64 delta) const
{
return addMonths(t, delta * 3);
}
inline DayNum addQuarters(DayNum d, Int64 delta) const
inline NO_SANITIZE_UNDEFINED DayNum addQuarters(DayNum d, Int64 delta) const
{
return addMonths(d, delta * 3);
}


@ -27,9 +27,12 @@ if (GLIBC_COMPATIBILITY)
list(APPEND glibc_compatibility_sources musl/getentropy.c)
endif()
add_library (clickhouse_memcpy OBJECT
${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy/memcpy_wrapper.c
)
if (NOT ARCH_ARM)
# clickhouse_memcpy doesn't support ARCH_ARM, see https://github.com/ClickHouse/ClickHouse/issues/18951
add_library (clickhouse_memcpy OBJECT
${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy/memcpy_wrapper.c
)
endif()
# Need to omit frame pointers to match the performance of glibc
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")


@ -118,7 +118,9 @@ TRAP(logout)
TRAP(logwtmp)
TRAP(lrand48)
TRAP(mallinfo)
TRAP(mallopt)
#if !defined(SANITIZER)
TRAP(mallopt) // Used by tsan
#endif
TRAP(mblen)
TRAP(mbrlen)
TRAP(mbrtowc)
@ -193,7 +195,9 @@ TRAP(dbm_nextkey)
TRAP(dbm_open)
TRAP(dbm_store)
TRAP(dirname)
TRAP(dlerror)
#if !defined(SANITIZER)
TRAP(dlerror) // Used by tsan
#endif
TRAP(ftw)
TRAP(getc_unlocked)
//TRAP(getenv) // Ok at program startup


@ -1,9 +1,9 @@
# This strings autochanged from release_lib.sh:
SET(VERSION_REVISION 54445)
SET(VERSION_REVISION 54447)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 1)
SET(VERSION_MINOR 2)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 667dd0cf0ccecdaa6f334177b7ece2f53bd196a1)
SET(VERSION_DESCRIBE v21.1.1.5646-prestable)
SET(VERSION_STRING 21.1.1.5646)
SET(VERSION_GITHASH 53d0c9fa7255aa1dc48991d19f4246ff71cc2fd7)
SET(VERSION_DESCRIBE v21.2.1.1-prestable)
SET(VERSION_STRING 21.2.1.1)
# end of autochange


@ -1,10 +1,4 @@
if (NOT ARCH_ARM AND OPENSSL_FOUND)
option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES})
elseif(ENABLE_RDKAFKA AND NOT OPENSSL_FOUND)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use librdkafka without SSL")
elseif(ENABLE_RDKAFKA)
message (${RECONFIGURE_MESSAGE_LEVEL} "librdafka is not supported on ARM and on FreeBSD")
endif ()
option (ENABLE_RDKAFKA "Enable kafka" ${ENABLE_LIBRARIES})
if (NOT ENABLE_RDKAFKA)
if (USE_INTERNAL_RDKAFKA_LIBRARY)
@ -13,11 +7,7 @@ if (NOT ENABLE_RDKAFKA)
return()
endif()
if (NOT ARCH_ARM)
option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka instead of the bundled" ${NOT_UNBUNDLED})
elseif(USE_INTERNAL_RDKAFKA_LIBRARY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal librdkafka with ARCH_ARM=${ARCH_ARM}")
endif ()
option (USE_INTERNAL_RDKAFKA_LIBRARY "Set to FALSE to use system librdkafka instead of the bundled" ${NOT_UNBUNDLED})
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cppkafka/CMakeLists.txt")
if(USE_INTERNAL_RDKAFKA_LIBRARY)
@ -67,14 +57,12 @@ if (RDKAFKA_LIB AND RDKAFKA_INCLUDE_DIR)
if (LZ4_LIBRARY)
list (APPEND RDKAFKA_LIBRARY ${LZ4_LIBRARY})
endif ()
elseif (NOT MISSING_INTERNAL_RDKAFKA_LIBRARY AND NOT MISSING_INTERNAL_CPPKAFKA_LIBRARY AND NOT ARCH_ARM)
elseif (NOT MISSING_INTERNAL_RDKAFKA_LIBRARY AND NOT MISSING_INTERNAL_CPPKAFKA_LIBRARY)
set (USE_INTERNAL_RDKAFKA_LIBRARY 1)
set (RDKAFKA_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/librdkafka/src")
set (RDKAFKA_LIBRARY rdkafka)
set (CPPKAFKA_LIBRARY cppkafka)
set (USE_RDKAFKA 1)
elseif(ARCH_ARM)
message (${RECONFIGURE_MESSAGE_LEVEL} "Using internal rdkafka on ARM is not supported")
endif ()
message (STATUS "Using librdkafka=${USE_RDKAFKA}: ${RDKAFKA_INCLUDE_DIR} : ${RDKAFKA_LIBRARY} ${CPPKAFKA_LIBRARY}")


@ -35,6 +35,7 @@ if (NOT ZLIB_FOUND AND NOT MISSING_INTERNAL_ZLIB_LIBRARY)
set (ZLIB_INCLUDE_DIRECTORIES ${ZLIB_INCLUDE_DIR}) # for protobuf
set (ZLIB_FOUND 1) # for poco
set (ZLIB_LIBRARIES zlib CACHE INTERNAL "")
set (ZLIB_LIBRARY_NAME ${ZLIB_LIBRARIES}) # for cassandra
set (ZLIB_NAME "${INTERNAL_ZLIB_NAME}")
endif ()

contrib/cassandra vendored

@ -1 +1 @@
Subproject commit d10187efb25b26da391def077edf3c6f2f3a23dd
Subproject commit 9cbc1a806df5d40fddbf84533b9873542c6513d8

contrib/h3 vendored

@ -1 +1 @@
Subproject commit 6cfd649e8c0d3ed913e8aae928a669fc3b8a2365
Subproject commit e209086ae1b5477307f545a0f6111780edc59940


@ -16,6 +16,7 @@ ${H3_SOURCE_DIR}/lib/mathExtensions.c
${H3_SOURCE_DIR}/lib/polygon.c
${H3_SOURCE_DIR}/lib/vec2d.c
${H3_SOURCE_DIR}/lib/vec3d.c
${H3_SOURCE_DIR}/lib/vertex.c
${H3_SOURCE_DIR}/lib/vertexGraph.c
)

contrib/libpq vendored

@ -1 +1 @@
Subproject commit 8e7e905854714a7fbb49c124dbc45c7bd4b98e07
Subproject commit 1f9c286dba60809edb64e384d6727d80d269b6cf


@ -50,12 +50,12 @@ set(SRCS
${RDKAFKA_SOURCE_DIR}/rdkafka_request.c
${RDKAFKA_SOURCE_DIR}/rdkafka_roundrobin_assignor.c
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl.c
# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c # optionally included below
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c
# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c # optionally included below
# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c # optionally included below
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_plain.c
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c
# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c # optionally included below
# ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_win32.c
${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c
# ${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c # optionally included below
${RDKAFKA_SOURCE_DIR}/rdkafka_sticky_assignor.c
${RDKAFKA_SOURCE_DIR}/rdkafka_subscription.c
${RDKAFKA_SOURCE_DIR}/rdkafka_timer.c
@ -82,10 +82,33 @@ set(SRCS
if(${ENABLE_CYRUS_SASL})
message (STATUS "librdkafka with SASL support")
set(SRCS
${SRCS}
${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c # needed to support Kerberos, requires cyrus-sasl
)
set(WITH_SASL_CYRUS 1)
endif()
if(OPENSSL_FOUND)
message (STATUS "librdkafka with SSL support")
set(WITH_SSL 1)
if(${ENABLE_CYRUS_SASL})
set(WITH_SASL_SCRAM 1)
set(WITH_SASL_OAUTHBEARER 1)
endif()
endif()
if(WITH_SSL)
list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_ssl.c)
endif()
if(WITH_SASL_CYRUS)
list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_cyrus.c) # needed to support Kerberos, requires cyrus-sasl
endif()
if(WITH_SASL_SCRAM)
list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_scram.c)
endif()
if(WITH_SASL_OAUTHBEARER)
list(APPEND SRCS ${RDKAFKA_SOURCE_DIR}/rdkafka_sasl_oauthbearer.c)
endif()
add_library(rdkafka ${SRCS})
@ -101,7 +124,6 @@ if(OPENSSL_SSL_LIBRARY AND OPENSSL_CRYPTO_LIBRARY)
endif()
if(${ENABLE_CYRUS_SASL})
target_link_libraries(rdkafka PRIVATE ${CYRUS_SASL_LIBRARY})
set(WITH_SASL_CYRUS 1)
endif()
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxdir)


@ -1,7 +1,6 @@
// Originally generated by ./configure
#ifndef _CONFIG_H_
#define _CONFIG_H_
#define ARCH "x86_64"
#define BUILT_WITH "GCC GXX PKGCONFIG OSXLD LIBDL PLUGINS ZLIB SSL SASL_CYRUS ZSTD HDRHISTOGRAM LZ4_EXT SNAPPY SOCKEM SASL_SCRAM CRC32C_HW"
#define CPU "generic"
@ -60,14 +59,14 @@
// WITH_SOCKEM
#define WITH_SOCKEM 1
// libssl
#define WITH_SSL 1
#cmakedefine WITH_SSL 1
// WITH_SASL_SCRAM
#define WITH_SASL_SCRAM 1
#cmakedefine WITH_SASL_SCRAM 1
// WITH_SASL_OAUTHBEARER
#define WITH_SASL_OAUTHBEARER 1
#cmakedefine WITH_SASL_OAUTHBEARER 1
#cmakedefine WITH_SASL_CYRUS 1
// crc32chw
#if !defined(__PPC__)
#if !defined(__PPC__) && (!defined(__aarch64__) || defined(__ARM_FEATURE_CRC32))
#define WITH_CRC32C_HW 1
#endif
// regex

contrib/libuv vendored

@ -1 +1 @@
Subproject commit bc14c44b6269c458f2cc7e09eb300f4b64899903
Subproject commit e2e9b7e9f978ce8a1367b5fe781d97d1ce9f94ab

debian/changelog vendored

@ -1,5 +1,5 @@
clickhouse (21.1.0) unstable; urgency=low
clickhouse (21.2.1.1) unstable; urgency=low
* Modified source code
-- Alexey Milovidov <milovidov@yandex-team.ru> Mon, 11 Jan 2021 03:51:08 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 11 Jan 2021 11:12:08 +0300


@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.1.0
ARG version=21.2.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \


@ -1,7 +1,7 @@
FROM ubuntu:20.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.1.0
ARG version=21.2.1.*
ARG gosu_ver=1.10
# user/group precreated explicitly with fixed uid/gid on purpose.
@ -10,7 +10,6 @@ ARG gosu_ver=1.10
# We do that in advance at the beginning of the Dockerfile, before any packages are
# installed, to prevent those uid / gid from being picked up by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
# Number 101 is used by default in openshift
RUN groupadd -r clickhouse --gid=101 \
&& useradd -r -g clickhouse --uid=101 --home-dir=/var/lib/clickhouse --shell=/bin/bash clickhouse \
@ -37,7 +36,12 @@ RUN groupadd -r clickhouse --gid=101 \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get clean
&& apt-get clean \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
# we need to allow "others" access to the clickhouse folder, because the docker container
# can be started with an arbitrary uid (openshift use case)
ADD https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-amd64 /bin/gosu


@ -14,16 +14,18 @@ COPY alpine-root/ /
# We do that in advance at the beginning of the Dockerfile, before any packages are
# installed, to prevent those uid / gid from being picked up by some unrelated software.
# The same uid / gid (101) is used both for alpine and ubuntu.
# Number 101 is used by default in openshift
RUN addgroup -S -g 101 clickhouse \
&& adduser -S -h /var/lib/clickhouse -s /bin/bash -G clickhouse -g "ClickHouse server" -u 101 clickhouse \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \
&& chown clickhouse:clickhouse /var/lib/clickhouse \
&& chmod 700 /var/lib/clickhouse \
&& chown root:clickhouse /var/log/clickhouse-server \
&& chmod 775 /var/log/clickhouse-server \
&& chmod +x /entrypoint.sh \
&& apk add --no-cache su-exec bash
&& apk add --no-cache su-exec bash \
&& chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client
# we need to allow "others" access to the clickhouse folder, because the docker container
# can be started with an arbitrary uid (openshift use case)
EXPOSE 9000 8123 9009


@ -1,7 +1,7 @@
FROM ubuntu:18.04
ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.1.0
ARG version=21.2.1.*
RUN apt-get update && \
apt-get install -y apt-transport-https dirmngr && \


@ -269,6 +269,7 @@ function run_tests
01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere with other queries running concurrently
01318_encrypt # Depends on OpenSSL
01318_decrypt # Depends on OpenSSL
01663_aes_msan # Depends on OpenSSL
01281_unsucceeded_insert_select_queries_counter
01292_create_user
01294_lazy_database_concurrent
@ -332,7 +333,7 @@ function run_tests
01622_defaults_for_url_engine
)
time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
time clickhouse-test -j 8 --order=random --use-skip-list --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
# substr is to remove semicolon after test name
readarray -t FAILED_TESTS < <(awk '/\[ FAIL|TIMEOUT|ERROR \]/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt")


@ -11,6 +11,10 @@
<max>10</max>
</max_execution_time>
<max_memory_usage>
<max>10G</max>
</max_memory_usage>
<!-- Not ready for production -->
<compile_expressions>
<readonly />


@ -21,13 +21,13 @@ function clone
git init
git remote add origin https://github.com/ClickHouse/ClickHouse
git fetch --depth=1 origin "$SHA_TO_TEST"
git fetch --depth=1 origin master # Used to obtain the list of modified or added tests
git fetch --depth=100 origin "$SHA_TO_TEST"
git fetch --depth=100 origin master # Used to obtain the list of modified or added tests
# If not master, try to fetch pull/.../{head,merge}
if [ "$PR_TO_TEST" != "0" ]
then
git fetch --depth=1 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
fi
git checkout "$SHA_TO_TEST"
@ -75,7 +75,7 @@ function fuzz
{
# Obtain the list of newly added tests. They will be fuzzed in a more extreme way than other tests.
cd ch
NEW_TESTS=$(git diff --name-only master "$SHA_TO_TEST" | grep -P 'tests/queries/0_stateless/.*\.sql' | sed -r -e 's!^!ch/!' | sort -R)
NEW_TESTS=$(git diff --name-only "$(git merge-base origin/master "$SHA_TO_TEST"~)" "$SHA_TO_TEST" | grep -P 'tests/queries/0_stateless/.*\.sql' | sed -r -e 's!^!ch/!' | sort -R)
cd ..
if [[ -n "$NEW_TESTS" ]]
then
@ -175,7 +175,7 @@ case "$stage" in
# Lost connection to the server. This probably means that the server died
# with abort.
echo "failure" > status.txt
if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*" server.log > description.txt
if ! grep -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*" server.log > description.txt
then
echo "Lost connection to server. See the logs" > description.txt
fi


@ -14,12 +14,12 @@ cd /sqlancer/sqlancer-master
export TIMEOUT=60
export NUM_QUERIES=1000
( java -jar target/SQLancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPWhere | tee /test_output/TLPWhere.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPWhere.err
( java -jar target/SQLancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPGroupBy | tee /test_output/TLPGroupBy.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPGroupBy.err
( java -jar target/SQLancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPHaving | tee /test_output/TLPHaving.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPHaving.err
( java -jar target/SQLancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPWhere --oracle TLPGroupBy | tee /test_output/TLPWhereGroupBy.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPWhereGroupBy.err
( java -jar target/SQLancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPDistinct | tee /test_output/TLPDistinct.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPDistinct.err
( java -jar target/SQLancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPAggregate | tee /test_output/TLPAggregate.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPAggregate.err
( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPWhere | tee /test_output/TLPWhere.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPWhere.err
( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPGroupBy | tee /test_output/TLPGroupBy.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPGroupBy.err
( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPHaving | tee /test_output/TLPHaving.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPHaving.err
( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPWhere --oracle TLPGroupBy | tee /test_output/TLPWhereGroupBy.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPWhereGroupBy.err
( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPDistinct | tee /test_output/TLPDistinct.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPDistinct.err
( java -jar target/sqlancer-*.jar --num-threads 10 --timeout-seconds $TIMEOUT --num-queries $NUM_QUERIES --username default --password "" clickhouse --oracle TLPAggregate | tee /test_output/TLPAggregate.out ) 3>&1 1>&2 2>&3 | tee /test_output/TLPAggregate.err
service clickhouse-server stop && sleep 10


@ -16,7 +16,7 @@ $ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/inst
## Install Required Compilers, Tools, and Libraries {#install-required-compilers-tools-and-libraries}
``` bash
$ brew install cmake ninja libtool gettext
$ brew install cmake ninja libtool gettext llvm
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}


@ -14,8 +14,8 @@ You can also use the following database engines:
- [MySQL](../../engines/database-engines/mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)
- [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)
[Original article](https://clickhouse.tech/docs/en/database_engines/) <!--hide-->


@ -5,9 +5,11 @@ toc_title: MaterializeMySQL
# MaterializeMySQL {#materialize-mysql}
Creates ClickHouse database with all the tables existing in MySQL, and all the data in those tables.
Creates a ClickHouse database with all the tables existing in MySQL, and all the data in those tables.
ClickHouse server works as MySQL replica. It reads binlog and performs DDL and DML queries.
The ClickHouse server works as a MySQL replica. It reads the binlog and performs DDL and DML queries.
This feature is experimental.
## Creating a Database {#creating-a-database}
@ -25,12 +27,12 @@ ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'passwor
## Virtual columns {#virtual-columns}
When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.
When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns.
- `_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md).
- `_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
- `1` — Row is not deleted,
- `-1` — Row is deleted.
- `_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md).
- `_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
- `1` — Row is not deleted,
- `-1` — Row is deleted.
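A minimal sketch of reading these virtual columns explicitly (assuming a replicated table `db.test` with columns `a` and `b`; the names are illustrative):

``` sql
-- Read non-deleted rows together with their transaction counter
-- (no implicit FINAL, since _version is selected explicitly).
SELECT a, b, _version FROM db.test WHERE _sign = 1;
```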
## Data Types Support {#data_types-support}
@ -61,9 +63,9 @@ Other types are not supported. If MySQL table contains a column of such type, Cl
MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored.
### Data Replication {#data-replication}
### Data Replication {#data-replication}
MaterializeMySQL does not support direct `INSERT`, `DELETE` and `UPDATE` queries. However, they are supported in terms of data replication:
`MaterializeMySQL` does not support direct `INSERT`, `DELETE` and `UPDATE` queries. However, they are supported in terms of data replication:
- MySQL `INSERT` query is converted into `INSERT` with `_sign=1`.
@ -73,11 +75,11 @@ MaterializeMySQL does not support direct `INSERT`, `DELETE` and `UPDATE` queries
### Selecting from MaterializeMySQL Tables {#select}
`SELECT` query form MaterializeMySQL tables has some specifics:
`SELECT` query from `MaterializeMySQL` tables has some specifics:
- If `_version` is not specified in the `SELECT` query, [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used. So only rows with `MAX(_version)` are selected.
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default, so the deleted rows are not included into the result set.
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not included into the result set.
### Index Conversion {#index-conversion}
@ -85,12 +87,12 @@ MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in
ClickHouse has only one physical order, which is determined by `ORDER BY` clause. To create a new physical order, use [materialized views](../../sql-reference/statements/create/view.md#materialized).
**Notes**
**Notes**
- Rows with `_sign=-1` are not deleted physically from the tables.
- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializeMySQL` engine.
- Replication can be easily broken.
- Manual operations on database and tables are forbidden.
- Rows with `_sign=-1` are not deleted physically from the tables.
- Cascade `UPDATE/DELETE` queries are not supported by the `MaterializeMySQL` engine.
- Replication can be easily broken.
- Manual operations on database and tables are forbidden.
## Examples of Use {#examples-of-use}
@ -105,6 +107,7 @@ mysql> ALTER TABLE db.test ADD COLUMN c VARCHAR(16);
mysql> UPDATE db.test SET c='Wow!', b=222;
mysql> SELECT * FROM test;
```
```text
+---+------+------+
| a | b | c |


@ -657,6 +657,96 @@ The `default` storage policy implies using only one volume, which consists of on
The number of threads performing background moves of data parts can be changed by [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting.
## Using S3 for Data Storage {#table_engine-mergetree-s3}
The `MergeTree` family of table engines can store data in [S3](https://aws.amazon.com/s3/) using a disk with type `s3`.
Configuration markup:
``` xml
<storage_configuration>
...
<disks>
<s3>
<type>s3</type>
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key>
<proxy>
<uri>http://proxy1</uri>
<uri>http://proxy2</uri>
</proxy>
<connect_timeout_ms>10000</connect_timeout_ms>
<request_timeout_ms>5000</request_timeout_ms>
<max_connections>100</max_connections>
<retry_attempts>10</retry_attempts>
<min_bytes_for_seek>1000</min_bytes_for_seek>
<metadata_path>/var/lib/clickhouse/disks/s3/</metadata_path>
<cache_enabled>true</cache_enabled>
<cache_path>/var/lib/clickhouse/disks/s3/cache/</cache_path>
<skip_access_check>false</skip_access_check>
</s3>
</disks>
...
</storage_configuration>
```
Required parameters:
- `endpoint` — S3 endpoint URL in `path` or `virtual hosted` [styles](https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). The endpoint URL should contain the bucket and the root path to store data.
- `access_key_id` — S3 access key ID.
- `secret_access_key` — S3 secret access key.
Optional parameters:
- `use_environment_credentials` — Reads AWS credentials from the environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_SESSION_TOKEN` if they exist. Default value is `false`.
- `proxy` — Proxy configuration for the S3 endpoint. Each `uri` element inside the `proxy` block should contain a proxy URL.
- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
- `max_connections` — S3 connection pool size. Default value is `100`.
- `retry_attempts` — Number of retry attempts in case of a failed request. Default value is `10`.
- `min_bytes_for_seek` — Minimal number of bytes to use a seek operation instead of a sequential read. Default value is `1 MB`.
- `metadata_path` — Path on the local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.
- `cache_enabled` — Allows caching mark and index files on the local FS. Default value is `true`.
- `cache_path` — Path on the local FS where cached mark and index files are stored. Default value is `/var/lib/clickhouse/disks/<disk_name>/cache/`.
- `skip_access_check` — If true, disk access checks are not performed at disk start-up. Default value is `false`.
The S3 disk can be configured as `main` or `cold` storage:
``` xml
<storage_configuration>
...
<disks>
<s3>
<type>s3</type>
<endpoint>https://storage.yandexcloud.net/my-bucket/root-path/</endpoint>
<access_key_id>your_access_key_id</access_key_id>
<secret_access_key>your_secret_access_key</secret_access_key>
</s3>
</disks>
<policies>
<s3_main>
<volumes>
<main>
<disk>s3</disk>
</main>
</volumes>
</s3_main>
<s3_cold>
<volumes>
<main>
<disk>default</disk>
</main>
<external>
<disk>s3</disk>
</external>
</volumes>
<move_factor>0.2</move_factor>
</s3_cold>
</policies>
...
</storage_configuration>
```
With the `cold` policy, data can be moved to S3 when free space on the local disk drops below `move_factor * disk_size`, or by a TTL move rule.
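A table is attached to one of these policies via the `storage_policy` setting. A minimal sketch (the table definition is illustrative):

``` sql
CREATE TABLE s3_backed_table
(
    id UInt64,
    value String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS storage_policy = 's3_main';
```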
### Details {#details}
In the case of `MergeTree` tables, data reaches the disk in different ways:


@ -110,17 +110,17 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
### Command Line Options {#command-line-options}
- `--host, -h` - The server name, localhost by default. You can use either the name or the IPv4 or IPv6 address.
- `--host, -h` The server name, localhost by default. You can use either the name or the IPv4 or IPv6 address.
- `--port` The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports.
- `--user, -u` The username. Default value: default.
- `--password` The password. Default value: empty string.
- `--query, -q` The query to process when using non-interactive mode. You must specify either `query` or `queries-file` option.
- `--queries-file, -qf` - file path with queries to execute. You must specify either `query` or `queries-file` option.
- `--queries-file, -qf` file path with queries to execute. You must specify either `query` or `queries-file` option.
- `--database, -d` Select the current default database. Default value: the current database from the server settings (default by default).
- `--multiline, -m` If specified, allow multiline queries (do not send the query on Enter).
- `--multiquery, -n` If specified, allow processing multiple queries separated by semicolons.
- `--format, -f` Use the specified default format to output the result.
- `--vertical, -E` If specified, use the Vertical format by default to output the result. This is the same as format=Vertical. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
- `--vertical, -E` If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
- `--time, -t` If specified, print the query execution time to stderr in non-interactive mode.
- `--stacktrace` If specified, also print the stack trace if an exception occurs.
- `--config-file` The name of the configuration file.


@ -515,9 +515,9 @@ Example:
## JSONAsString {#jsonasstring}
In this format, a single JSON object is interpreted as a single value. If input has several JSON objects (comma separated) they will be interpreted as a sepatate rows.
In this format, a single JSON object is interpreted as a single value. If the input has several JSON objects (comma separated) they will be interpreted as separate rows.
This format can only be parsed for table with a single field of type [String](../sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](../sql-reference/statements/create/table.md#default) or [MATERIALIZED](../sql-reference/statements/create/table.md#materialized), or omitted. Once you collect whole JSON object to string you can use [JSON functions](../sql-reference/functions/json-functions.md) to process it.
This format can only be parsed for a table with a single field of type [String](../sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](../sql-reference/statements/create/table.md#default) or [MATERIALIZED](../sql-reference/statements/create/table.md#materialized), or omitted. Once you collect the whole JSON object into a string, you can use [JSON functions](../sql-reference/functions/json-functions.md) to process it.
**Example**
@ -526,7 +526,7 @@ Query:
``` sql
DROP TABLE IF EXISTS json_as_string;
CREATE TABLE json_as_string (json String) ENGINE = Memory;
INSERT INTO json_as_string FORMAT JSONAsString {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json stucture":1}
INSERT INTO json_as_string (json) FORMAT JSONAsString {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json structure":1}
SELECT * FROM json_as_string;
```
@ -540,7 +540,6 @@ Result:
└───────────────────────────────────┘
```
## JSONCompact {#jsoncompact}
## JSONCompactString {#jsoncompactstring}


@ -120,5 +120,6 @@ toc_title: Adopters
| <a href="https://htc-cs.ru/" class="favicon">ЦВТ</a> | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
| <a href="https://mkb.ru/" class="favicon">МКБ</a> | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
| <a href="https://cft.ru/" class="favicon">ЦФТ</a> | Banking, Financial products, Payments | — | — | — | [Meetup in Russian, April 2020](https://team.cft.ru/events/162) |
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->


@ -0,0 +1,26 @@
---
toc_priority: 65
toc_title: Caches
---
# Cache Types {#cache-types}
When performing queries, ClickHouse uses different caches.
Main cache types:
- `mark_cache` — Cache of marks used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
- `uncompressed_cache` — Cache of uncompressed data used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
Additional cache types:
- DNS cache
- [regexp](../interfaces/formats.md#data-format-regexp) cache
- compiled expressions cache
- [Avro format](../interfaces/formats.md#data-format-avro) schemas cache
- [dictionaries data cache](../sql-reference/dictionaries/index.md)
Indirectly used:
- OS page cache
To drop a cache, use the [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md) statements.
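For example, the `MergeTree`-related caches can be dropped individually (a sketch; see the `SYSTEM` statement reference for the full list):

``` sql
SYSTEM DROP MARK CACHE;
SYSTEM DROP UNCOMPRESSED CACHE;
```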
[Original article](https://clickhouse.tech/docs/en/operations/caches/) <!--hide-->


@ -2506,11 +2506,7 @@ Default value: 0.
Consider the following query with aggregate functions:
```sql
SELECT
SUM(-1),
MAX(0)
FROM system.one
WHERE 0
SELECT SUM(-1), MAX(0) FROM system.one WHERE 0;
```
With `aggregate_functions_null_for_empty = 0` it would produce:


@ -7,7 +7,7 @@ toc_title: clickhouse-benchmark
Connects to a ClickHouse server and repeatedly sends specified queries.
Syntax:
**Syntax**
``` bash
$ clickhouse-benchmark --query ["single query"] [keys]
@ -28,35 +28,35 @@ $ clickhouse-benchmark [keys] <<< "single query"
If you want to send a set of queries, create a text file and place each query on an individual line in this file. For example:
``` sql
SELECT * FROM system.numbers LIMIT 10000000
SELECT 1
SELECT * FROM system.numbers LIMIT 10000000;
SELECT 1;
```
Then pass this file to a standard input of `clickhouse-benchmark`.
Then pass this file to the standard input of `clickhouse-benchmark`:
``` bash
clickhouse-benchmark [keys] < queries_file
clickhouse-benchmark [keys] < queries_file;
```
## Keys {#clickhouse-benchmark-keys}
- `--query=WORD` - Query to execute. If this parameter is not passed clickhouse-benchmark will read queries from standard input.
- `--query=QUERY` — Query to execute. If this parameter is not passed, `clickhouse-benchmark` will read queries from standard input.
- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (to disable reports set 0). Default value: 1.
- `-h HOST`, `--host=HOST` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
- `-p N`, `--port=N` — Server port. Default value: 9000. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-p` keys.
- `-i N`, `--iterations=N` — Total number of queries. Default value: 0 (repeat forever).
- `-r`, `--randomize` — Random order of queries execution if there is more then one input query.
- `-s`, `--secure` — Using TLS connection.
- `-r`, `--randomize` — Random order of queries execution if there is more than one input query.
- `-s`, `--secure` — Using `TLS` connection.
- `-t N`, `--timelimit=N` — Time limit in seconds. `clickhouse-benchmark` stops sending queries when the specified time limit is reached. Default value: 0 (time limit disabled).
- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student's t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) test to determine whether the two distributions aren't different with the selected level of confidence.
- `--confidence=N` — Level of confidence for T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In the [comparison mode](#clickhouse-benchmark-comparison-mode) `clickhouse-benchmark` performs the [Independent two-sample Student's t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions aren't different with the selected level of confidence.
- `--cumulative` — Printing cumulative data instead of data per interval.
- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
- `--json=FILEPATH` — JSON output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON-file.
- `--json=FILEPATH` — `JSON` output. When the key is set, `clickhouse-benchmark` outputs a report to the specified JSON-file.
- `--user=USERNAME` — ClickHouse user name. Default value: `default`.
- `--password=PSWD` — ClickHouse user password. Default value: empty string.
- `--stacktrace` — Stack traces output. When the key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
- `--stage=WORD` — Query processing stage at server. ClickHouse stops query processing and returns an answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
- `--help` — Shows the help message.
If you want to apply some [settings](../../operations/settings/index.md) for queries, pass them as a key `--<session setting name>=SETTING_VALUE`. For example, `--max_memory_usage=1048576`.
@ -96,11 +96,11 @@ In the report you can find:
- Endpoint of ClickHouse server.
- Number of processed queries.
- QPS: QPS: How many queries server performed per second during a period specified in the `--delay` argument.
- RPS: How many rows server read per second during a period specified in the `--delay` argument.
- MiB/s: How many mebibytes server read per second during a period specified in the `--delay` argument.
- result RPS: How many rows placed by server to the result of a query per second during a period specified in the `--delay` argument.
- result MiB/s. How many mebibytes placed by server to the result of a query per second during a period specified in the `--delay` argument.
- QPS: How many queries the server performed per second during a period specified in the `--delay` argument.
- RPS: How many rows the server read per second during a period specified in the `--delay` argument.
- MiB/s: How many mebibytes the server read per second during a period specified in the `--delay` argument.
- result RPS: How many rows the server placed in the result of a query per second during a period specified in the `--delay` argument.
- result MiB/s: How many mebibytes the server placed in the result of a query per second during a period specified in the `--delay` argument.
- Percentiles of queries execution time.
@ -159,3 +159,5 @@ localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, resu
99.900% 0.172 sec.
99.990% 0.172 sec.
```
[Original article](https://clickhouse.tech/docs/en/operations/utilities/clickhouse-benchmark.md) <!--hide-->


@ -71,8 +71,8 @@ Parameters:
<remote_servers>
<source_cluster>
<!--
source cluster & destination clusters accepts exactly the same
parameters as parameters for usual Distributed table
source cluster & destination clusters accept exactly the same
parameters as parameters for the usual Distributed table
see https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
-->
<shard>


@ -16,7 +16,7 @@ By default `clickhouse-local` does not have access to data on the same host, but
!!! warning "Warning"
It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error.
For temporary data, a unique temporary data directory is created by default. If you want to override this behavior, the data directory can be explicitly specified with the `-- --path` option.
For temporary data, a unique temporary data directory is created by default.
## Usage {#usage}
@ -32,15 +32,22 @@ Arguments:
- `-S`, `--structure` — table structure for input data.
- `-if`, `--input-format` — input format, `TSV` by default.
- `-f`, `--file` — path to data, `stdin` by default.
- `-q` `--query` — queries to execute with `;` as delimeter. You must specify either `query` or `queries-file` option.
- `-qf` `--queries-file` - file path with queries to execute. You must specify either `query` or `queries-file` option.
- `-q`, `--query` — queries to execute with `;` as the delimiter. You must specify either the `query` or the `queries-file` option.
- `-qf`, `--queries-file` — file path with queries to execute. You must specify either the `query` or the `queries-file` option.
- `-N`, `--table` — table name where to put output data, `table` by default.
- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
- `-d`, `--database` — default database, `_local` by default.
- `--stacktrace` — whether to dump debug output in case of exception.
- `--echo` — print query before execution.
- `--verbose` — more details on query execution.
- `-s` — disables `stderr` logging.
- `--config-file` — path to configuration file in same format as for ClickHouse server, by default the configuration empty.
- `--logger.console` — Log to console.
- `--logger.log` — Log file name.
- `--logger.level` — Log level.
- `--ignore-error` — do not stop processing if a query failed.
- `-c`, `--config-file` — path to the configuration file in the same format as for the ClickHouse server; by default the configuration is empty.
- `--no-system-tables` — do not attach system tables.
- `--help` — argument reference for `clickhouse-local`.
- `-V`, `--version` — print version information and exit.
There are also arguments for each ClickHouse configuration variable, which are more commonly used than `--config-file`.


@ -4,6 +4,28 @@ toc_priority: 106
# argMax {#agg-function-argmax}
Syntax: `argMax(arg, val)`
Syntax: `argMax(arg, val)` or `argMax(tuple(arg, val))`
Calculates the `arg` value for a maximum `val` value. If there are several different values of `arg` for maximum values of `val`, the first of these values encountered is output.
The tuple version of this function returns the tuple with the maximum `val` value. It is convenient for use with `SimpleAggregateFunction`.
**Example:**
``` text
┌─user─────┬─salary─┐
│ director │ 5000 │
│ manager │ 3000 │
│ worker │ 1000 │
└──────────┴────────┘
```
``` sql
SELECT argMax(user, salary), argMax(tuple(user, salary)) FROM salary
```
``` text
┌─argMax(user, salary)─┬─argMax(tuple(user, salary))─┐
│ director │ ('director',5000) │
└──────────────────────┴─────────────────────────────┘
```


@ -4,10 +4,12 @@ toc_priority: 105
# argMin {#agg-function-argmin}
Syntax: `argMin(arg, val)`
Syntax: `argMin(arg, val)` or `argMin(tuple(arg, val))`
Calculates the `arg` value for a minimal `val` value. If there are several different values of `arg` for minimal values of `val`, the first of these values encountered is output.
The tuple version of this function returns the tuple with the minimal `val` value. It is convenient for use with `SimpleAggregateFunction`.
**Example:**
``` text
@ -19,11 +21,11 @@ Calculates the `arg` value for a minimal `val` value. If there are several diffe
```
``` sql
SELECT argMin(user, salary) FROM salary
SELECT argMin(user, salary), argMin(tuple(user, salary)) FROM salary
```
``` text
┌─argMin(user, salary)─┐
│ worker │
└──────────────────────┘
┌─argMin(user, salary)─┬─argMin(tuple(user, salary))─┐
│ worker               │ ('worker',1000)             │
└──────────────────────┴─────────────────────────────┘
```


@ -0,0 +1,29 @@
---
toc_priority: 61
toc_title: Multiword Type Names
---
# Multiword Types {#multiword-types}
When creating tables, you can use data types with a name consisting of several words. This is implemented for better SQL compatibility.
## Multiword Types Support {#multiword-types-support}
| Multiword types | Simple types |
|----------------------------------|--------------------------------------------------------------|
| DOUBLE PRECISION | [Float64](../../sql-reference/data-types/float.md) |
| CHAR LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| CHAR VARYING | [String](../../sql-reference/data-types/string.md) |
| CHARACTER LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| CHARACTER VARYING | [String](../../sql-reference/data-types/string.md) |
| NCHAR LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| NCHAR VARYING | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHARACTER LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHARACTER VARYING | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHAR VARYING | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHARACTER | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHAR | [String](../../sql-reference/data-types/string.md) |
| BINARY LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| BINARY VARYING | [String](../../sql-reference/data-types/string.md) |
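As an illustrative sketch (the table name is hypothetical), a multiword type can be used anywhere the corresponding simple type is accepted:

``` sql
CREATE TABLE multiword_demo
(
    f DOUBLE PRECISION,  -- stored as Float64
    s CHAR LARGE OBJECT  -- stored as String
)
ENGINE = Memory;

DESCRIBE TABLE multiword_demo;
```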
[Original article](https://clickhouse.tech/docs/en/sql-reference/data-types/multiword-types/) <!--hide-->


@ -18,6 +18,8 @@ The following aggregate functions are supported:
- [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap)
- [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap)
- [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap)
- [`argMin`](../../sql-reference/aggregate-functions/reference/argmin.md)
- [`argMax`](../../sql-reference/aggregate-functions/reference/argmax.md)
Values of the `SimpleAggregateFunction(func, Type)` type look and are stored the same way as `Type`, so you do not need to apply functions with `-Merge`/`-State` suffixes. `SimpleAggregateFunction` has better performance than `AggregateFunction` with the same aggregate function.
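A sketch of combining the newly supported `argMax` with `SimpleAggregateFunction`, using the tuple form described in the argMax reference (table and column names are illustrative; `salary` is the table from the argMax example):

``` sql
CREATE TABLE top_paid
(
    dept String,
    top SimpleAggregateFunction(argMax, Tuple(String, UInt64))
)
ENGINE = AggregatingMergeTree
ORDER BY dept;

-- The tuple version keeps the (user, salary) pair with the maximum salary.
INSERT INTO top_paid SELECT 'it', argMax(tuple(user, salary)) FROM salary;
```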


@ -1468,7 +1468,7 @@ Code: 395. DB::Exception: Received from localhost:9000. DB::Exception: Too many.
## identity {#identity}
Returns the same value that was used as its argument. Used for debugging and testing, allows to cancel using index, and get the query performance of a full scan. When query is analyzed for possible use of index, the analyzer doesn't look inside `identity` functions.
Returns the same value that was used as its argument. Used for debugging and testing, it allows cancelling the use of an index and getting the query performance of a full scan. When a query is analyzed for possible use of an index, the analyzer doesn't look inside `identity` functions. Constant folding is not applied either.
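As a hedged illustration of the full-scan effect (hypothetical table `hits` with a primary key on `CounterID`):

``` sql
-- Can use the primary key index:
SELECT count() FROM hits WHERE CounterID = 42;

-- Forces a full scan; the analyzer does not look inside identity():
SELECT count() FROM hits WHERE identity(CounterID) = 42;
```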
**Syntax**


@ -574,7 +574,7 @@ encodeXMLComponent(x)
- `x` — The sequence of characters. [String](../../sql-reference/data-types/string.md).
**Returned value(s)**
**Returned value**
- The sequence of characters with escape characters.


@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
```
Creates a table named `name` in the `db` database or the current database if `db` is not set, with the structure specified in brackets and the `engine` engine.
The structure of the table is a list of column descriptions, secondary indexes and constraints . If primary key is supported by the engine, it will be indicated as parameter for the table engine.
The structure of the table is a list of column descriptions, secondary indexes and constraints. If a [primary key](#primary-key) is supported by the engine, it will be indicated as a parameter for the table engine.
A column description is `name type` in the simplest case. Example: `RegionID UInt32`.
@ -111,7 +111,7 @@ It is not possible to set default values for elements in nested data structures.
You can define a [primary key](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries) when creating a table. Primary key can be specified in two ways:
- inside the column list
- Inside the column list
``` sql
CREATE TABLE db.table_name
@ -122,7 +122,7 @@ CREATE TABLE db.table_name
ENGINE = engine;
```
- outside the column list
- Outside the column list
``` sql
CREATE TABLE db.table_name
@ -133,7 +133,8 @@ ENGINE = engine
PRIMARY KEY(expr1[, expr2,...]);
```
You can't combine both ways in one query.
!!! warning "Warning"
You can't combine both ways in one query.
## Constraints {#constraints}


@ -5,7 +5,11 @@ toc_title: file
# file {#file}
Creates a table from a file. This table function is similar to [url](../../sql-reference/table-functions/url.md) and [hdfs](../../sql-reference/table-functions/hdfs.md) ones.
Creates a table from a file. This table function is similar to [url](../../sql-reference/table-functions/url.md) and [hdfs](../../sql-reference/table-functions/hdfs.md) ones.
The `file` function can be used in `SELECT` and `INSERT` queries on data in [File](../../engines/table-engines/special/file.md) tables.
**Syntax**
``` sql
file(path, format, structure)
@ -13,15 +17,15 @@ file(path, format, structure)
**Input parameters**
- `path` — The relative path to the file from [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file support following globs in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, \``'abc', 'def'` — strings.
- `path` — The relative path to the file from [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). The path to the file supports the following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}`, where `N`, `M` are numbers and `'abc', 'def'` are strings.
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
**Returned value**
A table with the specified structure for reading or writing data in the specified file.
**Example**
**Examples**
Setting `user_files_path` and the contents of the file `test.csv`:
@ -35,12 +39,29 @@ $ cat /var/lib/clickhouse/user_files/test.csv
78,43,45
```
Table from`test.csv` and selection of the first two rows from it:
Getting data from a table in `test.csv` and selecting the first two rows from it:
``` sql
SELECT *
FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32')
LIMIT 2
SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 2;
```
``` text
┌─column1─┬─column2─┬─column3─┐
│ 1 │ 2 │ 3 │
│ 3 │ 2 │ 1 │
└─────────┴─────────┴─────────┘
```
Getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file:
``` sql
SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10;
```
Inserting data from a file into a table:
``` sql
INSERT INTO FUNCTION file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') VALUES (1, 2, 3), (3, 2, 1);
SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32');
```
``` text
@ -50,12 +71,8 @@ LIMIT 2
└─────────┴─────────┴─────────┘
```
``` sql
-- getting the first 10 lines of a table that contains 3 columns of UInt32 type from a CSV file
SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32') LIMIT 10
```
**Globs in path**
## Globs in Path {#globs-in-path}
Multiple path components can have globs. To be processed, a file should exist and match the whole path pattern (not only the suffix or prefix).
@ -68,31 +85,25 @@ Constructions with `{}` are similar to the [remote table function](../../sql-ref
**Example**
1. Suppose we have several files with the following relative paths:
Suppose we have several files with the following relative paths:
- some_dir/some_file_1
- some_dir/some_file_2
- some_dir/some_file_3
- another_dir/some_file_1
- another_dir/some_file_2
- another_dir/some_file_3
- 'some_dir/some_file_1'
- 'some_dir/some_file_2'
- 'some_dir/some_file_3'
- 'another_dir/some_file_1'
- 'another_dir/some_file_2'
- 'another_dir/some_file_3'
1. Query the amount of rows in these files:
<!-- -->
Query the number of rows in these files:
``` sql
SELECT count(*)
FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32')
SELECT count(*) FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32');
```
1. Query the amount of rows in all files of these two directories:
<!-- -->
Query the number of rows in all files of these two directories:
``` sql
SELECT count(*)
FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32');
```
!!! warning "Warning"
@ -103,8 +114,7 @@ FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32')
Query the data from files named `file000`, `file001`, … , `file999`:
``` sql
SELECT count(*)
FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32');
```
## Virtual Columns {#virtual-columns}
@ -116,4 +126,4 @@ FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32')
- [Virtual columns](https://clickhouse.tech/docs/en/operations/table_engines/#table_engines-virtual_columns)
[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/file/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/file/) <!--hide-->


@ -5,9 +5,11 @@ toc_title: remote
# remote, remoteSecure {#remote-remotesecure}
Allows you to access remote servers without creating a `Distributed` table.
Allows accessing remote servers without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. `remoteSecure` is the same as `remote`, but over a secured connection.
Signatures:
Both functions can be used in `SELECT` and `INSERT` queries.
**Syntax**
``` sql
remote('addresses_expr', db, table[, 'user'[, 'password'], sharding_key])
@ -16,13 +18,40 @@ remoteSecure('addresses_expr', db, table[, 'user'[, 'password'], sharding_key])
remoteSecure('addresses_expr', db.table[, 'user'[, 'password'], sharding_key])
```
`addresses_expr` — An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. The port is the TCP port on the remote server. If the port is omitted, it uses `tcp_port` from the server's config file (by default, 9000).
`sharding_key` - We can specify sharding key to support distributing data across nodes. For example: `insert into remote('127.0.0.1:9000,127.0.0.2', db, table, 'default', rand())`.
**Input parameters**
- `addresses_expr` An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`.
The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets.
The port is the TCP port on the remote server. If the port is omitted, it uses [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) from the server's config file in `remote` (by default, 9000) and [tcp_port_secure](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) in `remoteSecure` (by default, 9440).
!!! important "Important"
The port is required for an IPv6 address.
Examples:
Type: [String](../../sql-reference/data-types/string.md).
- `db` - Database name. Type: [String](../../sql-reference/data-types/string.md).
- `table` - Table name. Type: [String](../../sql-reference/data-types/string.md).
- `user` - User name. If the user is not specified, `default` is used. Type: [String](../../sql-reference/data-types/string.md).
- `password` - User password. If the password is not specified, an empty password is used. Type: [String](../../sql-reference/data-types/string.md).
- `sharding_key` - Sharding key to support distributing data across nodes. For example: `insert into remote('127.0.0.1:9000,127.0.0.2', db, table, 'default', rand())`. Type: [UInt32](../../sql-reference/data-types/int-uint.md).
**Returned value**
Dataset from remote servers.
**Usage**
Using the `remote` table function is less optimal than creating a `Distributed` table, because in this case the server connection is re-established for every request. In addition, if host names are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don't use the `remote` table function.
The `remote` table function can be useful in the following cases:
- Accessing a specific server for data comparison, debugging, and testing.
- Queries between various ClickHouse clusters for research purposes.
- Infrequent distributed requests that are made manually.
- Distributed requests where the set of servers is re-defined each time.
**Addresses**
``` text
example01-01-1
@ -33,9 +62,7 @@ localhost
[2a02:6b8:0:1111::11]:9000
```
Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like to shards with different data).
Example:
Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing, so it will send the query to all specified addresses (like to shards with different data). Example:
``` text
example01-01-1,example01-02-1
@ -55,30 +82,28 @@ example01-{01..02}-1
If you have multiple pairs of curly brackets, it generates the direct product of the corresponding sets.
Addresses and parts of addresses in curly brackets can be separated by the pipe symbol (\|). In this case, the corresponding sets of addresses are interpreted as replicas, and the query will be sent to the first healthy replica. However, the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md) setting.
Example:
Addresses and parts of addresses in curly brackets can be separated by the pipe symbol (\|). In this case, the corresponding sets of addresses are interpreted as replicas, and the query will be sent to the first healthy replica. However, the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md) setting. This example specifies two shards that each have two replicas:
``` text
example01-{01..02}-{1|2}
```
This example specifies two shards that each have two replicas.
The number of addresses generated is limited by a constant. Right now this is 1000 addresses.
Using the `remote` table function is less optimal than creating a `Distributed` table, because in this case, the server connection is re-established for every request. In addition, if host names are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and don't use the `remote` table function.
**Examples**
The `remote` table function can be useful in the following cases:
Selecting data from a remote server:
- Accessing a specific server for data comparison, debugging, and testing.
- Queries between various ClickHouse clusters for research purposes.
- Infrequent distributed requests that are made manually.
- Distributed requests where the set of servers is re-defined each time.
``` sql
SELECT * FROM remote('127.0.0.1', db.remote_engine_table) LIMIT 3;
```
If the user is not specified, `default` is used.
If the password is not specified, an empty password is used.
Inserting data from a remote server into a table:
`remoteSecure` - same as `remote` but with secured connection. Default port — [tcp_port_secure](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) from config or 9440.
``` sql
CREATE TABLE remote_table (name String, value UInt32) ENGINE=Memory;
INSERT INTO FUNCTION remote('127.0.0.1', currentDatabase(), 'remote_table') VALUES ('test', 42);
SELECT * FROM remote_table;
```
[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/remote/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/remote/) <!--hide-->
View File
@ -5,20 +5,40 @@ toc_title: url
# url {#url}
`url(URL, format, structure)` - returns a table created from the `URL` with given
`format` and `structure`.
The `url` function creates a table from the `URL` with the given `format` and `structure`.
URL - HTTP or HTTPS server address, which can accept `GET` and/or `POST` requests.
The `url` function may be used in `SELECT` and `INSERT` queries on data in [URL](../../engines/table-engines/special/url.md) tables.
format - [format](../../interfaces/formats.md#formats) of the data.
structure - table structure in `'UserID UInt64, Name String'` format. Determines column names and types.
**Example**
**Syntax**
``` sql
-- getting the first 3 lines of a table that contains columns of String and UInt32 type from HTTP-server which answers in CSV format.
SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3
url(URL, format, structure)
```
[Original article](https://clickhouse.tech/docs/en/query_language/table_functions/url/) <!--hide-->
**Input parameters**
- `URL` - HTTP or HTTPS server address, which can accept `GET` (for `SELECT`) or `POST` (for `INSERT`) requests. Type: [String](../../sql-reference/data-types/string.md).
- `format` - [Format](../../interfaces/formats.md#formats) of the data. Type: [String](../../sql-reference/data-types/string.md).
- `structure` - Table structure in `'UserID UInt64, Name String'` format. Determines column names and types. Type: [String](../../sql-reference/data-types/string.md).
**Returned value**
A table with the specified format and structure and with data from the defined URL.
**Examples**
Getting the first 3 lines of a table that contains columns of `String` and `UInt32` type from an HTTP server that answers in `CSV` format.
``` sql
SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3;
```
Inserting data from a URL into a table:
``` sql
CREATE TABLE test_table (column1 String, column2 UInt32) ENGINE=Memory;
INSERT INTO FUNCTION url('http://127.0.0.1:8123/?query=INSERT+INTO+test_table+FORMAT+CSV', 'CSV', 'column1 String, column2 UInt32') VALUES ('http interface', 42);
SELECT * FROM test_table;
```
[Original article](https://clickhouse.tech/docs/en/sql-reference/table-functions/url/) <!--hide-->
View File
@ -5,6 +5,6 @@ toc_title: Roadmap
# Roadmap {#roadmap}
The roadmap for year 2021 is published for open discussion [here](https://github.com/ClickHouse/ClickHouse/issues/17623).
The roadmap for the year 2021 is published for open discussion [here](https://github.com/ClickHouse/ClickHouse/issues/17623).
{## [Original article](https://clickhouse.tech/docs/en/roadmap/) ##}
View File
@ -210,7 +210,7 @@ Mac OS X:
To run the server as the current user, with logs printed to the terminal and using the example configuration files from the sources, go to the `ClickHouse/programs/server/` directory (it is located outside the `build` directory) and run:
../../../build/programs/clickhouse server
../../build/programs/clickhouse server
In this case, ClickHouse will use the configuration files located in the current directory. You can run `clickhouse server` from any directory, passing the path to the configuration file as the `--config-file` command-line argument.
View File
@ -4,7 +4,6 @@ toc_priority: 27
toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435"
---
# Database Engines {#dvizhki-baz-dannykh}
Database engines provide support for working with tables.
@ -13,4 +12,10 @@ toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435"
The following database engines can also be used:
- [MySQL](mysql.md)
- [MySQL](../../engines/database-engines/mysql.md)
- [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md)
- [Lazy](../../engines/database-engines/lazy.md)
[Original article](https://clickhouse.tech/docs/ru/database_engines/) <!--hide-->
View File
@ -0,0 +1,159 @@
---
toc_priority: 29
toc_title: MaterializeMySQL
---
# MaterializeMySQL {#materialize-mysql}
Creates a ClickHouse database with all the tables existing in MySQL, and all the data in those tables.
The ClickHouse server works as a MySQL replica. It reads the binlog and performs DDL and DML queries.
`MaterializeMySQL` is an experimental database engine.
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE [IF NOT EXISTS] db_name [ON CLUSTER cluster]
ENGINE = MaterializeMySQL('host:port', ['database' | database], 'user', 'password') [SETTINGS ...]
```
**Engine Parameters**
- `host:port` — MySQL server address.
- `database` — name of the database on the remote server.
- `user` — MySQL user.
- `password` — user password.
## Virtual Columns {#virtual-columns}
When working with the `MaterializeMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used, with the `_sign` and `_version` virtual columns.
- `_version` — transaction counter. Type: [UInt64](../../sql-reference/data-types/int-uint.md).
- `_sign` — deletion mark. Type: [Int8](../../sql-reference/data-types/int-uint.md). Possible values:
    - `1` — the row is not deleted,
    - `-1` — the row is deleted.
## Data Type Support {#data_types-support}
| MySQL | ClickHouse |
|-------------------------|--------------------------------------------------------------|
| TINY | [Int8](../../sql-reference/data-types/int-uint.md) |
| SHORT | [Int16](../../sql-reference/data-types/int-uint.md) |
| INT24 | [Int32](../../sql-reference/data-types/int-uint.md) |
| LONG | [UInt32](../../sql-reference/data-types/int-uint.md) |
| LONGLONG | [UInt64](../../sql-reference/data-types/int-uint.md) |
| FLOAT | [Float32](../../sql-reference/data-types/float.md) |
| DOUBLE | [Float64](../../sql-reference/data-types/float.md) |
| DECIMAL, NEWDECIMAL | [Decimal](../../sql-reference/data-types/decimal.md) |
| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| STRING | [String](../../sql-reference/data-types/string.md) |
| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) |
| BLOB | [String](../../sql-reference/data-types/string.md) |
Other types are not supported. If a MySQL table contains a column of such a type, ClickHouse throws an "Unhandled data type" exception and stops replication.
The [Nullable](../../sql-reference/data-types/nullable.md) type is supported.
## Specifics and Recommendations {#specifics-and-recommendations}
### DDL Queries {#ddl-queries}
MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop.md), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot convert a DDL query, it ignores it.
### Data Replication {#data-replication}
The data is immutable from the ClickHouse user's side, but is updated automatically by replicating the following queries from MySQL:
- An `INSERT` query is converted into an `INSERT` with `_sign=1`.
- A `DELETE` query is converted into an `INSERT` with `_sign=-1`.
- An `UPDATE` query is converted into an `INSERT` with `_sign=-1` and an `INSERT` with `_sign=1`.
### Selecting from MaterializeMySQL Tables {#select}
A `SELECT` query from a `MaterializeMySQL` table has some specifics:
- If `_version` is not specified in the `SELECT` query, the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used, so only rows with `MAX(_version)` are selected.
- If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default, so deleted rows are not included in the result set.
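As a hedged illustration of these defaults (using the `mysql.test` table from the examples below), the following two queries are expected to be equivalent:
``` sql
SELECT a, b FROM mysql.test;
SELECT a, b FROM mysql.test FINAL WHERE _sign = 1;
```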
### Index Conversion {#index-conversion}
MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables.
In ClickHouse tables, data is physically stored in the order determined by the `ORDER BY` clause. To physically regroup the data, use [materialized views](../../sql-reference/statements/create/view.md#materialized).
**Notes**
- Rows with `_sign=-1` are not physically deleted from the tables.
- Cascading `UPDATE/DELETE` queries are not supported by the `MaterializeMySQL` engine.
- Replication can be easily broken.
- Direct data-modifying operations on `MaterializeMySQL` tables and databases are forbidden.
## Examples of Use {#examples-of-use}
Queries in MySQL:
``` sql
mysql> CREATE DATABASE db;
mysql> CREATE TABLE db.test (a INT PRIMARY KEY, b INT);
mysql> INSERT INTO db.test VALUES (1, 11), (2, 22);
mysql> DELETE FROM db.test WHERE a=1;
mysql> ALTER TABLE db.test ADD COLUMN c VARCHAR(16);
mysql> UPDATE db.test SET c='Wow!', b=222;
mysql> SELECT * FROM test;
```
```text
+---+------+------+
| a | b | c |
+---+------+------+
| 2 | 222 | Wow! |
+---+------+------+
```
The database in ClickHouse, exchanging data with the MySQL server:
The database and the created table:
``` sql
CREATE DATABASE mysql ENGINE = MaterializeMySQL('localhost:3306', 'db', 'user', '***');
SHOW TABLES FROM mysql;
```
``` text
┌─name─┐
│ test │
└──────┘
```
After inserting data:
``` sql
SELECT * FROM mysql.test;
```
``` text
┌─a─┬──b─┐
│ 1 │ 11 │
│ 2 │ 22 │
└───┴────┘
```
After deleting data, adding a column, and updating:
``` sql
SELECT * FROM mysql.test;
```
``` text
┌─a─┬───b─┬─c────┐
│ 2 │ 222 │ Wow! │
└───┴─────┴──────┘
```
[Original article](https://clickhouse.tech/docs/ru/database_engines/materialize-mysql/) <!--hide-->
View File
@ -88,6 +88,7 @@ ORDER BY expr
- `merge_max_block_size` — maximum number of rows in a block for merge operations. Default value: 8192.
- `storage_policy` — data storage policy. See [Using Multiple Block Devices for Data Storage](#table_engine-mergetree-multiple-volumes).
- `min_bytes_for_wide_part`, `min_rows_for_wide_part` — minimum number of bytes/rows in a data part that is stored in `Wide` format. You can set one of these settings, both, or neither. See [Data Storage](#mergetree-data-storage) for details.
- `max_parts_in_total` — maximum number of parts across all partitions.
- `max_compress_block_size` — maximum size of blocks of uncompressed data before compressing them when writing to the table. You can also set this parameter in the global settings (see [max_compress_block_size](../../../operations/settings/settings.md#max-compress-block-size)). The value specified when the table is created overrides the global one.
- `min_compress_block_size` — minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also set this parameter in the global settings (see [min_compress_block_size](../../../operations/settings/settings.md#min-compress-block-size)). The value specified when the table is created overrides the global one.
View File
@ -153,7 +153,7 @@ CREATE TABLE table_name
```xml
<default_replica_path>/clickhouse/tables/{shard}/{database}/{table}</default_replica_path>
<default_replica_name>{replica}</default_replica_path>
<default_replica_name>{replica}</default_replica_name>
```
In this case, you can omit the arguments when creating tables:
View File
@ -116,18 +116,18 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe
### Command-Line Options {#parametry-komandnoi-stroki}
- `--host, -h` — server name, `localhost` by default. You can use either the name or the IPv4 or IPv6 address.
- `--port` — the port to connect to, 9000 by default. Note that the HTTP interface and the native interface use different ports.
- `--user, -u` — user name, `default` by default.
- `--password` — password, an empty string by default.
- `--query, -q` — the query to execute when using non-interactive mode.
- `--database, -d` — select the current database. If not specified, the value is taken from the server settings (the `default` database by default).
- `--multiline, -m` — if specified, allow multi-line queries (do not send the query on Enter).
- `--multiquery, -n` — if specified, allow executing multiple queries separated by semicolons.
- `--format, -f` — use the specified default format to output the result.
- `--vertical, -E` — if specified, use the [Vertical](../interfaces/formats.md#vertical) format by default to output the result. This is the same as `format=Vertical`. In this format, each value is printed on a separate line, which is convenient for displaying wide tables.
- `--time, -t` — if specified, print the query execution time to the stderr stream in non-interactive mode.
- `--stacktrace` — if specified, also print the stack trace when an exception occurs.
- `--config-file` — the name of the configuration file.
- `--secure` — if specified, a secure channel is used.
- `--param_<name>` — value of a parameter for a [query with parameters](#cli-queries-with-parameters).
View File
@ -24,6 +24,7 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT
| [Vertical](#vertical) | ✗ | ✔ |
| [VerticalRaw](#verticalraw) | ✗ | ✔ |
| [JSON](#json) | ✗ | ✔ |
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONString](#jsonstring) | ✗ | ✔ |
| [JSONCompact](#jsoncompact) | ✗ | ✔ |
| [JSONCompactString](#jsoncompactstring) | ✗ | ✔ |
@ -490,6 +491,33 @@ ClickHouse поддерживает [NULL](../sql-reference/syntax.md), кото
}
```
## JSONAsString {#jsonasstring}
In this format, a single JSON object is interpreted as a single string value. If the input has several JSON objects separated by commas, they are interpreted as separate rows of the table.
In this format, only a table with a single field of type [String](../sql-reference/data-types/string.md) can be parsed. The remaining columns must be set to `DEFAULT` or `MATERIALIZED` (see [Default values](../sql-reference/statements/create/table.md#create-default-values)) or be omitted. To process the JSON object stored in the string, you can use the [JSON functions](../sql-reference/functions/json-functions.md).
**Example**
Query:
``` sql
DROP TABLE IF EXISTS json_as_string;
CREATE TABLE json_as_string (json String) ENGINE = Memory;
INSERT INTO json_as_string (json) FORMAT JSONAsString {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json structure":1}
SELECT * FROM json_as_string;
```
Result:
``` text
┌─json──────────────────────────────┐
│ {"foo":{"bar":{"x":"y"},"baz":1}} │
│ {} │
│ {"any json stucture":1} │
└───────────────────────────────────┘
```
## JSONCompact {#jsoncompact}
## JSONCompactString {#jsoncompactstring}
View File
@ -2375,11 +2375,7 @@ SELECT number FROM numbers(3) FORMAT JSONEachRow;
Consider a query with aggregate functions:
```sql
SELECT
SUM(-1),
MAX(0)
FROM system.one
WHERE 0
SELECT SUM(-1), MAX(0) FROM system.one WHERE 0;
```
The result of the query with the setting `aggregate_functions_null_for_empty = 0`:
@ -2411,6 +2407,16 @@ WHERE 0
See examples in the [UNION](../../sql-reference/statements/select/union.md) section.
## data_type_default_nullable {#data_type_default_nullable}
Allows using the [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type by default in a column definition that has no explicit [NULL or NOT NULL](../../sql-reference/statements/create/table.md#null-modifiers) modifiers.
Possible values:
- 1 — the data types in column definitions are `Nullable` by default.
- 0 — the data types in column definitions are not `Nullable` by default.
Default value: `0`.
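A minimal sketch of the effect (the table name `t` is hypothetical):
``` sql
SET data_type_default_nullable = 1;
CREATE TABLE t (x Int32) ENGINE = Memory;  -- x is created as Nullable(Int32)
INSERT INTO t VALUES (NULL);               -- allowed, because the column is Nullable
```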
## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold}
View File
@ -1 +0,0 @@
../../../en/operations/utilities/clickhouse-benchmark.md
View File
@ -0,0 +1,163 @@
---
toc_priority: 61
toc_title: clickhouse-benchmark
---
# clickhouse-benchmark {#clickhouse-benchmark}
Connects to a ClickHouse server and repeatedly sends the specified queries in a loop.
**Syntax**
``` bash
$ clickhouse-benchmark --query ["single query"] [keys]
```
or
``` bash
$ echo "single query" | clickhouse-benchmark [keys]
```
or
``` bash
$ clickhouse-benchmark [keys] <<< "single query"
```
If you want to run a set of queries, create a text file and place each query on a separate line. For example:
``` sql
SELECT * FROM system.numbers LIMIT 10000000;
SELECT 1;
```
Then pass this file to the standard input of `clickhouse-benchmark`:
``` bash
clickhouse-benchmark [keys] < queries_file;
```
## Keys {#clickhouse-benchmark-keys}
- `--query=QUERY` — the query to execute. If this parameter is not passed, `clickhouse-benchmark` reads queries from standard input.
- `-c N`, `--concurrency=N` — number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
- `-d N`, `--delay=N` — interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
- `-h HOST`, `--host=HOST` — server host. Default value: `localhost`. In [comparison mode](#clickhouse-benchmark-comparison-mode), you can use multiple `-h` keys.
- `-p N`, `--port=N` — server port. Default value: 9000. In [comparison mode](#clickhouse-benchmark-comparison-mode), you can use multiple `-p` keys.
- `-i N`, `--iterations=N` — total number of queries. Default value: 0 (repeats forever).
- `-r`, `--randomize` — use a random order of query execution if there is more than one input query.
- `-s`, `--secure` — use a `TLS` connection.
- `-t N`, `--timelimit=N` — time limit in seconds. `clickhouse-benchmark` stops sending queries when the time limit is reached. Default value: 0 (the limit is disabled).
- `--confidence=N` — confidence level for the T-test. Possible values: 0 (80%), 1 (90%), 2 (95%), 3 (98%), 4 (99%), 5 (99.5%). Default value: 5. In [comparison mode](#clickhouse-benchmark-comparison-mode), `clickhouse-benchmark` performs an [independent two-sample Student's t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test) to determine whether the two distributions differ at the selected confidence level.
- `--cumulative` — print cumulative statistics for the whole run instead of statistics for the last interval.
- `--database=DATABASE_NAME` — ClickHouse database name. Default value: `default`.
- `--json=FILEPATH` — additional output in `JSON` format. When this key is set, `clickhouse-benchmark` writes a report to the specified JSON file.
- `--user=USERNAME` — ClickHouse user name. Default value: `default`.
- `--password=PSWD` — ClickHouse user password. Default value: an empty string.
- `--stacktrace` — print exception stack traces. When this key is set, `clickhouse-benchmark` outputs stack traces of exceptions.
- `--stage=WORD` — query processing stage on the server. ClickHouse stops processing the query and returns an answer to `clickhouse-benchmark` at the specified stage. Possible values: `complete`, `fetch_columns`, `with_mergeable_state`. Default value: `complete`.
- `--help` — shows the help message.
If you want to apply some [settings](../../operations/settings/index.md) to the queries, pass them as a key `--<session setting name>= SETTING_VALUE`. For example, `--max_memory_usage=1048576`.
## Output {#clickhouse-benchmark-output}
By default, `clickhouse-benchmark` reports for each `--delay` interval.
Example of a report:
``` text
Queries executed: 10.
localhost:9000, queries 10, QPS: 6.772, RPS: 67904487.440, MiB/s: 518.070, result RPS: 67721584.984, result MiB/s: 516.675.
0.000% 0.145 sec.
10.000% 0.146 sec.
20.000% 0.146 sec.
30.000% 0.146 sec.
40.000% 0.147 sec.
50.000% 0.148 sec.
60.000% 0.148 sec.
70.000% 0.148 sec.
80.000% 0.149 sec.
90.000% 0.150 sec.
95.000% 0.150 sec.
99.000% 0.150 sec.
99.900% 0.150 sec.
99.990% 0.150 sec.
```
In the report you can find:
- Number of queries in the `Queries executed:` field.
- Status string containing (in this order):
    - Endpoint of the ClickHouse server.
    - Number of processed queries.
    - QPS: how many queries the server performed per second during the `--delay` interval.
    - RPS: how many rows the server read per second during the `--delay` interval.
    - MiB/s: how many mebibytes the server read per second during the `--delay` interval.
    - result RPS: how many rows the server put into the query result per second during the `--delay` interval.
    - result MiB/s: how many mebibytes the server put into the query result per second during the `--delay` interval.
- Percentiles of query execution time.
## Comparison Mode {#clickhouse-benchmark-comparison-mode}
`clickhouse-benchmark` can compare the performance of two running ClickHouse servers.
To use comparison mode, specify the endpoints of both servers with two pairs of `--host` and `--port` keys. The keys are matched by position in the argument list: the first `--host` is matched with the first `--port`, and so on. `clickhouse-benchmark` establishes connections to both servers and sends queries, each query addressed to a randomly selected server. The results are shown separately for each server.
## Example {#clickhouse-benchmark-example}
``` bash
$ echo "SELECT * FROM system.numbers LIMIT 10000000 OFFSET 10000000" | clickhouse-benchmark -i 10
```
``` text
Loaded 1 queries.
Queries executed: 6.
localhost:9000, queries 6, QPS: 6.153, RPS: 123398340.957, MiB/s: 941.455, result RPS: 61532982.200, result MiB/s: 469.459.
0.000% 0.159 sec.
10.000% 0.159 sec.
20.000% 0.159 sec.
30.000% 0.160 sec.
40.000% 0.160 sec.
50.000% 0.162 sec.
60.000% 0.164 sec.
70.000% 0.165 sec.
80.000% 0.166 sec.
90.000% 0.166 sec.
95.000% 0.167 sec.
99.000% 0.167 sec.
99.900% 0.167 sec.
99.990% 0.167 sec.
Queries executed: 10.
localhost:9000, queries 10, QPS: 6.082, RPS: 121959604.568, MiB/s: 930.478, result RPS: 60815551.642, result MiB/s: 463.986.
0.000% 0.159 sec.
10.000% 0.159 sec.
20.000% 0.160 sec.
30.000% 0.163 sec.
40.000% 0.164 sec.
50.000% 0.165 sec.
60.000% 0.166 sec.
70.000% 0.166 sec.
80.000% 0.167 sec.
90.000% 0.167 sec.
95.000% 0.170 sec.
99.000% 0.172 sec.
99.900% 0.172 sec.
99.990% 0.172 sec.
```
[Original article](https://clickhouse.tech/docs/ru/operations/utilities/clickhouse-benchmark.md)
View File
@ -67,11 +67,21 @@ $ clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --bas
<!-- Configuration of clusters as in an ordinary server config -->
<remote_servers>
<source_cluster>
<!--
source cluster & destination clusters accept exactly the same
parameters as parameters for the usual Distributed table
see https://clickhouse.tech/docs/ru/engines/table-engines/special/distributed/
-->
<shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
<!--
<user>default</user>
<password>default</password>
<secure>1</secure>
-->
</replica>
</shard>
...
View File
@ -0,0 +1,29 @@
---
toc_priority: 61
toc_title: Multiword types
---
# Multiword Types {#multiword-types}
When creating tables, you can use data types whose names consist of several words. Such names are supported for better SQL compatibility.
## Multiword Type Support {#multiword-types-support}
| Multiword types                     | Regular types                                              |
|-------------------------------------|-----------------------------------------------------------|
| DOUBLE PRECISION | [Float64](../../sql-reference/data-types/float.md) |
| CHAR LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| CHAR VARYING | [String](../../sql-reference/data-types/string.md) |
| CHARACTER LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| CHARACTER VARYING | [String](../../sql-reference/data-types/string.md) |
| NCHAR LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| NCHAR VARYING | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHARACTER LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHARACTER VARYING | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHAR VARYING | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHARACTER | [String](../../sql-reference/data-types/string.md) |
| NATIONAL CHAR | [String](../../sql-reference/data-types/string.md) |
| BINARY LARGE OBJECT | [String](../../sql-reference/data-types/string.md) |
| BINARY VARYING | [String](../../sql-reference/data-types/string.md) |
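A minimal sketch of the mapping (the table name `mw` is hypothetical):
``` sql
CREATE TABLE mw (f DOUBLE PRECISION, c NATIONAL CHAR) ENGINE = Memory;
DESCRIBE TABLE mw;  -- expected: f Float64, c String
```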
[Original article](https://clickhouse.tech/docs/ru/sql-reference/data-types/multiword-types/) <!--hide-->
View File
@ -555,4 +555,46 @@ SELECT normalizedQueryHash('SELECT 1 AS `xyz`') != normalizedQueryHash('SELECT 1
└─────┘
```
## encodeXMLComponent {#encode-xml-component}
Escapes characters so that the string can be placed into an XML text node or attribute.
The characters that are reserved (special) in XML are escaped: `<`, `&`, `>`, `"`, `'`.
**Syntax**
``` sql
encodeXMLComponent(x)
```
**Parameters**
- `x` — a sequence of characters. [String](../../sql-reference/data-types/string.md).
**Returned value**
- The string with the reserved characters escaped.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
SELECT encodeXMLComponent('Hello, "world"!');
SELECT encodeXMLComponent('<123>');
SELECT encodeXMLComponent('&clickhouse');
SELECT encodeXMLComponent('\'foo\'');
```
Result:
``` text
Hello, &quot;world&quot;!
&lt;123&gt;
&amp;clickhouse
&apos;foo&apos;
```
[Original article](https://clickhouse.tech/docs/ru/query_language/functions/string_functions/) <!--hide-->
View File
@ -115,6 +115,10 @@ SELECT topLevelDomain('svn+ssh://www.some.svn-hosting.com:80/repo/trunk')
For example, `cutToFirstSignificantSubdomain('https://news.yandex.com.tr/') = 'yandex.com.tr'`.
### port(URL[, default_port = 0]) {#port}
Returns the port, or `default_port` if the URL contains no port (or if an invalid URL was passed).
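A hedged sketch of the behavior:
``` sql
SELECT port('http://example.com:8080/news.html');  -- 8080
SELECT port('http://example.com/news.html');       -- 0, the default default_port
SELECT port('http://example.com/news.html', 80);   -- 80, the supplied default_port
```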
### path {#path}
Returns the path. Example: `/top/news.html`. The path does not include the query string.
View File
@ -10,8 +10,8 @@ toc_title: "\u0422\u0430\u0431\u043b\u0438\u0446\u0430"
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
...
) ENGINE = engine
```
@ -22,6 +22,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
A column description is `name type` in the simplest case. Example: `RegionID UInt32`.
Expressions can also be defined for default values (see below).
If necessary, a [primary key](#primary-key) can be specified, with one or more key expressions.
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name AS [db2.]name2 [ENGINE = engine]
```
@ -44,6 +45,14 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
After the `ENGINE` clause, other clauses may be used in the query, depending on the engine. See the [table engines](../../../engines/table-engines/index.md#table_engines) descriptions for detailed documentation on creating tables.
## NULL or NOT NULL Modifiers {#null-modifiers}
The `NULL` or `NOT NULL` modifier, specified after the data type in a column definition, allows or forbids the data type to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable).
If the type is not `Nullable` and `NULL` is specified, the column becomes `Nullable`; if `NOT NULL`, it does not. For example, `INT NULL` is the same as `Nullable(INT)`. If the type is already `Nullable` and the `NULL` or `NOT NULL` modifier is specified, an exception is thrown.
See also the [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable) setting.
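A minimal sketch of the modifiers (the table name `nullable_demo` is hypothetical):
``` sql
CREATE TABLE nullable_demo
(
    a INT NULL,     -- same as Nullable(Int32)
    b INT NOT NULL, -- plain Int32
    c Nullable(INT) -- already Nullable; adding NULL or NOT NULL here would throw
) ENGINE = Memory;
```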
### Default Values {#create-default-values}
A column description can specify a default value expression, in one of the following forms:
@ -80,6 +89,35 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
It is not possible to set default values for elements of nested data structures.
## Primary Key {#primary-key}
You can define a [primary key](../../../engines/table-engines/mergetree-family/mergetree.md#primary-keys-and-indexes-in-queries) when creating a table. The primary key can be specified in two ways:
- inside the column list:
``` sql
CREATE TABLE db.table_name
(
name1 type1, name2 type2, ...,
PRIMARY KEY(expr1[, expr2,...])]
)
ENGINE = engine;
```
- outside the column list:
``` sql
CREATE TABLE db.table_name
(
name1 type1, name2 type2, ...
)
ENGINE = engine
PRIMARY KEY(expr1[, expr2,...]);
```
!!! warning "Предупреждение"
Вы не можете сочетать оба способа в одном запросе.
### Ограничения (constraints) {#constraints}
Наряду с объявлением столбцов можно объявить ограничения на значения в столбцах таблицы:
View File
@ -193,7 +193,7 @@ Yandex官方当前使用GCC构建ClickHouse因为它生成的机器代码性
To run the server as the current user, go to the `ClickHouse/programs/server/` directory (outside the `build` folder) and run:
../../../build/programs/clickhouse server
../../build/programs/clickhouse server
In this case, ClickHouse will use the configuration files located in the current directory. You can run `clickhouse server` from any directory, specifying the path to the configuration file as the `--config-file` command-line argument.
View File
@ -3,11 +3,11 @@ toc_priority: 60
toc_title: clickhouse-local
---
# ClickHouse Local {#clickhouse-local}
# clickhouse-local {#clickhouse-local}
`clickhouse-local` mode lets you perform fast processing of local files without having to deploy and configure a ClickHouse server.
It accepts data representing tables and queries them using the [ClickHouse SQL dialect](../../operations/utilities/clickhouse-local.md).
`clickhouse-local` uses the same core as the ClickHouse server, so it supports most of the features and the same formats and table engines.
@ -16,6 +16,8 @@ toc_title: clickhouse-local
!!! warning "警告"
不建议将生产服务器配置加载到`clickhouse-local`因为数据可以在人为错误的情况下被损坏。
对于临时数据,默认情况下会创建一个唯一的临时数据目录。
## 用途 {#usage}
基本用法:
@ -29,14 +31,22 @@ clickhouse-local --structure "table_structure" --input-format "format_of_incomin
- `-S`, `--structure` — table structure of the input data.
- `-if`, `--input-format` — input format, `TSV` by default.
- `-f`, `--file` — path to the data, `stdin` by default.
- `-q`, `--query` — SQL queries to execute, using `;` as the separator. You must specify either the `query` or the `queries-file` option.
- `-qf`, `--queries-file` — path to a file with the queries to execute. You must specify either the `query` or the `queries-file` option.
- `-N`, `--table` — table name for the output data, `table` by default.
- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
- `-d`, `--database` — default database name, `_local` by default.
- `--stacktrace` — whether to print the stack trace when an exception occurs.
- `--echo` — print the query before execution.
- `--verbose` — show detailed information about the queries (debug).
- `-s` — disable output to `stderr`.
- `--logger.console` — log to the console.
- `--logger.log` — log file name.
- `--logger.level` — log level.
- `--ignore-error` — do not stop processing when a query fails.
- `-c`, `--config-file` — path to a configuration file, in the same format as for the ClickHouse server; empty by default.
- `--no-system-tables` — do not attach the system tables.
- `--help` — help information for `clickhouse-local`.
- `-V`, `--version` — print version information and exit.
Each parameter from the ClickHouse configuration can also be passed individually, without using `--config-file`.
View File
@ -680,6 +680,12 @@ private:
std::cerr << std::endl;
client_exception = std::make_unique<Exception>(e);
}
if (client_exception)
{
/// client_exception may have been set above or elsewhere.
/// Client-side exception during query execution can result in the loss of
/// sync in the connection protocol.
/// So we reconnect and allow to enter the next query.
@ -914,12 +920,6 @@ private:
void reportQueryError() const
{
// If we probably have progress bar, we should add additional
// newline, otherwise exception may display concatenated with
// the progress bar.
if (need_render_progress)
std::cerr << '\n';
if (server_exception)
{
std::string text = server_exception->displayText();
@ -937,7 +937,7 @@ private:
if (client_exception)
{
fmt::print(stderr,
"Error on processing query '{}':\n{}",
"Error on processing query '{}':\n{}\n",
full_query, client_exception->message());
}
View File
@ -479,7 +479,7 @@ void QueryFuzzer::addTableLike(const ASTPtr ast)
{
if (table_like_map.size() > 1000)
{
return;
table_like_map.clear();
}
const auto name = ast->formatForErrorMessage();
@ -493,7 +493,7 @@ void QueryFuzzer::addColumnLike(const ASTPtr ast)
{
if (column_like_map.size() > 1000)
{
return;
column_like_map.clear();
}
const auto name = ast->formatForErrorMessage();
@ -507,10 +507,12 @@ void QueryFuzzer::collectFuzzInfoRecurse(const ASTPtr ast)
{
if (auto * impl = dynamic_cast<ASTWithAlias *>(ast.get()))
{
if (aliases_set.size() < 1000)
if (aliases_set.size() > 1000)
{
aliases_set.insert(impl->alias);
aliases_set.clear();
}
aliases_set.insert(impl->alias);
}
if (typeid_cast<ASTLiteral *>(ast.get()))
View File
@ -1,14 +1,16 @@
#pragma once
#include <common/StringRef.h>
#include <DataTypes/IDataType.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionMinMaxAny.h> // SingleValueDataString used in embedded compiler
#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnTuple.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/IDataType.h>
#include <common/StringRef.h>
#include "Columns/IColumn.h"
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@ -22,37 +24,49 @@ struct AggregateFunctionArgMinMaxData
using ResultData_t = ResultData;
using ValueData_t = ValueData;
ResultData result; // the argument at which the minimum/maximum value is reached.
ValueData value; // value for which the minimum/maximum is calculated.
ResultData result; // the argument at which the minimum/maximum value is reached.
ValueData value; // value for which the minimum/maximum is calculated.
static bool allocatesMemoryInArena()
{
return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena();
}
static bool allocatesMemoryInArena() { return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena(); }
static String name() { return StringRef(ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax"; }
};
/// Returns the first arg value found for the minimum/maximum value. Example: argMax(arg, value).
template <typename Data>
class AggregateFunctionArgMinMax final : public IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data>>
class AggregateFunctionArgMinMax final : public IAggregateFunctionTupleArgHelper<Data, AggregateFunctionArgMinMax<Data>, 2>
{
private:
const DataTypePtr & type_res;
const DataTypePtr & type_val;
bool tuple_argument;
using Base = IAggregateFunctionTupleArgHelper<Data, AggregateFunctionArgMinMax<Data>, 2>;
public:
AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_)
: IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data>>({type_res_, type_val_}, {}),
type_res(this->argument_types[0]), type_val(this->argument_types[1])
AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_, const bool tuple_argument_)
: Base({type_res_, type_val_}, {}, tuple_argument_)
, type_res(this->argument_types[0])
, type_val(this->argument_types[1])
{
if (!type_val->isComparable())
throw Exception("Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
+ " because the values of that data type are not comparable", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(
"Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
+ " because the values of that data type are not comparable",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
this->tuple_argument = tuple_argument_;
}
String getName() const override { return StringRef(Data::ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax"; }
String getName() const override { return Data::name(); }
DataTypePtr getReturnType() const override
{
if (tuple_argument)
{
return std::make_shared<DataTypeTuple>(DataTypes{this->type_res, this->type_val});
}
return type_res;
}
@ -80,15 +94,21 @@ public:
this->data(place).value.read(buf, *type_val, arena);
}
bool allocatesMemoryInArena() const override
{
return Data::allocatesMemoryInArena();
}
bool allocatesMemoryInArena() const override { return Data::allocatesMemoryInArena(); }
void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
{
this->data(place).result.insertResultInto(to);
if (tuple_argument)
{
auto & tup = assert_cast<ColumnTuple &>(to);
this->data(place).result.insertResultInto(tup.getColumn(0));
this->data(place).value.insertResultInto(tup.getColumn(1));
}
else
this->data(place).result.insertResultInto(to);
}
};
}
View File
@ -12,10 +12,10 @@
namespace DB
{
template <class T>
template <typename T>
using DecimalOrVectorCol = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<T>, ColumnVector<T>>;
template <class T> constexpr bool DecimalOrExtendedInt =
template <typename T> constexpr bool DecimalOrExtendedInt =
IsDecimalNumber<T>
|| std::is_same_v<T, Int128>
|| std::is_same_v<T, Int256>
@ -25,7 +25,7 @@ template <class T> constexpr bool DecimalOrExtendedInt =
/**
* Helper class to encapsulate values conversion for avg and avgWeighted.
*/
template <class Numerator, class Denominator>
template <typename Numerator, typename Denominator>
struct AvgFraction
{
Numerator numerator{0};
@ -84,7 +84,7 @@ struct AvgFraction
* @tparam Derived When deriving from this class, use the child class name as in CRTP, e.g.
* class Self : Agg<char, bool, bool, Self>.
*/
template <class Numerator, class Denominator, class Derived>
template <typename Numerator, typename Denominator, typename Derived>
class AggregateFunctionAvgBase : public
IAggregateFunctionDataHelper<AvgFraction<Numerator, Denominator>, Derived>
{
@ -98,7 +98,7 @@ public:
DataTypePtr getReturnType() const final { return std::make_shared<DataTypeNumber<Float64>>(); }
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
void NO_SANITIZE_UNDEFINED merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
{
this->data(place).numerator += this->data(rhs).numerator;
this->data(place).denominator += this->data(rhs).denominator;
@ -137,18 +137,18 @@ private:
UInt32 denom_scale;
};
template <class T>
template <typename T>
using AvgFieldType = std::conditional_t<IsDecimalNumber<T>,
std::conditional_t<std::is_same_v<T, Decimal256>, Decimal256, Decimal128>,
NearestFieldType<T>>;
template <class T>
template <typename T>
class AggregateFunctionAvg final : public AggregateFunctionAvgBase<AvgFieldType<T>, UInt64, AggregateFunctionAvg<T>>
{
public:
using AggregateFunctionAvgBase<AvgFieldType<T>, UInt64, AggregateFunctionAvg<T>>::AggregateFunctionAvgBase;
void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const final
void NO_SANITIZE_UNDEFINED add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const final
{
this->data(place).numerator += static_cast<const DecimalOrVectorCol<T> &>(*columns[0]).getData()[row_num];
++this->data(place).denominator;
View File
@ -20,14 +20,14 @@ struct AggregateFunctionSumData
{
T sum{};
void ALWAYS_INLINE add(T value)
void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(T value)
{
sum += value;
}
/// Vectorized version
template <typename Value>
void NO_INLINE addMany(const Value * __restrict ptr, size_t count)
void NO_SANITIZE_UNDEFINED NO_INLINE addMany(const Value * __restrict ptr, size_t count)
{
const auto * end = ptr + count;
@ -64,7 +64,7 @@ struct AggregateFunctionSumData
}
template <typename Value>
void NO_INLINE addManyNotNull(const Value * __restrict ptr, const UInt8 * __restrict null_map, size_t count)
void NO_SANITIZE_UNDEFINED NO_INLINE addManyNotNull(const Value * __restrict ptr, const UInt8 * __restrict null_map, size_t count)
{
const auto * end = ptr + count;
@ -99,7 +99,7 @@ struct AggregateFunctionSumData
sum += local_sum;
}
void merge(const AggregateFunctionSumData & rhs)
void NO_SANITIZE_UNDEFINED merge(const AggregateFunctionSumData & rhs)
{
sum += rhs.sum;
}
@ -287,7 +287,7 @@ public:
void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
{
const auto & column = static_cast<const ColVecType &>(*columns[0]);
const auto & column = assert_cast<const ColVecType &>(*columns[0]);
if constexpr (is_big_int_v<T>)
this->data(place).add(static_cast<TResult>(column.getData()[row_num]));
else
@ -309,7 +309,7 @@ public:
}
else
{
const auto & column = static_cast<const ColVecType &>(*columns[0]);
const auto & column = assert_cast<const ColVecType &>(*columns[0]);
this->data(place).addMany(column.getData().data(), batch_size);
}
}
@ -327,7 +327,7 @@ public:
}
else
{
const auto & column = static_cast<const ColVecType &>(*columns[0]);
const auto & column = assert_cast<const ColVecType &>(*columns[0]);
this->data(place).addManyNotNull(column.getData().data(), null_map, batch_size);
}
}
@ -349,7 +349,7 @@ public:
void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
{
auto & column = static_cast<ColVecResult &>(to);
auto & column = assert_cast<ColVecResult &>(to);
column.getData().push_back(this->data(place).get());
}
View File
@ -31,6 +31,12 @@
M(Float32) \
M(Float64)
#define FOR_DECIMAL_TYPES(M) \
M(Decimal32) \
M(Decimal64) \
M(Decimal128)
namespace DB
{
View File
@ -8,10 +8,14 @@
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeTuple.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
/// min, max, any, anyLast, anyHeavy, etc...
template <template <typename> class AggregateFunctionTemplate, template <typename> class Data>
@ -26,6 +30,7 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<TYPE>>>(argument_type);
FOR_NUMERIC_TYPES(DISPATCH)
FOR_DECIMAL_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Date)
@ -34,12 +39,6 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<DataTypeDateTime::FieldType>>>(argument_type);
if (which.idx == TypeIndex::DateTime64)
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<DateTime64>>>(argument_type);
if (which.idx == TypeIndex::Decimal32)
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Decimal32>>>(argument_type);
if (which.idx == TypeIndex::Decimal64)
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Decimal64>>>(argument_type);
if (which.idx == TypeIndex::Decimal128)
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Decimal128>>>(argument_type);
if (which.idx == TypeIndex::String)
return new AggregateFunctionTemplate<Data<SingleValueDataString>>(argument_type);
@ -49,66 +48,77 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
/// argMin, argMax
template <template <typename> class MinMaxData, typename ResData>
static IAggregateFunction * createAggregateFunctionArgMinMaxSecond(const DataTypePtr & res_type, const DataTypePtr & val_type)
static IAggregateFunction * createAggregateFunctionArgMinMaxSecond(const DataTypePtr & res_type, const DataTypePtr & val_type, bool is_tuple)
{
WhichDataType which(val_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<TYPE>>>>(res_type, val_type);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<TYPE>>>>(res_type, val_type, is_tuple);
FOR_NUMERIC_TYPES(DISPATCH)
FOR_DECIMAL_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Date)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDate::FieldType>>>>(res_type, val_type);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDate::FieldType>>>>(res_type, val_type, is_tuple);
if (which.idx == TypeIndex::DateTime)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDateTime::FieldType>>>>(res_type, val_type);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDateTime::FieldType>>>>(res_type, val_type, is_tuple);
if (which.idx == TypeIndex::DateTime64)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DateTime64>>>>(res_type, val_type);
if (which.idx == TypeIndex::Decimal32)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<Decimal32>>>>(res_type, val_type);
if (which.idx == TypeIndex::Decimal64)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<Decimal64>>>>(res_type, val_type);
if (which.idx == TypeIndex::Decimal128)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<Decimal128>>>>(res_type, val_type);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DateTime64>>>>(res_type, val_type, is_tuple);
if (which.idx == TypeIndex::String)
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataString>>>(res_type, val_type);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataString>>>(res_type, val_type, is_tuple);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataGeneric>>>(res_type, val_type, is_tuple);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataGeneric>>>(res_type, val_type);
}
template <template <typename> class MinMaxData>
static IAggregateFunction * createAggregateFunctionArgMinMax(const String & name, const DataTypes & argument_types, const Array & parameters)
{
assertNoParameters(name, parameters);
assertBinary(name, argument_types);
const DataTypePtr & res_type = argument_types[0];
const DataTypePtr & val_type = argument_types[1];
DataTypePtr res_type, val_type;
bool is_tuple = false;
// argMin and argMax could get tuple of two as arguments
if (argument_types.size() == 1 && argument_types[0]->getTypeId() == TypeIndex::Tuple)
{
const auto * tuple_type = assert_cast<const DataTypeTuple *>(argument_types[0].get());
if (tuple_type->getElements().size() != 2)
{
throw Exception("Aggregate function " + name + " expects two elements in tuple argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
}
res_type = tuple_type->getElements()[0];
val_type = tuple_type->getElements()[1];
is_tuple = true;
}
else
{
assertBinary(name, argument_types);
res_type = argument_types[0];
val_type = argument_types[1];
}
WhichDataType which(res_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<TYPE>>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<TYPE>>(res_type, val_type, is_tuple);
FOR_NUMERIC_TYPES(DISPATCH)
FOR_DECIMAL_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Date)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDate::FieldType>>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDate::FieldType>>(res_type, val_type, is_tuple);
if (which.idx == TypeIndex::DateTime)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDateTime::FieldType>>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDateTime::FieldType>>(res_type, val_type, is_tuple);
if (which.idx == TypeIndex::DateTime64)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DateTime64>>(res_type, val_type);
if (which.idx == TypeIndex::Decimal32)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<Decimal32>>(res_type, val_type);
if (which.idx == TypeIndex::Decimal64)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<Decimal64>>(res_type, val_type);
if (which.idx == TypeIndex::Decimal128)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<Decimal128>>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DateTime64>>(res_type, val_type, is_tuple);
if (which.idx == TypeIndex::String)
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataString>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataString>(res_type, val_type, is_tuple);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataGeneric>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataGeneric>(res_type, val_type, is_tuple);
}
}
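The factory change above accepts a single `Tuple` of two elements in place of the two separate arguments. A hedged SQL sketch of the intended usage (the table `salaries` and its columns are hypothetical):
``` sql
-- Classic form: returns only the first argument at the extreme value.
SELECT argMax(user, salary) FROM salaries;

-- Tuple form enabled by this change: returns the whole (user, salary) tuple.
SELECT argMax((user, salary)) FROM salaries;
```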
View File
@ -2,14 +2,15 @@
#include <cstddef>
#include <memory>
#include <vector>
#include <type_traits>
#include <vector>
#include <common/types.h>
#include <Core/ColumnNumbers.h>
#include <Core/Block.h>
#include <Common/Exception.h>
#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Core/Field.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnsNumber.h>
@ -48,7 +49,9 @@ class IAggregateFunction
{
public:
IAggregateFunction(const DataTypes & argument_types_, const Array & parameters_)
: argument_types(argument_types_), parameters(parameters_) {}
: argument_types(argument_types_), parameters(parameters_)
{
}
/// Get main function name.
virtual String getName() const = 0;
@ -100,10 +103,7 @@ public:
virtual void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const = 0;
/// Returns true if a function requires Arena to handle own states (see add(), merge(), deserialize()).
virtual bool allocatesMemoryInArena() const
{
return false;
}
virtual bool allocatesMemoryInArena() const { return false; }
/// Inserts results into a column. This method might modify the state (e.g.
/// sort an array), so must be called once, from single thread. The state
@ -177,12 +177,8 @@ public:
* "places" contains a large number of same values consecutively.
*/
virtual void addBatchArray(
size_t batch_size,
AggregateDataPtr * places,
size_t place_offset,
const IColumn ** columns,
const UInt64 * offsets,
Arena * arena) const = 0;
size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
const = 0;
/** The case when the aggregation key is UInt8
* and pointers to aggregation states are stored in AggregateDataPtr[256] lookup table.
@ -203,8 +199,10 @@ public:
* arguments and params are for nested_function.
*/
virtual AggregateFunctionPtr getOwnNullAdapter(
const AggregateFunctionPtr & /*nested_function*/, const DataTypes & /*arguments*/,
const Array & /*params*/, const AggregateFunctionProperties & /*properties*/) const
const AggregateFunctionPtr & /*nested_function*/,
const DataTypes & /*arguments*/,
const Array & /*params*/,
const AggregateFunctionProperties & /*properties*/) const
{
return nullptr;
}
@ -235,7 +233,9 @@ private:
public:
IAggregateFunctionHelper(const DataTypes & argument_types_, const Array & parameters_)
: IAggregateFunction(argument_types_, parameters_) {}
: IAggregateFunction(argument_types_, parameters_)
{
}
AddFunc getAddressOfAddFunction() const override { return &addFree; }
@ -387,40 +387,27 @@ class IAggregateFunctionDataHelper : public IAggregateFunctionHelper<Derived>
protected:
using Data = T;
static Data & data(AggregateDataPtr place) { return *reinterpret_cast<Data*>(place); }
static const Data & data(ConstAggregateDataPtr place) { return *reinterpret_cast<const Data*>(place); }
static Data & data(AggregateDataPtr place) { return *reinterpret_cast<Data *>(place); }
static const Data & data(ConstAggregateDataPtr place) { return *reinterpret_cast<const Data *>(place); }
public:
// Derived class can `override` this to flag that DateTime64 is not supported.
static constexpr bool DateTime64Supported = true;
IAggregateFunctionDataHelper(const DataTypes & argument_types_, const Array & parameters_)
: IAggregateFunctionHelper<Derived>(argument_types_, parameters_) {}
void create(AggregateDataPtr place) const override
: IAggregateFunctionHelper<Derived>(argument_types_, parameters_)
{
new (place) Data;
}
void destroy(AggregateDataPtr place) const noexcept override
{
data(place).~Data();
}
void create(AggregateDataPtr place) const override { new (place) Data; }
bool hasTrivialDestructor() const override
{
return std::is_trivially_destructible_v<Data>;
}
void destroy(AggregateDataPtr place) const noexcept override { data(place).~Data(); }
size_t sizeOfData() const override
{
return sizeof(Data);
}
bool hasTrivialDestructor() const override { return std::is_trivially_destructible_v<Data>; }
size_t alignOfData() const override
{
return alignof(Data);
}
size_t sizeOfData() const override { return sizeof(Data); }
size_t alignOfData() const override { return alignof(Data); }
void addBatchLookupTable8(
size_t batch_size,
@ -499,6 +486,135 @@ public:
}
};
/// Implements a tuple-argument unwrapper for the case when the tuple just masks the real arguments
template <typename T, typename Derived, size_t args_count>
class IAggregateFunctionTupleArgHelper : public IAggregateFunctionDataHelper<T, Derived>
{
private:
using Base = IAggregateFunctionDataHelper<T, Derived>;
static void addFree(const IAggregateFunction * that, AggregateDataPtr place, const IColumn ** columns_, size_t row_num, Arena * arena)
{
if (const auto * col = checkAndGetColumn<ColumnTuple>(*columns_[0]))
{
const IColumn * columns[args_count];
const auto & tup_columns = col->getColumns();
assert(tup_columns.size() == args_count);
for (size_t i = 0; i < tup_columns.size(); ++i)
{
columns[i] = tup_columns[i].get();
}
static_cast<const Derived &>(*that).add(place, columns, row_num, arena);
}
else
static_cast<const Derived &>(*that).add(place, columns_, row_num, arena);
}
protected:
void extractColumns(const IColumn ** columns, const IColumn ** aggr_columns) const
{
if (tuple_argument)
{
auto tup_columns = assert_cast<const ColumnTuple *>(aggr_columns[0])->getColumns();
for (size_t i = 0; i < args_count; ++i)
columns[i] = tup_columns[i].get();
}
else
{
for (size_t i = 0; i < args_count; ++i)
columns[i] = aggr_columns[i];
}
}
bool tuple_argument;
public:
IAggregateFunctionTupleArgHelper(const DataTypes & argument_types_, const Array & parameters_, bool tuple_argument_)
: Base(argument_types_, parameters_)
{
tuple_argument = tuple_argument_;
}
IAggregateFunction::AddFunc getAddressOfAddFunction() const override { return &addFree; }
/*
* We override the addBatch* functions just to avoid extracting the columns
* in the 'add' functions.
*/
void addBatch(
size_t batch_size,
AggregateDataPtr * places,
size_t place_offset,
const IColumn ** columns,
Arena * arena,
ssize_t if_argument_pos = -1) const override
{
const IColumn * ex_columns[args_count];
extractColumns(ex_columns, columns);
Base::addBatch(batch_size, places, place_offset, ex_columns, arena, if_argument_pos);
}
void addBatchSinglePlace(
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override
{
const IColumn * ex_columns[args_count];
extractColumns(ex_columns, columns);
Base::addBatchSinglePlace(batch_size, place, ex_columns, arena, if_argument_pos);
}
void addBatchSinglePlaceNotNull(
size_t batch_size,
AggregateDataPtr place,
const IColumn ** columns,
const UInt8 * null_map,
Arena * arena,
ssize_t if_argument_pos = -1) const override
{
const IColumn * ex_columns[args_count];
extractColumns(ex_columns, columns);
Base::addBatchSinglePlaceNotNull(batch_size, place, ex_columns, null_map, arena, if_argument_pos);
}
void addBatchSinglePlaceFromInterval(
size_t batch_begin, size_t batch_end, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1)
const override
{
const IColumn * ex_columns[args_count];
extractColumns(ex_columns, columns);
Base::addBatchSinglePlaceFromInterval(batch_begin, batch_end, place, ex_columns, arena, if_argument_pos);
}
void addBatchArray(
size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
const override
{
const IColumn * ex_columns[args_count];
extractColumns(ex_columns, columns);
Base::addBatchArray(batch_size, places, place_offset, ex_columns, offsets, arena);
}
void addBatchLookupTable8(
size_t batch_size,
AggregateDataPtr * map,
size_t place_offset,
std::function<void(AggregateDataPtr &)> init,
const UInt8 * key,
const IColumn ** columns,
Arena * arena) const override
{
const IColumn * ex_columns[args_count];
extractColumns(ex_columns, columns);
Base::addBatchLookupTable8(batch_size, map, place_offset, init, key, ex_columns, arena);
}
};
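The addFree/getAddressOfAddFunction pair above is a CRTP trick: a free function with a fixed signature downcasts the base pointer to Derived, so hot loops can pay for one virtual lookup and then use a plain function pointer. A self-contained sketch with invented names (IFunc, FuncHelper, PrintFunc), a simplification of the real interface:

#include <cstdio>

struct IFunc
{
    using AddFunc = void (*)(const IFunc *, int);
    virtual AddFunc getAddressOfAddFunction() const = 0;
    virtual ~IFunc() = default;
};

template <typename Derived>
struct FuncHelper : IFunc
{
    // Static, so '&addFree' is an ordinary function pointer; the downcast is safe
    // because Derived always inherits from FuncHelper<Derived>.
    static void addFree(const IFunc * that, int x)
    {
        static_cast<const Derived &>(*that).add(x);
    }

    AddFunc getAddressOfAddFunction() const override { return &addFree; }
};

struct PrintFunc : FuncHelper<PrintFunc>
{
    void add(int x) const { std::printf("add(%d)\n", x); }
};

int main()
{
    PrintFunc f;
    IFunc::AddFunc fn = f.getAddressOfAddFunction();  // one virtual call...
    fn(&f, 42);                                       // ...then plain pointer calls
}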
/// Properties of aggregate function that are independent of argument types and parameters.
struct AggregateFunctionProperties

View File

@ -84,8 +84,15 @@ struct QuantileExactWeighted
std::unique_ptr<Pair[]> array_holder(new Pair[size]);
Pair * array = array_holder.get();
/// Note: 64-bit integer weight can overflow.
/// In that case the behaviour is implementation specific (approximate or garbage results may be returned).
/// Float64 is used as an accumulator here to get approximate results.
/// But the weight may already have overflowed in the computations in the 'add' and 'merge' methods.
/// It would be reasonable to change the type of the weight to Float64 in the map,
/// but we don't do that to keep compatibility of serialized data.
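/// A hypothetical worked example of the logic below: map = {10 : weight 3, 20 : weight 1}
/// and level = 0.5. Then sum_weight = 4 and threshold = ceil(4 * 0.5) = 2; accumulating
/// the weights over the sorted array reaches 3 >= 2 already at value 10, so 10 is returned.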
size_t i = 0;
UInt64 sum_weight = 0;
Float64 sum_weight = 0;
for (const auto & pair : map)
{
sum_weight += pair.getMapped();
@ -95,8 +102,8 @@ struct QuantileExactWeighted
std::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });
UInt64 threshold = std::ceil(sum_weight * level);
UInt64 accumulated = 0;
Float64 threshold = std::ceil(sum_weight * level);
Float64 accumulated = 0;
const Pair * it = array;
const Pair * end = array + size;
@ -135,7 +142,7 @@ struct QuantileExactWeighted
Pair * array = array_holder.get();
size_t i = 0;
UInt64 sum_weight = 0;
Float64 sum_weight = 0;
for (const auto & pair : map)
{
sum_weight += pair.getMapped();
@ -145,13 +152,13 @@ struct QuantileExactWeighted
std::sort(array, array + size, [](const Pair & a, const Pair & b) { return a.first < b.first; });
UInt64 accumulated = 0;
Float64 accumulated = 0;
const Pair * it = array;
const Pair * end = array + size;
size_t level_index = 0;
UInt64 threshold = std::ceil(sum_weight * levels[indices[level_index]]);
Float64 threshold = std::ceil(sum_weight * levels[indices[level_index]]);
while (it < end)
{

View File

@ -803,6 +803,9 @@ Packet Connection::receivePacket(std::function<void(Poco::Net::Socket &)> async_
}
catch (Exception & e)
{
/// This is to consider ATTEMPT_TO_READ_AFTER_EOF as a remote exception.
e.setRemoteException();
/// Add the server address to the exception message, if needed.
if (e.code() != ErrorCodes::UNKNOWN_PACKET_FROM_SERVER)
e.addMessage("while receiving packet from " + getDescription());
@ -892,7 +895,7 @@ void Connection::setDescription()
std::unique_ptr<Exception> Connection::receiveException()
{
return std::make_unique<Exception>(readException(*in, "Received from " + getDescription()));
return std::make_unique<Exception>(readException(*in, "Received from " + getDescription(), true /* remote */));
}

View File

@ -63,6 +63,9 @@
M(PartsOutdated, "Not an active data part, but it could still be used only by currently running SELECTs; it can be deleted after those SELECTs finish.") \
M(PartsDeleting, "Not an active data part with an identity refcounter; it is being deleted right now by a cleaner.") \
M(PartsDeleteOnDestroy, "Part was moved to another disk and should be deleted in its own destructor.") \
M(PartsWide, "Wide parts.") \
M(PartsCompact, "Compact parts.") \
M(PartsInMemory, "In-memory parts.") \
namespace CurrentMetrics
{

View File

@ -50,8 +50,9 @@ void handle_error_code([[maybe_unused]] const std::string & msg, int code)
ErrorCodes::increment(code);
}
Exception::Exception(const std::string & msg, int code)
Exception::Exception(const std::string & msg, int code, bool remote_)
: Poco::Exception(msg, code)
, remote(remote_)
{
handle_error_code(msg, code);
}

View File

@ -25,7 +25,7 @@ class Exception : public Poco::Exception
{
public:
Exception() = default;
Exception(const std::string & msg, int code);
Exception(const std::string & msg, int code, bool remote_ = false);
Exception(const std::string & msg, const Exception & nested, int code);
Exception(int code, const std::string & message)
@ -61,12 +61,17 @@ public:
extendedMessage(message);
}
/// Used to distinguish local exceptions from those received from a remote node.
void setRemoteException(bool remote_ = true) { remote = remote_; }
bool isRemoteException() const { return remote; }
std::string getStackTraceString() const;
private:
#ifndef STD_EXCEPTION_HAS_STACK_TRACE
StackTrace trace;
#endif
bool remote = false;
const char * className() const throw() override { return "DB::Exception"; }
};
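To make the new remote flag concrete, here is a self-contained sketch of the annotate-and-rethrow flow used by Connection::receivePacket above; SketchException is an invented stand-in for DB::Exception, not the real class:

#include <cstdio>
#include <stdexcept>
#include <string>

struct SketchException : std::runtime_error
{
    bool remote = false;
    std::string message;

    explicit SketchException(const std::string & msg) : std::runtime_error(msg), message(msg) {}

    void setRemoteException(bool remote_ = true) { remote = remote_; }
    bool isRemoteException() const { return remote; }
    void addMessage(const std::string & extra) { message += ": " + extra; }  // simplified
};

int main()
{
    try
    {
        try
        {
            throw SketchException("Attempt to read after eof");
        }
        catch (SketchException & e)
        {
            e.setRemoteException();  // mark it as coming from the server
            e.addMessage("while receiving packet from localhost:9000");
            throw;                   // rethrows the same (modified) exception object
        }
    }
    catch (const SketchException & e)
    {
        std::printf("%s (remote=%d)\n", e.message.c_str(), int(e.isRemoteException()));
    }
}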

View File

@ -14,7 +14,6 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int TIMEOUT_EXCEEDED;
extern const int BAD_ARGUMENTS;
}
@ -39,8 +38,9 @@ static String baseName(const String & path)
return path.substr(rslash_pos + 1);
}
static void processWatchesImpl(const String & path, TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches, Coordination::Event event_type)
static TestKeeperStorage::ResponsesForSessions processWatchesImpl(const String & path, TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches, Coordination::Event event_type)
{
TestKeeperStorage::ResponsesForSessions result;
auto it = watches.find(path);
if (it != watches.end())
{
@ -50,9 +50,8 @@ static void processWatchesImpl(const String & path, TestKeeperStorage::Watches &
watch_response->zxid = -1;
watch_response->type = event_type;
watch_response->state = Coordination::State::CONNECTED;
for (auto & watcher : it->second)
if (watcher.watch_callback)
watcher.watch_callback(watch_response);
for (auto watcher_session : it->second)
result.push_back(TestKeeperStorage::ResponseForSession{watcher_session, watch_response});
watches.erase(it);
}
@ -67,19 +66,17 @@ static void processWatchesImpl(const String & path, TestKeeperStorage::Watches &
watch_list_response->zxid = -1;
watch_list_response->type = Coordination::Event::CHILD;
watch_list_response->state = Coordination::State::CONNECTED;
for (auto & watcher : it->second)
if (watcher.watch_callback)
watcher.watch_callback(watch_list_response);
for (auto watcher_session : it->second)
result.push_back(TestKeeperStorage::ResponseForSession{watcher_session, watch_list_response});
list_watches.erase(it);
}
return result;
}
TestKeeperStorage::TestKeeperStorage()
{
container.emplace("/", Node());
processing_thread = ThreadFromGlobalPool([this] { processingThread(); });
}
using Undo = std::function<void()>;
@ -92,7 +89,7 @@ struct TestKeeperStorageRequest
: zk_request(zk_request_)
{}
virtual std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const = 0;
virtual void processWatches(TestKeeperStorage::Watches & /*watches*/, TestKeeperStorage::Watches & /*list_watches*/) const {}
virtual TestKeeperStorage::ResponsesForSessions processWatches(TestKeeperStorage::Watches & /*watches*/, TestKeeperStorage::Watches & /*list_watches*/) const { return {}; }
virtual ~TestKeeperStorageRequest() = default;
};
@ -111,9 +108,9 @@ struct TestKeeperStorageCreateRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
TestKeeperStorage::ResponsesForSessions processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CREATED);
return processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CREATED);
}
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const override
@ -271,9 +268,9 @@ struct TestKeeperStorageRemoveRequest final : public TestKeeperStorageRequest
return { response_ptr, undo };
}
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
TestKeeperStorage::ResponsesForSessions processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::DELETED);
return processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::DELETED);
}
};
@ -344,9 +341,9 @@ struct TestKeeperStorageSetRequest final : public TestKeeperStorageRequest
return { response_ptr, undo };
}
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
TestKeeperStorage::ResponsesForSessions processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CHANGED);
return processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CHANGED);
}
};
@ -502,10 +499,15 @@ struct TestKeeperStorageMultiRequest final : public TestKeeperStorageRequest
}
}
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
TestKeeperStorage::ResponsesForSessions processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
TestKeeperStorage::ResponsesForSessions result;
for (const auto & generic_request : concrete_requests)
generic_request->processWatches(watches, list_watches);
{
auto responses = generic_request->processWatches(watches, list_watches);
result.insert(result.end(), responses.begin(), responses.end());
}
return result;
}
};
@ -518,160 +520,49 @@ struct TestKeeperStorageCloseRequest final : public TestKeeperStorageRequest
}
};
void TestKeeperStorage::processingThread()
TestKeeperStorage::ResponsesForSessions TestKeeperStorage::finalize(const RequestsForSessions & expired_requests)
{
setThreadName("TestKeeperSProc");
if (finalized)
throw DB::Exception("Testkeeper storage already finalized", ErrorCodes::LOGICAL_ERROR);
try
finalized = true;
ResponsesForSessions finalize_results;
auto finish_watch = [] (const auto & watch_pair) -> ResponsesForSessions
{
while (!shutdown)
{
RequestInfo info;
ResponsesForSessions results;
std::shared_ptr<Coordination::ZooKeeperWatchResponse> response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
response->type = Coordination::SESSION;
response->state = Coordination::EXPIRED_SESSION;
response->error = Coordination::Error::ZSESSIONEXPIRED;
UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
for (auto & watcher_session : watch_pair.second)
results.push_back(ResponseForSession{watcher_session, response});
return results;
};
if (requests_queue.tryPop(info, max_wait))
{
if (shutdown)
break;
auto zk_request = info.request->zk_request;
if (zk_request->getOpNum() == Coordination::OpNum::Close)
{
auto it = ephemerals.find(info.session_id);
if (it != ephemerals.end())
{
for (const auto & ephemeral_path : it->second)
{
container.erase(ephemeral_path);
processWatchesImpl(ephemeral_path, watches, list_watches, Coordination::Event::DELETED);
}
ephemerals.erase(it);
}
clearDeadWatches(info.session_id);
/// Finish connection
auto response = std::make_shared<Coordination::ZooKeeperCloseResponse>();
response->xid = zk_request->xid;
response->zxid = getZXID();
info.response_callback(response);
}
else
{
auto [response, _] = info.request->process(container, ephemerals, zxid, info.session_id);
if (info.watch_callback)
{
if (response->error == Coordination::Error::ZOK)
{
auto & watches_type = zk_request->getOpNum() == Coordination::OpNum::List || zk_request->getOpNum() == Coordination::OpNum::SimpleList
? list_watches
: watches;
watches_type[zk_request->getPath()].emplace_back(Watcher{info.session_id, info.watch_callback});
sessions_and_watchers[info.session_id].emplace(zk_request->getPath());
}
else if (response->error == Coordination::Error::ZNONODE && zk_request->getOpNum() == Coordination::OpNum::Exists)
{
watches[zk_request->getPath()].emplace_back(Watcher{info.session_id, info.watch_callback});
sessions_and_watchers[info.session_id].emplace(zk_request->getPath());
}
else
{
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
watch_response->path = zk_request->getPath();
watch_response->xid = -1;
watch_response->error = response->error;
watch_response->type = Coordination::Event::NOTWATCHING;
info.watch_callback(watch_response);
}
}
if (response->error == Coordination::Error::ZOK)
info.request->processWatches(watches, list_watches);
response->xid = zk_request->xid;
response->zxid = getZXID();
info.response_callback(response);
}
}
}
}
catch (...)
for (auto & path_watch : watches)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
finalize();
}
}
void TestKeeperStorage::finalize()
{
{
std::lock_guard lock(push_request_mutex);
if (shutdown)
return;
shutdown = true;
if (processing_thread.joinable())
processing_thread.join();
auto watch_responses = finish_watch(path_watch);
finalize_results.insert(finalize_results.end(), watch_responses.begin(), watch_responses.end());
}
try
watches.clear();
for (auto & path_watch : list_watches)
{
{
auto finish_watch = [] (const auto & watch_pair)
{
Coordination::ZooKeeperWatchResponse response;
response.type = Coordination::SESSION;
response.state = Coordination::EXPIRED_SESSION;
response.error = Coordination::Error::ZSESSIONEXPIRED;
auto list_watch_responses = finish_watch(path_watch);
finalize_results.insert(finalize_results.end(), list_watch_responses.begin(), list_watch_responses.end());
}
list_watches.clear();
sessions_and_watchers.clear();
for (auto & watcher : watch_pair.second)
{
if (watcher.watch_callback)
{
try
{
watcher.watch_callback(std::make_shared<Coordination::ZooKeeperWatchResponse>(response));
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
};
for (auto & path_watch : watches)
finish_watch(path_watch);
watches.clear();
for (auto & path_watch : list_watches)
finish_watch(path_watch);
list_watches.clear();
sessions_and_watchers.clear();
}
RequestInfo info;
while (requests_queue.tryPop(info))
{
auto response = info.request->zk_request->makeResponse();
response->error = Coordination::Error::ZSESSIONEXPIRED;
try
{
info.response_callback(response);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
catch (...)
for (const auto & [session_id, zk_request] : expired_requests)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
auto response = zk_request->makeResponse();
response->error = Coordination::Error::ZSESSIONEXPIRED;
finalize_results.push_back(ResponseForSession{session_id, response});
}
return finalize_results;
}
@ -731,55 +622,80 @@ TestKeeperWrapperFactory::TestKeeperWrapperFactory()
registerTestKeeperRequestWrapper<Coordination::OpNum::Multi, TestKeeperStorageMultiRequest>(*this);
}
void TestKeeperStorage::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback)
TestKeeperStorage::ResponsesForSessions TestKeeperStorage::processRequest(const Coordination::ZooKeeperRequestPtr & zk_request, int64_t session_id)
{
TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(request);
RequestInfo request_info;
request_info.time = clock::now();
request_info.request = storage_request;
request_info.session_id = session_id;
request_info.response_callback = callback;
std::lock_guard lock(push_request_mutex);
/// Put close requests without timeouts
if (request->getOpNum() == Coordination::OpNum::Close)
requests_queue.push(std::move(request_info));
else if (!requests_queue.tryPush(std::move(request_info), operation_timeout.totalMilliseconds()))
throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
}
void TestKeeperStorage::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback, ResponseCallback watch_callback)
{
TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(request);
RequestInfo request_info;
request_info.time = clock::now();
request_info.request = storage_request;
request_info.session_id = session_id;
request_info.response_callback = callback;
if (request->has_watch)
request_info.watch_callback = watch_callback;
std::lock_guard lock(push_request_mutex);
/// Put close requests without timeouts
if (request->getOpNum() == Coordination::OpNum::Close)
requests_queue.push(std::move(request_info));
else if (!requests_queue.tryPush(std::move(request_info), operation_timeout.totalMilliseconds()))
throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
}
TestKeeperStorage::~TestKeeperStorage()
{
try
TestKeeperStorage::ResponsesForSessions results;
if (zk_request->getOpNum() == Coordination::OpNum::Close)
{
finalize();
auto it = ephemerals.find(session_id);
if (it != ephemerals.end())
{
for (const auto & ephemeral_path : it->second)
{
container.erase(ephemeral_path);
auto responses = processWatchesImpl(ephemeral_path, watches, list_watches, Coordination::Event::DELETED);
results.insert(results.end(), responses.begin(), responses.end());
}
ephemerals.erase(it);
}
clearDeadWatches(session_id);
/// Finish connection
auto response = std::make_shared<Coordination::ZooKeeperCloseResponse>();
response->xid = zk_request->xid;
response->zxid = getZXID();
results.push_back(ResponseForSession{session_id, response});
}
catch (...)
else
{
tryLogCurrentException(__PRETTY_FUNCTION__);
TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(zk_request);
auto [response, _] = storage_request->process(container, ephemerals, zxid, session_id);
if (zk_request->has_watch)
{
if (response->error == Coordination::Error::ZOK)
{
auto & watches_type = zk_request->getOpNum() == Coordination::OpNum::List || zk_request->getOpNum() == Coordination::OpNum::SimpleList
? list_watches
: watches;
watches_type[zk_request->getPath()].emplace_back(session_id);
sessions_and_watchers[session_id].emplace(zk_request->getPath());
}
else if (response->error == Coordination::Error::ZNONODE && zk_request->getOpNum() == Coordination::OpNum::Exists)
{
watches[zk_request->getPath()].emplace_back(session_id);
sessions_and_watchers[session_id].emplace(zk_request->getPath());
}
else
{
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
watch_response->path = zk_request->getPath();
watch_response->xid = -1;
watch_response->error = response->error;
watch_response->type = Coordination::Event::NOTWATCHING;
results.push_back(ResponseForSession{session_id, watch_response});
}
}
if (response->error == Coordination::Error::ZOK)
{
auto watch_responses = storage_request->processWatches(watches, list_watches);
results.insert(results.end(), watch_responses.begin(), watch_responses.end());
}
response->xid = zk_request->xid;
response->zxid = getZXID();
results.push_back(ResponseForSession{session_id, response});
}
return results;
}
void TestKeeperStorage::clearDeadWatches(int64_t session_id)
{
auto watches_it = sessions_and_watchers.find(session_id);
@ -793,7 +709,7 @@ void TestKeeperStorage::clearDeadWatches(int64_t session_id)
auto & watches_for_path = watch->second;
for (auto w_it = watches_for_path.begin(); w_it != watches_for_path.end();)
{
if (w_it->session_id == session_id)
if (*w_it == session_id)
w_it = watches_for_path.erase(w_it);
else
++w_it;
@ -808,7 +724,7 @@ void TestKeeperStorage::clearDeadWatches(int64_t session_id)
auto & list_watches_for_path = list_watch->second;
for (auto w_it = list_watches_for_path.begin(); w_it != list_watches_for_path.end();)
{
if (w_it->session_id == session_id)
if (*w_it == session_id)
w_it = list_watches_for_path.erase(w_it);
else
++w_it;

View File

@ -4,9 +4,9 @@
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <future>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace zkutil
{
@ -18,10 +18,7 @@ using ResponseCallback = std::function<void(const Coordination::ZooKeeperRespons
class TestKeeperStorage
{
public:
Poco::Timespan operation_timeout{0, Coordination::DEFAULT_OPERATION_TIMEOUT_MS * 1000};
std::atomic<int64_t> session_id_counter{0};
struct Node
@ -34,71 +31,56 @@ public:
int32_t seq_num = 0;
};
struct Watcher
struct ResponseForSession
{
int64_t session_id;
ResponseCallback watch_callback;
Coordination::ZooKeeperResponsePtr response;
};
using ResponsesForSessions = std::vector<ResponseForSession>;
struct RequestForSession
{
int64_t session_id;
Coordination::ZooKeeperRequestPtr request;
};
using RequestsForSessions = std::vector<RequestForSession>;
using Container = std::map<std::string, Node>;
using Ephemerals = std::unordered_map<int64_t, std::unordered_set<String>>;
using SessionAndWatcher = std::unordered_map<int64_t, std::unordered_set<String>>;
using SessionIDs = std::vector<int64_t>;
using WatchCallbacks = std::vector<Watcher>;
using Watches = std::map<String /* path, relative of root_path */, WatchCallbacks>;
using Watches = std::map<String /* path, relative of root_path */, SessionIDs>;
Container container;
Ephemerals ephemerals;
SessionAndWatcher sessions_and_watchers;
std::atomic<int64_t> zxid{0};
std::atomic<bool> shutdown{false};
std::atomic<bool> finalized{false};
Watches watches;
Watches list_watches; /// Watches for 'list' request (watches on children).
using clock = std::chrono::steady_clock;
struct RequestInfo
{
TestKeeperStorageRequestPtr request;
ResponseCallback response_callback;
ResponseCallback watch_callback;
clock::time_point time;
int64_t session_id;
};
std::mutex push_request_mutex;
using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;
RequestsQueue requests_queue{1};
void finalize();
ThreadFromGlobalPool processing_thread;
void processingThread();
void clearDeadWatches(int64_t session_id);
public:
using AsyncResponse = std::future<Coordination::ZooKeeperResponsePtr>;
TestKeeperStorage();
~TestKeeperStorage();
struct ResponsePair
int64_t getZXID()
{
AsyncResponse response;
std::optional<AsyncResponse> watch_response;
};
void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback);
void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback, ResponseCallback watch_callback);
return zxid.fetch_add(1);
}
public:
TestKeeperStorage();
ResponsesForSessions processRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id);
ResponsesForSessions finalize(const RequestsForSessions & expired_requests);
int64_t getSessionID()
{
return session_id_counter.fetch_add(1);
}
int64_t getZXID()
{
return zxid.fetch_add(1);
}
};
}
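A hypothetical usage sketch of the new pull-style API above: processRequest() now returns the responses instead of invoking callbacks. The helpers makeGetRequest and sendToSession are invented placeholders; the real caller is TestKeeperStorageDispatcher:

zkutil::TestKeeperStorage storage;
int64_t session_id = storage.getSessionID();

Coordination::ZooKeeperRequestPtr request = makeGetRequest("/path");  /// hypothetical helper
auto responses = storage.processRequest(request, session_id);         /// no callbacks any more

for (const auto & response_for_session : responses)
    sendToSession(response_for_session.session_id, response_for_session.response);  /// hypothetical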

View File

@ -0,0 +1,139 @@
#include <Common/ZooKeeper/TestKeeperStorageDispatcher.h>
#include <Common/setThreadName.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int TIMEOUT_EXCEEDED;
}
}
namespace zkutil
{
void TestKeeperStorageDispatcher::processingThread()
{
setThreadName("TestKeeperSProc");
try
{
while (!shutdown)
{
RequestInfo info;
UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
if (requests_queue.tryPop(info, max_wait))
{
if (shutdown)
break;
auto responses = storage.processRequest(info.request, info.session_id);
for (const auto & response_for_session : responses)
setResponse(response_for_session.session_id, response_for_session.response);
}
}
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
finalize();
}
}
void TestKeeperStorageDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
{
std::lock_guard lock(session_to_response_callback_mutex);
auto session_writer = session_to_response_callback.find(session_id);
if (session_writer == session_to_response_callback.end())
return;
session_writer->second(response);
/// Session closed, no more writes
if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close)
session_to_response_callback.erase(session_writer);
}
void TestKeeperStorageDispatcher::finalize()
{
{
std::lock_guard lock(push_request_mutex);
if (shutdown)
return;
shutdown = true;
if (processing_thread.joinable())
processing_thread.join();
}
RequestInfo info;
TestKeeperStorage::RequestsForSessions expired_requests;
while (requests_queue.tryPop(info))
expired_requests.push_back(TestKeeperStorage::RequestForSession{info.session_id, info.request});
auto expired_responses = storage.finalize(expired_requests);
for (const auto & response_for_session : expired_responses)
setResponse(response_for_session.session_id, response_for_session.response);
}
void TestKeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id)
{
{
std::lock_guard lock(session_to_response_callback_mutex);
if (session_to_response_callback.count(session_id) == 0)
throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown session id {}", session_id);
}
RequestInfo request_info;
request_info.time = clock::now();
request_info.request = request;
request_info.session_id = session_id;
std::lock_guard lock(push_request_mutex);
/// Put close requests without timeouts
if (request->getOpNum() == Coordination::OpNum::Close)
requests_queue.push(std::move(request_info));
else if (!requests_queue.tryPush(std::move(request_info), operation_timeout.totalMilliseconds()))
throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
}
TestKeeperStorageDispatcher::TestKeeperStorageDispatcher()
{
processing_thread = ThreadFromGlobalPool([this] { processingThread(); });
}
TestKeeperStorageDispatcher::~TestKeeperStorageDispatcher()
{
try
{
finalize();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
void TestKeeperStorageDispatcher::registerSession(int64_t session_id, ZooKeeperResponseCallback callback)
{
std::lock_guard lock(session_to_response_callback_mutex);
if (!session_to_response_callback.try_emplace(session_id, callback).second)
throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session with id {} already registered in dispatcher", session_id);
}
void TestKeeperStorageDispatcher::finishSession(int64_t session_id)
{
std::lock_guard lock(session_to_response_callback_mutex);
auto session_it = session_to_response_callback.find(session_id);
if (session_it != session_to_response_callback.end())
session_to_response_callback.erase(session_it);
}
}
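The queue handoff above (tryPop with a timeout so the consumer loop can re-check 'shutdown', tryPush with a timeout so producers fail fast) is the classic bounded-buffer pattern. A minimal stand-in sketch, assuming nothing about ClickHouse's real ConcurrentBoundedQueue implementation:

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <mutex>

template <typename T>
class BoundedQueueSketch
{
    std::mutex mutex;
    std::condition_variable not_empty;
    std::condition_variable not_full;
    std::deque<T> queue;
    const size_t capacity;

public:
    explicit BoundedQueueSketch(size_t capacity_) : capacity(capacity_) {}

    bool tryPush(T value, uint64_t timeout_ms)
    {
        std::unique_lock lock(mutex);
        if (!not_full.wait_for(lock, std::chrono::milliseconds(timeout_ms), [&] { return queue.size() < capacity; }))
            return false;  /// producer timed out; mirrors the TIMEOUT_EXCEEDED path above
        queue.push_back(std::move(value));
        not_empty.notify_one();
        return true;
    }

    bool tryPop(T & value, uint64_t timeout_ms)
    {
        std::unique_lock lock(mutex);
        if (!not_empty.wait_for(lock, std::chrono::milliseconds(timeout_ms), [&] { return !queue.empty(); }))
            return false;  /// consumer wakes up periodically to re-check 'shutdown'
        value = std::move(queue.front());
        queue.pop_front();
        not_full.notify_one();
        return true;
    }
};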

View File

@ -0,0 +1,60 @@
#pragma once
#include <Common/ThreadPool.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ZooKeeper/TestKeeperStorage.h>
#include <functional>
namespace zkutil
{
using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response)>;
class TestKeeperStorageDispatcher
{
private:
Poco::Timespan operation_timeout{0, Coordination::DEFAULT_OPERATION_TIMEOUT_MS * 1000};
using clock = std::chrono::steady_clock;
struct RequestInfo
{
Coordination::ZooKeeperRequestPtr request;
clock::time_point time;
int64_t session_id;
};
std::mutex push_request_mutex;
using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;
RequestsQueue requests_queue{1};
std::atomic<bool> shutdown{false};
using SessionToResponseCallback = std::unordered_map<int64_t, ZooKeeperResponseCallback>;
std::mutex session_to_response_callback_mutex;
SessionToResponseCallback session_to_response_callback;
ThreadFromGlobalPool processing_thread;
TestKeeperStorage storage;
private:
void processingThread();
void finalize();
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);
public:
TestKeeperStorageDispatcher();
~TestKeeperStorageDispatcher();
void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id);
int64_t getSessionID()
{
return storage.getSessionID();
}
void registerSession(int64_t session_id, ZooKeeperResponseCallback callback);
/// Call when no more responses are needed for this session (the session has expired)
void finishSession(int64_t session_id);
};
}
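A hypothetical caller of the dispatcher API declared above; the callback body and the 'request' variable are assumptions (the real caller is the TCP handler):

zkutil::TestKeeperStorageDispatcher dispatcher;

int64_t session_id = dispatcher.getSessionID();
dispatcher.registerSession(session_id, [](const Coordination::ZooKeeperResponsePtr & response)
{
    /// Serialize 'response' back to the client's socket here.
});

dispatcher.putRequest(request, session_id);  /// queued; responses arrive via the callback
dispatcher.finishSession(session_id);        /// once the session expires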

File diff suppressed because it is too large.

View File

@ -85,6 +85,7 @@ SRCS(
ZooKeeper/IKeeper.cpp
ZooKeeper/TestKeeper.cpp
ZooKeeper/TestKeeperStorage.cpp
ZooKeeper/TestKeeperStorageDispatcher.cpp
ZooKeeper/ZooKeeper.cpp
ZooKeeper/ZooKeeperCommon.cpp
ZooKeeper/ZooKeeperConstants.cpp

View File

@ -74,9 +74,10 @@
#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441
#define DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO 54443
#define DBMS_MIN_REVISION_WITH_REFERER_IN_CLIENT_INFO 54447
/// Version of ClickHouse TCP protocol. Increment it manually when you change the protocol.
#define DBMS_TCP_PROTOCOL_VERSION 54443
#define DBMS_TCP_PROTOCOL_VERSION 54447
/// The boundary on which the blocks for asynchronous file operations should be aligned.
#define DEFAULT_AIO_FILE_BLOCK_SIZE 4096

View File

@ -416,6 +416,7 @@ class IColumn;
M(Bool, use_antlr_parser, false, "Parse incoming queries using ANTLR-generated experimental parser", 0) \
M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
\
M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) functions to the countIf() function when logically equivalent", 0) \
/** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
\
M(UInt64, max_memory_usage_for_all_queries, 0, "Obsolete. Will be removed after 2020-10-20", 0) \

View File

@ -17,6 +17,7 @@
#include <Storages/StorageValues.h>
#include <Storages/LiveView/StorageLiveView.h>
#include <Storages/StorageMaterializedView.h>
#include <common/logger_useful.h>
namespace DB
@ -30,6 +31,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
bool no_destination)
: storage(storage_)
, metadata_snapshot(metadata_snapshot_)
, log(&Poco::Logger::get("PushingToViewsBlockOutputStream"))
, context(context_)
, query_ptr(query_ptr_)
{
@ -144,6 +146,8 @@ Block PushingToViewsBlockOutputStream::getHeader() const
void PushingToViewsBlockOutputStream::write(const Block & block)
{
Stopwatch watch;
/** Throw an exception if the sizes of arrays - elements of nested data structures - don't match.
* We have to make this assertion before writing to the table, because the storage engine may assume that they have equal sizes.
* NOTE It'd be better to do this check during serialization of nested structures (at the place where this assumption is required),
@ -198,6 +202,14 @@ void PushingToViewsBlockOutputStream::write(const Block & block)
std::rethrow_exception(views[view_num].exception);
}
}
UInt64 milliseconds = watch.elapsedMilliseconds();
if (views.size() > 1)
{
LOG_TRACE(log, "Pushing from {} to {} views took {} ms.",
storage->getStorageID().getNameForLogs(), views.size(),
milliseconds);
}
}
void PushingToViewsBlockOutputStream::writePrefix()
@ -304,6 +316,7 @@ void PushingToViewsBlockOutputStream::flush()
void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_num)
{
Stopwatch watch;
auto & view = views[view_num];
try
@ -365,6 +378,12 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
{
view.exception = std::current_exception();
}
UInt64 milliseconds = watch.elapsedMilliseconds();
LOG_TRACE(log, "Pushing from {} to {} took {} ms.",
storage->getStorageID().getNameForLogs(),
view.table_id.getNameForLogs(),
milliseconds);
}
}
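A minimal sketch of the timing pattern added above; 'doWork' is an invented placeholder, while Stopwatch, elapsedMilliseconds and LOG_TRACE are used exactly as in the diff:

Stopwatch watch;        /// starts timing on construction
doWork();               /// hypothetical work being measured
UInt64 milliseconds = watch.elapsedMilliseconds();
LOG_TRACE(log, "Work took {} ms.", milliseconds);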

View File

@ -4,6 +4,10 @@
#include <Parsers/IAST_fwd.h>
#include <Storages/IStorage.h>
namespace Poco
{
class Logger;
};
namespace DB
{
@ -36,6 +40,7 @@ private:
StorageMetadataPtr metadata_snapshot;
BlockOutputStreamPtr output;
ReplicatedMergeTreeBlockOutputStream * replicated_output = nullptr;
Poco::Logger * log;
const Context & context;
ASTPtr query_ptr;

View File

@ -29,7 +29,8 @@ void DataTypeCustomSimpleAggregateFunction::checkSupportedFunctions(const Aggreg
{
static const std::vector<String> supported_functions{"any", "anyLast", "min",
"max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
"sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray"};
"sumMap", "minMap", "maxMap", "groupArrayArray", "groupUniqArrayArray",
"argMin", "argMax"};
// check function
if (std::find(std::begin(supported_functions), std::end(supported_functions), function->getName()) == std::end(supported_functions))

View File

@ -51,7 +51,13 @@ public:
bool isParametric() const override { return false; }
bool haveSubtypes() const override { return false; }
bool shouldAlignRightInPrettyFormats() const override { return true; }
bool shouldAlignRightInPrettyFormats() const override
{
/// Just a number, without customizations. Counterexample: IPv4.
return !custom_text_serialization;
}
bool textCanContainOnlyValidUTF8() const override { return true; }
bool isComparable() const override { return true; }
bool isValueRepresentedByNumber() const override { return true; }

View File

@ -497,7 +497,7 @@ public:
/// For all other substreams (like ArraySizes, NullMasks, etc.) we use only
/// generic compression codecs like LZ4.
static bool isSpecialCompressionAllowed(const SubstreamPath & path);
private:
protected:
friend class DataTypeFactory;
friend class AggregateFunctionSimpleState;
/// Customize this DataType

View File

@ -175,9 +175,9 @@ void DiskDecorator::truncateFile(const String & path, size_t size)
delegate->truncateFile(path, size);
}
int DiskDecorator::open(const String & path, mode_t mode) const
int DiskDecorator::open(const String & path, int flags) const
{
return delegate->open(path, mode);
return delegate->open(path, flags);
}
void DiskDecorator::close(int fd) const

View File

@ -48,7 +48,7 @@ public:
void setReadOnly(const String & path) override;
void createHardLink(const String & src_path, const String & dst_path) override;
void truncateFile(const String & path, size_t size) override;
int open(const String & path, mode_t mode) const override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;
const String getType() const override { return delegate->getType(); }

View File

@ -315,10 +315,10 @@ void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to
IDisk::copy(from_path, to_disk, to_path); /// Copy files through buffers.
}
int DiskLocal::open(const String & path, mode_t mode) const
int DiskLocal::open(const String & path, int flags) const
{
String full_path = disk_path + path;
int fd = ::open(full_path.c_str(), mode);
int fd = ::open(full_path.c_str(), flags);
if (-1 == fd)
throwFromErrnoWithPath("Cannot open file " + full_path, full_path,
errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
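The rename from 'mode' to 'flags' above matters because the second argument of POSIX open() is the set of flags (O_RDONLY, O_WRONLY, ...), while a mode_t permission mask is only meaningful as the third argument together with O_CREAT. A minimal sketch (invented function names):

#include <fcntl.h>

int open_for_read(const char * path)
{
    return ::open(path, O_RDONLY);                  /// flags, not a permission mode
}

int create_for_write(const char * path)
{
    return ::open(path, O_WRONLY | O_CREAT, 0644);  /// mode is passed only with O_CREAT
}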

View File

@ -98,7 +98,7 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
int open(const String & path, mode_t mode) const override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;

View File

@ -436,7 +436,7 @@ void DiskMemory::setReadOnly(const String &)
throw Exception("Method setReadOnly is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}
int DiskMemory::open(const String & /*path*/, mode_t /*mode*/) const
int DiskMemory::open(const String & /*path*/, int /*flags*/) const
{
throw Exception("Method open is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
}

View File

@ -89,7 +89,7 @@ public:
void createHardLink(const String & src_path, const String & dst_path) override;
int open(const String & path, mode_t mode) const override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;

View File

@ -175,7 +175,7 @@ public:
virtual void createHardLink(const String & src_path, const String & dst_path) = 0;
/// Wrapper for POSIX open
virtual int open(const String & path, mode_t mode) const = 0;
virtual int open(const String & path, int flags) const = 0;
/// Wrapper for POSIX close
virtual void close(int fd) const = 0;

View File

@ -878,7 +878,7 @@ void DiskS3::setReadOnly(const String & path)
metadata.save();
}
int DiskS3::open(const String & /*path*/, mode_t /*mode*/) const
int DiskS3::open(const String & /*path*/, int /*flags*/) const
{
throw Exception("Method open is not implemented for S3 disks", ErrorCodes::NOT_IMPLEMENTED);
}

View File

@ -105,7 +105,7 @@ public:
void setReadOnly(const String & path) override;
int open(const String & path, mode_t mode) const override;
int open(const String & path, int flags) const override;
void close(int fd) const override;
void sync(int fd) const override;

Some files were not shown because too many files have changed in this diff.