Mirror of https://github.com/ClickHouse/ClickHouse.git
Commit 7ea5bd4a3f: Merge remote-tracking branch 'origin/master' into HEAD
@ -516,9 +516,9 @@ include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
include (cmake/find/nanodbc.cmake)
include (cmake/find/rocksdb.cmake)
include (cmake/find/libpqxx.cmake)
include (cmake/find/nanodbc.cmake)
include (cmake/find/nuraft.cmake)
@ -25,7 +25,7 @@
#if defined(__PPC__)
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#endif
@ -1266,7 +1266,7 @@ public:
};

#if defined(__PPC__)
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
#endif
@ -159,9 +159,9 @@ public:
*/
Pool(const std::string & db_,
const std::string & server_,
const std::string & user_ = "",
const std::string & password_ = "",
unsigned port_ = 0,
const std::string & user_,
const std::string & password_,
unsigned port_,
const std::string & socket_ = "",
unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT,
@ -1,9 +1,9 @@
# This strings autochanged from release_lib.sh:
SET(VERSION_REVISION 54450)
SET(VERSION_REVISION 54451)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 5)
SET(VERSION_MINOR 6)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 3827789b3d8fd2021952e57e5110343d26daa1a1)
SET(VERSION_DESCRIBE v21.5.1.1-prestable)
SET(VERSION_STRING 21.5.1.1)
SET(VERSION_GITHASH 96fced4c3cf432fb0b401d2ab01f0c56e5f74a96)
SET(VERSION_DESCRIBE v21.6.1.1-prestable)
SET(VERSION_STRING 21.6.1.1)
# end of autochange
@ -1,35 +1,16 @@
option(ENABLE_NANODBC "Enalbe nanodbc" ${ENABLE_LIBRARIES})

if (NOT ENABLE_NANODBC)
set (USE_ODBC 0)
return()
endif()

if (NOT ENABLE_ODBC)
set (USE_NANODBC 0)
message (STATUS "Using nanodbc=${USE_NANODBC}")
return()
endif()
return ()
endif ()

if (NOT USE_INTERNAL_NANODBC_LIBRARY)
message (FATAL_ERROR "Only the bundled nanodbc library can be used")
endif ()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt")
message (WARNING "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal nanodbc library")
set (USE_NANODBC 0)
return()
message (FATAL_ERROR "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
endif()

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/include")
message (ERROR "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive")
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal unixodbc needed for nanodbc")
set (USE_NANODBC 0)
return()
endif()

set (USE_NANODBC 1)

set (NANODBC_LIBRARY nanodbc)
set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbc")

set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbce")

message (STATUS "Using nanodbc=${USE_NANODBC}: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}")
message (STATUS "Using unixodbc")
message (STATUS "Using nanodbc: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}")
@ -50,4 +50,6 @@ if (NOT EXTERNAL_ODBC_LIBRARY_FOUND)
set (USE_INTERNAL_ODBC_LIBRARY 1)
endif ()

set (USE_INTERNAL_NANODBC_LIBRARY 1)

message (STATUS "Using unixodbc")
@ -171,6 +171,7 @@ elseif (COMPILER_GCC)
add_cxx_compile_options(-Wtrampolines)
# Obvious
add_cxx_compile_options(-Wunused)
add_cxx_compile_options(-Wundef)
# Warn if vector operation is not implemented via SIMD capabilities of the architecture
add_cxx_compile_options(-Wvector-operation-performance)
# XXX: libstdc++ has some of these for 3way compare
contrib/CMakeLists.txt (vendored, 5 changes)
@ -47,6 +47,7 @@ add_subdirectory (lz4-cmake)
add_subdirectory (murmurhash)
add_subdirectory (replxx-cmake)
add_subdirectory (unixodbc-cmake)
add_subdirectory (nanodbc-cmake)

if (USE_INTERNAL_XZ_LIBRARY)
add_subdirectory (xz)
@ -320,10 +321,6 @@ if (USE_LIBPQXX)
add_subdirectory (libpqxx-cmake)
endif()

if (USE_NANODBC)
add_subdirectory (nanodbc-cmake)
endif()

if (USE_NURAFT)
add_subdirectory(nuraft-cmake)
endif()
contrib/datasketches-cpp (vendored, 2 changes)
@ -1 +1 @@
Subproject commit 45885c0c8c0807bb9480886d60ca7042000a4c43
Subproject commit f915d35b2de676683493c86c585141a1e1c83334
@ -1,3 +1,7 @@
if (NOT USE_INTERNAL_NANODBC_LIBRARY)
return ()
endif ()

set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/nanodbc)

if (NOT TARGET unixodbc)
contrib/zlib-ng (vendored, 2 changes)
@ -1 +1 @@
Subproject commit 7f254522fd676ff4e906c6d4e9b30d4df4214c2d
Subproject commit 5cc4d232020dc66d1d6c5438834457e2a2f6127b
debian/changelog (vendored, 4 changes)
@ -1,5 +1,5 @@
clickhouse (21.5.1.1) unstable; urgency=low
clickhouse (21.6.1.1) unstable; urgency=low

* Modified source code

-- clickhouse-release <clickhouse-release@yandex-team.ru> Fri, 02 Apr 2021 18:34:26 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 20 Apr 2021 01:48:16 +0300
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.5.1.*
ARG version=21.6.1.*

RUN apt-get update \
    && apt-get install --yes --no-install-recommends \
@ -42,15 +42,12 @@ RUN apt-get update \
clang-tidy-10 \
clang-tidy-11 \
cmake \
cmake \
curl \
g++-9 \
gcc-9 \
gdb \
git \
gperf \
gperf \
intel-opencl-icd \
libicu-dev \
libreadline-dev \
lld-10 \
@ -61,10 +58,7 @@ RUN apt-get update \
llvm-11-dev \
moreutils \
ninja-build \
ocl-icd-libopencl1 \
opencl-headers \
pigz \
pixz \
rename \
tzdata \
--yes --no-install-recommends
@ -35,9 +35,6 @@ RUN apt-get update \
libjemalloc-dev \
libmsgpack-dev \
libcurl4-openssl-dev \
opencl-headers \
ocl-icd-libopencl1 \
intel-opencl-icd \
unixodbc-dev \
odbcinst \
tzdata \
@ -1,7 +1,7 @@
FROM ubuntu:20.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.5.1.*
ARG version=21.6.1.*
ARG gosu_ver=1.10

# set non-empty deb_location_url url to create a docker image
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=21.5.1.*
ARG version=21.6.1.*

RUN apt-get update && \
    apt-get install -y apt-transport-https dirmngr && \
@ -308,12 +308,8 @@ function run_tests
01354_order_by_tuple_collate_const
01355_ilike
01411_bayesian_ab_testing
01532_collate_in_low_cardinality
01533_collate_in_nullable
01542_collate_in_array
01543_collate_in_tuple
01798_uniq_theta_sketch
01799_long_uniq_theta_sketch
collate
collation
_orc_
arrow
avro
@ -1149,20 +1149,21 @@ function upload_results
return 0
fi

# Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000
# so I have to extract host and port with clickhouse-local. I tried to use
# Poco URI parser to support this in the client, but it's broken and can't
# parse host:port.
set +x # Don't show password in the log
clickhouse-client \
$(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV") \
--secure \
--user "${CHPC_DATABASE_USER}" \
--password "${CHPC_DATABASE_PASSWORD}" \
--config "right/config/client_config.xml" \
--database perftest \
--date_time_input_format=best_effort \
--query "
client=(clickhouse-client
# Surprisingly, clickhouse-client doesn't understand --host 127.0.0.1:9000
# so I have to extract host and port with clickhouse-local. I tried to use
# Poco URI parser to support this in the client, but it's broken and can't
# parse host:port.
$(clickhouse-local --query "with '${CHPC_DATABASE_URL}' as url select '--host ' || domain(url) || ' --port ' || toString(port(url)) format TSV")
--secure
--user "${CHPC_DATABASE_USER}"
--password "${CHPC_DATABASE_PASSWORD}"
--config "right/config/client_config.xml"
--database perftest
--date_time_input_format=best_effort)

"${client[@]}" --query "
insert into query_metrics_v2
select
toDate(event_time) event_date,
@ -1185,6 +1186,25 @@ function upload_results
format TSV
settings date_time_input_format='best_effort'
" < report/all-query-metrics.tsv # Don't leave whitespace after INSERT: https://github.com/ClickHouse/ClickHouse/issues/16652

# Upload some run attributes. I use this weird form because it is the same
# form that can be used for historical data when you only have compare.log.
cat compare.log \
| sed -n '
s/.*Model name:[[:space:]]\+\(.*\)$/metric lscpu-model-name \1/p;
s/.*L1d cache:[[:space:]]\+\(.*\)$/metric lscpu-l1d-cache \1/p;
s/.*L1i cache:[[:space:]]\+\(.*\)$/metric lscpu-l1i-cache \1/p;
s/.*L2 cache:[[:space:]]\+\(.*\)$/metric lscpu-l2-cache \1/p;
s/.*L3 cache:[[:space:]]\+\(.*\)$/metric lscpu-l3-cache \1/p;
s/.*left_sha=\(.*\)$/old-sha \1/p;
s/.*right_sha=\(.*\)/new-sha \1/p' \
| awk '
BEGIN { FS = "\t"; OFS = "\t" }
/^old-sha/ { old_sha=$2 }
/^new-sha/ { new_sha=$2 }
/^metric/ { print old_sha, new_sha, $2, $3 }' \
| "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV"

set -x
}
@ -17,6 +17,9 @@
<!-- One NUMA node w/o hyperthreading -->
<max_threads>12</max_threads>

<!-- mmap shows some improvements in perf tests -->
<min_bytes_to_use_mmap_io>64Mi</min_bytes_to_use_mmap_io>
</default>
</profiles>
<users>
@ -43,4 +43,3 @@ CMD echo "Running PVS version $PKG_VERSION" && cd /repo_folder && pvs-studio-ana
&& pvs-studio-analyzer analyze -o pvs-studio.log -e contrib -j 4 -l ./licence.lic; \
plog-converter -a GA:1,2 -t fullhtml -o /test_output/pvs-studio-html-report pvs-studio.log; \
plog-converter -a GA:1,2 -t tasklist -o /test_output/pvs-studio-task-report.txt pvs-studio.log
@ -104,6 +104,12 @@ clickhouse-client -q "system flush logs" ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &
clickhouse-client -q "select * from system.query_thread_log format TSVWithNamesAndTypes" | pigz > /test_output/query-thread-log.tsv.gz &
clickhouse-client --allow_introspection_functions=1 -q "
WITH
arrayMap(x -> concat(demangle(addressToSymbol(x)), ':', addressToLine(x)), trace) AS trace_array,
arrayStringConcat(trace_array, '\n') AS trace_string
SELECT * EXCEPT(trace), trace_string FROM system.trace_log FORMAT TSVWithNamesAndTypes
" | pigz > /test_output/trace-log.tsv.gz &
wait ||:

mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
@ -14,9 +14,7 @@ RUN apt-get --allow-unauthenticated update -y \
expect \
gdb \
gperf \
gperf \
heimdal-multidev \
intel-opencl-icd \
libboost-filesystem-dev \
libboost-iostreams-dev \
libboost-program-options-dev \
@ -50,9 +48,7 @@ RUN apt-get --allow-unauthenticated update -y \
moreutils \
ncdu \
netcat-openbsd \
ocl-icd-libopencl1 \
odbcinst \
opencl-headers \
openssl \
perl \
pigz \
@ -136,6 +136,7 @@ pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhous
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/stderr.log /test_output/
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||:

# Write check result into check_status.tsv
clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv
@ -27,53 +27,20 @@ Or cmake3 instead of cmake on older systems.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

```bash
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

For other Linux distribution - check the availability of the [prebuild packages](https://releases.llvm.org/download.html) or build clang [from sources](https://clang.llvm.org/get_started.html).

#### Use clang-11 for Builds {#use-gcc-10-for-builds}
#### Use clang-11 for Builds

``` bash
$ export CC=clang-11
$ export CXX=clang++-11
```

### Install GCC 10 {#install-gcc-10}

We recommend building ClickHouse with clang-11, GCC-10 also supported, but it is not used for production builds.

If you want to use GCC-10 there are several ways to install it.

#### Install from Repository {#install-from-repository}

On Ubuntu 19.10 or newer:

$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10

#### Install from a PPA Package {#install-from-a-ppa-package}

On older Ubuntu:

``` bash
$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10
```

#### Install from Sources {#install-from-sources}

See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)

#### Use GCC 10 for Builds {#use-gcc-10-for-builds}

``` bash
$ export CC=gcc-10
$ export CXX=g++-10
```
Gcc can also be used though it is discouraged.

### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
@ -131,17 +131,18 @@ ClickHouse uses several external libraries for building. All of them do not need
## C++ Compiler {#c-compiler}

Compilers GCC starting from version 10 and Clang version 8 or above are supported for building ClickHouse.
Compilers Clang starting from version 11 is supported for building ClickHouse.

Official Yandex builds currently use GCC because it generates machine code of slightly better performance (yielding a difference of up to several percent according to our benchmarks). And Clang is more convenient for development usually. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.
Clang should be used instead of gcc. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.

To install GCC on Ubuntu run: `sudo apt install gcc g++`
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

Check the version of gcc: `gcc --version`. If it is below 10, then follow the instruction here: https://clickhouse.tech/docs/en/development/build/#install-gcc-10.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

Mac OS X build is supported only for Clang. Just run `brew install llvm`
Mac OS X build is also supported. Just run `brew install llvm`

If you decide to use Clang, you can also install `libc++` and `lld`, if you know what it is. Using `ccache` is also recommended.

## The Building Process {#the-building-process}
@ -152,14 +153,7 @@ Now that you are ready to build ClickHouse we recommend you to create a separate
You can have several different directories (build_release, build_debug, etc.) for different types of build.

While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler (version 10 gcc compiler in this example).

Linux:

export CC=gcc-10 CXX=g++-10
cmake ..

Mac OS X:
While inside the `build` directory, configure your build by running CMake. Before the first run, you need to define environment variables that specify compiler.

export CC=clang CXX=clang++
cmake ..
@ -701,7 +701,7 @@ But other things being equal, cross-platform or portable code is preferred.
**2.** Language: C++20 (see the list of available [C++20 features](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).

**3.** Compiler: `gcc`. At this time (August 2020), the code is compiled using version 9.3. (It can also be compiled using `clang 8`.)
**3.** Compiler: `clang`. At this time (April 2021), the code is compiled using clang version 11. (It can also be compiled using `gcc` version 10, but it's untested and not suitable for production usage).

The standard library is used (`libc++`).
@ -711,7 +711,7 @@ The standard library is used (`libc++`).
The CPU instruction set is the minimum supported set among our servers. Currently, it is SSE 4.2.

**6.** Use `-Wall -Wextra -Werror` compilation flags.
**6.** Use `-Wall -Wextra -Werror` compilation flags. Also `-Weverything` is used with few exceptions.

**7.** Use static linking with all libraries except those that are difficult to connect to statically (see the output of the `ldd` command).
@ -19,26 +19,26 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure,
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression` — Compression type. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Parameter is optional. By default, it will autodetect compression by file extension.
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.

**Example:**
**Example**

**1.** Set up the `s3_engine_table` table:
1. Set up the `s3_engine_table` table:

```sql
CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
``` sql
CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip');
```

**2.** Fill file:
2. Fill file:

```sql
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
``` sql
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
```

**3.** Query the data:
3. Query the data:

```sql
SELECT * FROM s3_engine_table LIMIT 2
``` sql
SELECT * FROM s3_engine_table LIMIT 2;
```

```text
@ -73,13 +73,63 @@ For more information about virtual columns see [here](../../../engines/table-eng
Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.

## S3-related Settings {#s3-settings}
**Example**

1. Suppose we have several files in CSV format with the following URIs on S3:

- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv’
- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv’
- ‘https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv’
- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv’
- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv’
- ‘https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv’

There are several ways to make a table consisting of all six files:

The first way:

``` sql
CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
```

Another way:

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
```

Table consists of all the files in both directories (all files should satisfy format and schema described in query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
```

If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.

**Example**

Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:

``` sql
CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
```

## Virtual Columns {#virtual-columns}

- `_path` — Path to the file.
- `_file` — Name of the file.

**See Also**

- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)

## S3-related settings {#settings}

The following settings can be set before query execution or placed into configuration file.

- `s3_max_single_part_upload_size` — The maximum size of object to upload using singlepart upload to S3. Default value is `64Mb`.
- `s3_max_single_part_upload_size` — The maximum size of object to upload using singlepart upload to S3. Default value is `64Mb`.
- `s3_min_upload_part_size` — The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). Default value is `512Mb`.
- `s3_max_redirects` — Max number of S3 redirects hops allowed. Default value is `10`.
- `s3_max_redirects` — Max number of S3 redirects hops allowed. Default value is `10`.

Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration.
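A minimal sketch of applying these settings for a single session, reusing the `s3_engine_table` defined above (the chosen values here are arbitrary illustrations, not recommendations):

``` sql
-- Harden against redirects and cap single-part uploads before touching S3-backed tables.
SET s3_max_redirects = 0;
SET s3_max_single_part_upload_size = 33554432; -- 32 MiB
SELECT * FROM s3_engine_table LIMIT 2;
```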
@ -156,5 +206,3 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-
## See also

- [S3 table function](../../../sql-reference/table-functions/s3.md)

[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/s3/) <!--hide-->
@ -77,7 +77,8 @@ toc_title: Adopters
| <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
| <a href="https://mellodesign.ru/" class="favicon">Mello</a> | Marketing | Analytics | 1 server | — | [Article, Oct 2020](https://vc.ru/marketing/166180-razrabotka-tipovogo-otcheta-skvoznoy-analitiki) |
| <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
| <a href="https://www.mindsdb.com/" class="favicon">MindsDB</a> | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |x
| <a href="https://clarity.microsoft.com/" class="favicon">Microsoft</a> | Web Analytics | Clarity (Main Product) | — | — | [A question on GitHub](https://github.com/ClickHouse/ClickHouse/issues/21556) |
| <a href="https://www.mindsdb.com/" class="favicon">MindsDB</a> | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |
| <a href="https://mux.com/" class="favicon">MUX</a> | Online Video | Video Analytics | — | — | [Talk in English, August 2019](https://altinity.com/presentations/2019/8/13/how-clickhouse-became-the-default-analytics-database-for-mux/) |
| <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
| <a href="https://www.netskope.com/" class="favicon">Netskope</a> | Network Security | — | — | — | [Job advertisement, March 2021](https://www.mendeley.com/careers/job/senior-software-developer-backend-developer-1346348) |
@ -12,6 +12,7 @@ With this instruction you can run basic ClickHouse performance test on any serve
3. Copy the link to `clickhouse` binary for amd64 or aarch64.
4. ssh to the server and download it with wget:
```bash
# These links are outdated, please obtain the fresh link from the "commits" page.
# For amd64:
wget https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse
# For aarch64:
@ -854,8 +854,6 @@ For example, when reading from a table, if it is possible to evaluate expression
Default value: the number of physical CPU cores.

If less than one SELECT query is normally run on a server at a time, set this parameter to a value slightly less than the actual number of processor cores.

For queries that are completed quickly because of a LIMIT, you can set a lower ‘max_threads’. For example, if the necessary number of entries are located in every block and max_threads = 8, then 8 blocks are retrieved, although it would have been enough to read just one.

The smaller the `max_threads` value, the less memory is consumed.
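As a small sketch of the LIMIT case described above (assuming a hypothetical table named `hits`), the setting can be lowered for one query with a `SETTINGS` clause:

``` sql
-- Fewer threads means fewer blocks read speculatively for a short LIMIT query.
SELECT * FROM hits LIMIT 10 SETTINGS max_threads = 1;
```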
@ -15,16 +15,16 @@ Columns:
- `node_name` ([String](../../sql-reference/data-types/string.md)) — Node name in ZooKeeper.

- `type` ([String](../../sql-reference/data-types/string.md)) — Type of the task in the queue, one of:
- `GET_PART` - Get the part from another replica.
- `ATTACH_PART` - Attach the part, possibly from our own replica (if found in `detached` folder).
You may think of it as a `GET_PART` with some optimisations as they're nearly identical.
- `MERGE_PARTS` - Merge the parts.
- `DROP_RANGE` - Delete the parts in the specified partition in the specified number range.
- `CLEAR_COLUMN` - NOTE: Deprecated. Drop specific column from specified partition.
- `CLEAR_INDEX` - NOTE: Deprecated. Drop specific index from specified partition.
- `REPLACE_RANGE` - Drop certain range of partitions and replace them by new ones
- `MUTATE_PART` - Apply one or several mutations to the part.
- `ALTER_METADATA` - Apply alter modification according to global /metadata and /columns paths
- `GET_PART` — Get the part from another replica.
- `ATTACH_PART` — Attach the part, possibly from our own replica (if found in the `detached` folder). You may think of it as a `GET_PART` with some optimizations as they're nearly identical.
- `MERGE_PARTS` — Merge the parts.
- `DROP_RANGE` — Delete the parts in the specified partition in the specified number range.
- `CLEAR_COLUMN` — NOTE: Deprecated. Drop specific column from specified partition.
- `CLEAR_INDEX` — NOTE: Deprecated. Drop specific index from specified partition.
- `REPLACE_RANGE` — Drop a certain range of parts and replace them with new ones.
- `MUTATE_PART` — Apply one or several mutations to the part.
- `ALTER_METADATA` — Apply alter modification according to global /metadata and /columns paths.

- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the task was submitted for execution.
@ -29,6 +29,3 @@ $ sudo apt-get update
$ sudo apt-get install clickhouse-server=xx.yy.a.b clickhouse-client=xx.yy.a.b clickhouse-common-static=xx.yy.a.b
$ sudo service clickhouse-server restart
```
@ -27,7 +27,37 @@ Example 2: `uniqArray(arr)` – Counts the number of unique elements in all ‘a
## -SimpleState {#agg-functions-combinator-simplestate}

If you apply this combinator, the aggregate function returns the same value but with a different type. This is an `SimpleAggregateFunction(...)` that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines.
If you apply this combinator, the aggregate function returns the same value but with a different type. This is a [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md) that can be stored in a table to work with [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) tables.

**Syntax**

``` sql
<aggFunction>SimpleState(x)
```

**Arguments**

- `x` — Aggregate function parameters.

**Returned values**

The value of an aggregate function with the `SimpleAggregateFunction(...)` type.

**Example**

Query:

``` sql
WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
```

Result:

``` text
┌─toTypeName(c)────────────────────────┬─c─┐
│ SimpleAggregateFunction(any, UInt64) │ 0 │
└──────────────────────────────────────┴───┘
```

## -State {#agg-functions-combinator-state}
@ -249,4 +279,3 @@ FROM people
└────────┴───────────────────────────┘
```
@ -6,20 +6,12 @@ toc_priority: 106
Calculates the `arg` value for a maximum `val` value. If there are several different values of `arg` for maximum values of `val`, returns the first of these values encountered.

Tuple version of this function will return the tuple with the maximum `val` value. It is convenient for use with [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md).

**Syntax**

``` sql
argMax(arg, val)
```

or

``` sql
argMax(tuple(arg, val))
```

**Arguments**

- `arg` — Argument.
@ -29,13 +21,7 @@ argMax(tuple(arg, val))
- `arg` value that corresponds to maximum `val` value.

Type: matches `arg` type.

For tuple in the input:

- Tuple `(arg, val)`, where `val` is the maximum value and `arg` is a corresponding value.

Type: [Tuple](../../../sql-reference/data-types/tuple.md).
Type: matches `arg` type.

**Example**
@ -52,15 +38,13 @@ Input table:
Query:

``` sql
SELECT argMax(user, salary), argMax(tuple(user, salary), salary), argMax(tuple(user, salary)) FROM salary;
SELECT argMax(user, salary) FROM salary;
```

Result:

``` text
┌─argMax(user, salary)─┬─argMax(tuple(user, salary), salary)─┬─argMax(tuple(user, salary))─┐
│ director             │ ('director',5000)                   │ ('director',5000)           │
└──────────────────────┴─────────────────────────────────────┴─────────────────────────────┘
┌─argMax(user, salary)─┐
│ director             │
└──────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmax/) <!--hide-->
@ -6,20 +6,12 @@ toc_priority: 105
Calculates the `arg` value for a minimum `val` value. If there are several different values of `arg` for minimum values of `val`, returns the first of these values encountered.

Tuple version of this function will return the tuple with the minimum `val` value. It is convenient for use with [SimpleAggregateFunction](../../../sql-reference/data-types/simpleaggregatefunction.md).

**Syntax**

``` sql
argMin(arg, val)
```

or

``` sql
argMin(tuple(arg, val))
```

**Arguments**

- `arg` — Argument.
@ -29,13 +21,7 @@ argMin(tuple(arg, val))
- `arg` value that corresponds to minimum `val` value.

Type: matches `arg` type.

For tuple in the input:

- Tuple `(arg, val)`, where `val` is the minimum value and `arg` is a corresponding value.

Type: [Tuple](../../../sql-reference/data-types/tuple.md).
Type: matches `arg` type.

**Example**
@ -52,15 +38,13 @@ Input table:
Query:

``` sql
SELECT argMin(user, salary), argMin(tuple(user, salary)) FROM salary;
SELECT argMin(user, salary) FROM salary
```

Result:

``` text
┌─argMin(user, salary)─┬─argMin(tuple(user, salary))─┐
│ worker               │ ('worker',1000)             │
└──────────────────────┴─────────────────────────────┘
┌─argMin(user, salary)─┐
│ worker               │
└──────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmin/) <!--hide-->
@ -6,7 +6,7 @@ toc_priority: 207
Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a numeric data sequence using the [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf) algorithm.

The maximum error is 1%. Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic.
Memory consumption is `log(n)`, where `n` is a number of values. The result depends on the order of running the query, and is nondeterministic.

The performance of the function is lower than performance of [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md#quantile) or [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md#quantiletiming). In terms of the ratio of State size to precision, this function is much better than `quantile`.
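A minimal sketch of the function in use, assuming nothing beyond the built-in `numbers` table function:

``` sql
-- Approximate median of 0..999 computed with the t-digest algorithm.
SELECT quantileTDigest(0.5)(number) FROM numbers(1000);
```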
@ -38,4 +38,3 @@ We recommend using this function in almost all scenarios.
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -49,4 +49,3 @@ Compared to the [uniq](../../../sql-reference/aggregate-functions/reference/uniq
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -23,4 +23,3 @@ The function takes a variable number of parameters. Parameters can be `Tuple`, `
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqcombined)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniqhll12)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -37,4 +37,3 @@ We don’t recommend using this function. In most cases, use the [uniq](../../..
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
- [uniqThetaSketch](../../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -1,39 +0,0 @@
---
toc_priority: 195
---

# uniqThetaSketch {#agg_function-uniqthetasketch}

Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).

``` sql
uniqThetaSketch(x[, ...])
```

**Arguments**

The function takes a variable number of parameters. Parameters can be `Tuple`, `Array`, `Date`, `DateTime`, `String`, or numeric types.

**Returned value**

- A [UInt64](../../../sql-reference/data-types/int-uint.md)-type number.

**Implementation details**

Function:

- Calculates a hash for all parameters in the aggregate, then uses it in calculations.

- Uses the [KMV](https://datasketches.apache.org/docs/Theta/InverseEstimate.html) algorithm to approximate the number of different argument values.

4096(2^12) 64-bit sketch are used. The size of the state is about 41 KB.

- The relative error is 3.125% (95% confidence), see the [relative error table](https://datasketches.apache.org/docs/Theta/ThetaErrorTable.html) for detail.

**See Also**

- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
@ -2,6 +2,8 @@
`SimpleAggregateFunction(name, types_of_arguments…)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we don’t have to store and process any extra data.

The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.

The following aggregate functions are supported:

- [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)
@ -18,8 +20,6 @@ The following aggregate functions are supported:
- [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap)
- [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap)
- [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap)
- [`argMin`](../../sql-reference/aggregate-functions/reference/argmin.md)
- [`argMax`](../../sql-reference/aggregate-functions/reference/argmax.md)

!!! note "Note"
@ -16,46 +16,60 @@ The following assumptions are made:
## visitParamHas(params, name) {#visitparamhasparams-name}

Checks whether there is a field with the ‘name’ name.
Checks whether there is a field with the `name` name.

Alias: `simpleJSONHas`.

## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}

Parses UInt64 from the value of the field named ‘name’. If this is a string field, it tries to parse a number from the beginning of the string. If the field doesn’t exist, or it exists but doesn’t contain a number, it returns 0.
Parses UInt64 from the value of the field named `name`. If this is a string field, it tries to parse a number from the beginning of the string. If the field doesn’t exist, or it exists but doesn’t contain a number, it returns 0.

Alias: `simpleJSONExtractUInt`.

## visitParamExtractInt(params, name) {#visitparamextractintparams-name}

The same as for Int64.

Alias: `simpleJSONExtractInt`.

## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name}

The same as for Float64.

Alias: `simpleJSONExtractFloat`.

## visitParamExtractBool(params, name) {#visitparamextractboolparams-name}

Parses a true/false value. The result is UInt8.

Alias: `simpleJSONExtractBool`.

## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name}

Returns the value of a field, including separators.

Alias: `simpleJSONExtractRaw`.

Examples:

``` sql
visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'
visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'
visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"';
visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}';
```

## visitParamExtractString(params, name) {#visitparamextractstringparams-name}

Parses the string in double quotes. The value is unescaped. If unescaping failed, it returns an empty string.

Alias: `simpleJSONExtractString`.

Examples:

``` sql
visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''
visitParamExtractString('{"abc":"hello}', 'abc') = ''
visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0';
visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺';
visitParamExtractString('{"abc":"\\u263"}', 'abc') = '';
visitParamExtractString('{"abc":"hello}', 'abc') = '';
```

There is currently no support for code points in the format `\uXXXX\uYYYY` that are not from the basic multilingual plane (they are converted to CESU-8 instead of UTF-8).
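A small, self-contained sketch of the `visitParam*` family applied to an inline JSON string:

``` sql
SELECT
    visitParamHas('{"abc":{"def":[1,2,3]}}', 'abc')        AS has_abc,   -- returns 1
    visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') AS raw_abc;   -- returns '{"def":[1,2,3]}'
```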
@ -1192,6 +1192,109 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
└──────────────────────────────────────────┘
```

## indexHint {#indexhint}
The function is intended for debugging and introspection purposes. The function ignores its argument and always returns 1. Arguments are not even evaluated.

But for the purpose of index analysis, the argument of this function is analyzed as if it was present directly without being wrapped inside `indexHint` function. This allows selecting data in index ranges by the corresponding condition but without further filtering by this condition. The index in ClickHouse is sparse and using `indexHint` will yield more data than specifying the same condition directly.

**Syntax**

```sql
SELECT * FROM table WHERE indexHint(<expression>)
```

**Returned value**

1. Type: [Uint8](https://clickhouse.yandex/docs/en/data_types/int_uint/#diapazony-uint).

**Example**

Here is the example of test data from the table [ontime](../../getting-started/example-datasets/ontime.md).

Input table:

```sql
SELECT count() FROM ontime
```

```text
┌─count()─┐
│ 4276457 │
└─────────┘
```

The table has indexes on the fields `(FlightDate, (Year, FlightDate))`.

Create a query, where the index is not used.

Query:

```sql
SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
```

ClickHouse processed the entire table (`Processed 4.28 million rows`).

Result:

```text
┌──────────k─┬─count()─┐
│ 2017-01-01 │   13970 │
│ 2017-01-02 │   15882 │
........................
│ 2017-09-28 │   16411 │
│ 2017-09-29 │   16384 │
│ 2017-09-30 │   12520 │
└────────────┴─────────┘
```

To apply the index, select a specific date.

Query:

```sql
SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
```

By using the index, ClickHouse processed a significantly smaller number of rows (`Processed 32.74 thousand rows`).

Result:

```text
┌──────────k─┬─count()─┐
│ 2017-09-15 │   16428 │
└────────────┴─────────┘
```

Now wrap the expression `k = '2017-09-15'` into `indexHint` function.

Query:

```sql
SELECT
    FlightDate AS k,
    count()
FROM ontime
WHERE indexHint(k = '2017-09-15')
GROUP BY k
ORDER BY k ASC
```

ClickHouse used the index in the same way as the previous time (`Processed 32.74 thousand rows`).
The expression `k = '2017-09-15'` was not used when generating the result.
In this example, the `indexHint` function allows us to see adjacent dates.

Result:

```text
┌──────────k─┬─count()─┐
│ 2017-09-14 │    7071 │
│ 2017-09-15 │   16428 │
│ 2017-09-16 │    1077 │
│ 2017-09-30 │    8167 │
└────────────┴─────────┘
```

## replicate {#other-functions-replicate}

Creates an array with a single value.
@ -88,12 +88,10 @@ Read more about setting the partition expression in a section [How to specify th
This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.

If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own
`detached` folder, it attaches the data without fetching it from other replicas.
If the non-initiator replica, receiving the attach command, finds the part with the correct checksums in its own `detached` folder, it attaches the data without fetching it from other replicas.
If there is no part with the correct checksums, the data is downloaded from any replica having the part.

You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the
table on all replicas.
You can put data to the `detached` directory on one replica and use the `ALTER ... ATTACH` query to add it to the table on all replicas.

## ATTACH PARTITION FROM {#alter_attach-partition-from}
@ -101,8 +99,8 @@ table on all replicas.
ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
```

This query copies the data partition from the `table1` to `table2`.
Note that data won't be deleted neither from `table1` nor from `table2`.
This query copies the data partition from `table1` to `table2`.
Note that data will be deleted neither from `table1` nor from `table2`.

For the query to run successfully, the following conditions must be met:
@ -264,9 +264,7 @@ Wait until a `ReplicatedMergeTree` table will be synced with other replicas in a
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
```

After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from
the common replicated log into its own replication queue, and then the query waits till the replica processes all
of the fetched commands.
After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands.

### RESTART REPLICA {#query_language-system-restart-replica}
@ -18,7 +18,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compres
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [here](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
- `format` — The [format](../../interfaces/formats.md#formats) of the file.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
- `compression` — Parameter is optional. Supported values: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. By default, it will autodetect compression by file extension.
- `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.

**Returned value**
@ -19,28 +19,17 @@ $ sudo apt-get install git cmake python ninja-build
On older systems, use cmake3 instead of cmake.

## Install GCC 9 {#install-gcc-10}
## Install Clang 11

There are several ways to do this.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

### Install from a PPA Package {#install-from-a-ppa-package}

``` bash
$ sudo apt-get install software-properties-common
$ sudo apt-add-repository ppa:ubuntu-toolchain-r/test
$ sudo apt-get update
$ sudo apt-get install gcc-10 g++-10
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

### Install from Sources {#install-from-sources}

See [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)

## Use GCC 9 for Builds {#use-gcc-10-for-builds}

``` bash
$ export CC=gcc-10
$ export CXX=g++-10
$ export CC=clang
$ export CXX=clang++
```

## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
@ -76,7 +65,7 @@ $ cd ..
- Git (used only to check out the sources, not needed for the build itself)
- CMake 3.10 or newer
- Ninja (recommended) or Make
- C++ compiler: gcc 9 or clang 8 or newer
- C++ compiler: clang 11 or newer
- Linker: lld or gold (the classic GNU ld won't work)
- Python (only used inside the LLVM build, and it is optional)
@ -133,19 +133,19 @@ ArchまたはGentooを使用する場合は、おそらくCMakeのインスト
ClickHouse uses several external libraries for building. All of them are built together with ClickHouse from the sources located in submodules, so you do not need to install them separately. The list can be checked in `contrib`.

# C++ Compiler {#c-compiler}
## C++ Compiler {#c-compiler}

GCC starting from version 9 and Clang starting from version 8 are supported for building ClickHouse.
Compilers Clang starting from version 11 is supported for building ClickHouse.

Official Yandex builds use GCC because it generates machine code with slightly better performance (up to several percent according to our benchmarks). Clang is usually more convenient for development. Our continuous integration (CI) platform runs checks for about a dozen build combinations.
Clang should be used instead of gcc. Though, our continuous integration (CI) platform runs checks for about a dozen of build combinations.

To install GCC on Ubuntu: `sudo apt install gcc g++`
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

Check the gcc version: `gcc --version`. If it is below 9, follow the instructions here: https://clickhouse.tech/docs/ja/development/build/#install-gcc-10.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

The Mac OS X build is supported only for Clang. Just run `brew install llvm`

If you use Clang, you can also install `libc++` and `lld`, if you know what they are. Using `ccache` is also recommended.
Mac OS X build is also supported. Just run `brew install llvm`

# The Building Process {#the-building-process}
@ -158,13 +158,6 @@ ClickHouseを構築する準備ができたので、別のディレクトリを
While inside the `build` directory, configure the build by running cmake. Before the first run, you need to define environment variables that specify the compiler (the gcc 9 compiler in this example).

Linux:

export CC=gcc-10 CXX=g++-10
cmake ..

Mac OS X:

export CC=clang CXX=clang++
cmake ..
@ -136,18 +136,18 @@ ClickHouse использует для сборки некоторое коли
## C++ Compiler {#kompiliator-c}

GCC starting from version 9 or Clang starting from version 8 is supported as the C++ compiler.
Clang starting from version 11 is supported as the C++ compiler.

Official Yandex builds currently use GCC, because it generates slightly faster machine code (on average up to several percent according to our benchmarks). Clang is usually more convenient for development. That said, our continuous integration environment checks about a dozen build variants.
That said, our continuous integration environment checks about a dozen build variants, including gcc, but a build made with gcc is unsuitable for production use.

To install GCC on Ubuntu, run: `sudo apt install gcc g++`.
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))

Check the gcc version: `gcc --version`. If it is below 10, follow the instructions here: https://clickhouse.tech/docs/ru/development/build/#install-gcc-10.
```bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
```

The Mac OS X build is supported only for the Clang compiler. To install it, run `brew install llvm`

If you decide to use Clang, you can also install `libc++` and `lld`, if you know what they are. Optionally, install `ccache`.

## The Building Process {#protsess-sborki}

Now you are ready to build ClickHouse. To hold the build artifacts, it is recommended to create a separate `build` directory inside the ClickHouse directory:
@ -158,14 +158,7 @@ ClickHouse использует для сборки некоторое коли
|
||||
Вы можете иметь несколько разных директорий (build_release, build_debug) для разных вариантов сборки.
|
||||
|
||||
Находясь в директории build, выполните конфигурацию сборки с помощью CMake.
|
||||
Перед первым запуском необходимо выставить переменные окружения, отвечающие за выбор компилятора (в данном примере это - gcc версии 9).
|
||||
|
||||
Linux:
|
||||
|
||||
export CC=gcc-10 CXX=g++-10
|
||||
cmake ..
|
||||
|
||||
Mac OS X:
|
||||
Перед первым запуском необходимо выставить переменные окружения, отвечающие за выбор компилятора.
|
||||
|
||||
export CC=clang CXX=clang++
|
||||
cmake ..
|
||||
|
@ -747,7 +747,7 @@ The dictionary is configured incorrectly.
|
||||
Есть два основных варианта проверки на такие ошибки:
|
||||
|
||||
* Исключение с кодом `LOGICAL_ERROR`. Его можно использовать для важных проверок, которые делаются в том числе в релизной сборке.
|
||||
* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок.
|
||||
* `assert`. Такие условия не проверяются в релизной сборке, можно использовать для тяжёлых и опциональных проверок.
|
||||
|
||||
Пример сообщения, у которого должен быть код `LOGICAL_ERROR`:
|
||||
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
|
||||
@ -780,7 +780,7 @@ The dictionary is configured incorrectly.
|
||||
|
||||
**2.** Язык - C++20 (см. список доступных [C++20 фич](https://en.cppreference.com/w/cpp/compiler_support#C.2B.2B20_features)).
|
||||
|
||||
**3.** Компилятор - `gcc`. На данный момент (август 2020), код собирается версией 9.3. (Также код может быть собран `clang` версий 10 и 9)
|
||||
**3.** Компилятор - `clang`. На данный момент (апрель 2021), код собирается версией 11. (Также код может быть собран `gcc` версии 10, но такая сборка не тестируется и непригодна для продакшена).
|
||||
|
||||
Используется стандартная библиотека (реализация `libc++`).
|
||||
|
||||
|
@ -48,6 +48,14 @@ toc_title: "Введение"
|
||||
|
||||
Движки семейства:
|
||||
|
||||
- [Kafka](integrations/kafka.md#kafka)
|
||||
- [MySQL](integrations/mysql.md#mysql)
|
||||
- [ODBC](integrations/odbc.md#table-engine-odbc)
|
||||
- [JDBC](integrations/jdbc.md#table-engine-jdbc)
|
||||
- [S3](integrations/s3.md#table-engine-s3)
|
||||
|
||||
### Специальные движки {#spetsialnye-dvizhki}
|
||||
|
||||
- [ODBC](../../engines/table-engines/integrations/odbc.md)
|
||||
- [JDBC](../../engines/table-engines/integrations/jdbc.md)
|
||||
- [MySQL](../../engines/table-engines/integrations/mysql.md)
|
||||
@ -84,4 +92,3 @@ toc_title: "Введение"
|
||||
Чтобы получить данные из виртуального столбца, необходимо указать его название в запросе `SELECT`. `SELECT *` не отображает данные из виртуальных столбцов.
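Ниже приведён примерный запрос (набросок; таблица `kafka_queue` на движке Kafka взята только для иллюстрации): виртуальные столбцы нужно перечислять явно.

``` sql
-- Виртуальные столбцы движка Kafka (_topic, _offset) не попадают в SELECT *
SELECT _topic, _offset, * FROM kafka_queue LIMIT 5;
```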
|
||||
|
||||
При создании таблицы со столбцом, имя которого совпадает с именем одного из виртуальных столбцов таблицы, виртуальный столбец становится недоступным. Не делайте так. Чтобы помочь избежать конфликтов, имена виртуальных столбцов обычно предваряются подчеркиванием.
|
||||
|
||||
|
@ -19,7 +19,7 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure,
|
||||
- `path` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные знаки в режиме "только чтение": `*`, `?`, `{abc,def}` и `{N..M}` где `N`, `M` — числа, `'abc'`, `'def'` — строки. Подробнее смотри [ниже](#wildcards-in-path).
|
||||
- `format` — [формат](../../../interfaces/formats.md#formats) файла.
|
||||
- `structure` — структура таблицы в формате `'column1_name column1_type, column2_name column2_type, ...'`.
|
||||
- `compression` — тип сжатия. Возможные значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Необязательный параметр. Если не указано, то тип сжатия определяется автоматически по расширению файла.
|
||||
- `compression` — тип сжатия. Возможные значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Необязательный параметр. Если не указано, то тип сжатия определяется автоматически по расширению файла.
|
||||
|
||||
**Пример**
|
||||
|
||||
@ -73,17 +73,17 @@ SELECT * FROM s3_engine_table LIMIT 2;
|
||||
|
||||
Соображение безопасности: если злонамеренный пользователь попробует указать произвольные URL-адреса S3, параметр `s3_max_redirects` должен быть установлен в ноль, чтобы избежать атак [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery). Как альтернатива, в конфигурации сервера должен быть указан `remote_host_filter`.
|
||||
|
||||
## Настройки конечных точек {#endpoint-settings}
|
||||
## Настройки точки приема запроса {#endpoint-settings}
|
||||
|
||||
Для конечной точки (которая соответствует точному префиксу URL-адреса) в конфигурационном файле могут быть заданы следующие настройки:
|
||||
Для точки приема запроса (которая соответствует точному префиксу URL-адреса) в конфигурационном файле могут быть заданы следующие настройки:
|
||||
|
||||
Обязательная настройка:
|
||||
- `endpoint` — указывает префикс конечной точки.
|
||||
- `endpoint` — указывает префикс точки приема запроса.
|
||||
|
||||
Необязательные настройки:
|
||||
- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной конечной точкой.
|
||||
- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных Amazon EC2 для данной конечной точки. Значение по умолчанию - `false`.
|
||||
- `header` — добавляет указанный HTTP-заголовок к запросу на заданную конечную точку. Может быть определен несколько раз.
|
||||
- `access_key_id` и `secret_access_key` — указывают учетные данные для использования с данной точкой приема запроса.
|
||||
- `use_environment_credentials` — если `true`, S3-клиент будет пытаться получить учетные данные из переменных среды и метаданных Amazon EC2 для данной точки приема запроса. Значение по умолчанию - `false`.
|
||||
- `header` — добавляет указанный HTTP-заголовок к запросу на заданную точку приема запроса. Может быть определен несколько раз.
|
||||
- `server_side_encryption_customer_key_base64` — устанавливает необходимые заголовки для доступа к объектам S3 с шифрованием SSE-C.
|
||||
|
||||
**Пример**
|
||||
@ -133,8 +133,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
|
||||
```
|
||||
|
||||
!!! warning "Warning"
|
||||
Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`.
|
||||
Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`.
|
||||
|
||||
4. Создание таблицы из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
|
||||
|
||||
@ -145,6 +144,3 @@ ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-
|
||||
**Смотрите также**
|
||||
|
||||
- [Табличная функция S3](../../../sql-reference/table-functions/s3.md)
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/s3/) <!--hide-->
|
||||
|
||||
|
@ -844,8 +844,6 @@ SELECT type, query FROM system.query_log WHERE log_comment = 'log_comment test'
|
||||
|
||||
Значение по умолчанию: количество процессорных ядер без учёта Hyper-Threading.
|
||||
|
||||
Если на сервере обычно исполняется менее одного запроса SELECT одновременно, то выставите этот параметр в значение чуть меньше количества реальных процессорных ядер.
|
||||
|
||||
Для запросов, которые быстро завершаются из-за LIMIT-а, имеет смысл выставить max_threads поменьше. Например, если нужное количество записей находится в каждом блоке, то при max_threads = 8 будет считано 8 блоков, хотя достаточно было прочитать один.
|
||||
|
||||
Чем меньше `max_threads`, тем меньше будет использоваться оперативки.
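Примерная иллюстрация (набросок; имя таблицы `hits` и условие взяты только для примера): для короткого запроса с LIMIT число потоков можно ограничить на уровне самого запроса.

``` sql
SELECT * FROM hits WHERE CounterID = 62 LIMIT 10
SETTINGS max_threads = 1;
```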
|
||||
|
@ -14,7 +14,17 @@
|
||||
|
||||
- `node_name` ([String](../../sql-reference/data-types/string.md)) — имя узла в ZooKeeper.
|
||||
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — тип задачи в очереди: `GET_PARTS`, `MERGE_PARTS`, `DETACH_PARTS`, `DROP_PARTS` или `MUTATE_PARTS`.
|
||||
- `type` ([String](../../sql-reference/data-types/string.md)) — тип задачи в очереди:
|
||||
|
||||
- `GET_PART` — скачать кусок с другой реплики.
|
||||
- `ATTACH_PART` — присоединить кусок. Задача может быть выполнена и с куском из нашей собственной реплики (если он находится в папке `detached`). Эта задача практически идентична задаче `GET_PART`, лишь немного оптимизирована.
|
||||
- `MERGE_PARTS` — выполнить слияние кусков.
|
||||
- `DROP_RANGE` — удалить куски в партициях из указанного диапазона.
|
||||
- `CLEAR_COLUMN` — удалить указанный столбец из указанной партиции. Примечание: не используется с 20.4.
|
||||
- `CLEAR_INDEX` — удалить указанный индекс из указанной партиции. Примечание: не используется с 20.4.
|
||||
- `REPLACE_RANGE` — удалить указанный диапазон кусков и заменить их на новые.
|
||||
- `MUTATE_PART` — применить одну или несколько мутаций к куску.
|
||||
- `ALTER_METADATA` — применить изменения структуры таблицы в результате запросов с выражением `ALTER`.
|
||||
|
||||
- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — дата и время отправки задачи на выполнение.
|
||||
|
||||
@ -77,4 +87,3 @@ last_postpone_time: 1970-01-01 03:00:00
|
||||
**Смотрите также**
|
||||
|
||||
- [Управление таблицами ReplicatedMergeTree](../../sql-reference/statements/system.md#query-language-system-replicated)
|
||||
|
||||
|
@ -18,10 +18,12 @@ ClickHouse создает эту таблицу когда утсановлен
|
||||
|
||||
Во время соединения с сервером через `clickhouse-client`, вы видите строку похожую на `Connected to ClickHouse server version 19.18.1 revision 54429.`. Это поле содержит номер после `revision`, но не содержит строку после `version`.
|
||||
|
||||
- `timer_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип таймера:
|
||||
- `trace_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип трассировки:
|
||||
|
||||
- `Real` означает wall-clock время.
|
||||
- `CPU` означает относительное CPU время.
|
||||
- `Real` — сбор трассировок стека адресов вызова по времени wall-clock.
|
||||
- `CPU` — сбор трассировок стека адресов вызова по времени CPU.
|
||||
- `Memory` — сбор выделенной памяти, когда ее размер превышает относительный инкремент.
|
||||
- `MemorySample` — сбор случайно выделенной памяти.
|
||||
|
||||
- `thread_number`([UInt32](../../sql-reference/data-types/int-uint.md)) — идентификатор треда.
|
||||
|
||||
|
@ -29,5 +29,3 @@ $ sudo apt-get update
|
||||
$ sudo apt-get install clickhouse-server=xx.yy.a.b clickhouse-client=xx.yy.a.b clickhouse-common-static=xx.yy.a.b
|
||||
$ sudo service clickhouse-server restart
|
||||
```
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/update/) <!--hide-->
|
||||
|
@ -27,6 +27,40 @@ toc_title: "Комбинаторы агрегатных функций"
|
||||
|
||||
Комбинаторы -If и -Array можно сочетать. При этом, должен сначала идти Array, а потом If. Примеры: `uniqArrayIf(arr, cond)`, `quantilesTimingArrayIf(level1, level2)(arr, cond)`. Из-за такого порядка получается, что аргумент cond не должен быть массивом.
|
||||
|
||||
## -SimpleState {#agg-functions-combinator-simplestate}
|
||||
|
||||
При использовании этого комбинатора агрегатная функция возвращает то же значение, но типа [SimpleAggregateFunction(...)](../../sql-reference/data-types/simpleaggregatefunction.md). Текущее значение функции может храниться в таблице для последующей работы с таблицами семейства [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
<aggFunction>SimpleState(x)
|
||||
```
|
||||
|
||||
**Аргументы**
|
||||
|
||||
- `x` — параметры агрегатной функции.
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Значение агрегатной функции типа `SimpleAggregateFunction(...)`.
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─toTypeName(c)────────────────────────┬─c─┐
|
||||
│ SimpleAggregateFunction(any, UInt64) │ 0 │
|
||||
└──────────────────────────────────────┴───┘
|
||||
```
|
||||
|
||||
## -State {#state}
|
||||
|
||||
В случае применения этого комбинатора, агрегатная функция возвращает не готовое значение (например, в случае функции [uniq](reference/uniq.md#agg_function-uniq) — количество уникальных значений), а промежуточное состояние агрегации (например, в случае функции `uniq` — хэш-таблицу для расчёта количества уникальных значений), которое имеет тип `AggregateFunction(...)` и может использоваться для дальнейшей обработки или может быть сохранено в таблицу для последующей доагрегации.
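Небольшой пример (набросок, не из исходного текста): промежуточное состояние, полученное с помощью `uniqState`, доагрегируется функцией с суффиксом `-Merge`.

``` sql
SELECT uniqMerge(s) AS u
FROM
(
    SELECT uniqState(number) AS s FROM numbers(1000)
);
```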
|
||||
@ -247,4 +281,3 @@ FROM people
|
||||
│ [3,2] │ [11.5,12.949999809265137] │
|
||||
└────────┴───────────────────────────┘
|
||||
```
|
||||
|
||||
|
@ -3,6 +3,8 @@
|
||||
Хранит только текущее значение агрегатной функции и не сохраняет ее полное состояние, как это делает [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md). Такая оптимизация может быть применена к функциям, которые обладают следующим свойством: результат выполнения функции `f` к набору строк `S1 UNION ALL S2` может быть получен путем выполнения `f` к отдельным частям набора строк,
|
||||
а затем повторного выполнения `f` к результатам: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. Это свойство гарантирует, что результатов частичной агрегации достаточно для вычисления комбинированной, поэтому хранить и обрабатывать какие-либо дополнительные данные не требуется.
|
||||
|
||||
Чтобы получить промежуточное значение, обычно используются агрегатные функции с суффиксом [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate).
|
||||
|
||||
Поддерживаются следующие агрегатные функции:
|
||||
|
||||
- [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)
|
||||
|
@ -16,51 +16,65 @@ toc_title: JSON
|
||||
|
||||
## visitParamHas(params, name) {#visitparamhasparams-name}
|
||||
|
||||
Проверить наличие поля с именем name.
|
||||
Проверяет наличие поля с именем `name`.
|
||||
|
||||
Алиас: `simpleJSONHas`.
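Небольшой пример использования (значения условные):

``` sql
SELECT simpleJSONHas('{"abc":"def"}', 'abc') AS has_abc,
       simpleJSONHas('{"abc":"def"}', 'xyz') AS has_xyz;
-- has_abc = 1, has_xyz = 0
```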
|
||||
|
||||
## visitParamExtractUInt(params, name) {#visitparamextractuintparams-name}
|
||||
|
||||
Распарсить UInt64 из значения поля с именем name. Если поле строковое - попытаться распарсить число из начала строки. Если такого поля нет, или если оно есть, но содержит не число, то вернуть 0.
|
||||
Пытается выделить число типа UInt64 из значения поля с именем `name`. Если поле строковое, пытается выделить число из начала строки. Если такого поля нет, или если оно есть, но содержит не число, то возвращает 0.
|
||||
|
||||
Алиас: `simpleJSONExtractUInt`.
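Пример (значения условные):

``` sql
SELECT simpleJSONExtractUInt('{"abc":42}', 'abc') AS v;
-- v = 42; для строкового поля число выделяется из начала строки, иначе возвращается 0
```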
|
||||
|
||||
## visitParamExtractInt(params, name) {#visitparamextractintparams-name}
|
||||
|
||||
Аналогично для Int64.
|
||||
|
||||
Алиас: `simpleJSONExtractInt`.
|
||||
|
||||
## visitParamExtractFloat(params, name) {#visitparamextractfloatparams-name}
|
||||
|
||||
Аналогично для Float64.
|
||||
|
||||
Алиас: `simpleJSONExtractFloat`.
|
||||
|
||||
## visitParamExtractBool(params, name) {#visitparamextractboolparams-name}
|
||||
|
||||
Распарсить значение true/false. Результат - UInt8.
|
||||
Пытается выделить значение true/false. Результат — UInt8.
|
||||
|
||||
Алиас: `simpleJSONExtractBool`.
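Пример (значения условные):

``` sql
SELECT simpleJSONExtractBool('{"enabled":true}', 'enabled') AS b;
-- b = 1
```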
|
||||
|
||||
## visitParamExtractRaw(params, name) {#visitparamextractrawparams-name}
|
||||
|
||||
Вернуть значение поля, включая разделители.
|
||||
Возвращает значение поля, включая разделители.
|
||||
|
||||
Алиас: `simpleJSONExtractRaw`.
|
||||
|
||||
Примеры:
|
||||
|
||||
``` sql
|
||||
visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"'
|
||||
visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}'
|
||||
visitParamExtractRaw('{"abc":"\\n\\u0000"}', 'abc') = '"\\n\\u0000"';
|
||||
visitParamExtractRaw('{"abc":{"def":[1,2,3]}}', 'abc') = '{"def":[1,2,3]}';
|
||||
```
|
||||
|
||||
## visitParamExtractString(params, name) {#visitparamextractstringparams-name}
|
||||
|
||||
Распарсить строку в двойных кавычках. У значения убирается экранирование. Если убрать экранированные символы не удалось, то возвращается пустая строка.
|
||||
Разбирает строку в двойных кавычках. У значения убирается экранирование. Если убрать экранированные символы не удалось, то возвращается пустая строка.
|
||||
|
||||
Алиас: `simpleJSONExtractString`.
|
||||
|
||||
Примеры:
|
||||
|
||||
``` sql
|
||||
visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0'
|
||||
visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺'
|
||||
visitParamExtractString('{"abc":"\\u263"}', 'abc') = ''
|
||||
visitParamExtractString('{"abc":"hello}', 'abc') = ''
|
||||
visitParamExtractString('{"abc":"\\n\\u0000"}', 'abc') = '\n\0';
|
||||
visitParamExtractString('{"abc":"\\u263a"}', 'abc') = '☺';
|
||||
visitParamExtractString('{"abc":"\\u263"}', 'abc') = '';
|
||||
visitParamExtractString('{"abc":"hello}', 'abc') = '';
|
||||
```
|
||||
|
||||
На данный момент, не поддерживаются записанные в формате `\uXXXX\uYYYY` кодовые точки не из basic multilingual plane (они переводятся не в UTF-8, а в CESU-8).
|
||||
На данный момент не поддерживаются записанные в формате `\uXXXX\uYYYY` кодовые точки не из basic multilingual plane (они переводятся не в UTF-8, а в CESU-8).
|
||||
|
||||
Следующие функции используют [simdjson](https://github.com/lemire/simdjson) который разработан под более сложные требования для разбора JSON. Упомянутое выше предположение 2 по-прежнему применимо.
|
||||
Следующие функции используют [simdjson](https://github.com/lemire/simdjson), который разработан под более сложные требования для разбора JSON. Упомянутое выше допущение 2 по-прежнему применимо.
|
||||
|
||||
## isValidJSON(json) {#isvalidjsonjson}
|
||||
|
||||
@ -292,4 +306,3 @@ SELECT JSONExtractKeysAndValuesRaw('{"a": [-100, 200.0], "b":{"c": {"d": "hello"
|
||||
│ [('d','"hello"'),('f','"world"')] │
|
||||
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
|
@ -1133,6 +1133,111 @@ SELECT defaultValueOfTypeName('Nullable(Int8)')
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## indexHint {#indexhint}
|
||||
Возвращает все данные из диапазона, в который попадают данные, соответствующие указанному выражению.
|
||||
Переданное выражение не будет вычислено. Выбор диапазона производится по индексу.
|
||||
Индекс в ClickHouse разреженный, при чтении диапазона в ответ попадают «лишние» соседние данные.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
```sql
|
||||
SELECT * FROM table WHERE indexHint(<expression>)
|
||||
```
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
Возвращает диапазон индекса, в котором выполняется заданное условие.
|
||||
|
||||
Тип: [UInt8](https://clickhouse.yandex/docs/ru/data_types/int_uint/#diapazony-uint).
|
||||
|
||||
**Пример**
|
||||
|
||||
Рассмотрим пример с использованием тестовых данных таблицы [ontime](../../getting-started/example-datasets/ontime.md).
|
||||
|
||||
Исходная таблица:
|
||||
|
||||
```sql
|
||||
SELECT count() FROM ontime
|
||||
```
|
||||
|
||||
```text
|
||||
┌─count()─┐
|
||||
│ 4276457 │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
В таблице есть индексы по полям `(FlightDate, (Year, FlightDate))`.
|
||||
|
||||
Выполним выборку по дате, где индекс не используется.
|
||||
|
||||
Запрос:
|
||||
|
||||
```sql
|
||||
SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
|
||||
```
|
||||
|
||||
ClickHouse обработал всю таблицу (`Processed 4.28 million rows`).
|
||||
|
||||
Результат:
|
||||
|
||||
```text
|
||||
┌──────────k─┬─count()─┐
|
||||
│ 2017-01-01 │ 13970 │
|
||||
│ 2017-01-02 │ 15882 │
|
||||
........................
|
||||
│ 2017-09-28 │ 16411 │
|
||||
│ 2017-09-29 │ 16384 │
|
||||
│ 2017-09-30 │ 12520 │
|
||||
└────────────┴─────────┘
|
||||
```
|
||||
|
||||
Для подключения индекса выбираем конкретную дату.
|
||||
|
||||
Запрос:
|
||||
|
||||
```sql
|
||||
SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
|
||||
```
|
||||
|
||||
При использовании индекса ClickHouse обработал значительно меньшее количество строк (`Processed 32.74 thousand rows`).
|
||||
|
||||
Результат:
|
||||
|
||||
```text
|
||||
┌──────────k─┬─count()─┐
|
||||
│ 2017-09-15 │ 16428 │
|
||||
└────────────┴─────────┘
|
||||
```
|
||||
|
||||
Передадим в функцию `indexHint` выражение `k = '2017-09-15'`.
|
||||
|
||||
Запрос:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
FlightDate AS k,
|
||||
count()
|
||||
FROM ontime
|
||||
WHERE indexHint(k = '2017-09-15')
|
||||
GROUP BY k
|
||||
ORDER BY k ASC
|
||||
```
|
||||
|
||||
ClickHouse применил индекс по аналогии с примером выше (`Processed 32.74 thousand rows`).
|
||||
Выражение `k = '2017-09-15'` не используется при формировании результата.
|
||||
Функция `indexHint` позволяет увидеть соседние данные.
|
||||
|
||||
Результат:
|
||||
|
||||
```text
|
||||
┌──────────k─┬─count()─┐
|
||||
│ 2017-09-14 │ 7071 │
|
||||
│ 2017-09-15 │ 16428 │
|
||||
│ 2017-09-16 │ 1077 │
|
||||
│ 2017-09-30 │ 8167 │
|
||||
└────────────┴─────────┘
|
||||
```
|
||||
|
||||
## replicate {#other-functions-replicate}
|
||||
|
||||
Создает массив, заполненный одним значением.
|
||||
|
@ -38,7 +38,7 @@ ALTER TABLE mt DETACH PART 'all_2_2_0';
|
||||
|
||||
После того как запрос будет выполнен, вы сможете производить любые операции с данными в директории `detached`. Например, можно удалить их из файловой системы.
|
||||
|
||||
Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплики-лидера, запрос вернет ошибку.
|
||||
Запрос реплицируется — данные будут перенесены в директорию `detached` и забыты на всех репликах. Обратите внимание, запрос может быть отправлен только на реплику-лидер. Чтобы узнать, является ли реплика лидером, выполните запрос `SELECT` к системной таблице [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas). Либо можно выполнить запрос `DETACH` на всех репликах — тогда на всех репликах, кроме реплик-лидеров (поскольку допускается несколько лидеров), запрос вернет ошибку.
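Например, узнать, является ли реплика лидером, можно примерно таким запросом (набросок; имена базы данных и таблицы условные):

``` sql
SELECT is_leader FROM system.replicas
WHERE database = 'db' AND table = 'mt';
```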
|
||||
|
||||
## DROP PARTITION\|PART {#alter_drop-partition}
|
||||
|
||||
@ -83,9 +83,13 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
|
||||
|
||||
Как корректно задать имя партиции или куска, см. в разделе [Как задавать имя партиции в запросах ALTER](#alter-how-to-specify-part-expr).
|
||||
|
||||
Этот запрос реплицируется. Реплика-инициатор проверяет, есть ли данные в директории `detached`. Если данные есть, то запрос проверяет их целостность. В случае успеха данные добавляются в таблицу. Все остальные реплики загружают данные с реплики-инициатора запроса.
|
||||
Этот запрос реплицируется. Реплика-инициатор проверяет, есть ли данные в директории `detached`.
|
||||
Если данные есть, то запрос проверяет их целостность. В случае успеха данные добавляются в таблицу.
|
||||
|
||||
Это означает, что вы можете разместить данные в директории `detached` на одной реплике и с помощью запроса `ALTER ... ATTACH` добавить их в таблицу на всех репликах.
|
||||
Если реплика, не являющаяся инициатором запроса, получив команду присоединения, находит кусок с правильными контрольными суммами в своей собственной папке `detached`, она присоединяет данные, не скачивая их с других реплик.
|
||||
Если нет куска с правильными контрольными суммами, данные загружаются из любой реплики, имеющей этот кусок.
|
||||
|
||||
Вы можете поместить данные в директорию `detached` на одной реплике и с помощью запроса `ALTER ... ATTACH` добавить их в таблицу на всех репликах.
|
||||
|
||||
## ATTACH PARTITION FROM {#alter_attach-partition-from}
|
||||
|
||||
@ -93,7 +97,8 @@ ALTER TABLE visits ATTACH PART 201901_2_2_0;
|
||||
ALTER TABLE table2 ATTACH PARTITION partition_expr FROM table1
|
||||
```
|
||||
|
||||
Копирует партицию из таблицы `table1` в таблицу `table2` и добавляет к существующим данным `table2`. Данные из `table1` не удаляются.
|
||||
Копирует партицию из таблицы `table1` в таблицу `table2`.
|
||||
Обратите внимание, что данные не удаляются ни из `table1`, ни из `table2`.
|
||||
|
||||
Следует иметь в виду:
|
||||
|
||||
@ -305,4 +310,3 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
|
||||
`IN PARTITION` указывает на партицию, для которой применяются выражения [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) или [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) в результате запроса `ALTER TABLE`. Новые куски создаются только в указанной партиции. Таким образом, `IN PARTITION` помогает снизить нагрузку, когда таблица разбита на множество партиций, а вам нужно обновить данные лишь точечно.
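Примерный запрос (набросок; имя таблицы, столбцы и условие условные):

``` sql
-- Мутация затронет только куски в партиции 201901
ALTER TABLE visits UPDATE Sign = -1 IN PARTITION 201901 WHERE UserID = 123;
```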
|
||||
|
||||
Примеры запросов `ALTER ... PARTITION` можно посмотреть в тестах: [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) и [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql).
|
||||
|
||||
|
@ -204,6 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]
|
||||
ClickHouse может управлять фоновыми процессами связанными c репликацией в таблицах семейства [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md).
|
||||
|
||||
### STOP FETCHES {#query_language-system-stop-fetches}
|
||||
|
||||
Позволяет остановить фоновые процессы синхронизации новыми вставленными кусками данных с другими репликами в кластере для таблиц семейства `ReplicatedMergeTree`:
|
||||
Всегда возвращает `Ok.` вне зависимости от типа таблицы и даже если таблица или база данных не существует.
|
||||
|
||||
@ -212,6 +213,7 @@ SYSTEM STOP FETCHES [[db.]replicated_merge_tree_family_table_name]
|
||||
```
|
||||
|
||||
### START FETCHES {#query_language-system-start-fetches}
|
||||
|
||||
Позволяет запустить фоновые процессы синхронизации новыми вставленными кусками данных с другими репликами в кластере для таблиц семейства `ReplicatedMergeTree`:
|
||||
Всегда возвращает `Ok.` вне зависимости от типа таблицы и даже если таблица или база данных не существует.
|
||||
|
||||
@ -220,6 +222,7 @@ SYSTEM START FETCHES [[db.]replicated_merge_tree_family_table_name]
|
||||
```
|
||||
|
||||
### STOP REPLICATED SENDS {#query_language-system-start-replicated-sends}
|
||||
|
||||
Позволяет остановить фоновые процессы отсылки новых вставленных кусков данных другим репликам в кластере для таблиц семейства `ReplicatedMergeTree`:
|
||||
|
||||
``` sql
|
||||
@ -227,6 +230,7 @@ SYSTEM STOP REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
|
||||
```
|
||||
|
||||
### START REPLICATED SENDS {#query_language-system-start-replicated-sends}
|
||||
|
||||
Позволяет запустить фоновые процессы отсылки новых вставленных кусков данных другим репликам в кластере для таблиц семейства `ReplicatedMergeTree`:
|
||||
|
||||
``` sql
|
||||
@ -234,6 +238,7 @@ SYSTEM START REPLICATED SENDS [[db.]replicated_merge_tree_family_table_name]
|
||||
```
|
||||
|
||||
### STOP REPLICATION QUEUES {#query_language-system-stop-replication-queues}
|
||||
|
||||
Останавливает фоновые процессы разбора заданий (merges, fetches, mutations, DDL-запросы с ON CLUSTER) из очереди репликации, которая хранится в Zookeeper, для таблиц семейства `ReplicatedMergeTree`:
|
||||
|
||||
``` sql
|
||||
@ -241,6 +246,7 @@ SYSTEM STOP REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
|
||||
```
|
||||
|
||||
### START REPLICATION QUEUES {#query_language-system-start-replication-queues}
|
||||
|
||||
Запускает фоновые процессы разбора заданий (merges, fetches, mutations, DDL-запросы с ON CLUSTER) из очереди репликации, которая хранится в Zookeeper, для таблиц семейства `ReplicatedMergeTree`:
|
||||
|
||||
``` sql
|
||||
@ -248,20 +254,24 @@ SYSTEM START REPLICATION QUEUES [[db.]replicated_merge_tree_family_table_name]
|
||||
```
|
||||
|
||||
### SYNC REPLICA {#query_language-system-sync-replica}
|
||||
|
||||
Ждет когда таблица семейства `ReplicatedMergeTree` будет синхронизирована с другими репликами в кластере, будет работать до достижения `receive_timeout`, если синхронизация для таблицы отключена в настоящий момент времени:
|
||||
|
||||
``` sql
|
||||
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
|
||||
```
|
||||
|
||||
После выполнения этого запроса таблица `[db.]replicated_merge_tree_family_table_name` синхронизирует команды из общего реплицированного лога в свою собственную очередь репликации. Затем запрос ждет, пока реплика не обработает все синхронизированные команды.
|
||||
|
||||
### RESTART REPLICA {#query_language-system-restart-replica}
|
||||
Реинициализация состояния Zookeeper сессий для таблицы семейства `ReplicatedMergeTree`, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо
|
||||
Инициализация очереди репликации на основе данных ZooKeeper, происходит так же как при attach table. На короткое время таблица станет недоступной для любых операций.
|
||||
|
||||
Реинициализация состояния Zookeeper-сессий для таблицы семейства `ReplicatedMergeTree`. Сравнивает текущее состояние с тем, что хранится в Zookeeper, как источник правды, и добавляет задачи в очередь репликации в Zookeeper, если необходимо.
|
||||
Инициализация очереди репликации на основе данных ZooKeeper происходит так же, как при attach table. На короткое время таблица станет недоступной для любых операций.
|
||||
|
||||
``` sql
|
||||
SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
|
||||
```
|
||||
|
||||
### RESTART REPLICAS {#query_language-system-restart-replicas}
|
||||
Реинициализация состояния Zookeeper сессий для всех `ReplicatedMergeTree` таблиц, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо
|
||||
|
||||
Реинициализация состояния ZooKeeper-сессий для всех `ReplicatedMergeTree` таблиц. Сравнивает текущее состояние реплики с тем, что хранится в ZooKeeper, как c источником правды, и добавляет задачи в очередь репликации в ZooKeeper, если необходимо.
|
||||
|
@ -18,7 +18,7 @@ s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compres
|
||||
- `path` — URL-адрес бакета с указанием пути к файлу. Поддерживает следующие подстановочные знаки в режиме "только чтение": `*, ?, {abc,def} и {N..M}` где `N, M` — числа, `'abc', 'def'` — строки. Подробнее смотри [здесь](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
|
||||
- `format` — [формат](../../interfaces/formats.md#formats) файла.
|
||||
- `structure` — cтруктура таблицы. Формат `'column1_name column1_type, column2_name column2_type, ...'`.
|
||||
- `compression` — автоматически обнаруживает сжатие по расширению файла. Возможные значения: none, gzip/gz, brotli/br, xz/LZMA, zstd/zst. Необязательный параметр.
|
||||
- `compression` — автоматически обнаруживает сжатие по расширению файла. Возможные значения: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Необязательный параметр.
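Примерный вызов (набросок; URL бакета и структура данных условные):

``` sql
SELECT *
FROM s3('https://storage.yandexcloud.net/my-bucket/data.csv.gz',
        'CSV', 'name String, value UInt32', 'gzip')
LIMIT 2;
```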
|
||||
|
||||
**Возвращаемые значения**
|
||||
|
||||
|
@ -35,28 +35,12 @@ sudo apt-get install git cmake ninja-build
|
||||
或cmake3而不是旧系统上的cmake。
|
||||
或者在早期版本的系统中用 cmake3 替代 cmake
|
||||
|
||||
## 安装 GCC 10 {#an-zhuang-gcc-10}
|
||||
## 安装 Clang
|
||||
|
||||
有几种方法可以做到这一点。
|
||||
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
|
||||
|
||||
### 安装 PPA 包 {#an-zhuang-ppa-bao}
|
||||
|
||||
``` bash
|
||||
sudo apt-get install software-properties-common
|
||||
sudo apt-add-repository ppa:ubuntu-toolchain-r/test
|
||||
sudo apt-get update
|
||||
sudo apt-get install gcc-10 g++-10
|
||||
```
|
||||
|
||||
### 源码安装 gcc {#yuan-ma-an-zhuang-gcc}
|
||||
|
||||
请查看 [utils/ci/build-gcc-from-sources.sh](https://github.com/ClickHouse/ClickHouse/blob/master/utils/ci/build-gcc-from-sources.sh)
|
||||
|
||||
## 使用 GCC 10 来编译 {#shi-yong-gcc-10-lai-bian-yi}
|
||||
|
||||
``` bash
|
||||
export CC=gcc-10
|
||||
export CXX=g++-10
|
||||
```bash
|
||||
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
```
|
||||
|
||||
## 拉取 ClickHouse 源码 {#la-qu-clickhouse-yuan-ma-1}
|
||||
|
@ -123,17 +123,13 @@ ClickHouse使用多个外部库进行构建。大多数外部库不需要单独
|
||||
|
||||
# C++ 编译器 {#c-bian-yi-qi}
|
||||
|
||||
GCC编译器从版本9开始,以及Clang版本\>=8都可支持构建ClickHouse。
|
||||
We support clang starting from version 11.
|
||||
|
||||
Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性能较好(根据测评,最多可以相差几个百分点)。Clang通常可以更加便捷的开发。我们的持续集成(CI)平台会运行大约十二种构建组合的检查。
|
||||
On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/))
|
||||
|
||||
在Ubuntu上安装GCC,请执行:`sudo apt install gcc g++`
|
||||
|
||||
请使用`gcc --version`查看gcc的版本。如果gcc版本低于9,请参考此处的指示:https://clickhouse.tech/docs/zh/development/build/#an-zhuang-gcc-10 。
|
||||
|
||||
在Mac OS X上安装GCC,请执行:`brew install gcc`
|
||||
|
||||
如果您决定使用Clang,还可以同时安装 `libc++`以及`lld`,前提是您也熟悉它们。此外,也推荐使用`ccache`。
|
||||
```bash
|
||||
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
```
|
||||
|
||||
# 构建的过程 {#gou-jian-de-guo-cheng}
|
||||
|
||||
@ -146,7 +142,7 @@ Yandex官方当前使用GCC构建ClickHouse,因为它生成的机器代码性
|
||||
|
||||
在`build`目录下,通过运行CMake配置构建。 在第一次运行之前,请定义用于指定编译器的环境变量(本示例中为 clang 编译器)。
|
||||
|
||||
export CC=gcc-10 CXX=g++-10
|
||||
export CC=clang CXX=clang++
|
||||
cmake ..
|
||||
|
||||
`CC`变量指代C的编译器(C Compiler的缩写),而`CXX`变量指代要使用哪个C++编译器进行编译。
|
||||
|
@ -696,7 +696,7 @@ auto s = std::string{"Hello"};
|
||||
|
||||
**2.** 语言: C++20.
|
||||
|
||||
**3.** 编译器: `gcc`。 此时(2020年08月),代码使用9.3版编译。(它也可以使用`clang 8` 编译)
|
||||
**3.** 编译器: `clang`。 此时(2021年03月),代码使用11版编译。(它也可以使用 `gcc` 编译,但不适用于生产环境)
|
||||
|
||||
使用标准库 (`libc++`)。
|
||||
|
||||
|
@ -477,6 +477,103 @@ FROM
|
||||
|
||||
1 rows in set. Elapsed: 0.002 sec.
|
||||
|
||||
|
||||
## indexHint {#indexhint}
|
||||
输出符合索引选择范围内的所有数据,同时不使用参数中的表达式进行过滤。
|
||||
|
||||
传递给函数的表达式参数将不会被计算,但ClickHouse使用参数中的表达式进行索引过滤。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 1。
|
||||
|
||||
**示例**
|
||||
|
||||
这是一个包含[ontime](../../getting-started/example-datasets/ontime.md)测试数据集的测试表。
|
||||
|
||||
```
|
||||
SELECT count() FROM ontime
|
||||
|
||||
┌─count()─┐
|
||||
│ 4276457 │
|
||||
└─────────┘
|
||||
```
|
||||
|
||||
该表使用`(FlightDate, (Year, FlightDate))`作为索引。
|
||||
|
||||
对该表进行如下的查询:
|
||||
|
||||
```
|
||||
:) SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k
|
||||
|
||||
SELECT
|
||||
FlightDate AS k,
|
||||
count()
|
||||
FROM ontime
|
||||
GROUP BY k
|
||||
ORDER BY k ASC
|
||||
|
||||
┌──────────k─┬─count()─┐
|
||||
│ 2017-01-01 │ 13970 │
|
||||
│ 2017-01-02 │ 15882 │
|
||||
........................
|
||||
│ 2017-09-28 │ 16411 │
|
||||
│ 2017-09-29 │ 16384 │
|
||||
│ 2017-09-30 │ 12520 │
|
||||
└────────────┴─────────┘
|
||||
|
||||
273 rows in set. Elapsed: 0.072 sec. Processed 4.28 million rows, 8.55 MB (59.00 million rows/s., 118.01 MB/s.)
|
||||
```
|
||||
|
||||
在这个查询中,由于没有使用索引,所以ClickHouse将处理整个表的所有数据(`Processed 4.28 million rows`)。使用下面的查询尝试使用索引进行查询:
|
||||
|
||||
```
|
||||
:) SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k
|
||||
|
||||
SELECT
|
||||
FlightDate AS k,
|
||||
count()
|
||||
FROM ontime
|
||||
WHERE k = '2017-09-15'
|
||||
GROUP BY k
|
||||
ORDER BY k ASC
|
||||
|
||||
┌──────────k─┬─count()─┐
|
||||
│ 2017-09-15 │ 16428 │
|
||||
└────────────┴─────────┘
|
||||
|
||||
1 rows in set. Elapsed: 0.014 sec. Processed 32.74 thousand rows, 65.49 KB (2.31 million rows/s., 4.63 MB/s.)
|
||||
```
|
||||
|
||||
在最后一行的显示中,通过索引ClickHouse处理的行数明显减少(`Processed 32.74 thousand rows`)。
|
||||
|
||||
现在将表达式`k = '2017-09-15'`传递给`indexHint`函数:
|
||||
|
||||
```
|
||||
:) SELECT FlightDate AS k, count() FROM ontime WHERE indexHint(k = '2017-09-15') GROUP BY k ORDER BY k
|
||||
|
||||
SELECT
|
||||
FlightDate AS k,
|
||||
count()
|
||||
FROM ontime
|
||||
WHERE indexHint(k = '2017-09-15')
|
||||
GROUP BY k
|
||||
ORDER BY k ASC
|
||||
|
||||
┌──────────k─┬─count()─┐
|
||||
│ 2017-09-14 │ 7071 │
|
||||
│ 2017-09-15 │ 16428 │
|
||||
│ 2017-09-16 │ 1077 │
|
||||
│ 2017-09-30 │ 8167 │
|
||||
└────────────┴─────────┘
|
||||
|
||||
4 rows in set. Elapsed: 0.004 sec. Processed 32.74 thousand rows, 65.49 KB (8.97 million rows/s., 17.94 MB/s.)
|
||||
```
|
||||
|
||||
对于这个查询,从输出可以看到,ClickHouse以与上一次相同的方式应用了索引(`Processed 32.74 thousand rows`)。但是,最终返回的结果集并没有根据`k = '2017-09-15'`表达式进行过滤。
|
||||
|
||||
由于ClickHouse中使用稀疏索引,因此在读取范围时(本示例中为相邻日期),"额外"的数据将包含在索引结果中。使用`indexHint`函数可以查看到它们。
|
||||
|
||||
## 复制 {#replicate}
|
||||
|
||||
使用单个值填充一个数组。
|
||||
|
@ -33,8 +33,12 @@ option (ENABLE_CLICKHOUSE_OBFUSCATOR "Table data obfuscator (convert real data t
|
||||
${ENABLE_CLICKHOUSE_ALL})
|
||||
|
||||
# https://clickhouse.tech/docs/en/operations/utilities/odbc-bridge/
|
||||
option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver"
|
||||
${ENABLE_CLICKHOUSE_ALL})
|
||||
if (ENABLE_ODBC)
|
||||
option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver"
|
||||
${ENABLE_CLICKHOUSE_ALL})
|
||||
else ()
|
||||
option (ENABLE_CLICKHOUSE_ODBC_BRIDGE "HTTP-server working like a proxy to ODBC driver" OFF)
|
||||
endif ()
|
||||
|
||||
option (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE "HTTP-server working like a proxy to Library dictionary source"
|
||||
${ENABLE_CLICKHOUSE_ALL})
|
||||
|
programs/server/.gitignore
@ -1,8 +1,11 @@
|
||||
/access
|
||||
/dictionaries_lib
|
||||
/flags
|
||||
/format_schemas
|
||||
/metadata
|
||||
/metadata_dropped
|
||||
/data
|
||||
/store
|
||||
/access
|
||||
/flags
|
||||
/dictionaries_lib
|
||||
/format_schemas
|
||||
/preprocessed_configs
|
||||
/shadow
|
||||
/tmp
|
||||
|
@ -19,6 +19,7 @@ set (CLICKHOUSE_SERVER_LINK
|
||||
clickhouse_storages_system
|
||||
clickhouse_table_functions
|
||||
string_utils
|
||||
jemalloc
|
||||
|
||||
${LINK_RESOURCE_LIB}
|
||||
|
||||
|
@ -101,6 +101,10 @@
|
||||
# include <Server/KeeperTCPHandlerFactory.h>
|
||||
#endif
|
||||
|
||||
#if USE_JEMALLOC
|
||||
# include <jemalloc/jemalloc.h>
|
||||
#endif
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric Revision;
|
||||
@ -109,11 +113,35 @@ namespace CurrentMetrics
|
||||
extern const Metric MaxDDLEntryID;
|
||||
}
|
||||
|
||||
#if USE_JEMALLOC
|
||||
static bool jemallocOptionEnabled(const char *name)
|
||||
{
|
||||
bool value;
|
||||
size_t size = sizeof(value);
|
||||
|
||||
if (mallctl(name, reinterpret_cast<void *>(&value), &size, /* newp= */ nullptr, /* newlen= */ 0))
|
||||
throw Poco::SystemException("mallctl() failed");
|
||||
|
||||
return value;
|
||||
}
|
||||
#else
|
||||
static bool jemallocOptionEnabled(const char *) { return false; }
|
||||
#endif
|
||||
|
||||
|
||||
int mainEntryClickHouseServer(int argc, char ** argv)
|
||||
{
|
||||
DB::Server app;
|
||||
|
||||
if (jemallocOptionEnabled("opt.background_thread"))
|
||||
{
|
||||
LOG_ERROR(&app.logger(),
|
||||
"jemalloc.background_thread was requested, "
|
||||
"however ClickHouse uses percpu_arena and background_thread most likely will not give any benefits, "
|
||||
"and also background_thread is not compatible with ClickHouse watchdog "
|
||||
"(that can be disabled with CLICKHOUSE_WATCHDOG_ENABLE=0)");
|
||||
}
|
||||
|
||||
/// Do not fork separate process from watchdog if we attached to terminal.
|
||||
/// Otherwise it breaks gdb usage.
|
||||
/// Can be overridden by environment variable (cannot use server config at this moment).
|
||||
|
programs/server/data/.gitignore
@ -1,3 +0,0 @@
|
||||
*.txt
|
||||
*.dat
|
||||
*.idx
|
programs/server/metadata/.gitignore
@ -1 +0,0 @@
|
||||
*.sql
|
@ -1,16 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionMinMaxAny.h> // SingleValueDataString used in embedded compiler
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <Columns/ColumnTuple.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <DataTypes/IDataType.h>
|
||||
#include <common/StringRef.h>
|
||||
#include "Columns/IColumn.h"
|
||||
#include <DataTypes/IDataType.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/AggregateFunctionMinMaxAny.h> // SingleValueDataString used in embedded compiler
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
@ -24,53 +22,47 @@ struct AggregateFunctionArgMinMaxData
|
||||
using ResultData_t = ResultData;
|
||||
using ValueData_t = ValueData;
|
||||
|
||||
ResultData result; // the argument at which the minimum/maximum value is reached.
|
||||
ValueData value; // value for which the minimum/maximum is calculated.
|
||||
ResultData result; // the argument at which the minimum/maximum value is reached.
|
||||
ValueData value; // value for which the minimum/maximum is calculated.
|
||||
|
||||
static bool allocatesMemoryInArena() { return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena(); }
|
||||
|
||||
static String name() { return StringRef(ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax"; }
|
||||
static bool allocatesMemoryInArena()
|
||||
{
|
||||
return ResultData::allocatesMemoryInArena() || ValueData::allocatesMemoryInArena();
|
||||
}
|
||||
};
|
||||
|
||||
/// Returns the first arg value found for the minimum/maximum value. Example: argMax(arg, value).
|
||||
template <typename Data>
|
||||
class AggregateFunctionArgMinMax final : public IAggregateFunctionTupleArgHelper<Data, AggregateFunctionArgMinMax<Data>, 2>
|
||||
class AggregateFunctionArgMinMax final : public IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data>>
|
||||
{
|
||||
private:
|
||||
const DataTypePtr & type_res;
|
||||
const DataTypePtr & type_val;
|
||||
const SerializationPtr serialization_res;
|
||||
const SerializationPtr serialization_val;
|
||||
bool tuple_argument;
|
||||
|
||||
using Base = IAggregateFunctionTupleArgHelper<Data, AggregateFunctionArgMinMax<Data>, 2>;
|
||||
using Base = IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data>>;
|
||||
|
||||
public:
|
||||
AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_, const bool tuple_argument_)
|
||||
: Base({type_res_, type_val_}, {}, tuple_argument_)
|
||||
AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_)
|
||||
: Base({type_res_, type_val_}, {})
|
||||
, type_res(this->argument_types[0])
|
||||
, type_val(this->argument_types[1])
|
||||
, serialization_res(type_res->getDefaultSerialization())
|
||||
, serialization_val(type_val->getDefaultSerialization())
|
||||
{
|
||||
if (!type_val->isComparable())
|
||||
throw Exception(
|
||||
"Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
|
||||
+ " because the values of that data type are not comparable",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
this->tuple_argument = tuple_argument_;
|
||||
throw Exception("Illegal type " + type_val->getName() + " of second argument of aggregate function " + getName()
|
||||
+ " because the values of that data type are not comparable", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
}
|
||||
|
||||
String getName() const override { return Data::name(); }
|
||||
String getName() const override
|
||||
{
|
||||
return StringRef(Data::ValueData_t::name()) == StringRef("min") ? "argMin" : "argMax";
|
||||
}
|
||||
|
||||
DataTypePtr getReturnType() const override
|
||||
{
|
||||
if (tuple_argument)
|
||||
{
|
||||
return std::make_shared<DataTypeTuple>(DataTypes{this->type_res, this->type_val});
|
||||
}
|
||||
|
||||
return type_res;
|
||||
}
|
||||
|
||||
@ -98,21 +90,15 @@ public:
|
||||
this->data(place).value.read(buf, *serialization_val, arena);
|
||||
}
|
||||
|
||||
bool allocatesMemoryInArena() const override { return Data::allocatesMemoryInArena(); }
|
||||
bool allocatesMemoryInArena() const override
|
||||
{
|
||||
return Data::allocatesMemoryInArena();
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
|
||||
{
|
||||
if (tuple_argument)
|
||||
{
|
||||
auto & tup = assert_cast<ColumnTuple &>(to);
|
||||
|
||||
this->data(place).result.insertResultInto(tup.getColumn(0));
|
||||
this->data(place).value.insertResultInto(tup.getColumn(1));
|
||||
}
|
||||
else
|
||||
this->data(place).result.insertResultInto(to);
|
||||
this->data(place).result.insertResultInto(to);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ public:
|
||||
place_data->sum += rhs_data->sum + (rhs_data->first - place_data->last);
|
||||
place_data->last = rhs_data->last;
|
||||
}
|
||||
else if ((rhs_data->last < place_data->first && rhs_data->seen_last && place_data->seen_first))
|
||||
else if ((rhs_data->first < place_data->last && rhs_data->seen_last && place_data->seen_first))
|
||||
{
|
||||
// In the opposite scenario, the lhs comes after the rhs, e.g. [4, 6] [1, 2]. Since we
|
||||
// assume the input interval states are sorted by time, we assume this is a counter
|
||||
@ -87,9 +87,9 @@ public:
|
||||
// rhs last value.
|
||||
|
||||
place_data->sum += rhs_data->sum;
|
||||
place_data->first = rhs_data->first;
|
||||
place_data->last = rhs_data->last;
|
||||
}
|
||||
else if (rhs_data->seen_first)
|
||||
else if (rhs_data->seen_first && !place_data->seen_first)
|
||||
{
|
||||
// If we're here then the lhs is an empty state and the rhs does have some state, so
|
||||
// we'll just take that state.
|
||||
|
@ -132,12 +132,6 @@ void registerAggregateFunctionsUniq(AggregateFunctionFactory & factory)
|
||||
|
||||
factory.registerFunction("uniqExact",
|
||||
{createAggregateFunctionUniq<true, AggregateFunctionUniqExactData, AggregateFunctionUniqExactData<String>>, properties});
|
||||
|
||||
#if USE_DATASKETCHES
|
||||
factory.registerFunction("uniqThetaSketch",
|
||||
{createAggregateFunctionUniq<AggregateFunctionUniqThetaSketchData, AggregateFunctionUniqThetaSketchData>, properties});
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -22,7 +22,6 @@
|
||||
|
||||
#include <AggregateFunctions/UniquesHashSet.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/ThetaSketchData.h>
|
||||
#include <AggregateFunctions/UniqVariadicHash.h>
|
||||
|
||||
|
||||
@ -125,19 +124,6 @@ struct AggregateFunctionUniqExactData<String>
|
||||
};
|
||||
|
||||
|
||||
/// uniqThetaSketch
|
||||
#if USE_DATASKETCHES
|
||||
|
||||
struct AggregateFunctionUniqThetaSketchData
|
||||
{
|
||||
using Set = ThetaSketchData<UInt64>;
|
||||
Set set;
|
||||
|
||||
static String getName() { return "uniqThetaSketch"; }
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
namespace detail
|
||||
{
|
||||
|
||||
@ -203,12 +189,6 @@ struct OneAdder
|
||||
data.set.insert(key);
|
||||
}
|
||||
}
|
||||
#if USE_DATASKETCHES
|
||||
else if constexpr (std::is_same_v<Data, AggregateFunctionUniqThetaSketchData>)
|
||||
{
|
||||
data.set.insertOriginal(column.getDataAt(row_num));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include <IO/WriteHelpers.h>
|
||||
|
||||
|
||||
#if !__clang__
|
||||
#if !defined(__clang__)
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Warray-bounds"
|
||||
#endif
|
||||
@ -280,7 +280,7 @@ public:
|
||||
|
||||
}
|
||||
|
||||
#if !__clang__
|
||||
#if !defined(__clang__)
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
||||
|
@ -31,12 +31,6 @@
|
||||
M(Float32) \
|
||||
M(Float64)
|
||||
|
||||
#define FOR_DECIMAL_TYPES(M) \
|
||||
M(Decimal32) \
|
||||
M(Decimal64) \
|
||||
M(Decimal128)
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
|
@ -8,14 +8,10 @@
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
}
|
||||
|
||||
/// min, max, any, anyLast, anyHeavy, etc...
|
||||
template <template <typename> class AggregateFunctionTemplate, template <typename> class Data>
|
||||
@ -30,7 +26,6 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
|
||||
#define DISPATCH(TYPE) \
|
||||
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<TYPE>>>(argument_type);
|
||||
FOR_NUMERIC_TYPES(DISPATCH)
|
||||
FOR_DECIMAL_TYPES(DISPATCH)
|
||||
#undef DISPATCH
|
||||
|
||||
if (which.idx == TypeIndex::Date)
|
||||
@ -39,6 +34,12 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
|
||||
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<DataTypeDateTime::FieldType>>>(argument_type);
|
||||
if (which.idx == TypeIndex::DateTime64)
|
||||
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<DateTime64>>>(argument_type);
|
||||
if (which.idx == TypeIndex::Decimal32)
|
||||
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Decimal32>>>(argument_type);
|
||||
if (which.idx == TypeIndex::Decimal64)
|
||||
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Decimal64>>>(argument_type);
|
||||
if (which.idx == TypeIndex::Decimal128)
|
||||
return new AggregateFunctionTemplate<Data<SingleValueDataFixed<Decimal128>>>(argument_type);
|
||||
if (which.idx == TypeIndex::String)
|
||||
return new AggregateFunctionTemplate<Data<SingleValueDataString>>(argument_type);
|
||||
|
||||
@ -48,77 +49,66 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
|
||||
|
||||
/// argMin, argMax
|
||||
template <template <typename> class MinMaxData, typename ResData>
|
||||
static IAggregateFunction * createAggregateFunctionArgMinMaxSecond(const DataTypePtr & res_type, const DataTypePtr & val_type, bool is_tuple)
|
||||
static IAggregateFunction * createAggregateFunctionArgMinMaxSecond(const DataTypePtr & res_type, const DataTypePtr & val_type)
|
||||
{
|
||||
WhichDataType which(val_type);
|
||||
|
||||
#define DISPATCH(TYPE) \
|
||||
if (which.idx == TypeIndex::TYPE) \
|
||||
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<TYPE>>>>(res_type, val_type, is_tuple);
|
||||
            return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<TYPE>>>>(res_type, val_type);
    FOR_NUMERIC_TYPES(DISPATCH)
    FOR_DECIMAL_TYPES(DISPATCH)
#undef DISPATCH

    if (which.idx == TypeIndex::Date)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDate::FieldType>>>>(res_type, val_type, is_tuple);
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDate::FieldType>>>>(res_type, val_type);
    if (which.idx == TypeIndex::DateTime)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDateTime::FieldType>>>>(res_type, val_type, is_tuple);
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DataTypeDateTime::FieldType>>>>(res_type, val_type);
    if (which.idx == TypeIndex::DateTime64)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DateTime64>>>>(res_type, val_type, is_tuple);
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<DateTime64>>>>(res_type, val_type);
    if (which.idx == TypeIndex::Decimal32)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<Decimal32>>>>(res_type, val_type);
    if (which.idx == TypeIndex::Decimal64)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<Decimal64>>>>(res_type, val_type);
    if (which.idx == TypeIndex::Decimal128)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<Decimal128>>>>(res_type, val_type);
    if (which.idx == TypeIndex::String)
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataString>>>(res_type, val_type, is_tuple);

    return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataGeneric>>>(res_type, val_type, is_tuple);
        return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataString>>>(res_type, val_type);

    return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataGeneric>>>(res_type, val_type);
}

template <template <typename> class MinMaxData>
static IAggregateFunction * createAggregateFunctionArgMinMax(const String & name, const DataTypes & argument_types, const Array & parameters)
{
    assertNoParameters(name, parameters);
    assertBinary(name, argument_types);

    DataTypePtr res_type, val_type;
    bool is_tuple = false;

    // argMin and argMax could get tuple of two as arguments
    if (argument_types.size() == 1 && argument_types[0]->getTypeId() == TypeIndex::Tuple)
    {
        const auto * tuple_type = assert_cast<const DataTypeTuple *>(argument_types[0].get());

        if (tuple_type->getElements().size() != 2)
        {
            throw Exception("Aggregate function " + name + " expects two elements in tuple argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
        }

        res_type = tuple_type->getElements()[0];
        val_type = tuple_type->getElements()[1];
        is_tuple = true;
    }
    else
    {
        assertBinary(name, argument_types);
        res_type = argument_types[0];
        val_type = argument_types[1];
    }
    const DataTypePtr & res_type = argument_types[0];
    const DataTypePtr & val_type = argument_types[1];

    WhichDataType which(res_type);
#define DISPATCH(TYPE) \
    if (which.idx == TypeIndex::TYPE) \
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<TYPE>>(res_type, val_type, is_tuple);
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<TYPE>>(res_type, val_type);
    FOR_NUMERIC_TYPES(DISPATCH)
    FOR_DECIMAL_TYPES(DISPATCH)
#undef DISPATCH

    if (which.idx == TypeIndex::Date)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDate::FieldType>>(res_type, val_type, is_tuple);
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDate::FieldType>>(res_type, val_type);
    if (which.idx == TypeIndex::DateTime)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDateTime::FieldType>>(res_type, val_type, is_tuple);
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DataTypeDateTime::FieldType>>(res_type, val_type);
    if (which.idx == TypeIndex::DateTime64)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DateTime64>>(res_type, val_type, is_tuple);
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<DateTime64>>(res_type, val_type);
    if (which.idx == TypeIndex::Decimal32)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<Decimal32>>(res_type, val_type);
    if (which.idx == TypeIndex::Decimal64)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<Decimal64>>(res_type, val_type);
    if (which.idx == TypeIndex::Decimal128)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<Decimal128>>(res_type, val_type);
    if (which.idx == TypeIndex::String)
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataString>(res_type, val_type, is_tuple);
        return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataString>(res_type, val_type);

    return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataGeneric>(res_type, val_type, is_tuple);
    return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataGeneric>(res_type, val_type);
}

}

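The factory above accepts either two separate arguments (result and value) or a single Tuple of exactly two elements, and records which form was used in is_tuple. For illustration only, a minimal standalone sketch of that argument-normalization step; the TypeDesc type and normalizeArgMinMaxArgs function are hypothetical stand-ins, not ClickHouse APIs:

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for an argument type: a type name plus, for "Tuple",
// the names of its element types.
struct TypeDesc
{
    std::string name;
    std::vector<std::string> tuple_elements;
};

// Mirrors the normalization in createAggregateFunctionArgMinMax above:
// either two separate arguments (result, value), or one Tuple of exactly two.
static void normalizeArgMinMaxArgs(
    const std::vector<TypeDesc> & args, std::string & res, std::string & val, bool & is_tuple)
{
    if (args.size() == 1 && args[0].name == "Tuple")
    {
        if (args[0].tuple_elements.size() != 2)
            throw std::runtime_error("argMin/argMax expects two elements in tuple argument");
        res = args[0].tuple_elements[0];
        val = args[0].tuple_elements[1];
        is_tuple = true;
    }
    else if (args.size() == 2)
    {
        res = args[0].name;
        val = args[1].name;
        is_tuple = false;
    }
    else
        throw std::runtime_error("argMin/argMax expects two arguments or one tuple of two");
}

int main()
{
    std::string res, val;
    bool is_tuple = false;
    normalizeArgMinMaxArgs({{"Tuple", {"String", "UInt64"}}}, res, val, is_tuple);
    std::cout << res << ' ' << val << ' ' << is_tuple << '\n'; // prints: String UInt64 1
}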
@ -11,8 +11,8 @@

#include <cstddef>
#include <memory>
#include <type_traits>
#include <vector>
#include <type_traits>


namespace DB
@ -52,9 +52,7 @@ class IAggregateFunction
{
public:
    IAggregateFunction(const DataTypes & argument_types_, const Array & parameters_)
        : argument_types(argument_types_), parameters(parameters_)
    {
    }
        : argument_types(argument_types_), parameters(parameters_) {}

    /// Get main function name.
    virtual String getName() const = 0;
@ -180,8 +178,12 @@ public:
      * "places" contains a large number of same values consecutively.
      */
    virtual void addBatchArray(
        size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
        const = 0;
        size_t batch_size,
        AggregateDataPtr * places,
        size_t place_offset,
        const IColumn ** columns,
        const UInt64 * offsets,
        Arena * arena) const = 0;

    /** The case when the aggregation key is UInt8
      * and pointers to aggregation states are stored in AggregateDataPtr[256] lookup table.
@ -202,10 +204,8 @@ public:
      * arguments and params are for nested_function.
      */
    virtual AggregateFunctionPtr getOwnNullAdapter(
        const AggregateFunctionPtr & /*nested_function*/,
        const DataTypes & /*arguments*/,
        const Array & /*params*/,
        const AggregateFunctionProperties & /*properties*/) const
        const AggregateFunctionPtr & /*nested_function*/, const DataTypes & /*arguments*/,
        const Array & /*params*/, const AggregateFunctionProperties & /*properties*/) const
    {
        return nullptr;
    }
@ -250,9 +250,7 @@ private:

public:
    IAggregateFunctionHelper(const DataTypes & argument_types_, const Array & parameters_)
        : IAggregateFunction(argument_types_, parameters_)
    {
    }
        : IAggregateFunction(argument_types_, parameters_) {}

    AddFunc getAddressOfAddFunction() const override { return &addFree; }

@ -414,19 +412,32 @@ public:
    static constexpr bool DateTime64Supported = true;

    IAggregateFunctionDataHelper(const DataTypes & argument_types_, const Array & parameters_)
        : IAggregateFunctionHelper<Derived>(argument_types_, parameters_)
        : IAggregateFunctionHelper<Derived>(argument_types_, parameters_) {}

    void create(AggregateDataPtr place) const override
    {
        new (place) Data;
    }

    void create(AggregateDataPtr __restrict place) const override { new (place) Data; }
    void destroy(AggregateDataPtr __restrict place) const noexcept override
    {
        data(place).~Data();
    }

    void destroy(AggregateDataPtr __restrict place) const noexcept override { data(place).~Data(); }
    bool hasTrivialDestructor() const override
    {
        return std::is_trivially_destructible_v<Data>;
    }

    bool hasTrivialDestructor() const override { return std::is_trivially_destructible_v<Data>; }
    size_t sizeOfData() const override
    {
        return sizeof(Data);
    }

    size_t sizeOfData() const override { return sizeof(Data); }

    size_t alignOfData() const override { return alignof(Data); }
    size_t alignOfData() const override
    {
        return alignof(Data);
    }

    void addBatchLookupTable8(
        size_t batch_size,
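The one-line create()/destroy() overrides in the hunk above construct the aggregation state with placement new in caller-provided memory and tear it down with an explicit destructor call. A self-contained sketch of that pattern follows; the Data struct here is illustrative, and the raw buffer stands in for an arena-allocated "place":

#include <cstddef>
#include <iostream>
#include <new>
#include <type_traits>

struct Data
{
    long long sum = 0; // illustrative aggregate state
};

int main()
{
    // Aligned raw storage stands in for the arena-allocated "place".
    alignas(Data) std::byte place[sizeof(Data)];

    Data * state = new (place) Data; // create(): placement new, no heap allocation
    state->sum += 42;
    std::cout << state->sum << '\n';

    state->~Data(); // destroy(): explicit destructor call
    std::cout << std::is_trivially_destructible_v<Data> << '\n'; // what hasTrivialDestructor() reports
}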
@ -505,142 +516,6 @@ public:
    }
};

/// Implements tuple argument unwrapper when the tuple just masks arguments
template <typename T, typename Derived, size_t args_count>
class IAggregateFunctionTupleArgHelper : public IAggregateFunctionDataHelper<T, Derived>
{
private:
    using Base = IAggregateFunctionDataHelper<T, Derived>;

    static void addFree(const IAggregateFunction * that, AggregateDataPtr place, const IColumn ** columns_, size_t row_num, Arena * arena)
    {
        if (const auto * col = checkAndGetColumn<ColumnTuple>(*columns_[0]))
        {
            const IColumn * columns[args_count];
            const auto & tup_columns = col->getColumns();

            assert(tup_columns.size() == args_count);
            for (size_t i = 0; i < tup_columns.size(); ++i)
            {
                columns[i] = tup_columns[i].get();
            }

            static_cast<const Derived &>(*that).add(place, columns, row_num, arena);
        }
        else
            static_cast<const Derived &>(*that).add(place, columns_, row_num, arena);
    }

protected:
    ssize_t extractColumns(const IColumn ** columns, const IColumn ** aggr_columns, ssize_t if_argument_pos) const
    {
        if (tuple_argument)
        {
            auto tup_columns = assert_cast<const ColumnTuple *>(aggr_columns[0])->getColumns();
            for (size_t i = 0; i < args_count; ++i)
                columns[i] = tup_columns[i].get();
        }
        else
        {
            for (size_t i = 0; i < args_count; ++i)
                columns[i] = aggr_columns[i];
        }
        if (if_argument_pos >= 0)
        {
            columns[args_count] = aggr_columns[if_argument_pos];
            return args_count;
        }
        else
            return -1;
    }

    bool tuple_argument;

public:
    IAggregateFunctionTupleArgHelper(const DataTypes & argument_types_, const Array & parameters_, bool tuple_argument_)
        : Base(argument_types_, parameters_)
    {
        tuple_argument = tuple_argument_;
    }

    IAggregateFunction::AddFunc getAddressOfAddFunction() const override { return &addFree; }

    /*
     * We're overriding addBatch* functions just to avoid extracting columns
     * in 'add' functions
     */
    void addBatch(
        size_t batch_size,
        AggregateDataPtr * places,
        size_t place_offset,
        const IColumn ** columns,
        Arena * arena,
        ssize_t if_argument_pos = -1) const override
    {
        const IColumn * ex_columns[args_count + (if_argument_pos >= 0)];
        if_argument_pos = extractColumns(ex_columns, columns, if_argument_pos);

        Base::addBatch(batch_size, places, place_offset, ex_columns, arena, if_argument_pos);
    }

    void addBatchSinglePlace(
        size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override
    {
        const IColumn * ex_columns[args_count + (if_argument_pos >= 0)];
        if_argument_pos = extractColumns(ex_columns, columns, if_argument_pos);

        Base::addBatchSinglePlace(batch_size, place, ex_columns, arena, if_argument_pos);
    }

    void addBatchSinglePlaceNotNull(
        size_t batch_size,
        AggregateDataPtr place,
        const IColumn ** columns,
        const UInt8 * null_map,
        Arena * arena,
        ssize_t if_argument_pos = -1) const override
    {
        const IColumn * ex_columns[args_count + (if_argument_pos >= 0)];
        if_argument_pos = extractColumns(ex_columns, columns, if_argument_pos);

        Base::addBatchSinglePlaceNotNull(batch_size, place, ex_columns, null_map, arena, if_argument_pos);
    }

    void addBatchSinglePlaceFromInterval(
        size_t batch_begin, size_t batch_end, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1)
        const override
    {
        const IColumn * ex_columns[args_count + (if_argument_pos >= 0)];
        if_argument_pos = extractColumns(ex_columns, columns, if_argument_pos);

        Base::addBatchSinglePlaceFromInterval(batch_begin, batch_end, place, ex_columns, arena, if_argument_pos);
    }

    void addBatchArray(
        size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
        const override
    {
        const IColumn * ex_columns[args_count];
        extractColumns(ex_columns, columns, -1);

        Base::addBatchArray(batch_size, places, place_offset, ex_columns, offsets, arena);
    }

    void addBatchLookupTable8(
        size_t batch_size,
        AggregateDataPtr * map,
        size_t place_offset,
        std::function<void(AggregateDataPtr &)> init,
        const UInt8 * key,
        const IColumn ** columns,
        Arena * arena) const override
    {
        const IColumn * ex_columns[args_count];
        extractColumns(ex_columns, columns, -1);

        Base::addBatchLookupTable8(batch_size, map, place_offset, init, key, ex_columns, arena);
    }
};

/// Properties of aggregate function that are independent of argument types and parameters.
struct AggregateFunctionProperties
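IAggregateFunctionTupleArgHelper above flattens a tuple column into its element columns before forwarding to the underlying add()/addBatch*() implementations. Below is a minimal standalone sketch of that unwrapping step with toy column types; Column, TupleColumn and extractColumns here are illustrative stand-ins, not the ClickHouse IColumn interface:

#include <cassert>
#include <iostream>
#include <vector>

// Toy stand-ins: a "column" is just a vector of ints; a tuple column refers to element columns.
using Column = std::vector<int>;

struct TupleColumn
{
    std::vector<const Column *> elements;
};

// Mirrors extractColumns(): if the single argument is a tuple, expose its element
// columns as if they had been passed separately; otherwise pass columns through.
static void extractColumns(const Column ** out, size_t args_count,
                           const Column ** args, const TupleColumn * tuple_arg)
{
    if (tuple_arg)
    {
        assert(tuple_arg->elements.size() == args_count);
        for (size_t i = 0; i < args_count; ++i)
            out[i] = tuple_arg->elements[i];
    }
    else
    {
        for (size_t i = 0; i < args_count; ++i)
            out[i] = args[i];
    }
}

int main()
{
    Column keys{1, 2, 3}, values{10, 20, 30};
    TupleColumn tuple{{&keys, &values}};

    const Column * unwrapped[2];
    extractColumns(unwrapped, 2, nullptr, &tuple);
    std::cout << (*unwrapped[0])[1] << ' ' << (*unwrapped[1])[1] << '\n'; // prints: 2 20
}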
@ -163,7 +163,7 @@
        sorted = false;
    }

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
@ -191,7 +191,7 @@ public:
        }
    }

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
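Many hunks in this diff change `#if !__clang__` to `#if !defined(__clang__)`. The old form evaluates an identifier that is not defined under GCC (it happens to evaluate to 0, but -Wundef flags it), while the new form tests for the macro's presence explicitly. A small self-contained example of the guarded-pragma idiom as it is used after the change; it assumes the suppressed diagnostic is one that clang may not recognize:

#include <iostream>

// Guard GCC-specific pragmas so clang never sees an unknown warning name.
// Testing defined(__clang__) avoids evaluating an undefined macro inside #if.
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif

int main()
{
    int value = 0; // placeholder for code GCC misdiagnoses
    std::cout << value << '\n';
}

#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif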
@ -1,119 +0,0 @@
#pragma once

#if !defined(ARCADIA_BUILD)
# include <Common/config.h>
#endif

#if USE_DATASKETCHES

#include <boost/noncopyable.hpp>
#include <memory>
#include <theta_sketch.hpp> // Y_IGNORE
#include <theta_union.hpp> // Y_IGNORE


namespace DB
{


template <typename Key>
class ThetaSketchData : private boost::noncopyable
{
private:
    std::unique_ptr<datasketches::update_theta_sketch> sk_update;
    std::unique_ptr<datasketches::theta_union> sk_union;

    inline datasketches::update_theta_sketch * getSkUpdate()
    {
        if (!sk_update)
            sk_update = std::make_unique<datasketches::update_theta_sketch>(datasketches::update_theta_sketch::builder().build());
        return sk_update.get();
    }

    inline datasketches::theta_union * getSkUnion()
    {
        if (!sk_union)
            sk_union = std::make_unique<datasketches::theta_union>(datasketches::theta_union::builder().build());
        return sk_union.get();
    }

public:
    using value_type = Key;

    ThetaSketchData() = default;
    ~ThetaSketchData() = default;

    /// Insert original value without hash, as `datasketches::update_theta_sketch.update` will do the hash internal.
    void insertOriginal(const StringRef & value)
    {
        getSkUpdate()->update(value.data, value.size);
    }

    /// Note that `datasketches::update_theta_sketch.update` will do the hash again.
    void insert(Key value)
    {
        getSkUpdate()->update(value);
    }

    UInt64 size() const
    {
        if (sk_union)
            return static_cast<UInt64>(sk_union->get_result().get_estimate());
        else if (sk_update)
            return static_cast<UInt64>(sk_update->get_estimate());
        else
            return 0;
    }

    void merge(const ThetaSketchData & rhs)
    {
        datasketches::theta_union * u = getSkUnion();

        if (sk_update)
        {
            u->update(*sk_update);
            sk_update.reset(nullptr);
        }

        if (rhs.sk_update)
            u->update(*rhs.sk_update);
        else if (rhs.sk_union)
            u->update(rhs.sk_union->get_result());
    }

    /// You can only call for an empty object.
    void read(DB::ReadBuffer & in)
    {
        datasketches::compact_theta_sketch::vector_bytes bytes;
        readVectorBinary(bytes, in);
        if (!bytes.empty())
        {
            auto sk = datasketches::compact_theta_sketch::deserialize(bytes.data(), bytes.size());
            getSkUnion()->update(sk);
        }
    }

    void write(DB::WriteBuffer & out) const
    {
        if (sk_update)
        {
            auto bytes = sk_update->compact().serialize();
            writeVectorBinary(bytes, out);
        }
        else if (sk_union)
        {
            auto bytes = sk_union->get_result().serialize();
            writeVectorBinary(bytes, out);
        }
        else
        {
            datasketches::compact_theta_sketch::vector_bytes bytes;
            writeVectorBinary(bytes, out);
        }
    }
};


}

#endif
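The header above lazily builds an update sketch for inserts and a theta union for merges, and reports the union's (or sketch's) estimate as the distinct-value count. For context, a minimal standalone example using the same DataSketches calls that appear in that header (theta_sketch.hpp / theta_union.hpp); it assumes the Apache DataSketches C++ library is available on the include path:

#include <cstdint>
#include <iostream>
#include <theta_sketch.hpp>
#include <theta_union.hpp>

int main()
{
    // Two update sketches, as ThetaSketchData::getSkUpdate() would build them.
    auto a = datasketches::update_theta_sketch::builder().build();
    auto b = datasketches::update_theta_sketch::builder().build();

    for (uint64_t i = 0; i < 1000; ++i)
        a.update(i);        // insert(): the sketch hashes the value internally
    for (uint64_t i = 500; i < 1500; ++i)
        b.update(i);

    // merge(): feed both sketches into a theta_union, as ThetaSketchData::merge() does.
    auto u = datasketches::theta_union::builder().build();
    u.update(a);
    u.update(b);

    // size(): the estimate of the number of distinct values (about 1500 here).
    std::cout << u.get_result().get_estimate() << '\n';
}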
@ -50,6 +50,7 @@ SRCS(
    AggregateFunctionStatisticsSimple.cpp
    AggregateFunctionStudentTTest.cpp
    AggregateFunctionSum.cpp
    AggregateFunctionSumCount.cpp
    AggregateFunctionSumMap.cpp
    AggregateFunctionTopK.cpp
    AggregateFunctionUniq.cpp
@ -13,6 +13,7 @@ namespace ErrorCodes
    extern const int MISMATCH_REPLICAS_DATA_SOURCES;
    extern const int NO_AVAILABLE_REPLICA;
    extern const int TIMEOUT_EXCEEDED;
    extern const int UNKNOWN_PACKET_FROM_SERVER;
}


@ -278,7 +279,22 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac
    Packet packet;
    {
        AsyncCallbackSetter async_setter(current_connection, std::move(async_callback));
        packet = current_connection->receivePacket();

        try
        {
            packet = current_connection->receivePacket();
        }
        catch (Exception & e)
        {
            if (e.code() == ErrorCodes::UNKNOWN_PACKET_FROM_SERVER)
            {
                /// Exception may happen when packet is received, e.g. when got unknown packet.
                /// In this case, invalidate replica, so that we would not read from it anymore.
                current_connection->disconnect();
                invalidateReplica(state);
            }
            throw;
        }
    }

    switch (packet.type)
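The hunk above wraps receivePacket() so that an UNKNOWN_PACKET_FROM_SERVER error disconnects the connection and invalidates the replica before the exception is rethrown to the caller. A small self-contained sketch of that clean-up-then-rethrow shape; the Replica type and the failure below are illustrative, not the ClickHouse connection classes:

#include <iostream>
#include <stdexcept>

struct Replica
{
    bool valid = true;
    void disconnect() { std::cout << "disconnected\n"; }
};

// Stand-in for receivePacket() failing with a protocol error.
static int receivePacket() { throw std::runtime_error("unknown packet from server"); }

static int receiveFrom(Replica & replica)
{
    try
    {
        return receivePacket();
    }
    catch (...)
    {
        // Invalidate the replica so it is not read from again, then let the
        // original exception continue to the caller unchanged.
        replica.disconnect();
        replica.valid = false;
        throw;
    }
}

int main()
{
    Replica r;
    try { receiveFrom(r); }
    catch (const std::exception & e) { std::cout << e.what() << ' ' << r.valid << '\n'; }
}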
@ -111,7 +111,7 @@
    }

    /// Suppress gcc 7.3.1 warning: '*((void*)&<anonymous> +8)' may be used uninitialized in this function
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -128,7 +128,7 @@ public:
        offsets.push_back(new_size);
    }

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif


@ -277,7 +277,7 @@ private:
  * GCC 4.9 mistakenly assumes that we can call `free` from a pointer to the stack.
  * In fact, the combination of conditions inside AllocatorWithStackMemory does not allow this.
  */
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
@ -359,6 +359,6 @@ extern template class Allocator<true, false>;
extern template class Allocator<false, true>;
extern template class Allocator<true, true>;

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
@ -19,7 +19,7 @@ namespace DB
struct UInt128
{
    /// Suppress gcc7 warnings: 'prev_key.DB::UInt128::low' may be used uninitialized in this function
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -92,7 +92,7 @@ struct UInt128
        return static_cast<T>(low);
    }

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

@ -150,7 +150,7 @@ struct DummyUInt256
{

    /// Suppress gcc7 warnings: 'prev_key.DB::UInt256::a' may be used uninitialized in this function
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -179,7 +179,7 @@ struct DummyUInt256
    bool operator== (const UInt64 rhs) const { return a == rhs && b == 0 && c == 0 && d == 0; }
    bool operator!= (const UInt64 rhs) const { return !operator==(rhs); }

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
@ -15,4 +15,3 @@
#cmakedefine01 USE_GRPC
#cmakedefine01 USE_STATS
#cmakedefine01 CLICKHOUSE_SPLIT_BINARY
#cmakedefine01 USE_DATASKETCHES
@ -181,7 +181,7 @@ std::vector<std::pair<String, uint16_t>> parseRemoteDescriptionForExternalDataba
        size_t colon = address.find(':');
        if (colon == String::npos)
        {
            LOG_WARNING(&Poco::Logger::get("ParseRemoteDescription"), "Port is not found for host: {}. Using default port {}", default_port);
            LOG_WARNING(&Poco::Logger::get("ParseRemoteDescription"), "Port is not found for host: {}. Using default port {}", address, default_port);
            result.emplace_back(std::make_pair(address, default_port));
        }
        else
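The fix above supplies the missing `address` argument: the warning text has two `{}` placeholders but only `default_port` was being passed. ClickHouse's LOG_* macros take fmt-style placeholders, so each `{}` needs a matching argument; a tiny sketch of the corrected call shape using the {fmt} library (assumed available) rather than the actual logging macro:

#include <cstdint>
#include <fmt/core.h>
#include <iostream>
#include <string>

int main()
{
    std::string address = "example-host";
    uint16_t default_port = 9000;

    // Two placeholders, two arguments - the shape of the corrected LOG_WARNING call.
    std::cout << fmt::format("Port is not found for host: {}. Using default port {}", address, default_port) << '\n';
}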
@ -1,5 +1,5 @@
/// Bug in GCC: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
@ -263,6 +263,6 @@ int main()
    return 0;
}

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
@ -69,7 +69,7 @@ static void aggregate1(Map & map, Source::const_iterator begin, Source::const_it
        ++map[*it];
}

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -122,7 +122,7 @@ static void aggregate22(MapTwoLevel & map, Source::const_iterator begin, Source:
    }
}

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
@ -62,7 +62,7 @@ struct AggregateIndependent
    }
};

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -115,7 +115,7 @@ struct AggregateIndependentWithSequentialKeysOptimization
    }
};

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

@ -265,7 +265,7 @@ struct Creator
    void operator()(Value &) const {}
};

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -275,7 +275,7 @@ struct Updater
    void operator()(Value & x) const { ++x; }
};

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
@ -484,7 +484,7 @@ DataTypes Block::getDataTypes() const


template <typename ReturnType>
static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, const std::string & context_description)
static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, const std::string & context_description, bool allow_remove_constants)
{
    auto on_error = [](const std::string & message [[maybe_unused]], int code [[maybe_unused]])
    {
@ -515,7 +515,16 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons
        if (!actual.column || !expected.column)
            continue;

        if (actual.column->getName() != expected.column->getName())
        const IColumn * actual_column = actual.column.get();

        /// If we allow to remove constants, and expected column is not const, then unwrap actual constant column.
        if (allow_remove_constants && !isColumnConst(*expected.column))
        {
            if (const auto * column_const = typeid_cast<const ColumnConst *>(actual_column))
                actual_column = &column_const->getDataColumn();
        }

        if (actual_column->getName() != expected.column->getName())
            return on_error("Block structure mismatch in " + context_description + " stream: different columns:\n"
                + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR);

@ -537,13 +546,25 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons

bool blocksHaveEqualStructure(const Block & lhs, const Block & rhs)
{
    return checkBlockStructure<bool>(lhs, rhs, {});
    return checkBlockStructure<bool>(lhs, rhs, {}, false);
}


void assertBlocksHaveEqualStructure(const Block & lhs, const Block & rhs, const std::string & context_description)
{
    checkBlockStructure<void>(lhs, rhs, context_description);
    checkBlockStructure<void>(lhs, rhs, context_description, false);
}


bool isCompatibleHeader(const Block & actual, const Block & desired)
{
    return checkBlockStructure<bool>(actual, desired, {}, true);
}


void assertCompatibleHeader(const Block & actual, const Block & desired, const std::string & context_description)
{
    checkBlockStructure<void>(actual, desired, context_description, true);
}
@ -184,6 +184,12 @@ bool blocksHaveEqualStructure(const Block & lhs, const Block & rhs);
/// Throw exception when blocks are different.
void assertBlocksHaveEqualStructure(const Block & lhs, const Block & rhs, const std::string & context_description);

/// Actual header is compatible to desired if block have equal structure except constants.
/// It is allowed when column from actual header is constant, but in desired is not.
/// If both columns are constant, it is checked that they have the same value.
bool isCompatibleHeader(const Block & actual, const Block & desired);
void assertCompatibleHeader(const Block & actual, const Block & desired, const std::string & context_description);

/// Calculate difference in structure of blocks and write description into output strings. NOTE It doesn't compare values of constant columns.
void getBlocksDifference(const Block & lhs, const Block & rhs, std::string & out_lhs_diff, std::string & out_rhs_diff);
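The new isCompatibleHeader()/assertCompatibleHeader() declared above relax the structure check: a column that is constant in the actual header may match a non-constant column in the desired header, because the comparison unwraps the constant down to its data column. A standalone sketch of that unwrap-then-compare rule with toy types; Column and columnsCompatible here are illustrative, not the ClickHouse IColumn API:

#include <iostream>
#include <memory>
#include <string>

// Toy column model: a column has a type name and may be a constant wrapping a data column.
struct Column
{
    std::string name;
    std::shared_ptr<Column> const_data; // non-null if this column is a constant
};

// Mirrors the checkBlockStructure(..., allow_remove_constants) rule for one column pair:
// if constants may be dropped and the expected column is not constant, compare the
// actual column's underlying data column instead of the constant wrapper.
static bool columnsCompatible(const Column & actual, const Column & expected, bool allow_remove_constants)
{
    const Column * actual_column = &actual;
    if (allow_remove_constants && !expected.const_data && actual.const_data)
        actual_column = actual.const_data.get();
    return actual_column->name == expected.name;
}

int main()
{
    Column data_column{"UInt64", nullptr};
    Column const_column{"Const(UInt64)", std::make_shared<Column>(data_column)};

    std::cout << columnsCompatible(const_column, data_column, false) << '\n'; // 0: strict check fails
    std::cout << columnsCompatible(const_column, data_column, true)  << '\n'; // 1: compatible header
}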
@ -96,7 +96,7 @@ template <typename T> bool decimalEqual(T x, T y, UInt32 x_scale, UInt32 y_scale
template <typename T> bool decimalLess(T x, T y, UInt32 x_scale, UInt32 y_scale);
template <typename T> bool decimalLessOrEqual(T x, T y, UInt32 x_scale, UInt32 y_scale);

#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -159,7 +159,7 @@ private:
    T dec;
    UInt32 scale;
};
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

@ -563,7 +563,7 @@ public:
    {
        case Types::Null: return f(field.template get<Null>());
// gcc 8.2.1
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -583,7 +583,7 @@ public:
        case Types::Int128: return f(field.template get<Int128>());
        case Types::UInt256: return f(field.template get<UInt256>());
        case Types::Int256: return f(field.template get<Int256>());
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
    }
@ -70,6 +70,7 @@ class IColumn;
    M(UInt64, connections_with_failover_max_tries, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES, "The maximum number of attempts to connect to replicas.", 0) \
    M(UInt64, s3_min_upload_part_size, 512*1024*1024, "The minimum size of part to upload during multipart upload to S3.", 0) \
    M(UInt64, s3_max_single_part_upload_size, 64*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
    M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
    M(UInt64, s3_max_redirects, 10, "Max number of S3 redirects hops allowed.", 0) \
    M(UInt64, s3_max_connections, 1024, "The maximum number of connections per server.", 0) \
    M(Bool, extremes, false, "Calculate minimums and maximums of the result columns. They can be output in JSON-formats.", IMPORTANT) \
@ -142,7 +143,7 @@ class IColumn;
    M(UInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... expr = xN` for optimization ", 0) \
    \
    M(UInt64, min_bytes_to_use_direct_io, 0, "The minimum number of bytes for reading the data with O_DIRECT option during SELECT queries execution. 0 - disabled.", 0) \
    M(UInt64, min_bytes_to_use_mmap_io, (64 * 1024 * 1024), "The minimum number of bytes for reading the data with mmap option during SELECT queries execution. 0 - disabled.", 0) \
    M(UInt64, min_bytes_to_use_mmap_io, 0, "The minimum number of bytes for reading the data with mmap option during SELECT queries execution. 0 - disabled.", 0) \
    M(Bool, checksum_on_read, true, "Validate checksums on reading. It is enabled by default and should be always enabled in production. Please do not expect any benefits in disabling this setting. It may only be used for experiments and benchmarks. The setting only applicable for tables of MergeTree family. Checksums are always validated for other table engines and when receiving data over network.", 0) \
    \
    M(Bool, force_index_by_date, 0, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
@ -224,6 +225,7 @@ class IColumn;
    /** Settings for testing hedged requests */ \
    M(Milliseconds, sleep_in_send_tables_status_ms, 0, "Time to sleep in sending tables status response in TCPHandler", 0) \
    M(Milliseconds, sleep_in_send_data_ms, 0, "Time to sleep in sending data in TCPHandler", 0) \
    M(UInt64, unknown_packet_in_send_data, 0, "Send unknown packet instead of data Nth data packet", 0) \
    \
    M(Bool, insert_allow_materialized_columns, 0, "If setting is enabled, Allow materialized columns in INSERT.", 0) \
    M(Seconds, http_connection_timeout, DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT, "HTTP connection timeout.", 0) \
@ -446,6 +448,8 @@ class IColumn;
    M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
    M(DistributedDDLOutputMode, distributed_ddl_output_mode, DistributedDDLOutputMode::THROW, "Format of distributed DDL query result", 0) \
    M(UInt64, distributed_ddl_entry_format_version, 1, "Version of DDL entry to write into ZooKeeper", 0) \
    M(UInt64, external_storage_max_read_rows, 0, "Limit maximum number of rows when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializeMySQL. If equal to 0, this setting is disabled", 0) \
    M(UInt64, external_storage_max_read_bytes, 0, "Limit maximum number of bytes when table with external engine should flush history data. Now supported only for MySQL table engine, database engine, dictionary and MaterializeMySQL. If equal to 0, this setting is disabled", 0) \
    \
    /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \
    \
@ -15,7 +15,7 @@ namespace DB
struct Null {};

/// Ignore strange gcc warning https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55776
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#endif
@ -59,7 +59,7 @@ enum class TypeIndex
    LowCardinality,
    Map,
};
#if !__clang__
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
Some files were not shown because too many files have changed in this diff.