Merge branch 'master' into remove-part-special-case

Alexey Milovidov 2020-04-17 06:14:15 +03:00
commit fb095ad787
316 changed files with 4488 additions and 1924 deletions

View File

@@ -11,10 +11,9 @@ ClickHouse is an open-source column-oriented database management system that all
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announces and reports about events.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
* You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person.
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.
## Upcoming Events
* [ClickHouse Monitoring Round Table (online in English)](https://www.eventbrite.com/e/clickhouse-april-virtual-meetup-tickets-102272923066) on April 15, 2020.
* [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date.
* [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date.

View File

@@ -3,8 +3,10 @@ if (USE_CLANG_TIDY)
endif ()
add_subdirectory (common)
add_subdirectory (loggers)
add_subdirectory (daemon)
add_subdirectory (loggers)
add_subdirectory (pcg-random)
add_subdirectory (widechar_width)
if (USE_MYSQL)
add_subdirectory (mysqlxx)

View File

@@ -11,6 +11,10 @@ using Int16 = int16_t;
using Int32 = int32_t;
using Int64 = int64_t;
#if __cplusplus <= 201703L
using char8_t = unsigned char;
#endif
using UInt8 = char8_t;
using UInt16 = uint16_t;
using UInt32 = uint32_t;

View File

@@ -1,12 +1,47 @@
LIBRARY()
ADDINCL(
GLOBAL clickhouse/base
contrib/libs/cctz/include
)
CFLAGS (GLOBAL -DARCADIA_BUILD)
IF (OS_DARWIN)
CFLAGS (GLOBAL -DOS_DARWIN)
ELSEIF (OS_FREEBSD)
CFLAGS (GLOBAL -DOS_FREEBSD)
ELSEIF (OS_LINUX)
CFLAGS (GLOBAL -DOS_LINUX)
ENDIF ()
PEERDIR(
contrib/libs/cctz/src
contrib/libs/cxxsupp/libcxx-filesystem
contrib/libs/poco/Net
contrib/libs/poco/Util
contrib/restricted/boost
contrib/restricted/cityhash-1.0.2
)
SRCS(
argsToConfig.cpp
coverage.cpp
DateLUT.cpp
DateLUTImpl.cpp
demangle.cpp
getFQDNOrHostName.cpp
getMemoryAmount.cpp
getThreadId.cpp
JSON.cpp
LineReader.cpp
mremap.cpp
phdr_cache.cpp
preciseExp10.c
setTerminalEcho.cpp
shift10.cpp
sleep.cpp
terminalColors.cpp
)
END()

View File

@@ -50,11 +50,13 @@
#include <Common/getMultipleKeysFromConfig.h>
#include <Common/ClickHouseRevision.h>
#include <Common/Config/ConfigProcessor.h>
#include <Common/config_version.h>
#ifdef __APPLE__
// ucontext is not available without _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#if !defined(ARCADIA_BUILD)
# include <Common/config_version.h>
#endif
#if defined(OS_DARWIN)
# define _XOPEN_SOURCE 700 // ucontext is not available without _XOPEN_SOURCE
#endif
#include <ucontext.h>
@@ -410,7 +412,7 @@ std::string BaseDaemon::getDefaultCorePath() const
void BaseDaemon::closeFDs()
{
#if defined(__FreeBSD__) || (defined(__APPLE__) && defined(__MACH__))
#if defined(OS_FREEBSD) || defined(OS_DARWIN)
Poco::File proc_path{"/dev/fd"};
#else
Poco::File proc_path{"/proc/self/fd"};
@@ -430,7 +432,7 @@ void BaseDaemon::closeFDs()
else
{
int max_fd = -1;
#ifdef _SC_OPEN_MAX
#if defined(_SC_OPEN_MAX)
max_fd = sysconf(_SC_OPEN_MAX);
if (max_fd == -1)
#endif
@@ -448,7 +450,7 @@ namespace
/// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore
/// whatever errors that occur, because it's just a debugging aid and we don't
/// care if it breaks.
#if defined(__linux__) && !defined(NDEBUG)
#if defined(OS_LINUX) && !defined(NDEBUG)
void debugIncreaseOOMScore()
{
const std::string new_score = "555";

base/daemon/ya.make Normal file
View File

@@ -0,0 +1,14 @@
LIBRARY()
NO_COMPILER_WARNINGS()
PEERDIR(
clickhouse/src/Common
)
SRCS(
BaseDaemon.cpp
GraphiteWriter.cpp
)
END()

base/loggers/ya.make Normal file
View File

@@ -0,0 +1,15 @@
LIBRARY()
PEERDIR(
clickhouse/src/Common
)
SRCS(
ExtendedLogChannel.cpp
Loggers.cpp
OwnFormattingChannel.cpp
OwnPatternFormatter.cpp
OwnSplitChannel.cpp
)
END()

View File

@@ -0,0 +1,2 @@
add_library(pcg_random INTERFACE)
target_include_directories(pcg_random INTERFACE .)

View File

@@ -292,7 +292,7 @@ inline itype rotl(itype value, bitcount_t rot)
{
constexpr bitcount_t bits = sizeof(itype) * 8;
constexpr bitcount_t mask = bits - 1;
#if PCG_USE_ZEROCHECK_ROTATE_IDIOM
#if defined(PCG_USE_ZEROCHECK_ROTATE_IDIOM)
return rot ? (value << rot) | (value >> (bits - rot)) : value;
#else
return (value << rot) | (value >> ((- rot) & mask));
@@ -304,7 +304,7 @@ inline itype rotr(itype value, bitcount_t rot)
{
constexpr bitcount_t bits = sizeof(itype) * 8;
constexpr bitcount_t mask = bits - 1;
#if PCG_USE_ZEROCHECK_ROTATE_IDIOM
#if defined(PCG_USE_ZEROCHECK_ROTATE_IDIOM)
return rot ? (value >> rot) | (value << (bits - rot)) : value;
#else
return (value >> rot) | (value << ((- rot) & mask));
@@ -318,7 +318,7 @@ inline itype rotr(itype value, bitcount_t rot)
*
* These overloads will be preferred over the general template code above.
*/
#if PCG_USE_INLINE_ASM && __GNUC__ && (__x86_64__ || __i386__)
#if defined(PCG_USE_INLINE_ASM) && __GNUC__ && (__x86_64__ || __i386__)
inline uint8_t rotr(uint8_t value, bitcount_t rot)
{
@@ -600,7 +600,7 @@ std::ostream& operator<<(std::ostream& out, printable_typename<T>) {
#ifdef __GNUC__
int status;
char* pretty_name =
abi::__cxa_demangle(implementation_typename, NULL, NULL, &status);
abi::__cxa_demangle(implementation_typename, nullptr, nullptr, &status);
if (status == 0)
out << pretty_name;
free(static_cast<void*>(pretty_name));

base/pcg-random/ya.make Normal file
View File

@@ -0,0 +1,5 @@
LIBRARY()
ADDINCL (GLOBAL clickhouse/base/pcg-random)
END()

View File

@@ -0,0 +1,9 @@
LIBRARY()
ADDINCL(GLOBAL clickhouse/base/widechar_width)
SRCS(
widechar_width.cpp
)
END()

View File

@@ -1,3 +1,7 @@
RECURSE(
common
daemon
loggers
pcg-random
widechar_width
)

View File

@@ -2,4 +2,3 @@ set(DIVIDE_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libdivide)
set(DBMS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/src ${ClickHouse_BINARY_DIR}/src)
set(DOUBLE_CONVERSION_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/double-conversion)
set(METROHASH_CONTRIB_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libmetrohash/src)
set(PCG_RANDOM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/libpcg-random/include)

View File

@@ -333,6 +333,5 @@ add_subdirectory(grpc-cmake)
add_subdirectory(replxx-cmake)
add_subdirectory(FastMemcpy)
add_subdirectory(widecharwidth)
add_subdirectory(consistent-hashing)
add_subdirectory(consistent-hashing-sumbur)

View File

@@ -1,52 +0,0 @@
# PCG Random Number Generation, C++ Edition
[PCG-Random website]: http://www.pcg-random.org
This code provides an implementation of the PCG family of random number
generators, which are fast, statistically excellent, and offer a number of
useful features.
Full details can be found at the [PCG-Random website]. This version
of the code provides many family members -- if you just want one
simple generator, you may prefer the minimal C version of the library.
There are two kinds of generator, normal generators and extended generators.
Extended generators provide *k* dimensional equidistribution and can perform
party tricks, but generally speaking most people only need the normal
generators.
There are two ways to access the generators, using a convenience typedef
or by using the underlying templates directly (similar to C++11's `std::mt19937` typedef vs its `std::mersenne_twister_engine` template). For most users, the convenience typedef is what you want, and probably you're fine with `pcg32` for 32-bit numbers. If you want 64-bit numbers, either use `pcg64` (or, if you're on a 32-bit system, making 64 bits from two calls to `pcg32_k2` may be faster).
## Documentation and Examples
Visit [PCG-Random website] for information on how to use this library, or look
at the sample code in the `sample` directory -- hopefully it should be fairly
self explanatory.
## Building
The code is written in C++11, as an include-only library (i.e., there is
nothing you need to build). There are some provided demo programs and tests
however. On a Unix-style system (e.g., Linux, Mac OS X) you should be able
to just type
make
To build the demo programs.
## Testing
Run
make test
## Directory Structure
The directories are arranged as follows:
* `include` -- contains `pcg_random.hpp` and supporting include files
* `test-high` -- test code for the high-level API where the functions have
shorter, less scary-looking names.
* `sample` -- sample code, some similar to the code in `test-high` but more
human readable, some other examples too

View File

@@ -1,10 +1,10 @@
{
"docker/packager/deb": "yandex/clickhouse-deb-builder",
"docker/packager/binary": "yandex/clickhouse-binary-builder",
"docker/test/coverage": "yandex/clickhouse-coverage",
"docker/test/compatibility/centos": "yandex/clickhouse-test-old-centos",
"docker/test/compatibility/ubuntu": "yandex/clickhouse-test-old-ubuntu",
"docker/test/integration": "yandex/clickhouse-integration-test",
"docker/test/performance": "yandex/clickhouse-performance-test",
"docker/test/performance-comparison": "yandex/clickhouse-performance-comparison",
"docker/test/pvs": "yandex/clickhouse-pvs-test",
"docker/test/stateful": "yandex/clickhouse-stateful-test",
@@ -14,5 +14,6 @@
"docker/test/unit": "yandex/clickhouse-unit-test",
"docker/test/stress": "yandex/clickhouse-stress-test",
"docker/test/split_build_smoke_test": "yandex/clickhouse-split-build-smoke-test",
"docker/test/codebrowser": "yandex/clickhouse-codebrowser",
"tests/integration/image": "yandex/clickhouse-integration-tests-runner"
}

View File

@@ -87,9 +87,6 @@ git -C ch diff --name-only "$SHA_TO_TEST" "$(git -C ch merge-base "$SHA_TO_TEST"
# Set python output encoding so that we can print queries with Russian letters.
export PYTHONIOENCODING=utf-8
# Use a default number of runs if not told otherwise
export CHPC_RUNS=${CHPC_RUNS:-7}
# By default, use the main comparison script from the tested package, so that we
# can change it in PRs.
script_path="right/scripts"

View File

@@ -25,7 +25,7 @@ parser = argparse.ArgumentParser(description='Run performance test.')
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.")
parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.')
parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 11)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.')
parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.')
args = parser.parse_args()

View File

@@ -14,6 +14,11 @@ kill_clickhouse () {
sleep 10
fi
done
echo "Will try to send second kill signal for sure"
kill `pgrep -u clickhouse` 2>/dev/null
sleep 5
echo "clickhouse pids" `ps aux | grep clickhouse` | ts '%Y-%m-%d %H:%M:%S'
}
start_clickhouse () {
@@ -50,6 +55,7 @@ ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/con
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \

View File

@@ -1,3 +1,8 @@
---
toc_title: Cloud
toc_priority: 1
---
# ClickHouse Cloud Service Providers {#clickhouse-cloud-service-providers}
!!! info "Info"

View File

@@ -0,0 +1,21 @@
---
toc_title: Support
toc_priority: 3
---
# ClickHouse Commercial Support Service Providers {#clickhouse-commercial-support-service-providers}
!!! info "Info"
If you have launched a ClickHouse commercial support service, feel free to [open a pull-request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) adding it to the following list.
## Altinity {#altinity}
[Service description](https://www.altinity.com/24x7-support)
## Mafiree {#mafiree}
[Service description](http://mafiree.com/clickhouse-analytics-services.php)
## MinervaDB {#minervadb}
[Service description](https://minervadb.com/index.php/clickhouse-consulting-and-support-by-minervadb/)

View File

@@ -1,5 +1,5 @@
---
toc_folder_title: Development
toc_folder_title: Разработка
toc_hidden: true
toc_priority: 58
toc_title: hidden

View File

@@ -38,7 +38,7 @@ sudo apt-get update
sudo apt-get install clickhouse-client clickhouse-server
```
You can also download and install packages manually from here: https://repo.yandex.ru/clickhouse/deb/stable/main/.
You can also download and install packages manually from [here](https://repo.yandex.ru/clickhouse/deb/stable/main/).
#### Packages {#packages}
@@ -67,7 +67,7 @@ Then run these commands to install packages:
sudo yum install clickhouse-server clickhouse-client
```
You can also download and install packages manually from here: https://repo.clickhouse.tech/rpm/stable/x86\_64.
You can also download and install packages manually from [here](https://repo.clickhouse.tech/rpm/stable/x86_64).
### From Tgz Archives {#from-tgz-archives}

View File

@@ -78,48 +78,6 @@ See the difference?
For example, the query “count the number of records for each advertising platform” requires reading one “advertising platform ID” column, which takes up 1 byte uncompressed. If most of the traffic was not from advertising platforms, you can expect at least 10-fold compression of this column. When using a quick compression algorithm, data decompression is possible at a speed of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a speed of approximately several billion rows per second on a single server. This speed is actually achieved in practice.
<details markdown="1">
<summary>Example</summary>
``` bash
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
```
``` sql
SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
```
``` text
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
```
</details>
### CPU {#cpu}
Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you don't do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense to both store data in columns and process it, when possible, by columns.

View File

@@ -24,7 +24,10 @@ toc_title: Integrations
- [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator)
- Message queues
- [Kafka](https://kafka.apache.org)
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/kshvakov/clickhouse/))
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/ClickHouse/clickhouse-go/))
- Stream processing
- [Flink](https://flink.apache.org)
- [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
- Object storages
- [S3](https://en.wikipedia.org/wiki/Amazon_S3)
- [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
@@ -72,6 +75,9 @@ toc_title: Integrations
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pandas](https://pandas.pydata.org)
- [pandahouse](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [dplyr](https://db.rstudio.com/dplyr/)
- [RClickHouse](https://github.com/IMSMWU/RClickHouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))

View File

@@ -77,5 +77,9 @@ toc_title: Adopters
| [МКБ](https://mkb.ru/) | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
| [金数据](https://jinshuju.net) | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
| [Instana](https://www.instana.com) | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) |
| [Wargaming](https://wargaming.com/en/) | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |
| [Crazypanda](https://crazypanda.ru/en/) | Games | | — | — | Live session on ClickHouse meetup |
| [FunCorp](https://fun.co/rp) | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
[Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->

View File

@@ -5,6 +5,6 @@ toc_title: Set
# Set {#set}
Used for the right half of an [IN](../../../sql_reference/statements/select.md#select-in-operators) expression.
Used for the right half of an [IN](../../statements/select.md#select-in-operators) expression.
[Original article](https://clickhouse.tech/docs/en/data_types/special_data_types/set/) <!--hide-->

View File

@@ -248,7 +248,7 @@ Here, a sample of 10% is taken from the second half of the data.
### ARRAY JOIN Clause {#select-array-join-clause}
Allows executing `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](../../sql_reference/functions/array_join.md#functions_arrayjoin) function, but its functionality is broader.
Allows executing `JOIN` with an array or nested data structure. The intent is similar to the [arrayJoin](../functions/array_join.md#functions_arrayjoin) function, but its functionality is broader.
``` sql
SELECT <expr_list>
@@ -602,7 +602,777 @@ USING (equi_column1, ... equi_columnN, asof_column)
For example, consider the following tables:
\`\`\` text
table\_1 table\_2
table_1 table_2
event | ev_time | user_id event | ev_time | user_id
----------|---------|---------- ----------|---------|----------
... ...
event_1_1 | 12:00 | 42 event_2_1 | 11:59 | 42
... event_2_2 | 12:30 | 42
event_1_2 | 13:00 | 42 event_2_3 | 13:00 | 42
... ...
event \| ev\_time \| user\_id event \| ev\_time \| user\_id
`ASOF JOIN` can take the timestamp of a user event from `table_1` and find an event in `table_2` where the timestamp is closest to the timestamp of the event from `table_1` corresponding to the closest match condition. Equal timestamp values are the closest if available. Here, the `user_id` column can be used for joining on equality and the `ev_time` column can be used for joining on the closest match. In our example, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` can't be joined.
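For illustration, a minimal sketch of such a query over the two tables above, written with the `USING` form shown earlier in this section (the matching follows the rule described in the previous paragraph):

``` sql
SELECT *
FROM table_1
ASOF JOIN table_2 USING (user_id, ev_time)
```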
!!! note "Note"
`ASOF` join is **not** supported in the [Join](../../engines/table_engines/special/join.md) table engine.
To set the default strictness value, use the session configuration parameter [join\_default\_strictness](../../operations/settings/settings.md#settings-join_default_strictness).
#### GLOBAL JOIN {#global-join}
When using a normal `JOIN`, the query is sent to remote servers. Subqueries are run on each of them in order to make the right table, and the join is performed with this table. In other words, the right table is formed on each server separately.
When using `GLOBAL ... JOIN`, first the requestor server runs a subquery to calculate the right table. This temporary table is passed to each remote server, and queries are run on them using the temporary data that was transmitted.
Be careful when using `GLOBAL`. For more information, see the section [Distributed subqueries](#select-distributed-subqueries).
#### Usage Recommendations {#usage-recommendations}
When running a `JOIN`, there is no optimization of the order of execution in relation to other stages of the query. The join (a search in the right table) is run before filtering in `WHERE` and before aggregation. In order to explicitly set the processing order, we recommend running a `JOIN` subquery with a subquery.
Example:
``` sql
SELECT
CounterID,
hits,
visits
FROM
(
SELECT
CounterID,
count() AS hits
FROM test.hits
GROUP BY CounterID
) ANY LEFT JOIN
(
SELECT
CounterID,
sum(Sign) AS visits
FROM test.visits
GROUP BY CounterID
) USING CounterID
ORDER BY hits DESC
LIMIT 10
```
``` text
┌─CounterID─┬───hits─┬─visits─┐
│ 1143050 │ 523264 │ 13665 │
│ 731962 │ 475698 │ 102716 │
│ 722545 │ 337212 │ 108187 │
│ 722889 │ 252197 │ 10547 │
│ 2237260 │ 196036 │ 9522 │
│ 23057320 │ 147211 │ 7689 │
│ 722818 │ 90109 │ 17847 │
│ 48221 │ 85379 │ 4652 │
│ 19762435 │ 77807 │ 7026 │
│ 722884 │ 77492 │ 11056 │
└───────────┴────────┴────────┘
```
Subqueries don't allow you to set names or use them for referencing a column from a specific subquery.
The columns specified in `USING` must have the same names in both subqueries, and the other columns must be named differently. You can use aliases to change the names of columns in subqueries (the example uses the aliases `hits` and `visits`).
The `USING` clause specifies one or more columns to join, which establishes the equality of these columns. The list of columns is set without brackets. More complex join conditions are not supported.
The right table (the subquery result) resides in RAM. If there isn't enough memory, you can't run a `JOIN`.
Each time a query is run with the same `JOIN`, the subquery is run again because the result is not cached. To avoid this, use the special [Join](../../engines/table_engines/special/join.md) table engine, which is a prepared array for joining that is always in RAM.
In some cases, it is more efficient to use `IN` instead of `JOIN`.
Among the various types of `JOIN`, the most efficient is `ANY LEFT JOIN`, then `ANY INNER JOIN`. The least efficient are `ALL LEFT JOIN` and `ALL INNER JOIN`.
If you need a `JOIN` for joining with dimension tables (these are relatively small tables that contain dimension properties, such as names for advertising campaigns), a `JOIN` might not be very convenient due to the fact that the right table is re-accessed for every query. For such cases, there is an “external dictionaries” feature that you should use instead of `JOIN`. For more information, see the section [External dictionaries](../dictionaries/external_dictionaries/external_dicts.md).
**Memory Limitations**
ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the `<right_subquery>` and creates a hash table for it in RAM. If you need to restrict join operation memory consumption use the following settings:
- [max\_rows\_in\_join](../../operations/settings/query_complexity.md#settings-max_rows_in_join) — Limits number of rows in the hash table.
- [max\_bytes\_in\_join](../../operations/settings/query_complexity.md#settings-max_bytes_in_join) — Limits size of the hash table.
When any of these limits is reached, ClickHouse acts as the [join\_overflow\_mode](../../operations/settings/query_complexity.md#settings-join_overflow_mode) setting instructs.
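A hedged sketch of applying these settings before a heavy join; the numeric values are illustrative only:

``` sql
-- Limit the hash table built for the right side of the JOIN.
SET max_rows_in_join = 100000000;
SET max_bytes_in_join = 10000000000;
-- What to do when a limit is reached: 'throw' (the default) or 'break'.
SET join_overflow_mode = 'break';
```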
#### Processing of Empty or NULL Cells {#processing-of-empty-or-null-cells}
While joining tables, the empty cells may appear. The setting [join\_use\_nulls](../../operations/settings/settings.md#join_use_nulls) define how ClickHouse fills these cells.
If the `JOIN` keys are [Nullable](../data_types/nullable.md) fields, the rows where at least one of the keys has the value [NULL](../syntax.md#null-literal) are not joined.
#### Syntax Limitations {#syntax-limitations}
For multiple `JOIN` clauses in a single `SELECT` query:
- Taking all the columns via `*` is available only if tables are joined, not subqueries.
- The `PREWHERE` clause is not available.
For `ON`, `WHERE`, and `GROUP BY` clauses:
- Arbitrary expressions cannot be used in `ON`, `WHERE`, and `GROUP BY` clauses, but you can define an expression in a `SELECT` clause and then use it in these clauses via an alias.
### WHERE Clause {#select-where}
If there is a WHERE clause, it must contain an expression with the UInt8 type. This is usually an expression with comparison and logical operators.
This expression will be used for filtering data before all other transformations.
If indexes are supported by the database table engine, the expression is evaluated on the ability to use indexes.
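A minimal sketch, using the `hits` table and columns that appear in other examples on this page:

``` sql
SELECT count()
FROM hits
WHERE CounterID = 34 AND EventDate >= toDate('2014-03-17')
```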
### PREWHERE Clause {#prewhere-clause}
This clause has the same meaning as the WHERE clause. The difference is in which data is read from the table.
When using PREWHERE, first only the columns necessary for executing PREWHERE are read. Then the other columns are read that are needed for running the query, but only those blocks where the PREWHERE expression is true.
It makes sense to use PREWHERE if there are filtration conditions that are used by a minority of the columns in the query, but that provide strong data filtration. This reduces the volume of data to read.
For example, it is useful to write PREWHERE for queries that extract a large number of columns, but that only have filtration for a few columns.
PREWHERE is only supported by tables from the `*MergeTree` family.
A query may simultaneously specify PREWHERE and WHERE. In this case, PREWHERE precedes WHERE.
If the optimize\_move\_to\_prewhere setting is set to 1 and PREWHERE is omitted, the system uses heuristics to automatically move parts of expressions from WHERE to PREWHERE.
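A hedged sketch combining PREWHERE and WHERE on the `hits` table used in other examples here (assuming it belongs to the `*MergeTree` family):

``` sql
SELECT URL, Title
FROM hits
PREWHERE CounterID = 34                  -- cheap, selective filter: only CounterID is read first
WHERE EventDate >= toDate('2014-03-17')  -- applied after the remaining columns are read
```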
### GROUP BY Clause {#select-group-by-clause}
This is one of the most important parts of a column-oriented DBMS.
If there is a GROUP BY clause, it must contain a list of expressions. Each expression will be referred to here as a “key”.
All the expressions in the SELECT, HAVING, and ORDER BY clauses must be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions.
If a query contains only table columns inside aggregate functions, the GROUP BY clause can be omitted, and aggregation by an empty set of keys is assumed.
Example:
``` sql
SELECT
count(),
median(FetchTiming > 60 ? 60 : FetchTiming),
count() - sum(Refresh)
FROM hits
```
However, in contrast to standard SQL, if the table doesn't have any rows (either there aren't any at all, or there aren't any after using WHERE to filter), an empty result is returned, and not the result from one of the rows containing the initial values of aggregate functions.
As opposed to MySQL (and conforming to standard SQL), you can't get some value of some column that is not in a key or aggregate function (except constant expressions). To work around this, you can use the 'any' aggregate function (get the first encountered value) or min/max.
Example:
``` sql
SELECT
domainWithoutWWW(URL) AS domain,
count(),
any(Title) AS title -- getting the first occurred page header for each domain.
FROM hits
GROUP BY domain
```
For every different key value encountered, GROUP BY calculates a set of aggregate function values.
GROUP BY is not supported for array columns.
A constant can't be specified as arguments for aggregate functions. Example: sum(1). Instead of this, you can get rid of the constant. Example: `count()`.
#### NULL processing {#null-processing}
For grouping, ClickHouse interprets [NULL](../syntax.md) as a value, and `NULL=NULL`.
Here's an example to show what this means.
Assume you have this table:
``` text
┌─x─┬────y─┐
│ 1 │ 2 │
│ 2 │ ᴺᵁᴸᴸ │
│ 3 │ 2 │
│ 3 │ 3 │
│ 3 │ ᴺᵁᴸᴸ │
└───┴──────┘
```
The query `SELECT sum(x), y FROM t_null_big GROUP BY y` results in:
``` text
┌─sum(x)─┬────y─┐
│ 4 │ 2 │
│ 3 │ 3 │
│ 5 │ ᴺᵁᴸᴸ │
└────────┴──────┘
```
You can see that `GROUP BY` for `y = NULL` summed up `x`, as if `NULL` is this value.
If you pass several keys to `GROUP BY`, the result will give you all the combinations of the selection, as if `NULL` were a specific value.
#### WITH TOTALS Modifier {#with-totals-modifier}
If the WITH TOTALS modifier is specified, another row will be calculated. This row will have key columns containing default values (zeros or empty lines), and columns of aggregate functions with the values calculated across all the rows (the “total” values).
This extra row is output in JSON\*, TabSeparated\*, and Pretty\* formats, separately from the other rows. In the other formats, this row is not output.
In JSON\* formats, this row is output as a separate totals field. In TabSeparated\* formats, the row comes after the main result, preceded by an empty row (after the other data). In Pretty\* formats, the row is output as a separate table after the main result.
`WITH TOTALS` can be run in different ways when HAVING is present. The behavior depends on the totals\_mode setting.
By default, `totals_mode = 'before_having'`. In this case, totals is calculated across all rows, including the ones that don't pass through HAVING and max\_rows\_to\_group\_by.
The other alternatives include only the rows that pass through HAVING in totals, and behave differently with the setting `max_rows_to_group_by` and `group_by_overflow_mode = 'any'`.
`after_having_exclusive` Don't include rows that didn't pass through `max_rows_to_group_by`. In other words, totals will have less than or the same number of rows as it would if `max_rows_to_group_by` were omitted.
`after_having_inclusive` Include all the rows that didn't pass through max\_rows\_to\_group\_by in totals. In other words, totals will have more than or the same number of rows as it would if `max_rows_to_group_by` were omitted.
`after_having_auto` Count the number of rows that passed through HAVING. If it is more than a certain amount (by default, 50%), include all the rows that didn't pass through max\_rows\_to\_group\_by in totals. Otherwise, do not include them.
`totals_auto_threshold` By default, 0.5. The coefficient for `after_having_auto`.
If `max_rows_to_group_by` and `group_by_overflow_mode = 'any'` are not used, all variations of `after_having` are the same, and you can use any of them (for example, `after_having_auto`).
You can use WITH TOTALS in subqueries, including subqueries in the JOIN clause (in this case, the respective total values are combined).
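A short sketch, again assuming the `hits` table from the other examples:

``` sql
SELECT domainWithoutWWW(URL) AS domain, count() AS c
FROM hits
GROUP BY domain WITH TOTALS
HAVING c > 100000
ORDER BY c DESC
LIMIT 10
```

With the default `totals_mode = 'before_having'`, the extra totals row counts all rows, not only those that pass `HAVING`.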
#### GROUP BY in External Memory {#select-group-by-in-external-memory}
You can enable dumping temporary data to the disk to restrict memory usage during `GROUP BY`.
The [max\_bytes\_before\_external\_group\_by](../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the threshold RAM consumption for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled.
When using `max_bytes_before_external_group_by`, we recommend that you set `max_memory_usage` about twice as high. This is necessary because there are two stages to aggregation: reading the data and forming intermediate data (1) and merging the intermediate data (2). Dumping data to the file system can only occur during stage 1. If the temporary data wasn't dumped, then stage 2 might require up to the same amount of memory as in stage 1.
For example, if [max\_memory\_usage](../../operations/settings/settings.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000, and max\_memory\_usage to 20000000000. When external aggregation is triggered (if there was at least one dump of temporary data), maximum consumption of RAM is only slightly more than `max_bytes_before_external_group_by`.
With distributed query processing, external aggregation is performed on remote servers. In order for the requester server to use only a small amount of RAM, set `distributed_aggregation_memory_efficient` to 1.
When merging data flushed to the disk, as well as when merging results from remote servers when the `distributed_aggregation_memory_efficient` setting is enabled, consumes up to `1/256 * the_number_of_threads` from the total amount of RAM.
When external aggregation is enabled, if there was less than `max_bytes_before_external_group_by` of data (i.e. data was not flushed), the query runs just as fast as without external aggregation. If any temporary data was flushed, the run time will be several times longer (approximately three times).
If you have an `ORDER BY` with a `LIMIT` after `GROUP BY`, then the amount of used RAM depends on the amount of data in `LIMIT`, not in the whole table. But if the `ORDER BY` doesn't have `LIMIT`, don't forget to enable external sorting (`max_bytes_before_external_sort`).
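A sketch of the settings described above, with the values from the example in the text:

``` sql
SET max_bytes_before_external_group_by = 10000000000;
SET max_memory_usage = 20000000000;

SELECT UserID, count() AS c
FROM hits
GROUP BY UserID
```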
### LIMIT BY Clause {#limit-by-clause}
A query with the `LIMIT n BY expressions` clause selects the first `n` rows for each distinct value of `expressions`. The key for `LIMIT BY` can contain any number of [expressions](../syntax.md#syntax-expressions).
ClickHouse supports the following syntax:
- `LIMIT [offset_value, ]n BY expressions`
- `LIMIT n OFFSET offset_value BY expressions`
During query processing, ClickHouse selects data ordered by sorting key. The sorting key is set explicitly using an [ORDER BY](#select-order-by) clause or implicitly as a property of the table engine. Then ClickHouse applies `LIMIT n BY expressions` and returns the first `n` rows for each distinct combination of `expressions`. If `OFFSET` is specified, then for each data block that belongs to a distinct combination of `expressions`, ClickHouse skips `offset_value` number of rows from the beginning of the block and returns a maximum of `n` rows as a result. If `offset_value` is bigger than the number of rows in the data block, ClickHouse returns zero rows from the block.
`LIMIT BY` is not related to `LIMIT`. They can both be used in the same query.
**Examples**
Sample table:
``` sql
CREATE TABLE limit_by(id Int, val Int) ENGINE = Memory;
INSERT INTO limit_by values(1, 10), (1, 11), (1, 12), (2, 20), (2, 21);
```
Queries:
``` sql
SELECT * FROM limit_by ORDER BY id, val LIMIT 2 BY id
```
``` text
┌─id─┬─val─┐
│ 1 │ 10 │
│ 1 │ 11 │
│ 2 │ 20 │
│ 2 │ 21 │
└────┴─────┘
```
``` sql
SELECT * FROM limit_by ORDER BY id, val LIMIT 1, 2 BY id
```
``` text
┌─id─┬─val─┐
│ 1 │ 11 │
│ 1 │ 12 │
│ 2 │ 21 │
└────┴─────┘
```
The `SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id` query returns the same result.
The following query returns the top 5 referrers for each `domain, device_type` pair with a maximum of 100 rows in total (`LIMIT n BY + LIMIT`).
``` sql
SELECT
domainWithoutWWW(URL) AS domain,
domainWithoutWWW(REFERRER_URL) AS referrer,
device_type,
count() cnt
FROM hits
GROUP BY domain, referrer, device_type
ORDER BY cnt DESC
LIMIT 5 BY domain, device_type
LIMIT 100
```
### HAVING Clause {#having-clause}
Allows filtering the result received after GROUP BY, similar to the WHERE clause.
WHERE and HAVING differ in that WHERE is performed before aggregation (GROUP BY), while HAVING is performed after it.
If aggregation is not performed, HAVING can't be used.
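A minimal sketch, filtering aggregated values that a WHERE clause could not see:

``` sql
SELECT CounterID, count() AS hits
FROM hits
GROUP BY CounterID
HAVING hits > 100000
```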
### ORDER BY Clause {#select-order-by}
The ORDER BY clause contains a list of expressions, which can each be assigned DESC or ASC (the sorting direction). If the direction is not specified, ASC is assumed. ASC is sorted in ascending order, and DESC in descending order. The sorting direction applies to a single expression, not to the entire list. Example: `ORDER BY Visits DESC, SearchPhrase`
For sorting by String values, you can specify collation (comparison). Example: `ORDER BY SearchPhrase COLLATE 'tr'` - for sorting by keyword in ascending order, using the Turkish alphabet, case insensitive, assuming that strings are UTF-8 encoded. COLLATE can be specified or not for each expression in ORDER BY independently. If ASC or DESC is specified, COLLATE is specified after it. When using COLLATE, sorting is always case-insensitive.
We only recommend using COLLATE for final sorting of a small number of rows, since sorting with COLLATE is less efficient than normal sorting by bytes.
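A short sketch of the collation syntax described above (the `hits` table and `SearchPhrase` column are the ones used in other examples):

``` sql
SELECT SearchPhrase
FROM hits
ORDER BY SearchPhrase ASC COLLATE 'tr'
LIMIT 100
```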
Rows that have identical values for the list of sorting expressions are output in an arbitrary order, which can also be nondeterministic (different each time).
If the ORDER BY clause is omitted, the order of the rows is also undefined, and may be nondeterministic as well.
`NaN` and `NULL` sorting order:
- With the modifier `NULLS FIRST` — First `NULL`, then `NaN`, then other values.
- With the modifier `NULLS LAST` — First the values, then `NaN`, then `NULL`.
- Default — The same as with the `NULLS LAST` modifier.
Example:
For the table
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 2 │
│ 1 │ nan │
│ 2 │ 2 │
│ 3 │ 4 │
│ 5 │ 6 │
│ 6 │ nan │
│ 7 │ ᴺᵁᴸᴸ │
│ 6 │ 7 │
│ 8 │ 9 │
└───┴──────┘
```
Run the query `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST` to get:
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 7 │ ᴺᵁᴸᴸ │
│ 1 │ nan │
│ 6 │ nan │
│ 2 │ 2 │
│ 2 │ 2 │
│ 3 │ 4 │
│ 5 │ 6 │
│ 6 │ 7 │
│ 8 │ 9 │
└───┴──────┘
```
When floating point numbers are sorted, NaNs are separate from the other values. Regardless of the sorting order, NaNs come at the end. In other words, for ascending sorting they are placed as if they are larger than all the other numbers, while for descending sorting they are placed as if they are smaller than the rest.
Less RAM is used if a small enough LIMIT is specified in addition to ORDER BY. Otherwise, the amount of memory spent is proportional to the volume of data for sorting. For distributed query processing, if GROUP BY is omitted, sorting is partially done on remote servers, and the results are merged on the requestor server. This means that for distributed sorting, the volume of data to sort can be greater than the amount of memory on a single server.
If there is not enough RAM, it is possible to perform sorting in external memory (creating temporary files on a disk). Use the setting `max_bytes_before_external_sort` for this purpose. If it is set to 0 (the default), external sorting is disabled. If it is enabled, when the volume of data to sort reaches the specified number of bytes, the collected data is sorted and dumped into a temporary file. After all data is read, all the sorted files are merged and the results are output. Files are written to the /var/lib/clickhouse/tmp/ directory in the config (by default, but you can use the tmp\_path parameter to change this setting).
Running a query may use more memory than max\_bytes\_before\_external\_sort. For this reason, this setting must have a value significantly smaller than max\_memory\_usage. As an example, if your server has 128 GB of RAM and you need to run a single query, set max\_memory\_usage to 100 GB, and max\_bytes\_before\_external\_sort to 80 GB.
External sorting works much less effectively than sorting in RAM.
### SELECT Clause {#select-select}
[Expressions](../syntax.md#syntax-expressions) specified in the `SELECT` clause are calculated after all the operations in the clauses described above are finished. These expressions work as if they apply to separate rows in the result. If expressions in the `SELECT` clause contain aggregate functions, then ClickHouse processes aggregate functions and expressions used as their arguments during the [GROUP BY](#select-group-by-clause) aggregation.
If you want to include all columns in the result, use the asterisk (`*`) symbol. For example, `SELECT * FROM ...`.
To match some columns in the result with a [re2](https://en.wikipedia.org/wiki/RE2_(software)) regular expression, you can use the `COLUMNS` expression.
``` sql
COLUMNS('regexp')
```
For example, consider the table:
``` sql
CREATE TABLE default.col_names (aa Int8, ab Int8, bc Int8) ENGINE = TinyLog
```
The following query selects data from all the columns containing the `a` symbol in their name.
``` sql
SELECT COLUMNS('a') FROM col_names
```
``` text
┌─aa─┬─ab─┐
│ 1 │ 1 │
└────┴────┘
```
The selected columns are returned not in the alphabetical order.
You can use multiple `COLUMNS` expressions in a query and apply functions to them.
For example:
``` sql
SELECT COLUMNS('a'), COLUMNS('c'), toTypeName(COLUMNS('c')) FROM col_names
```
``` text
┌─aa─┬─ab─┬─bc─┬─toTypeName(bc)─┐
│ 1 │ 1 │ 1 │ Int8 │
└────┴────┴────┴────────────────┘
```
Each column returned by the `COLUMNS` expression is passed to the function as a separate argument. Also you can pass other arguments to the function if it supports them. Be careful when using functions. If a function doesn't support the number of arguments you have passed to it, ClickHouse throws an exception.
For example:
``` sql
SELECT COLUMNS('a') + COLUMNS('c') FROM col_names
```
``` text
Received exception from server (version 19.14.1):
Code: 42. DB::Exception: Received from localhost:9000. DB::Exception: Number of arguments for function plus doesn't match: passed 3, should be 2.
```
In this example, `COLUMNS('a')` returns two columns: `aa` and `ab`. `COLUMNS('c')` returns the `bc` column. The `+` operator can't apply to 3 arguments, so ClickHouse throws an exception with the relevant message.
Columns that matched the `COLUMNS` expression can have different data types. If `COLUMNS` doesn't match any columns and is the only expression in `SELECT`, ClickHouse throws an exception.
### DISTINCT Clause {#select-distinct}
If DISTINCT is specified, only a single row will remain out of all the sets of fully matching rows in the result.
The result will be the same as if GROUP BY were specified across all the fields specified in SELECT without aggregate functions. But there are several differences from GROUP BY:
- DISTINCT can be applied together with GROUP BY.
- When ORDER BY is omitted and LIMIT is defined, the query stops running immediately after the required number of different rows has been read.
- Data blocks are output as they are processed, without waiting for the entire query to finish running.
DISTINCT is not supported if SELECT has at least one array column.
`DISTINCT` works with [NULL](../syntax.md) as if `NULL` were a specific value, and `NULL=NULL`. In other words, in the `DISTINCT` results, different combinations with `NULL` only occur once.
ClickHouse supports using the `DISTINCT` and `ORDER BY` clauses for different columns in one query. The `DISTINCT` clause is executed before the `ORDER BY` clause.
Example table:
``` text
┌─a─┬─b─┐
│ 2 │ 1 │
│ 1 │ 2 │
│ 3 │ 3 │
│ 2 │ 4 │
└───┴───┘
```
When selecting data with the `SELECT DISTINCT a FROM t1 ORDER BY b ASC` query, we get the following result:
``` text
┌─a─┐
│ 2 │
│ 1 │
│ 3 │
└───┘
```
If we change the sorting direction `SELECT DISTINCT a FROM t1 ORDER BY b DESC`, we get the following result:
``` text
┌─a─┐
│ 3 │
│ 1 │
│ 2 │
└───┘
```
Row `2, 4` was cut before sorting.
Take this implementation specificity into account when programming queries.
### LIMIT Clause {#limit-clause}
`LIMIT m` allows you to select the first `m` rows from the result.
`LIMIT n, m` allows you to select the first `m` rows from the result after skipping the first `n` rows. The `LIMIT m OFFSET n` syntax is also supported.
`n` and `m` must be non-negative integers.
If there isn't an `ORDER BY` clause that explicitly sorts results, the result may be arbitrary and nondeterministic.
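A minimal sketch; the `ORDER BY` makes the selection deterministic, as noted above:

``` sql
SELECT UserID
FROM hits
ORDER BY UserID
LIMIT 100, 10    -- equivalent to LIMIT 10 OFFSET 100
```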
### UNION ALL Clause {#union-all-clause}
You can use UNION ALL to combine any number of queries. Example:
``` sql
SELECT CounterID, 1 AS table, toInt64(count()) AS c
FROM test.hits
GROUP BY CounterID
UNION ALL
SELECT CounterID, 2 AS table, sum(Sign) AS c
FROM test.visits
GROUP BY CounterID
HAVING c > 0
```
Only UNION ALL is supported. The regular UNION (UNION DISTINCT) is not supported. If you need UNION DISTINCT, you can write SELECT DISTINCT from a subquery containing UNION ALL.
Queries that are parts of UNION ALL can be run simultaneously, and their results can be mixed together.
The structure of results (the number and type of columns) must match for the queries. But the column names can differ. In this case, the column names for the final result will be taken from the first query. Type casting is performed for unions. For example, if two queries being combined have the same field with non-`Nullable` and `Nullable` types from a compatible type, the resulting `UNION ALL` has a `Nullable` type field.
Queries that are parts of UNION ALL can't be enclosed in brackets. ORDER BY and LIMIT are applied to separate queries, not to the final result. If you need to apply a conversion to the final result, you can put all the queries with UNION ALL in a subquery in the FROM clause.
### INTO OUTFILE Clause {#into-outfile-clause}
Add the `INTO OUTFILE filename` clause (where filename is a string literal) to redirect query output to the specified file.
In contrast to MySQL, the file is created on the client side. The query will fail if a file with the same filename already exists.
This functionality is available in the command-line client and clickhouse-local (a query sent via HTTP interface will fail).
The default output format is TabSeparated (the same as in the command-line client batch mode).
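A hedged sketch for the command-line client; the file name is illustrative:

``` sql
SELECT CounterID, count() AS c
FROM hits
GROUP BY CounterID
INTO OUTFILE 'counters.tsv'
FORMAT TabSeparated
```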
### FORMAT Clause {#format-clause}
Specify FORMAT format to get data in any specified format.
You can use this for convenience, or for creating dumps.
For more information, see the section “Formats”.
If the FORMAT clause is omitted, the default format is used, which depends on both the settings and the interface used for accessing the DB. For the HTTP interface and the command-line client in batch mode, the default format is TabSeparated. For the command-line client in interactive mode, the default format is PrettyCompact (it has attractive and compact tables).
When using the command-line client, data is passed to the client in an internal efficient format. The client independently interprets the FORMAT clause of the query and formats the data itself (thus relieving the network and the server from the load).
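A minimal sketch that returns the result as JSON instead of the interface default:

``` sql
SELECT EventDate, count() AS c
FROM hits
GROUP BY EventDate
ORDER BY EventDate
FORMAT JSON
```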
### IN Operators {#select-in-operators}
The `IN`, `NOT IN`, `GLOBAL IN`, and `GLOBAL NOT IN` operators are covered separately, since their functionality is quite rich.
The left side of the operator is either a single column or a tuple.
Examples:
``` sql
SELECT UserID IN (123, 456) FROM ...
SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ...
```
If the left side is a single column that is in the index, and the right side is a set of constants, the system uses the index for processing the query.
Don't list too many values explicitly (i.e. millions). If a data set is large, put it in a temporary table (for example, see the section “External data for query processing”), then use a subquery.
The right side of the operator can be a set of constant expressions, a set of tuples with constant expressions (shown in the examples above), or the name of a database table or SELECT subquery in brackets.
If the right side of the operator is the name of a table (for example, `UserID IN users`), this is equivalent to the subquery `UserID IN (SELECT * FROM users)`. Use this when working with external data that is sent along with the query. For example, the query can be sent together with a set of user IDs loaded to the users temporary table, which should be filtered.
If the right side of the operator is a table name that has the Set engine (a prepared data set that is always in RAM), the data set will not be created over again for each query.
The subquery may specify more than one column for filtering tuples.
Example:
``` sql
SELECT (CounterID, UserID) IN (SELECT CounterID, UserID FROM ...) FROM ...
```
The columns to the left and right of the IN operator should have the same type.
The IN operator and subquery may occur in any part of the query, including in aggregate functions and lambda functions.
Example:
``` sql
SELECT
EventDate,
avg(UserID IN
(
SELECT UserID
FROM test.hits
WHERE EventDate = toDate('2014-03-17')
)) AS ratio
FROM test.hits
GROUP BY EventDate
ORDER BY EventDate ASC
```
``` text
┌──EventDate─┬────ratio─┐
│ 2014-03-17 │ 1 │
│ 2014-03-18 │ 0.807696 │
│ 2014-03-19 │ 0.755406 │
│ 2014-03-20 │ 0.723218 │
│ 2014-03-21 │ 0.697021 │
│ 2014-03-22 │ 0.647851 │
│ 2014-03-23 │ 0.648416 │
└────────────┴──────────┘
```
For each day after March 17th, count the percentage of pageviews made by users who visited the site on March 17th.
A subquery in the IN clause is always run just one time on a single server. There are no dependent subqueries.
#### NULL processing {#null-processing-1}
During request processing, the IN operator assumes that the result of an operation with [NULL](../syntax.md) is always equal to `0`, regardless of whether `NULL` is on the right or left side of the operator. `NULL` values are not included in any dataset, do not correspond to each other and cannot be compared.
Here is an example with the `t_null` table:
``` text
┌─x─┬────y─┐
│ 1 │ ᴺᵁᴸᴸ │
│ 2 │ 3 │
└───┴──────┘
```
Running the query `SELECT x FROM t_null WHERE y IN (NULL,3)` gives you the following result:
``` text
┌─x─┐
│ 2 │
└───┘
```
You can see that the row in which `y = NULL` is thrown out of the query results. This is because ClickHouse can't decide whether `NULL` is included in the `(NULL,3)` set, returns `0` as the result of the operation, and `SELECT` excludes this row from the final output.
``` sql
SELECT y IN (NULL, 3)
FROM t_null
```
``` text
┌─in(y, tuple(NULL, 3))─┐
│ 0 │
│ 1 │
└───────────────────────┘
```
#### Distributed Subqueries {#select-distributed-subqueries}
There are two options for IN-s with subqueries (similar to JOINs): normal `IN` / `JOIN` and `GLOBAL IN` / `GLOBAL JOIN`. They differ in how they are run for distributed query processing.
!!! attention "Attention"
Remember that the algorithms described below may work differently depending on the [settings](../../operations/settings/settings.md) `distributed_product_mode` setting.
When using the regular IN, the query is sent to remote servers, and each of them runs the subqueries in the `IN` or `JOIN` clause.
When using `GLOBAL IN` / `GLOBAL JOINs`, first all the subqueries are run for `GLOBAL IN` / `GLOBAL JOINs`, and the results are collected in temporary tables. Then the temporary tables are sent to each remote server, where the queries are run using this temporary data.
For a non-distributed query, use the regular `IN` / `JOIN`.
Be careful when using subqueries in the `IN` / `JOIN` clauses for distributed query processing.
Let's look at some examples. Assume that each server in the cluster has a normal **local\_table**. Each server also has a **distributed\_table** table with the **Distributed** type, which looks at all the servers in the cluster.
For a query to the **distributed\_table**, the query will be sent to all the remote servers and run on them using the **local\_table**.
For example, the query
``` sql
SELECT uniq(UserID) FROM distributed_table
```
will be sent to all remote servers as
``` sql
SELECT uniq(UserID) FROM local_table
```
and run on each of them in parallel, until it reaches the stage where intermediate results can be combined. Then the intermediate results will be returned to the requestor server and merged on it, and the final result will be sent to the client.
Now let's examine a query with IN:
``` sql
SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34)
```
- Calculation of the intersection of audiences of two sites.
This query will be sent to all remote servers as
``` sql
SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM local_table WHERE CounterID = 34)
```
In other words, the data set in the IN clause will be collected on each server independently, only across the data that is stored locally on each of the servers.
This will work correctly and optimally if you are prepared for this case and have spread data across the cluster servers such that the data for a single UserID resides entirely on a single server. In this case, all the necessary data will be available locally on each server. Otherwise, the result will be inaccurate. We refer to this variation of the query as “local IN”.
To correct how the query works when data is spread randomly across the cluster servers, you could specify **distributed\_table** inside a subquery. The query would look like this:
``` sql
SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
```
This query will be sent to all remote servers as
``` sql
SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
```
The subquery will begin running on each remote server. Since the subquery uses a distributed table, the subquery that is on each remote server will be resent to every remote server as
``` sql
SELECT UserID FROM local_table WHERE CounterID = 34
```
For example, if you have a cluster of 100 servers, executing the entire query will require 10,000 elementary requests, which is generally considered unacceptable.
In such cases, you should always use GLOBAL IN instead of IN. Let's look at how it works for the query
``` sql
SELECT uniq(UserID) FROM distributed_table WHERE CounterID = 101500 AND UserID GLOBAL IN (SELECT UserID FROM distributed_table WHERE CounterID = 34)
```
The requestor server will run the subquery
``` sql
SELECT UserID FROM distributed_table WHERE CounterID = 34
```
and the result will be put in a temporary table in RAM. Then the request will be sent to each remote server as
``` sql
SELECT uniq(UserID) FROM local_table WHERE CounterID = 101500 AND UserID GLOBAL IN _data1
```
and the temporary table `_data1` will be sent to every remote server with the query (the name of the temporary table is implementation-defined).
This is more optimal than using the normal IN. However, keep the following points in mind:
1. When creating a temporary table, data is not made unique. To reduce the volume of data transmitted over the network, specify DISTINCT in the subquery. (You don't need to do this for a normal IN.)
2. The temporary table will be sent to all the remote servers. Transmission does not account for network topology. For example, if 10 remote servers reside in a datacenter that is very remote in relation to the requestor server, the data will be sent 10 times over the channel to the remote datacenter. Try to avoid large data sets when using GLOBAL IN.
3. When transmitting data to remote servers, restrictions on network bandwidth are not configurable. You might overload the network.
4. Try to distribute data across servers so that you don't need to use GLOBAL IN on a regular basis.
5. If you need to use GLOBAL IN often, plan the location of the ClickHouse cluster so that a single group of replicas resides in no more than one data center with a fast network between them, so that a query can be processed entirely within a single data center.
It also makes sense to specify a local table in the `GLOBAL IN` clause, in case this local table is only available on the requestor server and you want to use data from it on remote servers.
### Extreme Values {#extreme-values}
In addition to results, you can also get minimum and maximum values for the results columns. To do this, set the **extremes** setting to 1. Minimums and maximums are calculated for numeric types, dates, and dates with times. For other columns, the default values are output.
An extra two rows are calculated: the minimums and maximums, respectively. These extra two rows are output in `JSON*`, `TabSeparated*`, and `Pretty*` [formats](../../interfaces/formats.md), separate from the other rows. They are not output for other formats.
In `JSON*` formats, the extreme values are output in a separate extremes field. In `TabSeparated*` formats, the row comes after the main result, and after totals if present. It is preceded by an empty row (after the other data). In `Pretty*` formats, the row is output as a separate table after the main result, and after `totals` if present.
Extreme values are calculated for rows before `LIMIT`, but after `LIMIT BY`. However, when using `LIMIT offset, size`, the rows before `offset` are included in `extremes`. In stream requests, the result may also include a small number of rows that passed through `LIMIT`.
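A minimal sketch of enabling extremes in a session (reusing `distributed_table` from the examples above):
``` sql
SET extremes = 1;

-- The output now contains an extra two-row block with the per-column
-- minimums and maximums of the result (in Pretty* formats, as a separate table).
SELECT CounterID, UserID
FROM distributed_table
WHERE CounterID = 34
LIMIT 10;
```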
### Notes {#notes}
The `GROUP BY` and `ORDER BY` clauses do not support positional arguments. This contradicts MySQL, but conforms to standard SQL.
For example, `GROUP BY 1, 2` will be interpreted as grouping by constants (i.e. aggregation of all rows into one).
You can use synonyms (`AS` aliases) in any part of a query.
You can put an asterisk in any part of a query instead of an expression. When the query is analyzed, the asterisk is expanded to a list of all table columns (excluding the `MATERIALIZED` and `ALIAS` columns). There are only a few cases when using an asterisk is justified:
- When creating a table dump.
- For tables containing just a few columns, such as system tables.
- For getting information about what columns are in a table. In this case, set `LIMIT 1`. But it is better to use the `DESC TABLE` query (see the sketch below).
- When there is strong filtration on a small number of columns using `PREWHERE`.
- In subqueries (since columns that aren't needed for the external query are excluded from subqueries).
In all other cases, we don't recommend using the asterisk, since it only gives you the drawbacks of a columnar DBMS instead of the advantages.
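As a minimal sketch of point 3 in the list above (the table name `t` is hypothetical):
``` sql
-- Peeking at the columns with an asterisk; LIMIT 1 keeps the read small.
SELECT * FROM t LIMIT 1;

-- The preferred way: ask for the table structure directly.
DESC TABLE t;
```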
[Original article](https://clickhouse.tech/docs/en/query_language/select/) <!--hide-->

View File

@ -80,48 +80,6 @@ Ver la diferencia?
Por ejemplo, la consulta “count the number of records for each advertising platform” requiere leer uno “advertising platform ID” columna, que ocupa 1 byte sin comprimir. Si la mayor parte del tráfico no proviene de plataformas publicitarias, puede esperar al menos una compresión de 10 veces de esta columna. Cuando se utiliza un algoritmo de compresión rápida, la descompresión de datos es posible a una velocidad de al menos varios gigabytes de datos sin comprimir por segundo. En otras palabras, esta consulta se puede procesar a una velocidad de aproximadamente varios miles de millones de filas por segundo en un único servidor. Esta velocidad se logra realmente en la práctica.
<details markdown="1">
<summary>Ejemplo</summary>
``` bash
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
```
``` sql
SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
```
``` text
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
```
</details>
### CPU {#cpu}
Dado que la ejecución de una consulta requiere procesar un gran número de filas, ayuda enviar todas las operaciones para vectores completos en lugar de para filas separadas, o implementar el motor de consultas para que casi no haya costo de envío. Si no hace esto, con cualquier subsistema de disco medio decente, el intérprete de consultas inevitablemente detiene la CPU. Tiene sentido almacenar datos en columnas y procesarlos, cuando sea posible, por columnas.

View File

@ -26,7 +26,10 @@ toc_title: "Integraci\xF3n"
- [Método de codificación de datos:](https://github.com/zlzforever/ClickHouseMigrator)
- Colas de mensajes
- [Kafka](https://kafka.apache.org)
- [Método de codificación de datos:](https://github.com/housepower/clickhouse_sinker) (utilizar [Ir cliente](https://github.com/kshvakov/clickhouse/))
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (usos [Go client](https://github.com/ClickHouse/clickhouse-go/))
- Procesamiento de flujo
- [Flink](https://flink.apache.org)
- [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
- Almacenamiento de objetos
- [S3](https://en.wikipedia.org/wiki/Amazon_S3)
- [Haga clic en el botón de copia de seguridad](https://github.com/AlexAkulov/clickhouse-backup)
@ -74,6 +77,9 @@ toc_title: "Integraci\xF3n"
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (utilizar [InformaciónSistema abierto.](https://github.com/Infinidat/infi.clickhouse_orm))
- [pandas](https://pandas.pydata.org)
- [Pandahouse](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [Dplyr](https://db.rstudio.com/dplyr/)
- [Bienvenidos al Portal de Licitación Electrónica de Licitación Electrónica](https://github.com/IMSMWU/RClickhouse) (utilizar [Bienvenidos](https://github.com/artpaul/clickhouse-cpp))

View File

@ -79,54 +79,6 @@ ClickHouse یک مدیریت دیتابیس (DBMS) ستون گرا برای پر
برای مثال، query «تعداد رکوردها به ازای هر بستر نیازمندی» نیازمند خواندن ستون «آیدی بستر آگهی»، که 1 بایت بدون فشرده طول می کشد، خواهد بود. اگر بیشتر ترافیک مربوط به بستر های نیازمندی نبود، شما می توانید انتظار حداقل 10 برابر فشرده سازی این ستون را داشته باشید. زمانی که از الگوریتم فشرده سازی quick استفاده می کنید، عملیات decompression داده ها با سرعت حداقل چندین گیگابایت در ثانیه انجام می شود. به عبارت دیگر، این query توانایی پردازش تقریبا چندین میلیارد رکورد در ثانیه به ازای یک سرور را دارد. این سرعت در عمل واقعی و دست یافتنی است.
<details markdown="1">
<summary>مثال</summary>
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
:) SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
SELECT
CounterID,
count()
FROM hits
GROUP BY CounterID
ORDER BY count() DESC
LIMIT 20
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.)
:)
</details>
### CPU {#cpu}
از آنجایی که اجرای یک query نیازمند پردازش تعداد زیادی سطر می باشد، این کمک می کند تا تمام عملیات ها به جای ارسال به سطرهای جداگانه، برای کل بردار ارسال شود، یا برای ترکیب query engine به طوری که هیچ هزینه ی ارسالی وجود ندارد. اگر این کار رو نکنید، با هر half-decent disk subsystem، تفسیرگر query ناگزیر است که CPU را متوقف کند. این منطقی است که که در صورت امکان هر دو کار ذخیره سازی داده در ستون ها و پردازش ستون ها با هم انجام شود.

View File

@ -74,6 +74,9 @@ toc_title: "\u06CC\u06A9\u067E\u0627\u0631\u0686\u06AF\u06CC"
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (استفاده [اطالعات.کلیک \_شورم](https://github.com/Infinidat/infi.clickhouse_orm))
- [پانداها](https://pandas.pydata.org)
- [پانداهاوس](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [هواپیمای دوباله](https://db.rstudio.com/dplyr/)
- [خانه روستایی](https://github.com/IMSMWU/RClickhouse) (استفاده [صفحه اصلی](https://github.com/artpaul/clickhouse-cpp))

View File

@ -80,48 +80,6 @@ Vous voyez la différence?
Par exemple, la requête “count the number of records for each advertising platform” nécessite la lecture d'un “advertising platform ID” colonne, qui prend 1 octet non compressé. Si la majeure partie du trafic ne provenait pas de plates-formes publicitaires, vous pouvez vous attendre à une compression d'au moins 10 fois de cette colonne. Lors de l'utilisation d'un algorithme de compression rapide, la décompression des données est possible à une vitesse d'au moins plusieurs gigaoctets de données non compressées par seconde. En d'autres termes, cette requête ne peut être traitée qu'à une vitesse d'environ plusieurs milliards de lignes par seconde sur un seul serveur. Cette vitesse est effectivement atteinte dans la pratique.
<details markdown="1">
<summary>Exemple</summary>
``` bash
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
```
``` sql
SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
```
``` text
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
```
</details>
### CPU {#cpu}
Étant donné que l'exécution d'une requête nécessite le traitement d'un grand nombre de lignes, il est utile de répartir toutes les opérations pour des vecteurs entiers au lieu de lignes séparées, ou d'implémenter le moteur de requête de sorte qu'il n'y ait presque aucun coût d'expédition. Si vous ne le faites pas, avec un sous-système de disque à moitié décent, l'interpréteur de requête bloque inévitablement le processeur. Il est logique de stocker des données dans des colonnes et de les traiter, si possible, par des colonnes.

View File

@ -74,6 +74,9 @@ toc_title: "Int\xE9gration"
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (utiliser [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [Panda](https://pandas.pydata.org)
- [pandahouse](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [dplyr](https://db.rstudio.com/dplyr/)
- [RClickhouse](https://github.com/IMSMWU/RClickhouse) (utiliser [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))

View File

@ -82,48 +82,6 @@ OLAPシナリオは、他の一般的なシナリオOLTPやKey-Valueアクセ
たとえば、「各広告プラットフォームのレコード数をカウントする」クエリでは、1つの「広告プラットフォームID」列を読み取る必要がありますが、これは非圧縮では1バイトの領域を要します。トラフィックのほとんどが広告プラットフォームからのものではない場合、この列は少なくとも10倍の圧縮が期待できます。高速な圧縮アルゴリズムを使用すれば、1秒あたり少なくとも非圧縮データに換算して数ギガバイトの速度でデータを展開できます。つまり、このクエリは、単一のサーバーで1秒あたり約数十億行の速度で処理できます。この速度はまさに実際に達成されます。
<details markdown="1">
<summary>Example</summary>
``` bash
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
```
``` sql
SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
```
``` text
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
```
</details>
### CPU {#cpu}
クエリを実行するには大量の行を処理する必要があるため、個別の行ではなくベクター全体のすべての操作をディスパッチするか、ディスパッチコストがほとんどないようにクエリエンジンを実装すると効率的です。 適切なディスクサブシステムでこれを行わないと、クエリインタープリターが必然的にCPUを失速させます。

View File

@ -74,6 +74,9 @@ toc_title: "\u7D71\u5408"
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (用途 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [パンダ](https://pandas.pydata.org)
- [パンダハウス](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [dplyr](https://db.rstudio.com/dplyr/)
- [Rクリックハウス](https://github.com/IMSMWU/RClickhouse) (用途 [クリックハウス-cpp](https://github.com/artpaul/clickhouse-cpp))

View File

@ -43,6 +43,7 @@ dicts/external_dicts_dict_sources.md query_language/dicts/external_dicts_dict_so
dicts/external_dicts_dict_structure.md query_language/dicts/external_dicts_dict_structure.md
dicts/index.md query_language/dicts/index.md
dicts/internal_dicts.md query_language/dicts/internal_dicts.md
extended_roadmap.md whats_new/extended_roadmap.md
formats.md interfaces/formats.md
formats/capnproto.md interfaces/formats.md
formats/csv.md interfaces/formats.md

View File

@ -1,7 +1,7 @@
---
toc_folder_title: Commercial
toc_folder_title: Коммерческие услуги
toc_priority: 70
toc_title: Commercial
toc_title: Коммерческие услуги
---

View File

@ -1,5 +1,5 @@
---
toc_folder_title: Engines
toc_folder_title: Движки
toc_priority: 25
---

View File

@ -1,3 +1,8 @@
---
toc_folder_title: Тестовые массивы данных
toc_priority: 12
toc_title: Обзор
---
# Тестовые массивы данных
Этот раздел описывает как получить тестовые массивы данных и загрузить их в ClickHouse.

View File

@ -1,3 +1,10 @@
---
toc_folder_title: Начало работы
toc_hidden: true
toc_priority: 8
toc_title: hidden
---
# Начало работы {#nachalo-raboty}
Если вы новичок в ClickHouse и хотите вживую оценить его производительность, прежде всего нужно пройти через [процесс установки](install.md).

View File

@ -1,3 +1,9 @@
---
toc_folder_title: Руководства
toc_priority: 38
toc_title: Обзор
---
# Руководства {#rukovodstva}
Подробные пошаговые инструкции, которые помогут вам решать различные задачи с помощью ClickHouse.

View File

@ -1,3 +1,8 @@
---
toc_priority: 0
toc_title: Обзор
---
# Что такое ClickHouse {#chto-takoe-clickhouse}
ClickHouse - столбцовая система управления базами данных (СУБД) для онлайн обработки аналитических запросов (OLAP).
@ -77,48 +82,6 @@ ClickHouse - столбцовая система управления базам
Например, для запроса «посчитать количество записей для каждой рекламной системы», требуется прочитать один столбец «идентификатор рекламной системы», который занимает 1 байт в несжатом виде. Если большинство переходов было не с рекламных систем, то можно рассчитывать хотя бы на десятикратное сжатие этого столбца. При использовании быстрого алгоритма сжатия, возможно разжатие данных со скоростью более нескольких гигабайт несжатых данных в секунду. То есть, такой запрос может выполняться со скоростью около нескольких миллиардов строк в секунду на одном сервере. На практике, такая скорость действительно достигается.
<details markdown="1">
<summary>Пример</summary>
``` bash
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
```
``` sql
SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
```
``` text
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
```
</details>
### По вычислениям {#po-vychisleniiam}
Так как для выполнения запроса надо обработать достаточно большое количество строк, становится актуальным диспетчеризовывать все операции не для отдельных строк, а для целых векторов, или реализовать движок выполнения запроса так, чтобы издержки на диспетчеризацию были примерно нулевыми. Если этого не делать, то при любой не слишком плохой дисковой подсистеме, интерпретатор запроса неизбежно упрётся в CPU.

View File

@ -1,3 +1,9 @@
---
toc_folder_title: Интерфейсы
toc_priority: 14
toc_title: Введение
---
# Интерфейсы {#interfaces}
ClickHouse предоставляет два сетевых интерфейса (оба могут быть дополнительно обернуты в TLS для дополнительной безопасности):

View File

@ -1,5 +1,5 @@
---
toc_folder_title: Third-Party
toc_folder_title: От сторонних разработчиков
toc_priority: 24
---

View File

@ -7,66 +7,72 @@
- Реляционные системы управления базами данных
- [MySQL](https://www.mysql.com)
- [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
- [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
- [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
- [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
- [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
- [horgh-replicator](https://github.com/larsnovikov/horgh-replicator)
- [PostgreSQL](https://www.postgresql.org)
- [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
- [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (использует [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pg2ch](https://github.com/mkabilov/pg2ch)
- [clickhouse\_fdw](https://github.com/adjust/clickhouse_fdw)
- [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
- [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (использует [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pg2ch](https://github.com/mkabilov/pg2ch)
- [clickhouse\_fdw](https://github.com/adjust/clickhouse_fdw)
- [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
- [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
- [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
- Очереди сообщений
- [Kafka](https://kafka.apache.org)
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (использует [Go client](https://github.com/kshvakov/clickhouse/))
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (использует [Go client](https://github.com/ClickHouse/clickhouse-go/))
- Потоковая обработка
- [Flink](https://flink.apache.org)
- [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
- Хранилища объектов
- [S3](https://en.wikipedia.org/wiki/Amazon_S3)
- [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
- [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup)
- Оркестрация контейнеров
- [Kubernetes](https://kubernetes.io)
- [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
- [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)
- Системы управления конфигурацией
- [puppet](https://puppet.com)
- [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
- [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
- [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse)
- [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
- Мониторинг
- [Graphite](https://graphiteapp.org)
- [graphouse](https://github.com/yandex/graphouse)
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - оптимизирует партиции таблиц [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) согласно правилам в [конфигурации rollup](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration)
- [graphouse](https://github.com/yandex/graphouse)
- [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) +
- [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse)
- [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - оптимизирует партиции таблиц [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) согласно правилам в [конфигурации rollup](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration)
- [Grafana](https://grafana.com/)
- [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
- [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
- [Prometheus](https://prometheus.io/)
- [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
- [PromHouse](https://github.com/Percona-Lab/PromHouse)
- [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (использует [Go client](https://github.com/kshvakov/clickhouse/))
- [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
- [PromHouse](https://github.com/Percona-Lab/PromHouse)
- [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (использует [Go client](https://github.com/kshvakov/clickhouse/))
- [Nagios](https://www.nagios.org/)
- [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
- [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
- [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
- [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
- [Zabbix](https://www.zabbix.com)
- [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
- [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template)
- [Sematext](https://sematext.com/)
- [clickhouse интеграция](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
- [clickhouse интеграция](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
- Логирование
- [rsyslog](https://www.rsyslog.com/)
- [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
- [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
- [fluentd](https://www.fluentd.org)
- [loghouse](https://github.com/flant/loghouse) (для [Kubernetes](https://kubernetes.io))
- [logagent](https://www.sematext.com/logagent)
- [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
- [loghouse](https://github.com/flant/loghouse) (для [Kubernetes](https://kubernetes.io))
- [Sematext](https://www.sematext.com/logagent)
- [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
- Гео
- [MaxMind](https://dev.maxmind.com/geoip/)
- [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
- [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
## Экосистемы вокруг языков программирования {#ekosistemy-vokrug-iazykov-programmirovaniia}
- Python
- [SQLAlchemy](https://www.sqlalchemy.org)
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (использует [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (использует [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pandas](https://pandas.pydata.org)
- [pandahouse](https://github.com/kszucs/pandahouse)
- [pandahouse](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [dplyr](https://db.rstudio.com/dplyr/)
- [RClickhouse](https://github.com/IMSMWU/RClickhouse) (использует [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))

View File

@ -1,5 +1,5 @@
---
toc_folder_title: Introduction
toc_folder_title: Введение
toc_priority: 1
---

View File

@ -1,3 +1,9 @@
---
toc_folder_title: Эксплуатация
toc_priority: 41
toc_title: Введение
---
# Эксплуатация {#ekspluatatsiia}
Руководство по эксплуатации ClickHouse состоит из следующих основных разделов:

View File

@ -1,3 +1,9 @@
---
toc_folder_title: Агрегатные функции
toc_priority: 33
toc_title: Введение
---
# Агрегатные функции {#aggregate-functions}
Агрегатные функции работают в [привычном](http://www.sql-tutorial.com/sql-aggregate-functions-sql-tutorial) для специалистов по базам данных смысле.

View File

@ -1,3 +1,9 @@
---
toc_folder_title: Типы данных
toc_priority: 37
toc_title: Введение
---
# Типы данных {#data_types}
ClickHouse может сохранять в ячейках таблиц данные различных типов.

View File

@ -1,3 +1,10 @@
---
toc_folder_title: Справка по SQL
toc_hidden: true
toc_priority: 28
toc_title: hidden
---
# Справка по SQL {#spravka-po-sql}
- [SELECT](statements/select.md)

View File

@ -1,3 +1,8 @@
---
toc_priority: 36
toc_title: ALTER
---
## ALTER {#query_language_queries_alter}
Запрос `ALTER` поддерживается только для таблиц типа `*MergeTree`, а также `Merge` и `Distributed`. Запрос имеет несколько вариантов.

View File

@ -1,3 +1,8 @@
---
toc_priority: 35
toc_title: CREATE
---
## CREATE DATABASE {#query-language-create-database}
Создает базу данных.

View File

@ -1,5 +1,5 @@
---
toc_folder_title: Statements
toc_folder_title: Выражения
toc_priority: 31
---

View File

@ -1,3 +1,8 @@
---
toc_priority: 34
toc_title: INSERT INTO
---
## INSERT {#insert}
Добавление данных.

View File

@ -1,3 +1,8 @@
---
toc_priority: 33
toc_title: SELECT
---
# Синтаксис запросов SELECT {#sintaksis-zaprosov-select}
`SELECT` осуществляет выборку данных.

View File

@ -1,3 +1,9 @@
---
toc_folder_title: Табличные функции
toc_priority: 34
toc_title: Введение
---
# Табличные функции {#tablichnye-funktsii}
Табличные функции — это метод создания таблиц.

View File

@ -15,9 +15,7 @@
Задача «normalized z-Order curve» в перспективе может быть полезна для БК и Метрики, так как позволяет смешивать OrderID и PageID и избежать дублирования данных.
В задаче также вводится способ индексации путём обращения функции нескольких аргументов на интервале, что имеет смысл для дальнейшего развития.
Изначально делал [Андрей Чулков](https://github.com/achulkov2), ВШЭ, теперь (не) доделывает [Ольга Хвостикова](https://github.com/stavrolia), но сроки немного сдвинуты из-за задачи 25.9. Будем надеятся на лучшее.
Upd. Доделывать будет другой человек. Приоритет не высокий.
[Андрей Чулков](https://github.com/achulkov2), ВШЭ.
### 1.2. Wait-free каталог баз данных. {#wait-free-katalog-baz-dannykh}
@ -869,8 +867,6 @@ Upd. Нас заставляют переписать эту библиотек
### 10.14. Поддержка всех типов в функции transform. {#podderzhka-vsekh-tipov-v-funktsii-transform}
Задачу взяла Ольга Хвостикова. Upd. Статус неизвестен.
### 10.15. Использование словарей как специализированного layout для Join. {#ispolzovanie-slovarei-kak-spetsializirovannogo-layout-dlia-join}
### 10.16. Словари на локальном SSD. {#slovari-na-lokalnom-ssd}
@ -1414,8 +1410,6 @@ N.Vartolomei.
### 22.3. Защита от абсурдно заданных пользователем кодеков. {#zashchita-ot-absurdno-zadannykh-polzovatelem-kodekov}
В очереди, скорее всего [Ольга Хвостикова](https://github.com/stavrolia).
### 22.4. Исправление оставшихся deadlocks в табличных RWLock-ах. {#ispravlenie-ostavshikhsia-deadlocks-v-tablichnykh-rwlock-akh}
Александр Казаков. Нужно для Яндекс.Метрики и Datalens. Задача постепенно тащится и исправлениями в соседних местах стала менее актуальна.

View File

@ -1,5 +1,5 @@
---
toc_folder_title: What's New
toc_folder_title: Что нового?
toc_priority: 72
---

View File

@ -111,6 +111,7 @@ def build_for_lang(lang, args):
'codehilite',
'nl2br',
'sane_lists',
'pymdownx.details',
'pymdownx.magiclink',
'pymdownx.superfences',
'extra',
@ -384,7 +385,7 @@ if __name__ == '__main__':
arg_parser.add_argument('--output-dir', default='build')
arg_parser.add_argument('--enable-stable-releases', action='store_true')
arg_parser.add_argument('--stable-releases-limit', type=int, default='4')
arg_parser.add_argument('--lts-releases-limit', type=int, default='1')
arg_parser.add_argument('--lts-releases-limit', type=int, default='2')
arg_parser.add_argument('--version-prefix', type=str, default='')
arg_parser.add_argument('--is-stable-release', action='store_true')
arg_parser.add_argument('--skip-single-page', action='store_true')

View File

@ -1,17 +1,15 @@
---
machine_translated: true
machine_translated_rev: e8cd92bba3269f47787db090899f7c242adf7818
toc_priority: 0
toc_title: "Genel bak\u0131\u015F"
---
# ClickHouse nedir? {#what-is-clickhouse}
ClickHouse, sorguların çevrimiçi analitik işlenmesi (OLAP) için sütun odaklı bir veritabanı yönetim sistemidir (DBMS).
ClickHouse, sorguların çevrimiçi analitik işlenmesi (*Online Analytical Processing* - OLAP) için sütun odaklı bir Veritabanı Yönetim Sistemidir (*DataBase Management System* - DBMS).
İn a “normal” satır yönelimli DBMS, veri bu sırayla saklanır:
“Normal” bir satır odaklı DBMS içinde veriler şu şekilde saklanır:
| Satır | Watchıd | JavaEnable | Başlık | GoodEvent | EventTime |
| Satır | WatchId | JavaEnable | Başlık | İyiOlay | OlayZamanı |
|-------|-------------|------------|----------------------|-----------|---------------------|
| \#0 | 89354350662 | 1 | Yatırımcı İlişkileri | 1 | 2016-05-18 05:19:20 |
| \#1 | 90329509958 | 0 | Bize ulaşın | 1 | 2016-05-18 08:10:20 |
@ -20,47 +18,47 @@ ClickHouse, sorguların çevrimiçi analitik işlenmesi (OLAP) için sütun odak
Başka bir deyişle, bir satırla ilgili tüm değerler fiziksel olarak yan yana depolanır.
Satır yönelimli DBMS örnekleri MySQL, Postgres ve MS SQL Server'dır.
MySQL, Postgres ve MS SQL Server gibi veritabanları satır odaklı DBMS örnekleridir.
Sütun yönelimli bir DBMS'DE, veriler şu şekilde saklanır:
Sütun odaklı bir DBMS'de ise veriler şu şekilde saklanır:
| Satır: | \#0 | \#1 | \#2 | \#N |
|-------------|----------------------|---------------------|---------------------|-----|
| Watchıd: | 89354350662 | 90329509958 | 89953706054 | … |
| WatchId: | 89354350662 | 90329509958 | 89953706054 | … |
| JavaEnable: | 1 | 0 | 1 | … |
| Başlık: | Yatırımcı İlişkileri | Bize ulaşın | Görev | … |
| GoodEvent: | 1 | 1 | 1 | … |
| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … |
| İyiOlay: | 1 | 1 | 1 | … |
| OlayZamanı: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | … |
Bu örnekler yalnızca verilerin düzenlendiği sırayı gösterir. Farklı sütunlardaki değerler ayrı olarak depolanır ve aynı sütundaki veriler birlikte depolanır.
Bir sütun odaklı DBMS örnekleri: Vertica, Paraccel (Actian Matrix ve Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise ve Actian vektör), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid ve kdb+.
Sütun odaklı DBMS örnekleri: Vertica, Paraccel (Actian Matrix ve Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise ve Actian vektör), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid ve kdb+.
Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on.
Verinin farklı bir şekilde sıralanarak depolanması, bazı veri erişim senaryoları için daha uygundur. Veri erişim senaryosu, hangi sorguların ne kadar sıklıkla yapıldığını, ne kadar verinin okunduğu, bunların hangi tiplerde hangi kolonlardan, satırlardan ve hangi miktarda(bayt olarak) okunacağını; verinin okunması ile güncellenmesi arasındaki ilişkiyi; verinin işlenen boyutu ve ne kadar yerel olduğunu; veri değiş-tokuşunun(transaction) olup olmayacağını, olacaksa diğer işlemlerden ne kadat yalıtılacağını; verilerin kopyalanması ve mantıksal bütünlük intiyaçlarını; her sorgu türünün gecikme ve iletim debisi ihtiyaçlarını gösterir.
Sistem üzerindeki yük ne kadar yüksek olursa, kullanım senaryosunun gereksinimlerine uyacak şekilde ayarlanmış sistemi özelleştirmek o kadar önemlidir ve bu özelleştirme o kadar ince taneli olur. Önemli ölçüde farklı senaryolara eşit derecede uygun bir sistem yoktur. Bir sistem geniş bir senaryo kümesine uyarlanabilirse, yüksek bir yük altında, sistem tüm senaryoları eşit derecede zayıf bir şekilde ele alır veya olası senaryolardan yalnızca biri veya birkaçı için iyi çalışır.
Sistem üzerindeki yük ne kadar fazlaysa, sistem ayarlarının kullanım senaryolarına uyarlanması ve bu ayarların ne kadar hassas olduğu da o kadar önemli hale gelir. Birbirinden büyük ölçüde farklı olan veri erişim senaryolarına tam uyum sağlayan, yani her işe ve yüke gelen bir sistem yoktur. Eğer bir sistem yük altında her türlü veri erişim senaryosuna adapte olabiliyorsa, o halde böyle bir sistem ya tüm senaryolara ya da senaryoların bir veya birkaçına karşı zayıp bir performans gösterir.
## OLAP senaryosunun temel özellikleri {#key-properties-of-olap-scenario}
- İsteklerin büyük çoğunluğu okuma erişimi içindir.
- İsteklerin büyük çoğunluğu, okuma erişimi içindir.
- Veriler, tek satırlarla değil, oldukça büyük gruplar halinde (\> 1000 satır) güncellenir; veya hiç güncellenmez.
- Veri DB eklenir, ancak değiştirilmez.
- Okumalar için, dB'den oldukça fazla sayıda satır çıkarılır,ancak yalnızca küçük bir sütun alt kümesi.
- Tablolar şunlardır “wide,” çok sayıda sütun içerdikleri anlamına gelir.
- Sorgular nispeten nadirdir (genellikle sunucu başına yüzlerce sorgu veya saniyede daha az).
- Veri, veritabanına eklenir, ancak değiştirilmez.
- Bazı sorgular için veritabanından den oldukça fazla sayıda satır çekilir, ancak sonuç sadece birkaç satır ve sütunludur.
- Tablolar "geniştir", yani bir tabloda çok sayıda kolon vardır(onlarca).
- Sorgular sıkılığı diğer senaryolara göre daha azdır (genellikle sunucu başına saniyede 100 veya daha az sorgu gelir).
- Basit sorgular için, 50 ms civarında gecikmelere izin verilir.
- Sütun değerleri oldukça küçüktür: sayılar ve kısa dizeler (örneğin, URL başına 60 bayt).
- Tek bir sorguyu işlerken yüksek verim gerektirir (sunucu başına saniyede milyarlarca satıra kadar).
- İşlemler gerekli değildir.
- Veri tutarlılığı için düşük gereksinimler.
- Sorgu başına bir büyük tablo var. Biri hariç tüm tablolar küçüktür.
- Bir sorgu sonucu, kaynak veriden önemli ölçüde daha küçüktür. Başka bir deyişle, veriler filtrelenir veya toplanır, böylece sonuç tek bir sunucunun RAM'İNE sığar.
- Saklanan veriler oldukça küçüktür: genelde sadece sayılar ve kısa metinler içerir(örneğin, URL başına 60 bayt).
- Tek bir sorguyu işlemek yüksek miktarda veri okunmasını gerektirir(sunucu başına saniyede milyarlarca satıra kadar).
- Veri değiş-tokuşu(transaction) gerekli değildir.
- Veri tutarlılığı o kadar da önemli değildir.
- Genelde bir tane çok büyük tablo vardır, gerisi küçük tablolardan oluşur
- Bir sorgu sonucu elde edilen veri, okuanan veri miktarından oldukça küçüktür. Başka bir deyişle, milyarlarca satır içinden veriler süzgeçlenerek veya birleştirilerek elde edilen verilerin tek bir sunucunun RAM'ine sığar.
OLAP senaryosunun diğer popüler senaryolardan (OLTP veya anahtar değeri erişimi gibi) çok farklı olduğunu görmek kolaydır. Bu nedenle, iyi bir performans elde etmek istiyorsanız, analitik sorguları işlemek için OLTP veya anahtar değeri DB'Yİ kullanmayı denemek mantıklı değildir. Örneğin, analitik için MongoDB veya Redis kullanmaya çalışırsanız, OLAP veritabanlarına kıyasla çok düşük performans elde edersiniz.
OLAP senaryosunun diğer popüler senaryolardan (*Online Transactional Processing* - OLTP veya *Key-Value* veritabanı) çok farklı olduğu açıkça görülebilir. Bu nedenle, iyi bir performans elde etmek istiyorsanız, analitik sorguları işlemek için OLTP veya *Key-Value* veritabanlarını kullanmak pek mantıklı olmaz. Örneğin, analitik için MongoDB veya Redis kullanmaya çalışırsanız, OLAP veritabanlarına kıyasla çok düşük performans elde edersiniz.
## Sütun yönelimli veritabanları OLAP senaryosunda neden daha iyi çalışır {#why-column-oriented-databases-work-better-in-the-olap-scenario}
Sütun yönelimli veritabanları OLAP senaryolarına daha uygundur: çoğu sorgunun işlenmesinde en az 100 kat daha hızlıdır. Nedenleri aşağıda ayrıntılı olarak açıklanmıştır, ancak gerçek görsel olarak göstermek daha kolaydır:
Sütun yönelimli veritabanları OLAP senaryolarına daha uygundur: hatta o kadar ki, çoğu sorgunun işlenmesi en az 100 kat daha hızlıdır. Her ne kadar OLAP veritabanlarının neden bu kadar hızlı olduğuna dair nedenler aşağıda ayrıntılı verilmiş olsa da görseller üzerinden anlatmak daha kolay olacakttır:
**Satır yönelimli DBMS**
@ -70,7 +68,7 @@ Sütun yönelimli veritabanları OLAP senaryolarına daha uygundur: çoğu sorgu
![Column-oriented](images/column_oriented.gif#)
Farkı görüyor musun?
Farkı görüyor musunuz?
### Giriş/çıkış {#inputoutput}
@ -80,48 +78,6 @@ Farkı görüyor musun?
Örneğin, sorgu “count the number of records for each advertising platform” bir okuma gerektirir “advertising platform ID” 1 bayt sıkıştırılmamış kadar alır sütun. Trafiğin çoğu reklam platformlarından değilse, bu sütunun en az 10 kat sıkıştırılmasını bekleyebilirsiniz. Hızlı bir sıkıştırma algoritması kullanırken, saniyede en az birkaç gigabayt sıkıştırılmamış veri hızında veri dekompresyonu mümkündür. Başka bir deyişle, bu sorgu, tek bir sunucuda saniyede yaklaşık birkaç milyar satır hızında işlenebilir. Bu hız aslında pratikte elde edilir.
<details markdown="1">
<summary>Örnek</summary>
``` bash
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
```
``` sql
SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
```
``` text
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
```
</details>
### CPU {#cpu}
Bir sorguyu yürütmek çok sayıda satırı işlemeyi gerektirdiğinden, ayrı satırlar yerine tüm vektörler için tüm işlemlerin gönderilmesine veya sorgu motorunun neredeyse hiç gönderim maliyeti olmaması için uygulanmasına yardımcı olur. Bunu yapmazsanız, yarı iyi bir disk alt sistemi ile, sorgu yorumlayıcısı kaçınılmaz olarak CPU'yu durdurur. Hem verileri sütunlarda depolamak hem de mümkün olduğunda sütunlarla işlemek mantıklıdır.

View File

@ -74,6 +74,9 @@ toc_title: Entegrasyonlar
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (kullanma [ınfi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [Pandalar](https://pandas.pydata.org)
- [pandahouse](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [dplyr](https://db.rstudio.com/dplyr/)
- [RClickHouse](https://github.com/IMSMWU/RClickHouse) (kullanma [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp))

View File

@ -1,8 +1,3 @@
---
machine_translated: true
machine_translated_rev: b111334d6614a02564cf32f379679e9ff970d9b1
---
# 什么是ClickHouse {#shi-yao-shi-clickhouse}
ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)。
@ -81,54 +76,6 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)
例如查询«统计每个广告平台的记录数量»需要读取«广告平台ID»这一列它在未压缩的情况下需要1个字节进行存储。如果大部分流量不是来自广告平台那么这一列至少可以以十倍的压缩率被压缩。当采用快速压缩算法它的解压速度最少在十亿字节(未压缩数据)每秒。换句话说,这个查询可以在单个服务器上以每秒大约几十亿行的速度进行处理。这实际上是当前实现的速度。
<details markdown="1">
<summary>示例</summary>
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.
:) SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
SELECT
CounterID,
count()
FROM hits
GROUP BY CounterID
ORDER BY count() DESC
LIMIT 20
┌─CounterID─┬──count()─┐
│ 114208 │ 56057344 │
│ 115080 │ 51619590 │
│ 3228 │ 44658301 │
│ 38230 │ 42045932 │
│ 145263 │ 42042158 │
│ 91244 │ 38297270 │
│ 154139 │ 26647572 │
│ 150748 │ 24112755 │
│ 242232 │ 21302571 │
│ 338158 │ 13507087 │
│ 62180 │ 12229491 │
│ 82264 │ 12187441 │
│ 232261 │ 12148031 │
│ 146272 │ 11438516 │
│ 168777 │ 11403636 │
│ 4120072 │ 11227824 │
│ 10938808 │ 10519739 │
│ 74088 │ 9047015 │
│ 115079 │ 8837972 │
│ 337234 │ 8205961 │
└───────────┴──────────┘
20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.)
:)
</details>
### CPU {#cpu}
由于执行一个查询需要处理大量的行因此在整个向量上执行所有操作将比在每一行上执行所有操作更加高效。同时这将有助于实现一个几乎没有调用成本的查询引擎。如果你不这样做使用任何一个机械硬盘查询引擎都不可避免的停止CPU进行等待。所以在数据按列存储并且按列执行是很有意义的。

View File

@ -1,4 +1,3 @@
# 第三方集成库 {#di-san-fang-ji-cheng-ku}
!!! warning "声明"
@ -8,18 +7,21 @@
- 关系数据库管理系统
- [MySQL](https://www.mysql.com)
- [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
- [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
- [horgh-复制器](https://github.com/larsnovikov/horgh-replicator)
- [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support)
- [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader)
- [horgh-复制器](https://github.com/larsnovikov/horgh-replicator)
- [PostgreSQL](https://www.postgresql.org)
- [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
- [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pg2ch](https://github.com/mkabilov/pg2ch)
- [clickhousedb\_fdw](https://github.com/Percona-Lab/clickhousedb_fdw)
- [infi.clickhouse\_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [pg2ch](https://github.com/mkabilov/pg2ch)
- [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server)
- [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
- [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator)
- 消息队列
- [卡夫卡](https://kafka.apache.org)
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [去客户](https://github.com/kshvakov/clickhouse/))
- [clickhouse\_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [去客户](https://github.com/ClickHouse/clickhouse-go/))
- 流处理
- [Flink](https://flink.apache.org)
- [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink)
- 对象存储
- [S3](https://en.wikipedia.org/wiki/Amazon_S3)
- [ツ环板backupョツ嘉ッツ偲](https://github.com/AlexAkulov/clickhouse-backup)
@ -32,41 +34,44 @@
- [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse)
- 监控
- [石墨](https://graphiteapp.org)
- [graphouse](https://github.com/yandex/graphouse)
- [ツ暗ェツ氾环催ツ団](https://github.com/lomik/carbon-clickhouse) +
- [ツ环板-ョツ嘉ッツ偲](https://github.com/lomik/graphite-clickhouse)
- [石墨-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) -优化静态分区 [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) 如果从规则 [汇总配置](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) 可以应用
- [graphouse](https://github.com/yandex/graphouse)
- [ツ暗ェツ氾环催ツ団](https://github.com/lomik/carbon-clickhouse) +
- [ツ环板-ョツ嘉ッツ偲](https://github.com/lomik/graphite-clickhouse)
- [石墨-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) -优化静态分区 [\*GraphiteMergeTree](../../engines/table_engines/mergetree_family/graphitemergetree.md#graphitemergetree) 如果从规则 [汇总配置](../../engines/table_engines/mergetree_family/graphitemergetree.md#rollup-configuration) 可以应用
- [Grafana](https://grafana.com/)
- [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
- [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana)
- [普罗米修斯号](https://prometheus.io/)
- [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
- [PromHouse](https://github.com/Percona-Lab/PromHouse)
- [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (用途 [去客户](https://github.com/kshvakov/clickhouse/))
- [clickhouse\_exporter](https://github.com/f1yegor/clickhouse_exporter)
- [PromHouse](https://github.com/Percona-Lab/PromHouse)
- [clickhouse\_exporter](https://github.com/hot-wifi/clickhouse_exporter) (用途 [去客户](https://github.com/kshvakov/clickhouse/))
- [Nagios](https://www.nagios.org/)
- [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
- [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
- [check\_clickhouse](https://github.com/exogroup/check_clickhouse/)
- [check\_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py)
- [Zabbix](https://www.zabbix.com)
- [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/Altinity/clickhouse-zabbix-template)
- [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/Altinity/clickhouse-zabbix-template)
- [Sematext](https://sematext.com/)
- [clickhouse积分](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
- [clickhouse积分](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse)
- 记录
- [rsyslog](https://www.rsyslog.com/)
- [鹿茫house om omhousehousehousehouse酶](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
- [鹿茫house omhousee酶](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html)
- [fluentd](https://www.fluentd.org)
- [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io))
- [logagent](https://www.sematext.com/logagent)
- [logagent输出-插件-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
- [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io))
- [Sematext](https://www.sematext.com/logagent)
- [logagent输出-插件-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/)
- 地理
- [MaxMind](https://dev.maxmind.com/geoip/)
- [ツ环板-ョツ嘉ッツ偲青clickシツ氾カツ鉄ツ工ツ渉](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
- [ツ环板-ョツ嘉ッツ偲青clickシツ氾カツ鉄ツ工ツ渉](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip)
## 编程语言生态系统 {#bian-cheng-yu-yan-sheng-tai-xi-tong}
- Python
- [SQLAlchemy](https://www.sqlalchemy.org)
- [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse\_orm](https://github.com/Infinidat/infi.clickhouse_orm))
- [熊猫](https://pandas.pydata.org)
- [pandahouse](https://github.com/kszucs/pandahouse)
- [pandahouse](https://github.com/kszucs/pandahouse)
- PHP
- [Doctrine](https://www.doctrine-project.org/)
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
- R
- [dplyr](https://db.rstudio.com/dplyr/)
- [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [ツ暗ェツ氾环催ツ団](https://github.com/artpaul/clickhouse-cpp))

View File

@ -1,6 +1,5 @@
set(CLICKHOUSE_BENCHMARK_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/Benchmark.cpp)
set(CLICKHOUSE_BENCHMARK_LINK PRIVATE dbms clickhouse_aggregate_functions clickhouse_common_config ${Boost_PROGRAM_OPTIONS_LIBRARY})
set(CLICKHOUSE_BENCHMARK_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR})
clickhouse_program_add(benchmark)

View File

@ -684,7 +684,7 @@ private:
if (ignore_error)
{
Tokens tokens(begin, end);
IParser::Pos token_iterator(tokens);
IParser::Pos token_iterator(tokens, context.getSettingsRef().max_parser_depth);
while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid())
++token_iterator;
begin = token_iterator->end;
@ -958,10 +958,15 @@ private:
ParserQuery parser(end, true);
ASTPtr res;
const auto & settings = context.getSettingsRef();
size_t max_length = 0;
if (!allow_multi_statements)
max_length = settings.max_query_size;
if (is_interactive || ignore_error)
{
String message;
res = tryParseQuery(parser, pos, end, message, true, "", allow_multi_statements, 0);
res = tryParseQuery(parser, pos, end, message, true, "", allow_multi_statements, max_length, settings.max_parser_depth);
if (!res)
{
@ -970,7 +975,7 @@ private:
}
}
else
res = parseQueryAndMovePosition(parser, pos, end, "", allow_multi_statements, 0);
res = parseQueryAndMovePosition(parser, pos, end, "", allow_multi_statements, max_length, settings.max_parser_depth);
if (is_interactive)
{

View File

@ -14,6 +14,7 @@
#include <Parsers/ExpressionElementParsers.h>
#include <Compression/CompressionFactory.h>
#include <Common/TerminalSize.h>
#include <Core/Defines.h>
namespace DB
@ -123,7 +124,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
DB::ParserCodec codec_parser;
std::string codecs_line = boost::algorithm::join(codecs, ",");
auto ast = DB::parseQuery(codec_parser, "(" + codecs_line + ")", 0);
auto ast = DB::parseQuery(codec_parser, "(" + codecs_line + ")", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
codec = DB::CompressionCodecFactory::instance().get(ast, nullptr);
}
else

View File

@ -12,6 +12,6 @@ set(CLICKHOUSE_COPIER_LINK PRIVATE
clickhouse_dictionaries
string_utils ${Poco_XML_LIBRARY} PUBLIC daemon)
set(CLICKHOUSE_COPIER_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
set(CLICKHOUSE_COPIER_INCLUDE SYSTEM PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
clickhouse_program_add(copier)

View File

@ -1197,7 +1197,9 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
query += " LIMIT " + limit;
ParserQuery p_query(query.data() + query.size());
return parseQuery(p_query, query, 0);
const auto & settings = context.getSettingsRef();
return parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth);
};
/// Load balancing
@ -1409,7 +1411,8 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
query += "INSERT INTO " + getQuotedTable(split_table_for_current_piece) + " VALUES ";
ParserQuery p_query(query.data() + query.size());
query_insert_ast = parseQuery(p_query, query, 0);
const auto & settings = context.getSettingsRef();
query_insert_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth);
LOG_DEBUG(log, "Executing INSERT query: " << query);
}
@ -1634,7 +1637,8 @@ ASTPtr ClusterCopier::getCreateTableForPullShard(const ConnectionTimeouts & time
&task_cluster->settings_pull);
ParserCreateQuery parser_create_query;
return parseQuery(parser_create_query, create_query_pull_str, 0);
const auto & settings = context.getSettingsRef();
return parseQuery(parser_create_query, create_query_pull_str, settings.max_query_size, settings.max_parser_depth);
}
/// If it is implicitly asked to create split Distributed table for certain piece on current shard, we will do it.
@ -1712,7 +1716,8 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
}
ParserQuery parser_query(query.data() + query.size());
ASTPtr query_ast = parseQuery(parser_query, query, 0);
const auto & settings = context.getSettingsRef();
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
LOG_DEBUG(log, "Computing destination partition set, executing query: " << query);
@ -1759,7 +1764,8 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts,
<< partition_quoted_name << " existence, executing query: " << query);
ParserQuery parser_query(query.data() + query.size());
ASTPtr query_ast = parseQuery(parser_query, query, 0);
const auto & settings = context.getSettingsRef();
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
Context local_context = context;
local_context.setSettings(task_cluster->settings_pull);
@ -1793,7 +1799,8 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi
<< "existence, executing query: " << query);
ParserQuery parser_query(query.data() + query.size());
ASTPtr query_ast = parseQuery(parser_query, query, 0);
const auto & settings = context.getSettingsRef();
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
Context local_context = context;
local_context.setSettings(task_cluster->settings_pull);
@ -1826,7 +1833,8 @@ UInt64 ClusterCopier::executeQueryOnCluster(
if (query_ast_ == nullptr)
{
ParserQuery p_query(query.data() + query.size());
query_ast = parseQuery(p_query, query, 0);
const auto & settings = context.getSettingsRef();
query_ast = parseQuery(p_query, query, settings.max_query_size, settings.max_parser_depth);
}
else
query_ast = query_ast_;

View File

@ -4,6 +4,9 @@
#include "Internals.h"
#include "ClusterPartition.h"
#include <Core/Defines.h>
namespace DB
{
namespace ErrorCodes
@ -260,9 +263,10 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf
+ "." + escapeForFileName(table_push.second);
engine_push_str = config.getString(table_prefix + "engine");
{
ParserStorage parser_storage;
engine_push_ast = parseQuery(parser_storage, engine_push_str, 0);
engine_push_ast = parseQuery(parser_storage, engine_push_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
engine_push_partition_key_ast = extractPartitionKey(engine_push_ast);
primary_key_comma_separated = createCommaSeparatedStringFrom(extractPrimaryKeyColumnNames(engine_push_ast));
engine_push_zk_path = extractReplicatedTableZookeeperPath(engine_push_ast);
@ -273,7 +277,7 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf
auxiliary_engine_split_asts.reserve(number_of_splits);
{
ParserExpressionWithOptionalAlias parser_expression(false);
sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0);
sharding_key_ast = parseQuery(parser_expression, sharding_key_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
main_engine_split_ast = createASTStorageDistributed(cluster_push_name, table_push.first, table_push.second,
sharding_key_ast);
@ -291,7 +295,7 @@ inline TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConf
if (!where_condition_str.empty())
{
ParserExpressionWithOptionalAlias parser_expression(false);
where_condition_ast = parseQuery(parser_expression, where_condition_str, 0);
where_condition_ast = parseQuery(parser_expression, where_condition_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
// Will use canonical expression form
where_condition_str = queryToString(where_condition_ast);

View File

@ -53,7 +53,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
const char * end = pos + query.size();
ParserQuery parser(end);
ASTPtr res = parseQuery(parser, pos, end, "query", 0);
ASTPtr res = parseQuery(parser, pos, end, "query", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
if (!quiet)
{

View File

@ -267,8 +267,10 @@ void LocalServer::processQueries()
String initial_create_query = getInitialCreateTableQuery();
String queries_str = initial_create_query + config().getRawString("query");
const auto & settings = context->getSettingsRef();
std::vector<String> queries;
auto parse_res = splitMultipartQuery(queries_str, queries);
auto parse_res = splitMultipartQuery(queries_str, queries, settings.max_query_size, settings.max_parser_depth);
if (!parse_res.second)
throw Exception("Cannot parse and execute the following part of query: " + String(parse_res.first), ErrorCodes::SYNTAX_ERROR);

View File

@ -120,12 +120,14 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
SCOPE_EXIT(SQLFreeStmt(hstmt, SQL_DROP));
const auto & context_settings = context->getSettingsRef();
/// TODO Why not do SQLColumns instead?
std::string name = schema_name.empty() ? table_name : schema_name + "." + table_name;
std::stringstream ss;
std::string input = "SELECT * FROM " + name + " WHERE 1 = 0";
ParserQueryWithOutput parser;
ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", 0);
ASTPtr select = parseQuery(parser, input.data(), input.data() + input.size(), "", context_settings.max_query_size, context_settings.max_parser_depth);
IAST::FormatSettings settings(ss, true);
settings.always_quote_identifiers = true;

View File

@ -15,7 +15,6 @@
#include <common/getFQDNOrHostName.h>
#include <Common/CurrentThread.h>
#include <Common/setThreadName.h>
#include <Common/config.h>
#include <Common/SettingsChanges.h>
#include <Disks/DiskSpaceMonitor.h>
#include <Compression/CompressedReadBuffer.h>
@ -36,6 +35,11 @@
#include <Common/typeid_cast.h>
#include <Poco/Net/HTTPStream.h>
+ #if !defined(ARCADIA_BUILD)
+ # include <Common/config.h>
+ #endif
namespace DB
{

View File

@ -1,10 +1,8 @@
- #include <Common/config.h>
#include "MySQLHandler.h"
#include <limits>
#include <ext/scope_guard.h>
#include <Columns/ColumnVector.h>
- #include <Common/config_version.h>
#include <Common/NetException.h>
#include <Common/OpenSSLHelpers.h>
#include <Core/MySQLProtocol.h>
@ -18,11 +16,15 @@
#include <boost/algorithm/string/replace.hpp>
#include <regex>
+ #if !defined(ARCADIA_BUILD)
+ # include <Common/config_version.h>
+ #endif
#if USE_POCO_NETSSL
- #include <Poco/Net/SecureStreamSocket.h>
- #include <Poco/Net/SSLManager.h>
- #include <Poco/Crypto/CipherFactory.h>
- #include <Poco/Crypto/RSAKey.h>
+ # include <Poco/Crypto/CipherFactory.h>
+ # include <Poco/Crypto/RSAKey.h>
+ # include <Poco/Net/SSLManager.h>
+ # include <Poco/Net/SecureStreamSocket.h>
#endif
namespace DB
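
The include changes above and in the next few headers all follow one pattern: the CMake-generated `config.h` / `config_version.h` headers are pulled in only when `ARCADIA_BUILD` is not defined, because the ya.make build has no generated headers and supplies its feature macros on the compiler command line. A compilable sketch of the pattern, using a hypothetical `my_config.h` rather than the real generated header:

```cpp
/// sketch.cpp: the conditional-include pattern, not an actual ClickHouse file.
/// ARCADIA_BUILD is defined by the ya.make build, which has no generated
/// configuration headers; the CMake build generates one (a hypothetical
/// "my_config.h" here) that defines the USE_* feature macros.
#include <iostream>

#if !defined(ARCADIA_BUILD)
//  #include "my_config.h"        /// would define USE_SSL, USE_POCO_NETSSL, ...
#    define USE_SSL 1             /// stand-in so the sketch compiles anywhere
#endif

#if USE_SSL
static const char * build_flavour = "TLS support compiled in";
#else
static const char * build_flavour = "TLS support disabled";
#endif

int main()
{
    std::cout << build_flavour << '\n';
}
```

An undefined macro evaluates to 0 inside `#if`, so the guarded feature blocks simply drop out when the generated header is absent.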

View File

@ -1,13 +1,17 @@
#pragma once
- #include <Common/config.h>
#include <Poco/Net/TCPServerConnection.h>
#include <common/getFQDNOrHostName.h>
#include <Common/CurrentMetrics.h>
#include <Core/MySQLProtocol.h>
#include "IServer.h"
+ #if !defined(ARCADIA_BUILD)
+ # include <Common/config.h>
+ #endif
#if USE_POCO_NETSSL
- #include <Poco/Net/SecureStreamSocket.h>
+ # include <Poco/Net/SecureStreamSocket.h>
#endif
namespace CurrentMetrics

View File

@ -1,11 +1,15 @@
#pragma once
- #include <Common/config.h>
#include <Poco/Net/TCPServerConnectionFactory.h>
#include <atomic>
#include "IServer.h"
+ #if !defined(ARCADIA_BUILD)
+ # include <Common/config.h>
+ #endif
#if USE_SSL
- #include <openssl/rsa.h>
+ # include <openssl/rsa.h>
#endif
namespace DB

View File

@ -15,7 +15,6 @@
#include <ext/scope_guard.h>
#include <common/logger_useful.h>
#include <common/phdr_cache.h>
- #include <common/config_common.h>
#include <common/ErrorHandlers.h>
#include <common/getMemoryAmount.h>
#include <common/coverage.h>
@ -26,7 +25,6 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Common/ZooKeeper/ZooKeeperNodeCache.h>
#include "config_core.h"
#include <common/getFQDNOrHostName.h>
#include <Common/getMultipleKeysFromConfig.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
@ -59,19 +57,24 @@
#include "MetricsTransmitter.h"
#include <Common/StatusFile.h>
#include "TCPHandlerFactory.h"
#include "Common/config_version.h"
#include <Common/SensitiveDataMasker.h>
#include <Common/ThreadFuzzer.h>
#include "MySQLHandlerFactory.h"
+ #if !defined(ARCADIA_BUILD)
+ # include <common/config_common.h>
+ # include "config_core.h"
+ # include "Common/config_version.h"
+ #endif
#if defined(OS_LINUX)
- #include <Common/hasLinuxCapability.h>
- #include <sys/mman.h>
+ # include <sys/mman.h>
+ # include <Common/hasLinuxCapability.h>
#endif
#if USE_POCO_NETSSL
- #include <Poco/Net/Context.h>
- #include <Poco/Net/SecureServerSocket.h>
+ # include <Poco/Net/Context.h>
+ # include <Poco/Net/SecureServerSocket.h>
#endif
namespace CurrentMetrics
@ -248,7 +251,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
const auto memory_amount = getMemoryAmount();
- #if defined(__linux__)
+ #if defined(OS_LINUX)
std::string executable_path = getExecutablePath();
if (executable_path.empty())
executable_path = "/usr/bin/clickhouse"; /// It is used for information messages.
@ -631,7 +634,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
dns_cache_updater = std::make_unique<DNSCacheUpdater>(*global_context, config().getInt("dns_cache_update_period", 15));
}
- #if defined(__linux__)
+ #if defined(OS_LINUX)
if (!TaskStatsInfoGetter::checkPermissions())
{
LOG_INFO(log, "It looks like the process has no CAP_NET_ADMIN capability, 'taskstats' performance statistics will be disabled."

View File

@ -6,7 +6,6 @@
#include <Common/Stopwatch.h>
#include <Common/NetException.h>
#include <Common/setThreadName.h>
- #include <Common/config_version.h>
#include <IO/Progress.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedWriteBuffer.h>
@ -33,6 +32,10 @@
#include "TCPHandler.h"
+ #if !defined(ARCADIA_BUILD)
+ # include <Common/config_version.h>
+ #endif
namespace DB
{

programs/server/ya.make (new file, 30 lines)
View File

@ -0,0 +1,30 @@
PROGRAM(clickhouse-server)
PEERDIR(
clickhouse/base/common
clickhouse/base/daemon
clickhouse/base/loggers
clickhouse/src
contrib/libs/poco/NetSSL_OpenSSL
)
SRCS(
clickhouse-server.cpp
HTTPHandler.cpp
HTTPHandlerFactory.cpp
InterserverIOHTTPHandler.cpp
MetricsTransmitter.cpp
MySQLHandler.cpp
MySQLHandlerFactory.cpp
NotFoundHandler.cpp
PingRequestHandler.cpp
PrometheusMetricsWriter.cpp
PrometheusRequestHandler.cpp
ReplicasStatusHandler.cpp
RootRequestHandler.cpp
Server.cpp
TCPHandler.cpp
)
END()

programs/ya.make (new file, 3 lines)
View File

@ -0,0 +1,3 @@
RECURSE(
server
)

View File

@ -253,7 +253,7 @@ private:
}
else
{
- if (nodes.contains(keyword))
+ if (nodes.count(keyword))
throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR);
node = std::make_unique<Node>(keyword, node_type);
nodes[node->keyword] = node.get();
@ -279,7 +279,7 @@ private:
{
auto parent_node = std::make_unique<Node>(parent_keyword);
it_parent = nodes.emplace(parent_node->keyword, parent_node.get()).first;
- assert(!owned_nodes.contains(parent_node->keyword));
+ assert(!owned_nodes.count(parent_node->keyword));
std::string_view parent_keyword_as_string_view = parent_node->keyword;
owned_nodes[parent_keyword_as_string_view] = std::move(parent_node);
}
@ -299,9 +299,9 @@ private:
#undef MAKE_ACCESS_FLAGS_TO_KEYWORD_TREE_NODE
if (!owned_nodes.contains("NONE"))
if (!owned_nodes.count("NONE"))
throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR);
if (!owned_nodes.contains("ALL"))
if (!owned_nodes.count("ALL"))
throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR);
flags_to_keyword_tree = std::move(owned_nodes["ALL"]);

View File

@ -147,9 +147,9 @@ void ContextAccess::setUser(const UserPtr & user_) const
current_roles.reserve(params.current_roles.size());
for (const auto & id : params.current_roles)
{
- if (user->granted_roles.contains(id))
+ if (user->granted_roles.count(id))
current_roles.push_back(id);
- if (user->granted_roles_with_admin_option.contains(id))
+ if (user->granted_roles_with_admin_option.count(id))
current_roles_with_admin_option.push_back(id);
}
}
@ -358,7 +358,7 @@ void ContextAccess::checkAdminOption(const UUID & role_id) const
return;
auto roles_with_admin_option_loaded = roles_with_admin_option.load();
- if (roles_with_admin_option_loaded && roles_with_admin_option_loaded->contains(role_id))
+ if (roles_with_admin_option_loaded && roles_with_admin_option_loaded->count(role_id))
return;
std::optional<String> role_name = manager->readName(role_id);

View File

@ -32,6 +32,7 @@
#include <Interpreters/InterpreterShowCreateAccessEntityQuery.h>
#include <Interpreters/InterpreterShowGrantsQuery.h>
#include <Common/quoteString.h>
+ #include <Core/Defines.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
@ -93,7 +94,7 @@ namespace
const char * end = begin + file_contents.size();
while (pos < end)
{
queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0));
queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
while (isWhitespaceASCII(*pos) || *pos == ';')
++pos;
}
@ -560,7 +561,7 @@ std::vector<UUID> DiskAccessStorage::findAllImpl(std::type_index type) const
bool DiskAccessStorage::existsImpl(const UUID & id) const
{
std::lock_guard lock{mutex};
- return id_to_entry_map.contains(id);
+ return id_to_entry_map.count(id);
}
@ -709,7 +710,7 @@ void DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_
if (name_changed)
{
const auto & name_to_id_map = name_to_id_maps.at(type);
- if (name_to_id_map.contains(new_name))
+ if (name_to_id_map.count(new_name))
throwNameCollisionCannotRename(type, String{old_name}, new_name);
scheduleWriteLists(type);
}

View File

@ -253,44 +253,44 @@ void ExtendedRoleSet::add(const boost::container::flat_set<UUID> & ids_)
bool ExtendedRoleSet::match(const UUID & id) const
{
- return (all || ids.contains(id)) && !except_ids.contains(id);
+ return (all || ids.count(id)) && !except_ids.count(id);
}
bool ExtendedRoleSet::match(const UUID & user_id, const std::vector<UUID> & enabled_roles) const
{
- if (!all && !ids.contains(user_id))
+ if (!all && !ids.count(user_id))
{
bool found_enabled_role = std::any_of(
- enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return ids.contains(enabled_role); });
+ enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return ids.count(enabled_role); });
if (!found_enabled_role)
return false;
}
- if (except_ids.contains(user_id))
+ if (except_ids.count(user_id))
return false;
bool in_except_list = std::any_of(
- enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return except_ids.contains(enabled_role); });
+ enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return except_ids.count(enabled_role); });
return !in_except_list;
}
bool ExtendedRoleSet::match(const UUID & user_id, const boost::container::flat_set<UUID> & enabled_roles) const
{
- if (!all && !ids.contains(user_id))
+ if (!all && !ids.count(user_id))
{
bool found_enabled_role = std::any_of(
- enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return ids.contains(enabled_role); });
+ enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return ids.count(enabled_role); });
if (!found_enabled_role)
return false;
}
- if (except_ids.contains(user_id))
+ if (except_ids.count(user_id))
return false;
bool in_except_list = std::any_of(
- enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return except_ids.contains(enabled_role); });
+ enabled_roles.begin(), enabled_roles.end(), [this](const UUID & enabled_role) { return except_ids.count(enabled_role); });
return !in_except_list;
}

View File

@ -255,16 +255,18 @@ void QuotaCache::quotaRemoved(const UUID & quota_id)
void QuotaCache::chooseQuotaToConsume()
{
/// `mutex` is already locked.
-    std::erase_if(
-        enabled_quotas,
-        [&](const std::pair<EnabledQuota::Params, std::weak_ptr<EnabledQuota>> & pr)
-        {
-            auto elem = pr.second.lock();
-            if (!elem)
-                return true; // remove from the `enabled_quotas` list.
-            chooseQuotaToConsumeFor(*elem);
-            return false; // keep in the `enabled_quotas` list.
-        });
+    for (auto i = enabled_quotas.begin(), e = enabled_quotas.end(); i != e;)
+    {
+        auto elem = i->second.lock();
+        if (!elem)
+            i = enabled_quotas.erase(i);
+        else
+        {
+            chooseQuotaToConsumeFor(*elem);
+            ++i;
+        }
+    }
}
void QuotaCache::chooseQuotaToConsumeFor(EnabledQuota & enabled)
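
The cache classes in this and the following files all prune expired `weak_ptr` entries while visiting the live ones. `std::erase_if` for maps is another C++20 facility, so the loops fall back to the classic erase-through-the-returned-iterator idiom. A self-contained sketch with illustrative names (`Entry`, `pruneAndVisit`) rather than the actual cache types:

```cpp
#include <iostream>
#include <map>
#include <memory>

struct Entry { int value = 0; };

/// Visit every live entry and erase the ones whose owner has already gone away.
/// map::erase(it) returns the iterator that follows the erased element, so the
/// loop advances either through erase() or through ++it, never both.
void pruneAndVisit(std::map<int, std::weak_ptr<Entry>> & cache)
{
    for (auto it = cache.begin(), end = cache.end(); it != end;)
    {
        auto locked = it->second.lock();
        if (!locked)
        {
            it = cache.erase(it);                    /// expired: drop the entry
        }
        else
        {
            std::cout << "key " << it->first << " -> " << locked->value << '\n';
            ++it;                                    /// alive: keep and advance
        }
    }
}

int main()
{
    auto alive = std::make_shared<Entry>();
    alive->value = 42;

    std::map<int, std::weak_ptr<Entry>> cache;
    cache[1] = alive;
    cache[2] = std::make_shared<Entry>();            /// temporary dies immediately, entry 2 expires

    pruneAndVisit(cache);                            /// prints only key 1
    std::cout << "entries left: " << cache.size() << '\n';   /// 1
}
```

With a C++20 standard library the whole loop collapses back to a single `std::erase_if(cache, ...)` call, which is exactly what the removed lines did.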

View File

@ -103,16 +103,17 @@ void RoleCache::collectRolesInfo()
{
/// `mutex` is already locked.
-    std::erase_if(
-        enabled_roles,
-        [&](const std::pair<EnabledRoles::Params, std::weak_ptr<EnabledRoles>> & pr)
-        {
-            auto elem = pr.second.lock();
-            if (!elem)
-                return true; // remove from the `enabled_roles` map.
-            collectRolesInfoFor(*elem);
-            return false; // keep in the `enabled_roles` map.
-        });
+    for (auto i = enabled_roles.begin(), e = enabled_roles.end(); i != e;)
+    {
+        auto elem = i->second.lock();
+        if (!elem)
+            i = enabled_roles.erase(i);
+        else
+        {
+            collectRolesInfoFor(*elem);
+            ++i;
+        }
+    }
}

View File

@ -8,6 +8,7 @@
#include <Common/quoteString.h>
#include <ext/range.h>
#include <boost/smart_ptr/make_shared.hpp>
+ #include <Core/Defines.h>
namespace DB
@ -77,7 +78,7 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_)
try
{
ParserExpression parser;
- parsed_conditions[type] = parseQuery(parser, condition, 0);
+ parsed_conditions[type] = parseQuery(parser, condition, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
}
catch (...)
{
@ -178,16 +179,17 @@ void RowPolicyCache::rowPolicyRemoved(const UUID & policy_id)
void RowPolicyCache::mixConditions()
{
/// `mutex` is already locked.
-    std::erase_if(
-        enabled_row_policies,
-        [&](const std::pair<EnabledRowPolicies::Params, std::weak_ptr<EnabledRowPolicies>> & pr)
-        {
-            auto elem = pr.second.lock();
-            if (!elem)
-                return true; // remove from the `enabled_row_policies` map.
-            mixConditionsFor(*elem);
-            return false; // keep in the `enabled_row_policies` map.
-        });
+    for (auto i = enabled_row_policies.begin(), e = enabled_row_policies.end(); i != e;)
+    {
+        auto elem = i->second.lock();
+        if (!elem)
+            i = enabled_row_policies.erase(i);
+        else
+        {
+            mixConditionsFor(*elem);
+            ++i;
+        }
+    }
}

View File

@ -104,16 +104,17 @@ void SettingsProfilesCache::setDefaultProfileName(const String & default_profile
void SettingsProfilesCache::mergeSettingsAndConstraints()
{
/// `mutex` is already locked.
-    std::erase_if(
-        enabled_settings,
-        [&](const std::pair<EnabledSettings::Params, std::weak_ptr<EnabledSettings>> & pr)
-        {
-            auto enabled = pr.second.lock();
-            if (!enabled)
-                return true; // remove from the `enabled_settings` list.
-            mergeSettingsAndConstraintsFor(*enabled);
-            return false; // keep in the `enabled_settings` list.
-        });
+    for (auto i = enabled_settings.begin(), e = enabled_settings.end(); i != e;)
+    {
+        auto enabled = i->second.lock();
+        if (!enabled)
+            i = enabled_settings.erase(i);
+        else
+        {
+            mergeSettingsAndConstraintsFor(*enabled);
+            ++i;
+        }
+    }
}
@ -161,7 +162,7 @@ void SettingsProfilesCache::substituteProfiles(SettingsProfileElements & element
auto parent_profile_id = *element.parent_profile;
element.parent_profile.reset();
- if (already_substituted.contains(parent_profile_id))
+ if (already_substituted.count(parent_profile_id))
continue;
already_substituted.insert(parent_profile_id);

src/Access/ya.make (new file, 40 lines)
View File

@ -0,0 +1,40 @@
LIBRARY()
PEERDIR(
clickhouse/src/Common
)
SRCS(
AccessControlManager.cpp
AccessRights.cpp
AccessRightsElement.cpp
AllowedClientHosts.cpp
Authentication.cpp
ContextAccess.cpp
DiskAccessStorage.cpp
EnabledQuota.cpp
EnabledRoles.cpp
EnabledRolesInfo.cpp
EnabledRowPolicies.cpp
EnabledSettings.cpp
ExtendedRoleSet.cpp
IAccessEntity.cpp
IAccessStorage.cpp
MemoryAccessStorage.cpp
MultipleAccessStorage.cpp
Quota.cpp
QuotaCache.cpp
QuotaUsageInfo.cpp
Role.cpp
RoleCache.cpp
RowPolicy.cpp
RowPolicyCache.cpp
SettingsConstraints.cpp
SettingsProfile.cpp
SettingsProfileElement.cpp
SettingsProfilesCache.cpp
User.cpp
UsersConfigAccessStorage.cpp
)
END()

View File

@ -2,6 +2,7 @@
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/parseQuery.h>
#include <Common/typeid_cast.h>
+ #include <Core/Defines.h>
namespace DB
@ -65,7 +66,7 @@ void getAggregateFunctionNameAndParametersArray(
ParserExpressionList params_parser(false);
ASTPtr args_ast = parseQuery(params_parser,
parameters_str.data(), parameters_str.data() + parameters_str.size(),
"parameters of aggregate function in " + error_context, 0);
"parameters of aggregate function in " + error_context, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
if (args_ast->children.empty())
throw Exception("Incorrect list of parameters to aggregate function "

Some files were not shown because too many files have changed in this diff.