Merge branch 'master' into amosbird-onclusteraltersettings
Commit 8e0c3c8a42
@@ -214,6 +214,12 @@ if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
    endif ()
endif()

# Create BuildID when using lld. For other linkers it is created by default.
if (LINKER_NAME MATCHES "lld$")
    # SHA1 is not cryptographically secure but it is the best what lld is offering.
    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
endif ()

cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd

@@ -80,12 +80,24 @@ static void call_default_signal_handler(int sig)
    raise(sig);
}

// Apparently strsignal is not instrumented by MemorySanitizer, so we
// have to unpoison it to avoid msan reports inside fmt library when we
// print it.
const char * msan_strsignal(int sig)
{
    // Apparently strsignal is not instrumented by MemorySanitizer, so we
    // have to unpoison it to avoid msan reports inside fmt library when we
    // print it.
    // no glibc in osx/freebsd
#if !defined(__GLIBC_PREREQ)
#define __GLIBC_PREREQ(x, y) 0
#endif

    // glibc 2.32+ deprecates sys_siglist[]
    // newer glibc is a problem only for unbundled build.
#if __GLIBC_PREREQ(2, 32)
    const char * signal_name = sigdescr_np(sig);
#else
    const char * signal_name = sys_siglist[sig];
#endif

    __msan_unpoison_string(signal_name);
    return signal_name;
}

@@ -81,12 +81,11 @@ function fuzz
    echo Server started

    fuzzer_exit_code=0
    # SC2012: Use find instead of ls to better handle non-alphanumeric filenames.
    # They are all alphanumeric.
    # shellcheck disable=SC2012
    ./clickhouse-client --query-fuzzer-runs=1000 \
        < <(for f in $(ls ch/tests/queries/0_stateless/*.sql | sort -R); do cat "$f"; echo ';'; done) \
        > >(tail -10000 > fuzzer.log) \
    # SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
    # SC2046: Quote this to prevent word splitting. Actually I need word splitting.
    # shellcheck disable=SC2012,SC2046
    ./clickhouse-client --query-fuzzer-runs=1000 --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) \
        > >(tail -n 10000 > fuzzer.log) \
        2>&1 \
        || fuzzer_exit_code=$?

@@ -11,7 +11,7 @@ Functional tests are the most simple and convenient to use. Most of ClickHouse f

Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with a reference.

Tests are located in the `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica, which is not available to the general public. We tend to use only `stateless` tests and avoid adding new `stateful` tests.
Tests are located in the `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from Yandex.Metrica, and it is available to the general public.

Each test can be one of two types: `.sql` and `.sh`. A `.sql` test is a simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. A `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`.

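To make this concrete, here is a minimal sketch of what a stateless `.sql` test and its expected output could look like (the file name, table name and query are hypothetical; the reference file simply holds the output the test runner expects to capture):

```sql
-- hypothetical tests/queries/0_stateless/01234_sum_small_numbers.sql
-- The captured output is compared against 01234_sum_small_numbers.reference.
DROP TABLE IF EXISTS small_numbers;
CREATE TABLE small_numbers (n UInt64) ENGINE = Memory;
INSERT INTO small_numbers SELECT number FROM system.numbers LIMIT 10;
SELECT sum(n) FROM small_numbers;
DROP TABLE small_numbers;
```

The matching `.reference` file would contain a single line with the value `45`.
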
@@ -84,11 +84,9 @@ If you want to improve performance of ClickHouse in some scenario, and if improv

Some programs in the `tests` directory are not prepared tests but test tools. For example, for `Lexer` there is a tool `src/Parsers/tests/lexer` that just does tokenization of stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.

You can also place a pair of files `.sh` and `.reference` along with the tool to run it on some predefined input - then the script result can be compared to the `.reference` file. These kinds of tests are not automated.

## Miscellaneous Tests {#miscellaneous-tests}

There are tests for external dictionaries located at `tests/external_dictionaries` and for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.
There are tests for machine learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.

There is a separate test for quorum inserts. This test runs a ClickHouse cluster on separate servers and emulates various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT`, like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts were written and all rejected inserts were not.

@@ -169,53 +167,55 @@ Precise query execution timings are not recorded and not compared due to high va

## Build Tests {#build-tests}

Build tests allow checking that the build is not broken on various alternative configurations and on some foreign systems. Tests are located in the `ci` directory. They run the build from source inside Docker, Vagrant, and sometimes with `qemu-user-static` inside Docker. These tests are under development and test runs are not automated.
Build tests allow checking that the build is not broken on various alternative configurations and on some foreign systems. These tests are automated as well.

Motivation:

Normally we release and run all tests on a single variant of the ClickHouse build. But there are alternative build variants that are not thoroughly tested. Examples:
- build on FreeBSD
- build on Debian with libraries from system packages
- build with shared linking of libraries
- build on AArch64 platform
- build on PowerPC platform
Examples:
- cross-compile for Darwin x86_64 (Mac OS X)
- cross-compile for FreeBSD x86_64
- cross-compile for Linux AArch64
- build on Ubuntu with libraries from system packages (discouraged)
- build with shared linking of libraries (discouraged)

For example, a build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.

Though we cannot run all tests on all variants of builds, we want to check at least that various build variants are not broken. For this purpose we use build tests.

We also test that there are no translation units that are too long to compile or require too much RAM.

We also test that there are no overly large stack frames.

## Testing for Protocol Compatibility {#testing-for-protocol-compatibility}

When we extend the ClickHouse network protocol, we test manually that old clickhouse-client works with new clickhouse-server and new clickhouse-client works with old clickhouse-server (simply by running binaries from the corresponding packages).

We also test some cases automatically with integration tests:
- whether data written by an old version of ClickHouse can be successfully read by the new version;
- whether distributed queries work in a cluster with different ClickHouse versions.

## Help from the Compiler {#help-from-the-compiler}

Main ClickHouse code (that is located in the `dbms` directory) is built with `-Wall -Wextra -Werror` and with some additional enabled warnings. These options are not enabled for third-party libraries.

Clang has even more useful warnings - you can look for them with `-Weverything` and pick something for the default build.

For production builds, gcc is used (it still generates slightly more efficient code than clang). For development, clang is usually more convenient to use. You can build on your own machine in debug mode (to save the battery of your laptop), but please note that the compiler is able to generate more warnings with `-O3` due to better control flow and inter-procedural analysis. When building with clang in debug mode, the debug version of `libc++` is used that allows catching more errors at runtime.
For production builds, clang is used, but we also test gcc builds. For development, clang is usually more convenient to use. You can build on your own machine in debug mode (to save the battery of your laptop), but please note that the compiler is able to generate more warnings with `-O3` due to better control flow and inter-procedural analysis. When building with clang in debug mode, the debug version of `libc++` is used that allows catching more errors at runtime.

## Sanitizers {#sanitizers}

### Address sanitizer
We run functional and integration tests under ASan on per-commit basis.

### Valgrind (Memcheck)
We run functional tests under Valgrind overnight. It takes multiple hours. Currently there is one known false positive in the `re2` library, see [this article](https://research.swtch.com/sparse).

### Undefined behaviour sanitizer
We run functional and integration tests under ASan on per-commit basis.
We run functional, integration, stress and unit tests under ASan on per-commit basis.

### Thread sanitizer
We run functional tests under TSan on per-commit basis. We still don’t run integration tests under TSan on per-commit basis.
We run functional, integration, stress and unit tests under TSan on per-commit basis.

### Memory sanitizer
Currently we still don’t use MSan.
We run functional, integration, stress and unit tests under MSan on per-commit basis.

### Debug allocator
Debug version of `jemalloc` is used for debug build.
### Undefined behaviour sanitizer
We run functional, integration, stress and unit tests under UBSan on per-commit basis. The code of some third-party libraries is not sanitized for UB.

### Valgrind (Memcheck)
We used to run functional tests under Valgrind overnight, but don't do it anymore. It takes multiple hours. Currently there is one known false positive in the `re2` library, see [this article](https://research.swtch.com/sparse).

## Fuzzing {#fuzzing}

@@ -233,19 +233,62 @@ Google OSS-Fuzz can be found at `docker/fuzz`.
We also use a simple fuzz test to generate random SQL queries and to check that the server doesn’t die executing them.
You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer).

We also use a sophisticated AST-based query fuzzer that is able to find a huge number of corner cases. It does random permutations and substitutions in the query AST. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests while processing them in random order.

## Stress test

Stress tests are another case of fuzzing. They run all functional tests in parallel in random order with a single server. Results of the tests are not checked.

It is checked that:
- the server does not crash, no debug or sanitizer traps are triggered;
- there are no deadlocks;
- the database structure is consistent;
- the server can successfully stop after the test and start again without exceptions.

There are five variants (Debug, ASan, TSan, MSan, UBSan).

## Thread Fuzzer

Thread Fuzzer (not to be confused with Thread Sanitizer) is another kind of fuzzing that allows randomizing the thread order of execution. It helps to find even more special cases.

## Security Audit {#security-audit}

People from the Yandex Security Team do some basic overview of ClickHouse capabilities from the security standpoint.

## Static Analyzers {#static-analyzers}

We run `PVS-Studio` on per-commit basis. We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`. You will find instructions for usage in the `tests/instructions/` directory. Also you can read [the article in Russian](https://habr.com/company/yandex/blog/342018/).
We run `clang-tidy` and `PVS-Studio` on per-commit basis. `clang-static-analyzer` checks are also enabled. `clang-tidy` is also used for some style checks.

We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`, `CodeQL`. You will find instructions for usage in the `tests/instructions/` directory. Also you can read [the article in Russian](https://habr.com/company/yandex/blog/342018/).

If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.

We also use `shellcheck` for static analysis of shell scripts.

## Hardening {#hardening}

`FORTIFY_SOURCE` is used by default. It is almost useless, but still makes sense in rare cases and we don’t disable it.
In debug builds we are using a custom allocator that does ASLR of user-level allocations.

We also manually protect memory regions that are expected to be readonly after allocation.

In debug builds we also involve a customization of libc that ensures that no "harmful" (obsolete, insecure, not thread-safe) functions are called.

Debug assertions are used extensively.

In debug builds, if an exception with a "logical error" code (which implies a bug) is thrown, the program is terminated prematurely. This allows us to use exceptions in release builds but make them assertions in debug builds.

The debug version of jemalloc is used for debug builds.
The debug version of libc++ is used for debug builds.

## Runtime Integrity Checks

Data stored on disk is checksummed. Data in MergeTree tables is checksummed in three ways simultaneously* (compressed data blocks, uncompressed data blocks, the total checksum across blocks). Data transferred over the network between client and server or between servers is also checksummed. Replication ensures bit-identical data on replicas.

It is required to protect from faulty hardware (bit rot on storage media, bit flips in RAM of the server, bit flips in RAM of the network controller, bit flips in RAM of the network switch, bit flips in RAM of the client, bit flips on the wire). Note that bit flips are common and likely to occur even with ECC RAM and in the presence of TCP checksums (if you manage to run thousands of servers processing petabytes of data each day). [See the video (Russian)](https://www.youtube.com/watch?v=ooBAQIe0KlQ).

ClickHouse provides diagnostics that will help ops engineers to find faulty hardware.

\* and it is not slow.

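As a side note, these on-disk checksums can also be exercised from SQL: the `CHECK TABLE` statement verifies the data parts of a table, and the `check_query_single_value_result` setting controls whether the result is reported as a single 1/0 value. A minimal sketch with a hypothetical table name:

```sql
-- Returns 1 when all data parts of the (hypothetical) table pass verification.
CHECK TABLE hits_local;
```
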
## Code Style {#code-style}

@@ -259,6 +302,8 @@ Alternatively you can try `uncrustify` tool to reformat your code. Configuration

`CLion` has its own code formatter that has to be tuned for our code style.

We also use `codespell` to find typos in code. It is automated as well.

## Metrica B2B Tests {#metrica-b2b-tests}

Each ClickHouse release is tested with the Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of the Metrica engine that processes a fixed sample of input data. Then the results of the two instances of the Metrica engine are compared together.
@@ -267,13 +312,25 @@ These tests are automated by separate team. Due to high number of moving parts,

## Test Coverage {#test-coverage}

As of July 2018 we don’t track test coverage.
We also track test coverage, but only for functional tests and only for clickhouse-server. It is performed on a daily basis.

## Tests for Tests

There is an automated check for flaky tests. It runs all new tests 100 times (for functional tests) or 10 times (for integration tests). If the test failed at least once, it is considered flaky.

## Testflows

[Testflows](https://testflows.com/) is an enterprise-grade testing framework. It is used by Altinity for some of the tests and we run these tests in our CI.

## Yandex Checks (only for Yandex employees)

These checks import ClickHouse code into the Yandex internal monorepository, so the ClickHouse codebase can be used as a library by other products at Yandex (YT and YDB). Note that clickhouse-server itself is not built from the internal repo and an unmodified open-source build is used for Yandex applications.

## Test Automation {#test-automation}

We run tests with the Yandex internal CI and job automation system named “Sandbox”.

Build jobs and tests are run in Sandbox on per commit basis. Resulting packages and test results are published in GitHub and can be downloaded by direct links. Artifacts are stored eternally. When you send a pull request on GitHub, we tag it as “can be tested” and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc) for you.
Build jobs and tests are run in Sandbox on per commit basis. Resulting packages and test results are published in GitHub and can be downloaded by direct links. Artifacts are stored for several months. When you send a pull request on GitHub, we tag it as “can be tested” and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc) for you.

We don’t use Travis CI due to the limit on time and computational power.
We don’t use Jenkins. It was used before and now we are happy we are not using Jenkins.

@@ -410,3 +410,5 @@ GROUP BY yr,
ORDER BY yr,
         mo;
```

The data is also available for interactive queries in the [Playground](https://gh-api.clickhouse.tech/play?user=play), [example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).

@@ -71,4 +71,4 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"

The [ClickHouse tutorial](../../getting-started/tutorial.md) is based on the Yandex.Metrica dataset, and the recommended way to get started with this dataset is to just go through the tutorial.

Additional examples of queries to these tables can be found among the [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hists` and `test.visits` there).
Additional examples of queries to these tables can be found among the [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hits` and `test.visits` there).

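For instance, a stateful-style query against that preloaded data might look like the sketch below (the `EventDate` column is assumed from the Yandex.Metrica schema):

```sql
-- Count hits per day in the preloaded Yandex.Metrica sample.
SELECT EventDate, count() AS hits
FROM test.hits
GROUP BY EventDate
ORDER BY EventDate;
```
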
@@ -398,6 +398,8 @@ ORDER BY c DESC
LIMIT 10;
```

You can also play with the data in Playground, [example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIERheU9mV2VlaywgY291bnQoKikgQVMgYwpGUk9NIG9udGltZQpXSEVSRSBZZWFyPj0yMDAwIEFORCBZZWFyPD0yMDA4CkdST1VQIEJZIERheU9mV2VlawpPUkRFUiBCWSBjIERFU0M7Cg==).

This performance test was created by Vadim Tkachenko. See:

- https://www.percona.com/blog/2009/10/02/analyzing-air-traffic-performance-with-infobright-and-monetdb/

@@ -1308,7 +1308,7 @@ Note that the `arraySum` is a [higher-order function](../../sql-reference/functi

## arrayAvg(\[func,\] arr1, …) {#array-avg}

Returns the sum of the `func` values. If the function is omitted, it just returns the average of the array elements.
Returns the average of the `func` values. If the function is omitted, it just returns the average of the array elements.

Note that the `arrayAvg` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.

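For a quick illustration of both forms (a minimal sketch; the literal arrays and aliases are just examples):

```sql
SELECT
    arrayAvg([1, 2, 4]) AS plain_avg,             -- (1 + 2 + 4) / 3 ≈ 2.33
    arrayAvg(x -> x * x, [1, 2, 4]) AS lambda_avg; -- (1 + 4 + 16) / 3 = 7
```
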
@@ -267,7 +267,7 @@ void executeQuery(

**9.** Multi-line comments must not have empty lines at the beginning or the end (except for the line that closes the multi-line comment).

**10.** To comment out code, use basic comments, not «record» comments.
**10.** To comment out code, use basic comments, not "documenting" comments.

**11.** Delete commented-out (inactive) parts of the code before committing.

@@ -335,7 +335,7 @@ template <bool without_www>
struct ExtractDomain
```

**7.** For abstract types (interfaces), use the `I` prefix.
**7.** For abstract classes (interfaces), use the `I` prefix.

``` cpp
class IBlockInputStream
@@ -349,7 +349,7 @@ class IBlockInputStream
bool info_successfully_loaded = false;
```

**9.** The names of `define`s and global constants use `ALL_CAPS` with underscores.
**9.** The names of `define`s and global constants use the all-uppercase form with underscores, e.g. `ALL_CAPS`.

``` cpp
#define MAX_SRC_TABLE_NAMES_TO_STORE 1000
@@ -357,14 +357,14 @@ bool info_successfully_loaded = false;

**10.** File names should use the same style as their contents.

If a file contains a single class, name the file the same way as the class.
If a file contains a single class, name the file the same way as the class (CamelCase).

If a file contains a single function, name the file the same way as the function.
If a file contains a single function, name the file the same way as the function (camelCase).

**11.** If a name contains an abbreviation, then:

- For variable names, the abbreviation should use lowercase letters: `mysql_connection` (not `mySQL_connection`).
- For the names of classes and functions, keep the uppercase letters in the abbreviation: `MySQLConnection` (not `MySqlConnection`.
- For the names of classes and functions, keep the uppercase letters in the abbreviation: `MySQLConnection` (not `MySqlConnection`).

**12.** Constructor arguments that are used only to initialize class members should be named the same way as the class members, but with a trailing underscore.

@@ -411,7 +411,7 @@ enum class CompressionMethod

Incomplete words are acceptable if the shortened version is in common use.

You can also use an abbreviation if the full name is included in a comment next to it.
You can also use an abbreviation if there is a comment next to it that contains the full name.

**17.** C++ source file names must have the `.cpp` extension. Header files must have the `.h` extension.

@@ -441,7 +441,7 @@ enum class CompressionMethod

In offline data processing applications, it is often acceptable not to catch exceptions.

In servers that handle user requests, it is usually enough to catch exceptions at the top level of the connection handler.
In servers that handle user requests, catching exceptions at the top level of the connection handler is usually enough.

In thread functions, you should catch and hold all exceptions to rethrow them in the main thread after `join`.

@@ -548,7 +548,7 @@ Fork is not used for parallelization.

**10.** Constants.

Use const references, pointers to constants, `const_iterator`, and const pointers.
Use const references and pointers, pointers to constants, `const_iterator`, and const methods.

Treat `const` as the default and use non-`const` only when necessary.

@@ -560,7 +560,7 @@ Fork is not used for parallelization.

**12.** Numeric types.

Use `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, as well as `Int64`, `size_t`, `ssize_t` and `ptrdiff_t`.
Use `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32` and `Int64`, as well as `size_t`, `ssize_t` and `ptrdiff_t`.

Do not use these types: `signed / unsigned long`, `long long`, `short`, `signed / unsigned char`, `char`.

@@ -732,11 +732,11 @@ The CPU instruction set is the minimal set supported among our servers. Currently it is SSE 4.2.

**8.** Commit as often as possible, even if the code is only partially ready.

Use branches for features with a clear purpose.
A branch can be created for this purpose.

If the code in the `master` branch is not yet buildable, exclude it from the build before the `push`. You will need to finish it or remove it within a few days.
If your code in the `master` branch is not yet buildable, you need to exclude it from the build before the `push`. You will need to finish it or remove it within a few days.

**9.** For unimportant changes, use branches and publish them on the server.
**9.** For non-trivial changes, use branches and publish them on the server.

**10.** Unused code is removed from the repo.

@ -59,7 +59,9 @@
|
||||
#include <DataStreams/AsynchronousBlockInputStream.h>
|
||||
#include <DataStreams/AddingDefaultsBlockInputStream.h>
|
||||
#include <DataStreams/InternalTextLogsRowOutputStream.h>
|
||||
#include <DataStreams/NullBlockOutputStream.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
#include <Parsers/ASTSetQuery.h>
|
||||
#include <Parsers/ASTUseQuery.h>
|
||||
#include <Parsers/ASTInsertQuery.h>
|
||||
@ -110,6 +112,7 @@ namespace ErrorCodes
|
||||
extern const int INVALID_USAGE_OF_INPUT;
|
||||
extern const int DEADLOCK_AVOIDED;
|
||||
extern const int UNRECOGNIZED_ARGUMENTS;
|
||||
extern const int SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
|
||||
@ -136,6 +139,9 @@ private:
|
||||
bool stdin_is_a_tty = false; /// stdin is a terminal.
|
||||
bool stdout_is_a_tty = false; /// stdout is a terminal.
|
||||
|
||||
/// If not empty, queries will be read from these files
|
||||
std::vector<std::string> queries_files;
|
||||
|
||||
std::unique_ptr<Connection> connection; /// Connection to DB.
|
||||
String full_query; /// Current query as it was given to the client.
|
||||
|
||||
@ -478,10 +484,10 @@ private:
|
||||
/// - stdin is not a terminal. In this case queries are read from it.
|
||||
/// - -qf (--queries-file) command line option is present.
|
||||
/// The value of the option is used as file with query (or of multiple queries) to execute.
|
||||
if (!stdin_is_a_tty || config().has("query") || config().has("queries-file"))
|
||||
if (!stdin_is_a_tty || config().has("query") || !queries_files.empty())
|
||||
is_interactive = false;
|
||||
|
||||
if (config().has("query") && config().has("queries-file"))
|
||||
if (config().has("query") && !queries_files.empty())
|
||||
{
|
||||
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
@ -696,14 +702,8 @@ private:
|
||||
auto query_id = config().getString("query_id", "");
|
||||
if (!query_id.empty())
|
||||
context.setCurrentQueryId(query_id);
|
||||
if (query_fuzzer_runs)
|
||||
{
|
||||
nonInteractiveWithFuzzing();
|
||||
}
|
||||
else
|
||||
{
|
||||
nonInteractive();
|
||||
}
|
||||
|
||||
nonInteractive();
|
||||
|
||||
/// If exception code isn't zero, we should return non-zero return code anyway.
|
||||
if (last_exception_received_from_server)
|
||||
@ -794,15 +794,21 @@ private:
|
||||
{
|
||||
String text;
|
||||
|
||||
if (config().has("queries-file"))
|
||||
if (!queries_files.empty())
|
||||
{
|
||||
ReadBufferFromFile in(config().getString("queries-file"));
|
||||
readStringUntilEOF(text, in);
|
||||
processMultiQuery(text);
|
||||
for (const auto & queries_file : queries_files)
|
||||
{
|
||||
connection->setDefaultDatabase(connection_parameters.default_database);
|
||||
ReadBufferFromFile in(queries_file);
|
||||
readStringUntilEOF(text, in);
|
||||
processMultiQuery(text);
|
||||
}
|
||||
return;
|
||||
}
|
||||
else if (config().has("query"))
|
||||
{
|
||||
text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
}
|
||||
else
|
||||
{
|
||||
/// If 'query' parameter is not set, read a query from stdin.
|
||||
@ -811,113 +817,10 @@ private:
|
||||
readStringUntilEOF(text, in);
|
||||
}
|
||||
|
||||
processQueryText(text);
|
||||
}
|
||||
|
||||
void nonInteractiveWithFuzzing()
|
||||
{
|
||||
if (config().has("query"))
|
||||
{
|
||||
// Poco configuration should not process substitutions in form of
|
||||
// ${...} inside query
|
||||
processWithFuzzing(config().getRawString("query"));
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to stream the queries from stdin, without reading all of them
|
||||
// into memory. The interface of the parser does not support streaming,
|
||||
// in particular, it can't distinguish the end of partial input buffer
|
||||
// and the final end of input file. This means we have to try to split
|
||||
// the input into separate queries here. Two patterns of input are
|
||||
// especially interesting:
|
||||
// 1) multiline query:
|
||||
// select 1
|
||||
// from system.numbers;
|
||||
//
|
||||
// 2) csv insert with in-place data:
|
||||
// insert into t format CSV 1;2
|
||||
//
|
||||
// (1) means we can't split on new line, and (2) means we can't split on
|
||||
// semicolon. Solution: split on ';\n'. This sequence is frequent enough
|
||||
// in the SQL tests which are our principal input for fuzzing. Now we
|
||||
// have another interesting case:
|
||||
// 3) escaped semicolon followed by newline, e.g.
|
||||
// select ';
|
||||
// '
|
||||
//
|
||||
// To handle (3), parse until we can, and read more data if the parser
|
||||
// complains. Hopefully this should be enough...
|
||||
ReadBufferFromFileDescriptor in(STDIN_FILENO);
|
||||
std::string text;
|
||||
while (!in.eof())
|
||||
{
|
||||
// Read until separator.
|
||||
while (!in.eof())
|
||||
{
|
||||
char * next_separator = find_first_symbols<';'>(in.position(),
|
||||
in.buffer().end());
|
||||
|
||||
if (next_separator < in.buffer().end())
|
||||
{
|
||||
next_separator++;
|
||||
if (next_separator < in.buffer().end()
|
||||
&& *next_separator == '\n')
|
||||
{
|
||||
// Found ';\n', append it to the query text and try to
|
||||
// parse.
|
||||
next_separator++;
|
||||
text.append(in.position(), next_separator - in.position());
|
||||
in.position() = next_separator;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Didn't find the semicolon and reached the end of buffer.
|
||||
text.append(in.position(), next_separator - in.position());
|
||||
in.position() = next_separator;
|
||||
|
||||
if (text.size() > 1024 * 1024)
|
||||
{
|
||||
// We've read a lot of text and still haven't seen a separator.
|
||||
// Likely some pathological input, just fall through to prevent
|
||||
// too long loops.
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Parse and execute what we've read.
|
||||
const auto * new_end = processWithFuzzing(text);
|
||||
|
||||
if (new_end > &text[0])
|
||||
{
|
||||
const auto rest_size = text.size() - (new_end - &text[0]);
|
||||
|
||||
memcpy(&text[0], new_end, rest_size);
|
||||
text.resize(rest_size);
|
||||
}
|
||||
else
|
||||
{
|
||||
// We didn't read enough text to parse a query. Will read more.
|
||||
}
|
||||
|
||||
// Ensure that we're still connected to the server. If the server died,
|
||||
// the reconnect is going to fail with an exception, and the fuzzer
|
||||
// will exit. The ping() would be the best match here, but it's
|
||||
// private, probably for a good reason that the protocol doesn't allow
|
||||
// pings at any possible moment.
|
||||
// Don't forget to reset the default database which might have changed.
|
||||
connection->setDefaultDatabase("");
|
||||
connection->forceConnected(connection_parameters.timeouts);
|
||||
|
||||
if (text.size() > 4 * 1024)
|
||||
{
|
||||
// Some pathological situation where the text is larger than 4kB
|
||||
// and we still cannot parse a single query in it. Abort.
|
||||
std::cerr << "Read too much text and still can't parse a query."
|
||||
" Aborting." << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
if (query_fuzzer_runs)
|
||||
processWithFuzzing(text);
|
||||
else
|
||||
processQueryText(text);
|
||||
}
|
||||
|
||||
bool processQueryText(const String & text)
|
||||
@ -945,7 +848,8 @@ private:
|
||||
{
|
||||
const bool test_mode = config().has("testmode");
|
||||
|
||||
{ /// disable logs if expects errors
|
||||
{
|
||||
/// disable logs if expects errors
|
||||
TestHint test_hint(test_mode, all_queries_text);
|
||||
if (test_hint.clientError() || test_hint.serverError())
|
||||
processTextAsSingleQuery("SET send_logs_level = 'fatal'");
|
||||
@ -1019,7 +923,7 @@ private:
|
||||
if (hint.clientError() != e.code())
|
||||
{
|
||||
if (hint.clientError())
|
||||
e.addMessage("\nExpected clinet error: " + std::to_string(hint.clientError()));
|
||||
e.addMessage("\nExpected client error: " + std::to_string(hint.clientError()));
|
||||
throw;
|
||||
}
|
||||
|
||||
@ -1078,39 +982,49 @@ private:
|
||||
expected_client_error = test_hint.clientError();
|
||||
expected_server_error = test_hint.serverError();
|
||||
|
||||
try
|
||||
if (query_fuzzer_runs)
|
||||
{
|
||||
processParsedSingleQuery();
|
||||
|
||||
if (insert_ast && insert_ast->data)
|
||||
processWithFuzzing(full_query);
|
||||
}
|
||||
else
|
||||
{
|
||||
try
|
||||
{
|
||||
// For VALUES format: use the end of inline data as reported
|
||||
// by the format parser (it is saved in sendData()). This
|
||||
// allows us to handle queries like:
|
||||
// insert into t values (1); select 1
|
||||
//, where the inline data is delimited by semicolon and not
|
||||
// by a newline.
|
||||
this_query_end = parsed_query->as<ASTInsertQuery>()->end;
|
||||
processParsedSingleQuery();
|
||||
|
||||
if (insert_ast && insert_ast->data)
|
||||
{
|
||||
// For VALUES format: use the end of inline data as reported
|
||||
// by the format parser (it is saved in sendData()). This
|
||||
// allows us to handle queries like:
|
||||
// insert into t values (1); select 1
|
||||
//, where the inline data is delimited by semicolon and not
|
||||
// by a newline.
|
||||
this_query_end = parsed_query->as<ASTInsertQuery>()->end;
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
last_exception_received_from_server = std::make_unique<Exception>(getCurrentExceptionMessage(true), getCurrentExceptionCode());
|
||||
actual_client_error = last_exception_received_from_server->code();
|
||||
if (!ignore_error && (!actual_client_error || actual_client_error != expected_client_error))
|
||||
std::cerr << "Error on processing query: " << full_query << std::endl << last_exception_received_from_server->message();
|
||||
received_exception_from_server = true;
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
last_exception_received_from_server = std::make_unique<Exception>(getCurrentExceptionMessage(true), getCurrentExceptionCode());
|
||||
actual_client_error = last_exception_received_from_server->code();
|
||||
if (!ignore_error && (!actual_client_error || actual_client_error != expected_client_error))
|
||||
std::cerr << "Error on processing query: " << full_query << std::endl << last_exception_received_from_server->message();
|
||||
received_exception_from_server = true;
|
||||
}
|
||||
|
||||
if (!test_hint.checkActual(actual_server_error, actual_client_error, received_exception_from_server, last_exception_received_from_server))
|
||||
connection->forceConnected(connection_parameters.timeouts);
|
||||
if (!test_hint.checkActual(
|
||||
actual_server_error, actual_client_error, received_exception_from_server, last_exception_received_from_server))
|
||||
{
|
||||
connection->forceConnected(connection_parameters.timeouts);
|
||||
}
|
||||
|
||||
if (received_exception_from_server && !ignore_error)
|
||||
{
|
||||
if (is_interactive)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
if (received_exception_from_server && !ignore_error)
|
||||
{
|
||||
if (is_interactive)
|
||||
break;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
this_query_begin = this_query_end;
|
||||
@ -1120,165 +1034,145 @@ private:
|
||||
}
|
||||
|
||||
|
||||
// Returns the last position we could parse.
|
||||
const char * processWithFuzzing(const String & text)
|
||||
void processWithFuzzing(const String & text)
|
||||
{
|
||||
/// Several queries separated by ';'.
|
||||
/// INSERT data is ended by the end of line, not ';'.
|
||||
ASTPtr orig_ast;
|
||||
|
||||
const char * begin = text.data();
|
||||
const char * end = begin + text.size();
|
||||
|
||||
while (begin < end)
|
||||
try
|
||||
{
|
||||
// Skip whitespace before the query
|
||||
while (isWhitespaceASCII(*begin) || *begin == ';')
|
||||
{
|
||||
++begin;
|
||||
}
|
||||
|
||||
const auto * this_query_begin = begin;
|
||||
ASTPtr orig_ast = parseQuery(begin, end, true);
|
||||
|
||||
if (!orig_ast)
|
||||
{
|
||||
// Can't continue after a parsing error
|
||||
return begin;
|
||||
}
|
||||
|
||||
auto * as_insert = orig_ast->as<ASTInsertQuery>();
|
||||
if (as_insert && as_insert->data)
|
||||
{
|
||||
// INSERT data is ended by newline
|
||||
as_insert->end = find_first_symbols<'\n'>(as_insert->data, end);
|
||||
begin = as_insert->end;
|
||||
}
|
||||
|
||||
full_query = text.substr(this_query_begin - text.data(),
|
||||
begin - text.data());
|
||||
|
||||
// Don't repeat inserts, the tables grow too big. Also don't repeat
|
||||
// creates because first we run the unmodified query, it will succeed,
|
||||
// and the subsequent queries will fail. When we run out of fuzzer
|
||||
// errors, it may be interesting to add fuzzing of create queries that
|
||||
// wraps columns into LowCardinality or Nullable. Also there are other
|
||||
// kinds of create queries such as CREATE DICTIONARY, we could fuzz
|
||||
// them as well.
|
||||
int this_query_runs = query_fuzzer_runs;
|
||||
if (as_insert
|
||||
|| orig_ast->as<ASTCreateQuery>())
|
||||
{
|
||||
this_query_runs = 1;
|
||||
}
|
||||
|
||||
ASTPtr fuzz_base = orig_ast;
|
||||
for (int fuzz_step = 0; fuzz_step < this_query_runs; fuzz_step++)
|
||||
{
|
||||
fprintf(stderr, "fuzzing step %d out of %d for query at pos %zd\n",
|
||||
fuzz_step, this_query_runs, this_query_begin - text.data());
|
||||
|
||||
ASTPtr ast_to_process;
|
||||
try
|
||||
{
|
||||
WriteBufferFromOwnString dump_before_fuzz;
|
||||
fuzz_base->dumpTree(dump_before_fuzz);
|
||||
auto base_before_fuzz = fuzz_base->formatForErrorMessage();
|
||||
|
||||
ast_to_process = fuzz_base->clone();
|
||||
|
||||
WriteBufferFromOwnString dump_of_cloned_ast;
|
||||
ast_to_process->dumpTree(dump_of_cloned_ast);
|
||||
|
||||
// Run the original query as well.
|
||||
if (fuzz_step > 0)
|
||||
{
|
||||
fuzzer.fuzzMain(ast_to_process);
|
||||
}
|
||||
|
||||
auto base_after_fuzz = fuzz_base->formatForErrorMessage();
|
||||
|
||||
// Debug AST cloning errors.
|
||||
if (base_before_fuzz != base_after_fuzz)
|
||||
{
|
||||
fprintf(stderr, "base before fuzz: %s\n"
|
||||
"base after fuzz: %s\n", base_before_fuzz.c_str(),
|
||||
base_after_fuzz.c_str());
|
||||
fprintf(stderr, "dump before fuzz:\n%s\n",
|
||||
dump_before_fuzz.str().c_str());
|
||||
fprintf(stderr, "dump of cloned ast:\n%s\n",
|
||||
dump_of_cloned_ast.str().c_str());
|
||||
fprintf(stderr, "dump after fuzz:\n");
|
||||
WriteBufferFromOStream cerr_buf(std::cerr, 4096);
|
||||
fuzz_base->dumpTree(cerr_buf);
|
||||
cerr_buf.next();
|
||||
|
||||
fmt::print(stderr, "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly.");
|
||||
|
||||
assert(false);
|
||||
}
|
||||
|
||||
auto fuzzed_text = ast_to_process->formatForErrorMessage();
|
||||
if (fuzz_step > 0 && fuzzed_text == base_before_fuzz)
|
||||
{
|
||||
fprintf(stderr, "got boring ast\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
parsed_query = ast_to_process;
|
||||
query_to_send = parsed_query->formatForErrorMessage();
|
||||
|
||||
processParsedSingleQuery();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
// Some functions (e.g. protocol parsers) don't throw, but
|
||||
// set last_exception instead, so we'll also do it here for
|
||||
// uniformity.
|
||||
last_exception_received_from_server = std::make_unique<Exception>(getCurrentExceptionMessage(true), getCurrentExceptionCode());
|
||||
received_exception_from_server = true;
|
||||
}
|
||||
|
||||
if (received_exception_from_server)
|
||||
{
|
||||
fmt::print(stderr, "Error on processing query '{}': {}\n",
|
||||
ast_to_process->formatForErrorMessage(),
|
||||
last_exception_received_from_server->message());
|
||||
}
|
||||
|
||||
if (!connection->isConnected())
|
||||
{
|
||||
// Probably the server is dead because we found an assertion
|
||||
// failure. Fail fast.
|
||||
fmt::print(stderr, "Lost connection to the server\n");
|
||||
return begin;
|
||||
}
|
||||
|
||||
// The server is still alive so we're going to continue fuzzing.
|
||||
// Determine what we're going to use as the starting AST.
|
||||
if (received_exception_from_server)
|
||||
{
|
||||
// Query completed with error, keep the previous starting AST.
|
||||
// Also discard the exception that we now know to be non-fatal,
|
||||
// so that it doesn't influence the exit code.
|
||||
last_exception_received_from_server.reset(nullptr);
|
||||
received_exception_from_server = false;
|
||||
}
|
||||
else if (ast_to_process->formatForErrorMessage().size() > 500)
|
||||
{
|
||||
// ast too long, start from original ast
|
||||
fprintf(stderr, "Current AST is too long, discarding it and using the original AST as a start\n");
|
||||
fuzz_base = orig_ast;
|
||||
}
|
||||
else
|
||||
{
|
||||
// fuzz starting from this successful query
|
||||
fprintf(stderr, "Query succeeded, using this AST as a start\n");
|
||||
fuzz_base = ast_to_process;
|
||||
}
|
||||
}
|
||||
const char * begin = text.data();
|
||||
orig_ast = parseQuery(begin, begin + text.size(), true);
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
if (e.code() != ErrorCodes::SYNTAX_ERROR)
|
||||
throw;
|
||||
}
|
||||
|
||||
return begin;
|
||||
if (!orig_ast)
|
||||
{
|
||||
// Can't continue after a parsing error
|
||||
return;
|
||||
}
|
||||
|
||||
// Don't repeat inserts, the tables grow too big. Also don't repeat
|
||||
// creates because first we run the unmodified query, it will succeed,
|
||||
// and the subsequent queries will fail. When we run out of fuzzer
|
||||
// errors, it may be interesting to add fuzzing of create queries that
|
||||
// wraps columns into LowCardinality or Nullable. Also there are other
|
||||
// kinds of create queries such as CREATE DICTIONARY, we could fuzz
|
||||
// them as well. Also there is no point fuzzing DROP queries.
|
||||
size_t this_query_runs = query_fuzzer_runs;
|
||||
if (orig_ast->as<ASTInsertQuery>() || orig_ast->as<ASTCreateQuery>() || orig_ast->as<ASTDropQuery>())
|
||||
{
|
||||
this_query_runs = 1;
|
||||
}
|
||||
|
||||
ASTPtr fuzz_base = orig_ast;
|
||||
for (size_t fuzz_step = 0; fuzz_step < this_query_runs; ++fuzz_step)
|
||||
{
|
||||
fmt::print(stderr, "Fuzzing step {} out of {}\n",
|
||||
fuzz_step, this_query_runs);
|
||||
|
||||
ASTPtr ast_to_process;
|
||||
try
|
||||
{
|
||||
WriteBufferFromOwnString dump_before_fuzz;
|
||||
fuzz_base->dumpTree(dump_before_fuzz);
|
||||
auto base_before_fuzz = fuzz_base->formatForErrorMessage();
|
||||
|
||||
ast_to_process = fuzz_base->clone();
|
||||
|
||||
WriteBufferFromOwnString dump_of_cloned_ast;
|
||||
ast_to_process->dumpTree(dump_of_cloned_ast);
|
||||
|
||||
// Run the original query as well.
|
||||
if (fuzz_step > 0)
|
||||
{
|
||||
fuzzer.fuzzMain(ast_to_process);
|
||||
}
|
||||
|
||||
auto base_after_fuzz = fuzz_base->formatForErrorMessage();
|
||||
|
||||
// Debug AST cloning errors.
|
||||
if (base_before_fuzz != base_after_fuzz)
|
||||
{
|
||||
fmt::print(stderr,
|
||||
"Base before fuzz: {}\n"
|
||||
"Base after fuzz: {}\n",
|
||||
base_before_fuzz, base_after_fuzz);
|
||||
fmt::print(stderr, "Dump before fuzz:\n{}\n", dump_before_fuzz.str());
|
||||
fmt::print(stderr, "Dump of cloned AST:\n{}\n", dump_of_cloned_ast.str());
|
||||
fmt::print(stderr, "Dump after fuzz:\n");
|
||||
|
||||
WriteBufferFromOStream cerr_buf(std::cerr, 4096);
|
||||
fuzz_base->dumpTree(cerr_buf);
|
||||
cerr_buf.next();
|
||||
|
||||
fmt::print(stderr, "IAST::clone() is broken for some AST node. This is a bug. The original AST ('dump before fuzz') and its cloned copy ('dump of cloned AST') refer to the same nodes, which must never happen. This means that their parent node doesn't implement clone() correctly.");
|
||||
|
||||
assert(false);
|
||||
}
|
||||
|
||||
auto fuzzed_text = ast_to_process->formatForErrorMessage();
|
||||
if (fuzz_step > 0 && fuzzed_text == base_before_fuzz)
|
||||
{
|
||||
fmt::print(stderr, "Got boring AST\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
parsed_query = ast_to_process;
|
||||
query_to_send = parsed_query->formatForErrorMessage();
|
||||
|
||||
processParsedSingleQuery();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
// Some functions (e.g. protocol parsers) don't throw, but
|
||||
// set last_exception instead, so we'll also do it here for
|
||||
// uniformity.
|
||||
last_exception_received_from_server = std::make_unique<Exception>(getCurrentExceptionMessage(true), getCurrentExceptionCode());
|
||||
received_exception_from_server = true;
|
||||
}
|
||||
|
||||
if (received_exception_from_server)
|
||||
{
|
||||
fmt::print(stderr, "Error on processing query '{}': {}\n",
|
||||
ast_to_process->formatForErrorMessage(),
|
||||
last_exception_received_from_server->message());
|
||||
}
|
||||
|
||||
if (!connection->isConnected())
|
||||
{
|
||||
// Probably the server is dead because we found an assertion
|
||||
// failure. Fail fast.
|
||||
fmt::print(stderr, "Lost connection to the server\n");
|
||||
return;
|
||||
}
|
||||
|
||||
// The server is still alive so we're going to continue fuzzing.
|
||||
// Determine what we're going to use as the starting AST.
|
||||
if (received_exception_from_server)
|
||||
{
|
||||
// Query completed with error, keep the previous starting AST.
|
||||
// Also discard the exception that we now know to be non-fatal,
|
||||
// so that it doesn't influence the exit code.
|
||||
last_exception_received_from_server.reset(nullptr);
|
||||
received_exception_from_server = false;
|
||||
}
|
||||
else if (ast_to_process->formatForErrorMessage().size() > 500)
|
||||
{
|
||||
// ast too long, start from original ast
|
||||
fmt::print(stderr, "Current AST is too long, discarding it and using the original AST as a start\n");
|
||||
fuzz_base = orig_ast;
|
||||
}
|
||||
else
|
||||
{
|
||||
// fuzz starting from this successful query
|
||||
fmt::print(stderr, "Query succeeded, using this AST as a start\n");
|
||||
fuzz_base = ast_to_process;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void processTextAsSingleQuery(const String & text_)
|
||||
@ -1891,6 +1785,13 @@ private:
|
||||
{
|
||||
if (!block_out_stream)
|
||||
{
|
||||
/// Ignore all results when fuzzing as they can be huge.
|
||||
if (query_fuzzer_runs)
|
||||
{
|
||||
block_out_stream = std::make_shared<NullBlockOutputStream>(block);
|
||||
return;
|
||||
}
|
||||
|
||||
WriteBuffer * out_buf = nullptr;
|
||||
String pager = config().getString("pager", "");
|
||||
if (!pager.empty())
|
||||
@ -2348,7 +2249,8 @@ public:
|
||||
"Suggestion limit for how many databases, tables and columns to fetch.")
|
||||
("multiline,m", "multiline")
|
||||
("multiquery,n", "multiquery")
|
||||
("queries-file", po::value<std::string>(), "file path with queries to execute")
|
||||
("queries-file", po::value<std::vector<std::string>>()->multitoken(),
|
||||
"file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)")
|
||||
("format,f", po::value<std::string>(), "default output format")
|
||||
("testmode,T", "enable test hints in comments")
|
||||
("ignore-error", "do not stop processing in multiquery mode")
|
||||
@ -2478,12 +2380,11 @@ public:
|
||||
if (options.count("query"))
|
||||
config().setString("query", options["query"].as<std::string>());
|
||||
if (options.count("queries-file"))
|
||||
config().setString("queries-file", options["queries-file"].as<std::string>());
|
||||
queries_files = options["queries-file"].as<std::vector<std::string>>();
|
||||
if (options.count("database"))
|
||||
config().setString("database", options["database"].as<std::string>());
|
||||
if (options.count("pager"))
|
||||
config().setString("pager", options["pager"].as<std::string>());
|
||||
|
||||
if (options.count("port") && !options["port"].defaulted())
|
||||
config().setInt("port", options["port"].as<int>());
|
||||
if (options.count("secure"))
|
||||
@ -2537,7 +2438,6 @@ public:
|
||||
config().setBool("multiquery", true);
|
||||
|
||||
// Ignore errors in parsing queries.
|
||||
// TODO stop using parseQuery.
|
||||
config().setBool("ignore-error", true);
|
||||
ignore_error = true;
|
||||
}
|
||||
|
@ -53,17 +53,35 @@ class AggregateFunctionIfNullUnary final
|
||||
private:
|
||||
size_t num_arguments;
|
||||
|
||||
/// The name of the nested function, including combinators (i.e. *If)
|
||||
///
|
||||
/// getName() from the nested_function cannot be used because in case of *If combinator
|
||||
/// with Nullable argument nested_function will point to the function w/o combinator.
|
||||
/// (I.e. sumIf(Nullable, 1) -> sum()), and distributed query processing will fail.
|
||||
///
|
||||
/// And nested_function cannot point to the function with *If since
|
||||
/// due to optimization in the add() which pass only one column with the result,
|
||||
/// and so AggregateFunctionIf::add() cannot be called this way
|
||||
/// (it write to the last argument -- num_arguments-1).
|
||||
///
|
||||
/// And to avoid extra level of indirection, the name of function is cached:
|
||||
///
|
||||
/// AggregateFunctionIfNullUnary::add -> [ AggregateFunctionIf::add -> ] AggregateFunctionSum::add
|
||||
String name;
|
||||
|
||||
using Base = AggregateFunctionNullBase<result_is_nullable, serialize_flag,
|
||||
AggregateFunctionIfNullUnary<result_is_nullable, serialize_flag>>;
|
||||
public:
|
||||
|
||||
String getName() const override
|
||||
{
|
||||
return Base::getName();
|
||||
return name;
|
||||
}
|
||||
|
||||
AggregateFunctionIfNullUnary(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params)
|
||||
: Base(std::move(nested_function_), arguments, params), num_arguments(arguments.size())
|
||||
AggregateFunctionIfNullUnary(const String & name_, AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params)
|
||||
: Base(std::move(nested_function_), arguments, params)
|
||||
, num_arguments(arguments.size())
|
||||
, name(name_)
|
||||
{
|
||||
if (num_arguments == 0)
|
||||
throw Exception("Aggregate function " + getName() + " require at least one argument",
|
||||
@ -174,14 +192,14 @@ AggregateFunctionPtr AggregateFunctionIf::getOwnNullAdapter(
|
||||
{
|
||||
if (return_type_is_nullable)
|
||||
{
|
||||
return std::make_shared<AggregateFunctionIfNullUnary<true, true>>(nested_func, arguments, params);
|
||||
return std::make_shared<AggregateFunctionIfNullUnary<true, true>>(nested_function->getName(), nested_func, arguments, params);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (serialize_flag)
|
||||
return std::make_shared<AggregateFunctionIfNullUnary<false, true>>(nested_func, arguments, params);
|
||||
return std::make_shared<AggregateFunctionIfNullUnary<false, true>>(nested_function->getName(), nested_func, arguments, params);
|
||||
else
|
||||
return std::make_shared<AggregateFunctionIfNullUnary<false, false>>(nested_func, arguments, params);
|
||||
return std::make_shared<AggregateFunctionIfNullUnary<false, false>>(nested_function->getName(), nested_func, arguments, params);
|
||||
}
|
||||
}
|
||||
else
|
||||
|
@ -531,6 +531,7 @@
|
||||
M(562, TLD_LIST_NOT_FOUND) \
|
||||
M(563, CANNOT_READ_MAP_FROM_TEXT) \
|
||||
M(564, INTERSERVER_SCHEME_DOESNT_MATCH) \
|
||||
M(565, TOO_MANY_PARTITIONS) \
|
||||
\
|
||||
M(999, KEEPER_EXCEPTION) \
|
||||
M(1000, POCO_EXCEPTION) \
|
||||
|
@ -16,8 +16,8 @@ private:
|
||||
bool nextImpl() override;
|
||||
|
||||
public:
|
||||
CompressedReadBuffer(ReadBuffer & in_)
|
||||
: CompressedReadBufferBase(&in_), BufferWithOwnMemory<ReadBuffer>(0)
|
||||
CompressedReadBuffer(ReadBuffer & in_, bool allow_different_codecs_ = false)
|
||||
: CompressedReadBufferBase(&in_, allow_different_codecs_), BufferWithOwnMemory<ReadBuffer>(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -46,6 +46,9 @@ CompressionCodecPtr CompressionCodecFactory::get(const String & family_name, std
|
||||
|
||||
void CompressionCodecFactory::validateCodec(const String & family_name, std::optional<int> level, bool sanity_check) const
|
||||
{
|
||||
if (family_name.empty())
|
||||
throw Exception("Compression codec name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
if (level)
|
||||
{
|
||||
auto literal = std::make_shared<ASTLiteral>(static_cast<UInt64>(*level));
|
||||
|
@ -353,6 +353,7 @@ class IColumn;
|
||||
M(Bool, allow_introspection_functions, false, "Allow functions for introspection of ELF and DWARF for query profiling. These functions are slow and may impose security considerations.", 0) \
|
||||
\
|
||||
M(UInt64, max_partitions_per_insert_block, 100, "Limit maximum number of partitions in single INSERTed block. Zero means unlimited. Throw exception if the block contains too many partitions. This setting is a safety threshold, because using large number of partitions is a common misconception.", 0) \
|
||||
M(UInt64, max_partitions_to_read, 0, "Limit the max number of partitions that can be accessed in one query. 0 means unlimited.", 0) \
|
||||
M(Bool, check_query_single_value_result, true, "Return check query result as single 1/0 value", 0) \
|
||||
M(Bool, allow_drop_detached, false, "Allow ALTER TABLE ... DROP DETACHED PART[ITION] ... queries", 0) \
|
||||
\
|
||||
|
@ -225,7 +225,7 @@ std::variant<Block, int> RemoteQueryExecutor::read(std::unique_ptr<ReadContext>
|
||||
if (!read_context->resumeRoutine())
|
||||
return Block();
|
||||
|
||||
if (read_context->is_read_in_progress)
|
||||
if (read_context->is_read_in_progress.load(std::memory_order_relaxed))
|
||||
{
|
||||
read_context->setTimer();
|
||||
return read_context->epoll_fd;
|
||||
|
@ -22,7 +22,7 @@ class RemoteQueryExecutorReadContext
|
||||
public:
|
||||
using Self = RemoteQueryExecutorReadContext;
|
||||
|
||||
bool is_read_in_progress = false;
|
||||
std::atomic_bool is_read_in_progress = false;
|
||||
Packet packet;
|
||||
|
||||
std::exception_ptr exception;
|
||||
@ -162,7 +162,7 @@ public:
|
||||
|
||||
bool resumeRoutine()
|
||||
{
|
||||
if (is_read_in_progress && !checkTimeout())
|
||||
if (is_read_in_progress.load(std::memory_order_relaxed) && !checkTimeout())
|
||||
return false;
|
||||
|
||||
{
|
||||
@ -226,9 +226,9 @@ public:
|
||||
throw;
|
||||
}
|
||||
|
||||
read_context.is_read_in_progress = true;
|
||||
read_context.is_read_in_progress.store(true, std::memory_order_relaxed);
|
||||
fiber = std::move(fiber).resume();
|
||||
read_context.is_read_in_progress = false;
|
||||
read_context.is_read_in_progress.store(false, std::memory_order_relaxed);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -18,6 +18,7 @@ namespace ErrorCodes
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
|
||||
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
|
||||
extern const int ARGUMENT_OUT_OF_BOUND;
|
||||
}
|
||||
|
||||
// FunctionStringHash
|
||||
@ -30,6 +31,8 @@ public:
|
||||
static constexpr auto name = Name::name;
|
||||
static constexpr size_t default_shingle_size = 3;
|
||||
static constexpr size_t default_num_hashes = 6;
|
||||
static constexpr size_t max_shingle_size = 25;
|
||||
static constexpr size_t max_num_hashes = 25;
|
||||
|
||||
static FunctionPtr create(const Context &) { return std::make_shared<FunctionsStringHash>(); }
|
||||
|
||||
@ -100,10 +103,14 @@ public:
|
||||
}
|
||||
|
||||
if (shingle_size == 0)
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Second argument (shingle size) of function {} cannot be zero", getName());
|
||||
|
||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Second argument (shingle size) of function {} cannot be zero", getName());
|
||||
if (num_hashes == 0)
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Third argument (num hashes) of function {} cannot be zero", getName());
|
||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Third argument (num hashes) of function {} cannot be zero", getName());
|
||||
|
||||
if (shingle_size > max_shingle_size)
|
||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Second argument (shingle size) of function {} cannot be greater then {}", getName(), max_shingle_size);
|
||||
if (num_hashes > max_num_hashes)
|
||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Third argument (num hashes) of function {} cannot be greater then {}", getName(), max_num_hashes);
|
||||
|
||||
auto type = std::make_shared<DataTypeUInt64>();
|
||||
if constexpr (is_simhash)
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <re2/re2.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -14,6 +15,7 @@ namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
extern const int NO_SUCH_COLUMN_IN_TABLE;
|
||||
extern const int CANNOT_COMPILE_REGEXP;
|
||||
}
|
||||
|
||||
void IASTColumnsTransformer::transform(const ASTPtr & transformer, ASTs & nodes)
|
||||
@ -86,6 +88,9 @@ void ASTColumnsExceptTransformer::formatImpl(const FormatSettings & settings, Fo
|
||||
(*it)->formatImpl(settings, state, frame);
|
||||
}
|
||||
|
||||
if (!original_pattern.empty())
|
||||
settings.ostr << quoteString(original_pattern);
|
||||
|
||||
if (children.size() > 1)
|
||||
settings.ostr << ")";
|
||||
}
|
||||
@ -93,24 +98,40 @@ void ASTColumnsExceptTransformer::formatImpl(const FormatSettings & settings, Fo
|
||||
void ASTColumnsExceptTransformer::transform(ASTs & nodes) const
|
||||
{
|
||||
std::set<String> expected_columns;
|
||||
for (const auto & child : children)
|
||||
expected_columns.insert(child->as<const ASTIdentifier &>().name());
|
||||
|
||||
for (auto it = nodes.begin(); it != nodes.end();)
|
||||
if (original_pattern.empty())
|
||||
{
|
||||
if (const auto * id = it->get()->as<ASTIdentifier>())
|
||||
for (const auto & child : children)
|
||||
expected_columns.insert(child->as<const ASTIdentifier &>().name());
|
||||
|
||||
for (auto it = nodes.begin(); it != nodes.end();)
|
||||
{
|
||||
auto expected_column = expected_columns.find(id->shortName());
|
||||
if (expected_column != expected_columns.end())
|
||||
if (const auto * id = it->get()->as<ASTIdentifier>())
|
||||
{
|
||||
expected_columns.erase(expected_column);
|
||||
it = nodes.erase(it);
|
||||
auto expected_column = expected_columns.find(id->shortName());
|
||||
if (expected_column != expected_columns.end())
|
||||
{
|
||||
expected_columns.erase(expected_column);
|
||||
it = nodes.erase(it);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
else
|
||||
++it;
|
||||
}
|
||||
else
|
||||
++it;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto it = nodes.begin(); it != nodes.end();)
|
||||
{
|
||||
if (const auto * id = it->get()->as<ASTIdentifier>())
|
||||
{
|
||||
if (isColumnMatching(id->shortName()))
|
||||
{
|
||||
it = nodes.erase(it);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
if (is_strict && !expected_columns.empty())
|
||||
@ -125,6 +146,21 @@ void ASTColumnsExceptTransformer::transform(ASTs & nodes) const
|
||||
}
|
||||
}
|
||||
|
||||
void ASTColumnsExceptTransformer::setPattern(String pattern)
|
||||
{
|
||||
original_pattern = std::move(pattern);
|
||||
column_matcher = std::make_shared<RE2>(original_pattern, RE2::Quiet);
|
||||
if (!column_matcher->ok())
|
||||
throw DB::Exception(
|
||||
"COLUMNS pattern " + original_pattern + " cannot be compiled: " + column_matcher->error(),
|
||||
DB::ErrorCodes::CANNOT_COMPILE_REGEXP);
|
||||
}
|
||||
|
||||
bool ASTColumnsExceptTransformer::isColumnMatching(const String & column_name) const
|
||||
{
|
||||
return RE2::PartialMatch(column_name, *column_matcher);
|
||||
}
|
||||
|
||||
void ASTColumnsReplaceTransformer::Replacement::formatImpl(
|
||||
const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
|
||||
{
|
||||
|
@ -2,6 +2,11 @@
|
||||
|
||||
#include <Parsers/IAST.h>
|
||||
|
||||
namespace re2
|
||||
{
|
||||
class RE2;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
class IASTColumnsTransformer : public IAST
|
||||
@ -43,9 +48,13 @@ public:
|
||||
return clone;
|
||||
}
|
||||
void transform(ASTs & nodes) const override;
|
||||
void setPattern(String pattern);
|
||||
bool isColumnMatching(const String & column_name) const;
|
||||
|
||||
protected:
|
||||
void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
|
||||
std::shared_ptr<re2::RE2> column_matcher;
|
||||
String original_pattern;
|
||||
};
|
||||
|
||||
class ASTColumnsReplaceTransformer : public IASTColumnsTransformer
|
||||
|
@ -343,6 +343,26 @@ bool ParserFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
||||
throw Exception("Argument of function toDate is unquoted: toDate(" + contents_str + "), must be: toDate('" + contents_str + "')"
|
||||
, ErrorCodes::SYNTAX_ERROR);
|
||||
}
|
||||
else if (Poco::toLower(getIdentifierName(identifier)) == "position")
|
||||
{
|
||||
/// POSITION(needle IN haystack) is equivalent to function position(haystack, needle)
|
||||
if (const auto * list = expr_list_args->as<ASTExpressionList>())
|
||||
{
|
||||
if (list->children.size() == 1)
|
||||
{
|
||||
if (const auto * in_func = list->children[0]->as<ASTFunction>())
|
||||
{
|
||||
if (in_func->name == "in")
|
||||
{
|
||||
// switch the two arguments
|
||||
const auto & arg_list = in_func->arguments->as<ASTExpressionList &>();
|
||||
if (arg_list.children.size() == 2)
|
||||
expr_list_args->children = {arg_list.children[1], arg_list.children[0]};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The parametric aggregate function has two lists (parameters and arguments) in parentheses. Example: quantile(0.9)(x).
|
||||
if (allow_function_parameters && pos->type == TokenType::OpeningRoundBracket)
|
||||
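A minimal SQL sketch (not part of the commit) of the rewrite handled in the hunk above: POSITION(needle IN haystack) is parsed into an ordinary position(haystack, needle) call with the two arguments swapped, so the two spellings below should return the same value.

SELECT position('bc' IN 'abc') AS standard_form,
       position('abc', 'bc') AS function_form; -- both expected to yield 2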
@ -1427,6 +1447,8 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e
|
||||
is_strict = true;
|
||||
|
||||
ASTs identifiers;
|
||||
ASTPtr regex_node;
|
||||
ParserStringLiteral regex;
|
||||
auto parse_id = [&identifiers, &pos, &expected]
|
||||
{
|
||||
ASTPtr identifier;
|
||||
@ -1441,7 +1463,7 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e
|
||||
{
|
||||
// support one or more parameter
|
||||
++pos;
|
||||
if (!ParserList::parseUtil(pos, expected, parse_id, false))
|
||||
if (!ParserList::parseUtil(pos, expected, parse_id, false) && !regex.parse(pos, regex_node, expected))
|
||||
return false;
|
||||
|
||||
if (pos->type != TokenType::ClosingRoundBracket)
|
||||
@ -1451,12 +1473,15 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e
|
||||
else
|
||||
{
|
||||
// only one parameter
|
||||
if (!parse_id())
|
||||
if (!parse_id() && !regex.parse(pos, regex_node, expected))
|
||||
return false;
|
||||
}
|
||||
|
||||
auto res = std::make_shared<ASTColumnsExceptTransformer>();
|
||||
res->children = std::move(identifiers);
|
||||
if (regex_node)
|
||||
res->setPattern(regex_node->as<ASTLiteral &>().value.get<String>());
|
||||
else
|
||||
res->children = std::move(identifiers);
|
||||
res->is_strict = is_strict;
|
||||
node = std::move(res);
|
||||
return true;
|
||||
|
@ -1181,7 +1181,7 @@ void TCPHandler::receiveUnexpectedData()
|
||||
std::shared_ptr<ReadBuffer> maybe_compressed_in;
|
||||
|
||||
if (last_block_in.compression == Protocol::Compression::Enable)
|
||||
maybe_compressed_in = std::make_shared<CompressedReadBuffer>(*in);
|
||||
maybe_compressed_in = std::make_shared<CompressedReadBuffer>(*in, /* allow_different_codecs */ true);
|
||||
else
|
||||
maybe_compressed_in = in;
|
||||
|
||||
@ -1198,8 +1198,11 @@ void TCPHandler::initBlockInput()
|
||||
{
|
||||
if (!state.block_in)
|
||||
{
|
||||
/// 'allow_different_codecs' is set to true, because some parts of compressed data can be precompressed in advance
|
||||
/// with another codec than the rest of the data. Example: data sent by Distributed tables.
|
||||
|
||||
if (state.compression == Protocol::Compression::Enable)
|
||||
state.maybe_compressed_in = std::make_shared<CompressedReadBuffer>(*in);
|
||||
state.maybe_compressed_in = std::make_shared<CompressedReadBuffer>(*in, /* allow_different_codecs */ true);
|
||||
else
|
||||
state.maybe_compressed_in = in;
|
||||
|
||||
|
@ -299,6 +299,10 @@ DistributedBlockOutputStream::runWritingJob(DistributedBlockOutputStream::JobRep
|
||||
const Block & shard_block = (num_shards > 1) ? job.current_shard_block : current_block;
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
|
||||
/// Do not initiate INSERT for empty block.
|
||||
if (shard_block.rows() == 0)
|
||||
return;
|
||||
|
||||
if (!job.is_local_job || !settings.prefer_localhost_replica)
|
||||
{
|
||||
if (!job.stream)
|
||||
@ -368,7 +372,8 @@ void DistributedBlockOutputStream::writeSync(const Block & block)
|
||||
const Settings & settings = context.getSettingsRef();
|
||||
const auto & shards_info = cluster->getShardsInfo();
|
||||
bool random_shard_insert = settings.insert_distributed_one_random_shard && !storage.has_sharding_key;
|
||||
size_t start = 0, end = shards_info.size();
|
||||
size_t start = 0;
|
||||
size_t end = shards_info.size();
|
||||
if (random_shard_insert)
|
||||
{
|
||||
start = storage.getRandomShardIndex(shards_info);
|
||||
@ -582,6 +587,17 @@ void DistributedBlockOutputStream::writeToLocal(const Block & block, const size_
|
||||
|
||||
void DistributedBlockOutputStream::writeToShard(const Block & block, const std::vector<std::string> & dir_names)
|
||||
{
|
||||
const auto & settings = context.getSettingsRef();
|
||||
|
||||
std::string compression_method = Poco::toUpper(settings.network_compression_method.toString());
|
||||
std::optional<int> compression_level;
|
||||
|
||||
if (compression_method == "ZSTD")
|
||||
compression_level = settings.network_zstd_compression_level;
|
||||
|
||||
CompressionCodecFactory::instance().validateCodec(compression_method, compression_level, !settings.allow_suspicious_codecs);
|
||||
CompressionCodecPtr compression_codec = CompressionCodecFactory::instance().get(compression_method, compression_level);
|
||||
|
||||
/// tmp directory is used to ensure atomicity of transactions
|
||||
/// and to keep the monitor thread from reading incomplete data
|
||||
std::string first_file_tmp_path{};
|
||||
@ -607,7 +623,7 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std::
|
||||
/// Write batch to temporary location
|
||||
{
|
||||
WriteBufferFromFile out{first_file_tmp_path};
|
||||
CompressedWriteBuffer compress{out};
|
||||
CompressedWriteBuffer compress{out, compression_codec};
|
||||
NativeBlockOutputStream stream{compress, DBMS_TCP_PROTOCOL_VERSION, block.cloneEmpty()};
|
||||
|
||||
/// Prepare the header.
|
||||
|
@ -840,7 +840,7 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_
|
||||
sync_guard.emplace(volume->getDisk(), to);
|
||||
|
||||
if (!volume->getDisk()->exists(from))
|
||||
throw Exception("Part directory " + fullPath(volume->getDisk(), from) + " doesn't exist. Most likely it is logical error.", ErrorCodes::FILE_DOESNT_EXIST);
|
||||
throw Exception("Part directory " + fullPath(volume->getDisk(), from) + " doesn't exist. Most likely it is a logical error.", ErrorCodes::FILE_DOESNT_EXIST);
|
||||
|
||||
if (volume->getDisk()->exists(to))
|
||||
{
|
||||
|
@ -59,6 +59,7 @@ namespace ErrorCodes
|
||||
extern const int ARGUMENT_OUT_OF_BOUND;
|
||||
extern const int TOO_MANY_ROWS;
|
||||
extern const int CANNOT_PARSE_TEXT;
|
||||
extern const int TOO_MANY_PARTITIONS;
|
||||
}
|
||||
|
||||
|
||||
@ -706,6 +707,21 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
|
||||
if (parts_with_ranges.empty())
|
||||
return std::make_unique<QueryPlan>();
|
||||
|
||||
auto max_partitions_to_read
|
||||
= settings.max_partitions_to_read.changed ? settings.max_partitions_to_read : data.getSettings()->max_partitions_to_read;
|
||||
if (max_partitions_to_read)
|
||||
{
|
||||
std::set<String> partitions;
|
||||
for (auto & part_with_ranges : parts_with_ranges)
|
||||
partitions.insert(part_with_ranges.data_part->info.partition_id);
|
||||
if (partitions.size() > max_partitions_to_read)
|
||||
throw Exception(
|
||||
ErrorCodes::TOO_MANY_PARTITIONS,
|
||||
"Too many partitions to read. Current {}, max {}",
|
||||
partitions.size(),
|
||||
max_partitions_to_read);
|
||||
}
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::SelectedParts, parts_with_ranges.size());
|
||||
ProfileEvents::increment(ProfileEvents::SelectedRanges, sum_ranges);
|
||||
ProfileEvents::increment(ProfileEvents::SelectedMarks, sum_marks);
|
||||
|
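A hedged SQL sketch (not from the commit) of the precedence implemented above: the query-level max_partitions_to_read applies only when it was explicitly changed for the query; otherwise the MergeTree-level table setting is used. The table p is hypothetical and mirrors the test added later in this commit.

CREATE TABLE p (d Date, i Int32) ENGINE = MergeTree PARTITION BY d ORDER BY i
SETTINGS max_partitions_to_read = 1;                  -- table-level limit

SELECT * FROM p;                                       -- governed by the table setting
SELECT * FROM p SETTINGS max_partitions_to_read = 2;   -- explicitly changed, overrides the table setting
SELECT * FROM p SETTINGS max_partitions_to_read = 0;   -- 0 means unlimited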
@ -110,6 +110,7 @@ struct Settings;
|
||||
M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \
|
||||
M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm", 0) \
|
||||
M(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \
|
||||
M(UInt64, max_partitions_to_read, 0, "Limit the max number of partitions that can be accessed in one query. 0 means unlimited. This setting is the default that can be overridden by the query-level setting with the same name.", 0) \
|
||||
\
|
||||
/** Obsolete settings. Kept for backward compatibility only. */ \
|
||||
M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \
|
||||
|
@ -540,7 +540,7 @@ BlockOutputStreamPtr StorageDistributed::write(const ASTPtr &, const StorageMeta
|
||||
/// Ban an attempt to make async insert into the table belonging to DatabaseMemory
|
||||
if (!storage_policy && !owned_cluster && !settings.insert_distributed_sync)
|
||||
{
|
||||
throw Exception("Storage " + getName() + " must has own data directory to enable asynchronous inserts",
|
||||
throw Exception("Storage " + getName() + " must have own data directory to enable asynchronous inserts",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
@ -558,8 +558,10 @@ BlockOutputStreamPtr StorageDistributed::write(const ASTPtr &, const StorageMeta
|
||||
|
||||
/// DistributedBlockOutputStream will not own cluster, but will own ConnectionPools of the cluster
|
||||
return std::make_shared<DistributedBlockOutputStream>(
|
||||
context, *this, metadata_snapshot, createInsertToRemoteTableQuery(remote_database, remote_table, metadata_snapshot->getSampleBlockNonMaterialized()), cluster,
|
||||
insert_sync, timeout);
|
||||
context, *this, metadata_snapshot,
|
||||
createInsertToRemoteTableQuery(
|
||||
remote_database, remote_table, metadata_snapshot->getSampleBlockNonMaterialized()),
|
||||
cluster, insert_sync, timeout);
|
||||
}
|
||||
|
||||
|
||||
|
@ -0,0 +1,13 @@
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
@ -0,0 +1,16 @@
|
||||
SET send_logs_level = 'fatal';
|
||||
select 1 = position('' in '');
|
||||
select 1 = position('' in 'abc');
|
||||
select 0 = position('abc' in '');
|
||||
select 1 = position('abc' in 'abc');
|
||||
select 2 = position('bc' in 'abc');
|
||||
select 3 = position('c' in 'abc');
|
||||
|
||||
select 1 = position('' in '');
|
||||
select 1 = position('' in 'абв');
|
||||
select 0 = position('абв' in '');
|
||||
select 1 = position('абв' in 'абв');
|
||||
select 3 = position('бв' in 'абв');
|
||||
select 5 = position('в' in 'абв');
|
||||
|
||||
select 6 = position('/' IN s) FROM (SELECT 'Hello/World' AS s);
|
@ -108,4 +108,8 @@ SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinH
|
||||
SELECT 'wordShingleMinHashCaseInsensitiveUTF8';
|
||||
SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashCaseInsensitiveUTF8(s, 2, 3) as h FROM defaults GROUP BY h;
|
||||
|
||||
SELECT wordShingleSimHash('foobar', 9223372036854775807); -- { serverError 69 }
|
||||
SELECT wordShingleSimHash('foobar', 1001); -- { serverError 69 }
|
||||
SELECT wordShingleSimHash('foobar', 0); -- { serverError 69 }
|
||||
|
||||
DROP TABLE defaults;
|
||||
|
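A small hedged addition (not in the commit) illustrating the new upper bounds validated in FunctionsStringHash above: max_shingle_size and max_num_hashes are both 25, so values just above them should be rejected with ARGUMENT_OUT_OF_BOUND (serverError 69), in line with the negative cases in the test.

SELECT wordShingleSimHash('foobar', 26); -- { serverError 69 } -- shingle size above max_shingle_size = 25
SELECT wordShingleMinHash('foobar', 3, 26); -- { serverError 69 } -- num hashes above max_num_hashes = 25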
@ -1 +1 @@
|
||||
16
|
||||
1
|
||||
|
@ -1 +1 @@
|
||||
SELECT length(buildId());
|
||||
SELECT length(buildId()) >= 16;
|
||||
|
@ -0,0 +1 @@
|
||||
100 10 324 120.00 B 8.00 B 23.00 B
|
@ -0,0 +1,7 @@
|
||||
DROP TABLE IF EXISTS columns_transformers;
|
||||
|
||||
CREATE TABLE columns_transformers (i int, j int, k int, a_bytes int, b_bytes int, c_bytes int) Engine=TinyLog;
|
||||
INSERT INTO columns_transformers VALUES (100, 10, 324, 120, 8, 23);
|
||||
SELECT * EXCEPT 'bytes', COLUMNS('bytes') APPLY formatReadableSize FROM columns_transformers;
|
||||
|
||||
DROP TABLE IF EXISTS columns_transformers;
|
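A short hedged sketch (not part of the commit) contrasting the two forms the EXCEPT transformer now accepts, as parsed above: an identifier list and a string literal compiled as an RE2 pattern and matched against column names with RE2::PartialMatch.

SELECT * EXCEPT (a_bytes, b_bytes, c_bytes) FROM columns_transformers; -- explicit identifier list
SELECT * EXCEPT '_bytes$' FROM columns_transformers;                   -- regex form added by this change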
@ -0,0 +1,3 @@
|
||||
8746326176292337648
|
||||
---mutation---
|
||||
11916226932045201400
|
@ -0,0 +1,219 @@
|
||||
DROP TABLE IF EXISTS simple_agf_summing_mt;
|
||||
|
||||
CREATE TABLE simple_agf_summing_mt
|
||||
(
|
||||
a Int64,
|
||||
min_aggreg AggregateFunction(min, UInt64),
|
||||
min_simple SimpleAggregateFunction(min, UInt64),
|
||||
max_aggreg AggregateFunction(max, UInt64),
|
||||
max_simple SimpleAggregateFunction(max, UInt64),
|
||||
sum_aggreg AggregateFunction(sum, UInt64),
|
||||
sum_simple SimpleAggregateFunction(sum, UInt64),
|
||||
sumov_aggreg AggregateFunction(sumWithOverflow, UInt64),
|
||||
sumov_simple SimpleAggregateFunction(sumWithOverflow, UInt64),
|
||||
gbitand_aggreg AggregateFunction(groupBitAnd, UInt64),
|
||||
gbitand_simple SimpleAggregateFunction(groupBitAnd, UInt64),
|
||||
gbitor_aggreg AggregateFunction(groupBitOr, UInt64),
|
||||
gbitor_simple SimpleAggregateFunction(groupBitOr, UInt64),
|
||||
gbitxor_aggreg AggregateFunction(groupBitXor, UInt64),
|
||||
gbitxor_simple SimpleAggregateFunction(groupBitXor, UInt64),
|
||||
gra_aggreg AggregateFunction(groupArrayArray, Array(UInt64)),
|
||||
gra_simple SimpleAggregateFunction(groupArrayArray, Array(UInt64)),
|
||||
grp_aggreg AggregateFunction(groupUniqArrayArray, Array(UInt64)),
|
||||
grp_simple SimpleAggregateFunction(groupUniqArrayArray, Array(UInt64)),
|
||||
aggreg_map AggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))),
|
||||
simple_map SimpleAggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))),
|
||||
aggreg_map_min AggregateFunction(minMap, Tuple(Array(String), Array(UInt64))),
|
||||
simple_map_min SimpleAggregateFunction(minMap, Tuple(Array(String), Array(UInt64))),
|
||||
aggreg_map_max AggregateFunction(maxMap, Tuple(Array(String), Array(UInt64))),
|
||||
simple_map_max SimpleAggregateFunction(maxMap, Tuple(Array(String), Array(UInt64)))
|
||||
)
|
||||
ENGINE = SummingMergeTree
|
||||
ORDER BY a;
|
||||
|
||||
INSERT INTO simple_agf_summing_mt SELECT
|
||||
number % 51 AS a,
|
||||
minState(number),
|
||||
min(number),
|
||||
maxState(number),
|
||||
max(number),
|
||||
sumState(number),
|
||||
sum(number),
|
||||
sumWithOverflowState(number),
|
||||
sumWithOverflow(number),
|
||||
groupBitAndState(number + 111111111),
|
||||
groupBitAnd(number + 111111111),
|
||||
groupBitOrState(number + 111111111),
|
||||
groupBitOr(number + 111111111),
|
||||
groupBitXorState(number + 111111111),
|
||||
groupBitXor(number + 111111111),
|
||||
groupArrayArrayState([toUInt64(number % 1000)]),
|
||||
groupArrayArray([toUInt64(number % 1000)]),
|
||||
groupUniqArrayArrayState([toUInt64(number % 500)]),
|
||||
groupUniqArrayArray([toUInt64(number % 500)]),
|
||||
sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13))))
|
||||
FROM numbers(10000)
|
||||
GROUP BY a;
|
||||
|
||||
INSERT INTO simple_agf_summing_mt SELECT
|
||||
number % 1151 AS a,
|
||||
minState(number),
|
||||
min(number),
|
||||
maxState(number),
|
||||
max(number),
|
||||
sumState(number),
|
||||
sum(number),
|
||||
sumWithOverflowState(number),
|
||||
sumWithOverflow(number),
|
||||
groupBitAndState(number + 111111111),
|
||||
groupBitAnd(number + 111111111),
|
||||
groupBitOrState(number + 111111111),
|
||||
groupBitOr(number + 111111111),
|
||||
groupBitXorState(number + 111111111),
|
||||
groupBitXor(number + 111111111),
|
||||
groupArrayArrayState([toUInt64(number % 1000)]),
|
||||
groupArrayArray([toUInt64(number % 1000)]),
|
||||
groupUniqArrayArrayState([toUInt64(number % 500)]),
|
||||
groupUniqArrayArray([toUInt64(number % 500)]),
|
||||
sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13))))
|
||||
FROM numbers(1000000)
|
||||
GROUP BY a;
|
||||
|
||||
OPTIMIZE TABLE simple_agf_summing_mt FINAL;
|
||||
|
||||
SELECT cityHash64(groupArray(cityHash64(*))) FROM (
|
||||
SELECT
|
||||
a % 31 AS g,
|
||||
minMerge(min_aggreg) AS minagg,
|
||||
min(min_simple) AS mins,
|
||||
minagg = mins AS M,
|
||||
maxMerge(max_aggreg) AS maxagg,
|
||||
max(max_simple) AS maxs,
|
||||
maxagg = maxs AS MX,
|
||||
sumMerge(sum_aggreg) AS sumagg,
|
||||
sum(sum_simple) AS sums,
|
||||
sumagg = sums AS S,
|
||||
sumWithOverflowMerge(sumov_aggreg) AS sumaggov,
|
||||
sumWithOverflow(sumov_simple) AS sumsov,
|
||||
sumaggov = sumsov AS SO,
|
||||
groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg,
|
||||
groupBitAnd(gbitand_simple) AS gbitandsimple,
|
||||
gbitandaggreg = gbitandsimple AS BIT_AND,
|
||||
groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg,
|
||||
groupBitOr(gbitor_simple) AS gbitorsimple,
|
||||
gbitoraggreg = gbitorsimple AS BIT_OR,
|
||||
groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg,
|
||||
groupBitXor(gbitxor_simple) AS gbitxorsimple,
|
||||
gbitxoraggreg = gbitxorsimple AS BITXOR,
|
||||
arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa,
|
||||
arraySort(groupArrayArray(gra_simple)) AS gras,
|
||||
graa = gras AS GAA,
|
||||
arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra,
|
||||
arraySort(groupUniqArrayArray(grp_simple)) AS grs,
|
||||
gra = grs AS T,
|
||||
sumMapMerge(aggreg_map) AS smmapagg,
|
||||
sumMap(simple_map) AS smmaps,
|
||||
smmapagg = smmaps AS SM,
|
||||
minMapMerge(aggreg_map_min) AS minmapapagg,
|
||||
minMap(simple_map_min) AS minmaps,
|
||||
minmapapagg = minmaps AS SMIN,
|
||||
maxMapMerge(aggreg_map_max) AS maxmapapagg,
|
||||
maxMap(simple_map_max) AS maxmaps,
|
||||
maxmapapagg = maxmaps AS SMAX
|
||||
FROM simple_agf_summing_mt
|
||||
GROUP BY g
|
||||
ORDER BY g
|
||||
);
|
||||
|
||||
SELECT '---mutation---';
|
||||
|
||||
ALTER TABLE simple_agf_summing_mt
|
||||
DELETE WHERE (a % 3) = 0
|
||||
SETTINGS mutations_sync = 1;
|
||||
|
||||
INSERT INTO simple_agf_summing_mt SELECT
|
||||
number % 11151 AS a,
|
||||
minState(number),
|
||||
min(number),
|
||||
maxState(number),
|
||||
max(number),
|
||||
sumState(number),
|
||||
sum(number),
|
||||
sumWithOverflowState(number),
|
||||
sumWithOverflow(number),
|
||||
groupBitAndState((number % 3) + 111111110),
|
||||
groupBitAnd((number % 3) + 111111110),
|
||||
groupBitOrState(number + 111111111),
|
||||
groupBitOr(number + 111111111),
|
||||
groupBitXorState(number + 111111111),
|
||||
groupBitXor(number + 111111111),
|
||||
groupArrayArrayState([toUInt64(number % 100)]),
|
||||
groupArrayArray([toUInt64(number % 100)]),
|
||||
groupUniqArrayArrayState([toUInt64(number % 50)]),
|
||||
groupUniqArrayArray([toUInt64(number % 50)]),
|
||||
sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13))))
|
||||
FROM numbers(1000000)
|
||||
GROUP BY a;
|
||||
|
||||
OPTIMIZE TABLE simple_agf_summing_mt FINAL;
|
||||
|
||||
SELECT cityHash64(groupArray(cityHash64(*))) FROM (
|
||||
SELECT
|
||||
a % 31 AS g,
|
||||
minMerge(min_aggreg) AS minagg,
|
||||
min(min_simple) AS mins,
|
||||
minagg = mins AS M,
|
||||
maxMerge(max_aggreg) AS maxagg,
|
||||
max(max_simple) AS maxs,
|
||||
maxagg = maxs AS MX,
|
||||
sumMerge(sum_aggreg) AS sumagg,
|
||||
sum(sum_simple) AS sums,
|
||||
sumagg = sums AS S,
|
||||
sumWithOverflowMerge(sumov_aggreg) AS sumaggov,
|
||||
sumWithOverflow(sumov_simple) AS sumsov,
|
||||
sumaggov = sumsov AS SO,
|
||||
groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg,
|
||||
groupBitAnd(gbitand_simple) AS gbitandsimple,
|
||||
gbitandaggreg = gbitandsimple AS BIT_AND,
|
||||
groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg,
|
||||
groupBitOr(gbitor_simple) AS gbitorsimple,
|
||||
gbitoraggreg = gbitorsimple AS BIT_OR,
|
||||
groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg,
|
||||
groupBitXor(gbitxor_simple) AS gbitxorsimple,
|
||||
gbitxoraggreg = gbitxorsimple AS BITXOR,
|
||||
arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa,
|
||||
arraySort(groupArrayArray(gra_simple)) AS gras,
|
||||
graa = gras AS GAA,
|
||||
arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra,
|
||||
arraySort(groupUniqArrayArray(grp_simple)) AS grs,
|
||||
gra = grs AS T,
|
||||
sumMapMerge(aggreg_map) AS smmapagg,
|
||||
sumMap(simple_map) AS smmaps,
|
||||
smmapagg = smmaps AS SM,
|
||||
minMapMerge(aggreg_map_min) AS minmapapagg,
|
||||
minMap(simple_map_min) AS minmaps,
|
||||
minmapapagg = minmaps AS SMIN,
|
||||
maxMapMerge(aggreg_map_max) AS maxmapapagg,
|
||||
maxMap(simple_map_max) AS maxmaps,
|
||||
maxmapapagg = maxmaps AS SMAX
|
||||
FROM simple_agf_summing_mt
|
||||
GROUP BY g
|
||||
ORDER BY g
|
||||
);
|
||||
|
||||
DROP TABLE simple_agf_summing_mt;
|
@ -0,0 +1,3 @@
|
||||
8746326176292337648
|
||||
---mutation---
|
||||
11916226932045201400
|
@ -0,0 +1,219 @@
|
||||
DROP TABLE IF EXISTS simple_agf_aggregating_mt;
|
||||
|
||||
CREATE TABLE simple_agf_aggregating_mt
|
||||
(
|
||||
a Int64,
|
||||
min_aggreg AggregateFunction(min, UInt64),
|
||||
min_simple SimpleAggregateFunction(min, UInt64),
|
||||
max_aggreg AggregateFunction(max, UInt64),
|
||||
max_simple SimpleAggregateFunction(max, UInt64),
|
||||
sum_aggreg AggregateFunction(sum, UInt64),
|
||||
sum_simple SimpleAggregateFunction(sum, UInt64),
|
||||
sumov_aggreg AggregateFunction(sumWithOverflow, UInt64),
|
||||
sumov_simple SimpleAggregateFunction(sumWithOverflow, UInt64),
|
||||
gbitand_aggreg AggregateFunction(groupBitAnd, UInt64),
|
||||
gbitand_simple SimpleAggregateFunction(groupBitAnd, UInt64),
|
||||
gbitor_aggreg AggregateFunction(groupBitOr, UInt64),
|
||||
gbitor_simple SimpleAggregateFunction(groupBitOr, UInt64),
|
||||
gbitxor_aggreg AggregateFunction(groupBitXor, UInt64),
|
||||
gbitxor_simple SimpleAggregateFunction(groupBitXor, UInt64),
|
||||
gra_aggreg AggregateFunction(groupArrayArray, Array(UInt64)),
|
||||
gra_simple SimpleAggregateFunction(groupArrayArray, Array(UInt64)),
|
||||
grp_aggreg AggregateFunction(groupUniqArrayArray, Array(UInt64)),
|
||||
grp_simple SimpleAggregateFunction(groupUniqArrayArray, Array(UInt64)),
|
||||
aggreg_map AggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))),
|
||||
simple_map SimpleAggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))),
|
||||
aggreg_map_min AggregateFunction(minMap, Tuple(Array(String), Array(UInt64))),
|
||||
simple_map_min SimpleAggregateFunction(minMap, Tuple(Array(String), Array(UInt64))),
|
||||
aggreg_map_max AggregateFunction(maxMap, Tuple(Array(String), Array(UInt64))),
|
||||
simple_map_max SimpleAggregateFunction(maxMap, Tuple(Array(String), Array(UInt64)))
|
||||
)
|
||||
ENGINE = AggregatingMergeTree
|
||||
ORDER BY a;
|
||||
|
||||
INSERT INTO simple_agf_aggregating_mt SELECT
|
||||
number % 51 AS a,
|
||||
minState(number),
|
||||
min(number),
|
||||
maxState(number),
|
||||
max(number),
|
||||
sumState(number),
|
||||
sum(number),
|
||||
sumWithOverflowState(number),
|
||||
sumWithOverflow(number),
|
||||
groupBitAndState(number + 111111111),
|
||||
groupBitAnd(number + 111111111),
|
||||
groupBitOrState(number + 111111111),
|
||||
groupBitOr(number + 111111111),
|
||||
groupBitXorState(number + 111111111),
|
||||
groupBitXor(number + 111111111),
|
||||
groupArrayArrayState([toUInt64(number % 1000)]),
|
||||
groupArrayArray([toUInt64(number % 1000)]),
|
||||
groupUniqArrayArrayState([toUInt64(number % 500)]),
|
||||
groupUniqArrayArray([toUInt64(number % 500)]),
|
||||
sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13))))
|
||||
FROM numbers(10000)
|
||||
GROUP BY a;
|
||||
|
||||
INSERT INTO simple_agf_aggregating_mt SELECT
|
||||
number % 1151 AS a,
|
||||
minState(number),
|
||||
min(number),
|
||||
maxState(number),
|
||||
max(number),
|
||||
sumState(number),
|
||||
sum(number),
|
||||
sumWithOverflowState(number),
|
||||
sumWithOverflow(number),
|
||||
groupBitAndState(number + 111111111),
|
||||
groupBitAnd(number + 111111111),
|
||||
groupBitOrState(number + 111111111),
|
||||
groupBitOr(number + 111111111),
|
||||
groupBitXorState(number + 111111111),
|
||||
groupBitXor(number + 111111111),
|
||||
groupArrayArrayState([toUInt64(number % 1000)]),
|
||||
groupArrayArray([toUInt64(number % 1000)]),
|
||||
groupUniqArrayArrayState([toUInt64(number % 500)]),
|
||||
groupUniqArrayArray([toUInt64(number % 500)]),
|
||||
sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13))))
|
||||
FROM numbers(1000000)
|
||||
GROUP BY a;
|
||||
|
||||
OPTIMIZE TABLE simple_agf_aggregating_mt FINAL;
|
||||
|
||||
SELECT cityHash64(groupArray(cityHash64(*))) FROM (
|
||||
SELECT
|
||||
a % 31 AS g,
|
||||
minMerge(min_aggreg) AS minagg,
|
||||
min(min_simple) AS mins,
|
||||
minagg = mins AS M,
|
||||
maxMerge(max_aggreg) AS maxagg,
|
||||
max(max_simple) AS maxs,
|
||||
maxagg = maxs AS MX,
|
||||
sumMerge(sum_aggreg) AS sumagg,
|
||||
sum(sum_simple) AS sums,
|
||||
sumagg = sums AS S,
|
||||
sumWithOverflowMerge(sumov_aggreg) AS sumaggov,
|
||||
sumWithOverflow(sumov_simple) AS sumsov,
|
||||
sumaggov = sumsov AS SO,
|
||||
groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg,
|
||||
groupBitAnd(gbitand_simple) AS gbitandsimple,
|
||||
gbitandaggreg = gbitandsimple AS BIT_AND,
|
||||
groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg,
|
||||
groupBitOr(gbitor_simple) AS gbitorsimple,
|
||||
gbitoraggreg = gbitorsimple AS BIT_OR,
|
||||
groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg,
|
||||
groupBitXor(gbitxor_simple) AS gbitxorsimple,
|
||||
gbitxoraggreg = gbitxorsimple AS BITXOR,
|
||||
arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa,
|
||||
arraySort(groupArrayArray(gra_simple)) AS gras,
|
||||
graa = gras AS GAA,
|
||||
arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra,
|
||||
arraySort(groupUniqArrayArray(grp_simple)) AS grs,
|
||||
gra = grs AS T,
|
||||
sumMapMerge(aggreg_map) AS smmapagg,
|
||||
sumMap(simple_map) AS smmaps,
|
||||
smmapagg = smmaps AS SM,
|
||||
minMapMerge(aggreg_map_min) AS minmapapagg,
|
||||
minMap(simple_map_min) AS minmaps,
|
||||
minmapapagg = minmaps AS SMIN,
|
||||
maxMapMerge(aggreg_map_max) AS maxmapapagg,
|
||||
maxMap(simple_map_max) AS maxmaps,
|
||||
maxmapapagg = maxmaps AS SMAX
|
||||
FROM simple_agf_aggregating_mt
|
||||
GROUP BY g
|
||||
ORDER BY g
|
||||
);
|
||||
|
||||
SELECT '---mutation---';
|
||||
|
||||
ALTER TABLE simple_agf_aggregating_mt
|
||||
DELETE WHERE (a % 3) = 0
|
||||
SETTINGS mutations_sync = 1;
|
||||
|
||||
INSERT INTO simple_agf_aggregating_mt SELECT
|
||||
number % 11151 AS a,
|
||||
minState(number),
|
||||
min(number),
|
||||
maxState(number),
|
||||
max(number),
|
||||
sumState(number),
|
||||
sum(number),
|
||||
sumWithOverflowState(number),
|
||||
sumWithOverflow(number),
|
||||
groupBitAndState((number % 3) + 111111110),
|
||||
groupBitAnd((number % 3) + 111111110),
|
||||
groupBitOrState(number + 111111111),
|
||||
groupBitOr(number + 111111111),
|
||||
groupBitXorState(number + 111111111),
|
||||
groupBitXor(number + 111111111),
|
||||
groupArrayArrayState([toUInt64(number % 100)]),
|
||||
groupArrayArray([toUInt64(number % 100)]),
|
||||
groupUniqArrayArrayState([toUInt64(number % 50)]),
|
||||
groupUniqArrayArray([toUInt64(number % 50)]),
|
||||
sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))),
|
||||
maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13))))
|
||||
FROM numbers(1000000)
|
||||
GROUP BY a;
|
||||
|
||||
OPTIMIZE TABLE simple_agf_aggregating_mt FINAL;
|
||||
|
||||
SELECT cityHash64(groupArray(cityHash64(*))) FROM (
|
||||
SELECT
|
||||
a % 31 AS g,
|
||||
minMerge(min_aggreg) AS minagg,
|
||||
min(min_simple) AS mins,
|
||||
minagg = mins AS M,
|
||||
maxMerge(max_aggreg) AS maxagg,
|
||||
max(max_simple) AS maxs,
|
||||
maxagg = maxs AS MX,
|
||||
sumMerge(sum_aggreg) AS sumagg,
|
||||
sum(sum_simple) AS sums,
|
||||
sumagg = sums AS S,
|
||||
sumWithOverflowMerge(sumov_aggreg) AS sumaggov,
|
||||
sumWithOverflow(sumov_simple) AS sumsov,
|
||||
sumaggov = sumsov AS SO,
|
||||
groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg,
|
||||
groupBitAnd(gbitand_simple) AS gbitandsimple,
|
||||
gbitandaggreg = gbitandsimple AS BIT_AND,
|
||||
groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg,
|
||||
groupBitOr(gbitor_simple) AS gbitorsimple,
|
||||
gbitoraggreg = gbitorsimple AS BIT_OR,
|
||||
groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg,
|
||||
groupBitXor(gbitxor_simple) AS gbitxorsimple,
|
||||
gbitxoraggreg = gbitxorsimple AS BITXOR,
|
||||
arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa,
|
||||
arraySort(groupArrayArray(gra_simple)) AS gras,
|
||||
graa = gras AS GAA,
|
||||
arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra,
|
||||
arraySort(groupUniqArrayArray(grp_simple)) AS grs,
|
||||
gra = grs AS T,
|
||||
sumMapMerge(aggreg_map) AS smmapagg,
|
||||
sumMap(simple_map) AS smmaps,
|
||||
smmapagg = smmaps AS SM,
|
||||
minMapMerge(aggreg_map_min) AS minmapapagg,
|
||||
minMap(simple_map_min) AS minmaps,
|
||||
minmapapagg = minmaps AS SMIN,
|
||||
maxMapMerge(aggreg_map_max) AS maxmapapagg,
|
||||
maxMap(simple_map_max) AS maxmaps,
|
||||
maxmapapagg = maxmaps AS SMAX
|
||||
FROM simple_agf_aggregating_mt
|
||||
GROUP BY g
|
||||
ORDER BY g
|
||||
);
|
||||
|
||||
DROP TABLE simple_agf_aggregating_mt;
|
@ -0,0 +1,6 @@
|
||||
2021-01-01 1 2
|
||||
2021-01-02 4 5
|
||||
2021-01-01 1 2
|
||||
2021-01-02 4 5
|
||||
2021-01-01 1 2
|
||||
2021-01-02 4 5
|
tests/queries/0_stateless/01632_max_partitions_to_read.sql
@ -0,0 +1,17 @@
|
||||
drop table if exists p;
|
||||
|
||||
create table p(d Date, i int, j int) engine MergeTree partition by d order by i settings max_partitions_to_read = 1;
|
||||
|
||||
insert into p values ('2021-01-01', 1, 2), ('2021-01-02', 4, 5);
|
||||
|
||||
select * from p order by i; -- { serverError 565 }
|
||||
|
||||
select * from p order by i settings max_partitions_to_read = 2;
|
||||
|
||||
select * from p order by i settings max_partitions_to_read = 0; -- unlimited
|
||||
|
||||
alter table p modify setting max_partitions_to_read = 2;
|
||||
|
||||
select * from p order by i;
|
||||
|
||||
drop table if exists p;
|
@ -0,0 +1,6 @@
|
||||
128
|
||||
256
|
||||
128
|
||||
256
|
||||
128
|
||||
256
|
@ -0,0 +1,24 @@
|
||||
DROP TABLE IF EXISTS local;
|
||||
DROP TABLE IF EXISTS distributed;
|
||||
|
||||
CREATE TABLE local (x UInt8) ENGINE = Memory;
|
||||
CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);
|
||||
|
||||
SET insert_distributed_sync = 1;
|
||||
|
||||
INSERT INTO distributed SELECT number FROM numbers(256) WHERE number % 2 = 0;
|
||||
SELECT count() FROM local;
|
||||
SELECT count() FROM distributed;
|
||||
|
||||
TRUNCATE TABLE local;
|
||||
INSERT INTO distributed SELECT number FROM numbers(256) WHERE number % 2 = 1;
|
||||
SELECT count() FROM local;
|
||||
SELECT count() FROM distributed;
|
||||
|
||||
TRUNCATE TABLE local;
|
||||
INSERT INTO distributed SELECT number FROM numbers(256) WHERE number < 128;
|
||||
SELECT count() FROM local;
|
||||
SELECT count() FROM distributed;
|
||||
|
||||
DROP TABLE local;
|
||||
DROP TABLE distributed;
|
@ -0,0 +1,2 @@
|
||||
256
|
||||
512
|
@ -0,0 +1,16 @@
|
||||
DROP TABLE IF EXISTS local;
|
||||
DROP TABLE IF EXISTS distributed;
|
||||
|
||||
CREATE TABLE local (x UInt8) ENGINE = Memory;
|
||||
CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);
|
||||
|
||||
SET insert_distributed_sync = 0, network_compression_method = 'zstd';
|
||||
|
||||
INSERT INTO distributed SELECT number FROM numbers(256);
|
||||
SYSTEM FLUSH DISTRIBUTED distributed;
|
||||
|
||||
SELECT count() FROM local;
|
||||
SELECT count() FROM distributed;
|
||||
|
||||
DROP TABLE local;
|
||||
DROP TABLE distributed;
|
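A hedged variant (not in the commit) of the test above, spelling out the query-level settings that the writeToShard change reads when choosing a codec for the on-disk batches of an asynchronous Distributed INSERT; network_zstd_compression_level comes from the same settings as network_compression_method.

SET insert_distributed_sync = 0,
    network_compression_method = 'zstd',
    network_zstd_compression_level = 3;

INSERT INTO distributed SELECT number FROM numbers(256);
SYSTEM FLUSH DISTRIBUTED distributed; -- flushes the zstd-compressed batches to the shards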
@ -0,0 +1,4 @@
|
||||
"marks",7
|
||||
"optimize_trivial_count_query",16384
|
||||
"max_threads=1",16384
|
||||
"max_threads=100",16384
|
@ -0,0 +1,57 @@
|
||||
DROP TABLE IF EXISTS adaptive_table;
|
||||
|
||||
CREATE TABLE adaptive_table(
|
||||
key UInt64,
|
||||
value String
|
||||
) ENGINE MergeTree()
|
||||
ORDER BY key
|
||||
SETTINGS
|
||||
index_granularity_bytes=1048576,
|
||||
min_bytes_for_wide_part=0,
|
||||
old_parts_lifetime=0,
|
||||
index_granularity=8192
|
||||
;
|
||||
|
||||
-- This triggers adjustment of the granules that was introduced in PR#17120
|
||||
INSERT INTO adaptive_table SELECT number, randomPrintableASCII(if(number BETWEEN 8192-30 AND 8192, 102400, 1)) FROM system.numbers LIMIT 16384;
|
||||
-- This creates the following marks:
|
||||
--
|
||||
-- $ check-marks /path/to/db/adaptive_table/all_*/key.{mrk2,bin}
|
||||
-- Mark 0, points to 0, 0, has rows after 8192, decompressed size 72808. <!-- wrong number of rows, should be 5461
|
||||
-- Mark 1, points to 0, 43688, has rows after 1820, decompressed size 29120.
|
||||
-- Mark 2, points to 0, 58248, has rows after 1820, decompressed size 14560.
|
||||
-- Mark 3, points to 36441, 0, has rows after 1820, decompressed size 58264.
|
||||
-- Mark 4, points to 36441, 14560, has rows after 1820, decompressed size 43704.
|
||||
-- Mark 5, points to 36441, 29120, has rows after 8192, decompressed size 29144.
|
||||
-- Mark 6, points to 36441, 58264, has rows after 0, decompressed size 0.
|
||||
OPTIMIZE TABLE adaptive_table FINAL;
|
||||
|
||||
SELECT 'marks', marks FROM system.parts WHERE table = 'adaptive_table' AND database = currentDatabase() AND active FORMAT CSV;
|
||||
|
||||
-- Reset marks cache
|
||||
DETACH TABLE adaptive_table;
|
||||
ATTACH TABLE adaptive_table;
|
||||
|
||||
-- This works correctly, since it does not read any marks
|
||||
SELECT 'optimize_trivial_count_query', count() FROM adaptive_table SETTINGS
|
||||
optimize_trivial_count_query=1
|
||||
FORMAT CSV;
|
||||
-- This works correctly, since it reads marks sequentially and doesn't seek
|
||||
SELECT 'max_threads=1', count() FROM adaptive_table SETTINGS
|
||||
optimize_trivial_count_query=0,
|
||||
max_threads=1
|
||||
FORMAT CSV;
|
||||
-- This works incorrectly, since it seeks to each mark (due to reading each mark from a separate thread),
|
||||
-- so if the mark offsets are wrong, it will read more data.
|
||||
--
|
||||
-- Reading each mark from a separate thread is just the simplest reproducer;
|
||||
-- this can also be reproduced with PREWHERE, since it skips data physically,
|
||||
-- so it also uses seeks.
|
||||
SELECT 'max_threads=100', count() FROM adaptive_table SETTINGS
|
||||
optimize_trivial_count_query=0,
|
||||
merge_tree_min_rows_for_concurrent_read=1,
|
||||
merge_tree_min_bytes_for_concurrent_read=1,
|
||||
max_threads=100
|
||||
FORMAT CSV;
|
||||
|
||||
DROP TABLE adaptive_table;
|
@ -0,0 +1,5 @@
|
||||
\N
|
||||
\N
|
||||
\N
|
||||
0
|
||||
90
|
@ -0,0 +1,7 @@
|
||||
SELECT sumIf(dummy, dummy) FROM remote('127.0.0.{1,2}', view(SELECT cast(Null AS Nullable(UInt8)) AS dummy FROM system.one));
|
||||
SELECT sumIf(dummy, 1) FROM remote('127.0.0.{1,2}', view(SELECT cast(Null AS Nullable(UInt8)) AS dummy FROM system.one));
|
||||
-- Before #16610 this returned 0, while with this patch it returns NULL
|
||||
SELECT sumIf(dummy, dummy) FROM remote('127.0.0.{1,2}', view(SELECT cast(dummy AS Nullable(UInt8)) AS dummy FROM system.one));
|
||||
SELECT sumIf(dummy, 1) FROM remote('127.0.0.{1,2}', view(SELECT cast(dummy AS Nullable(UInt8)) AS dummy FROM system.one));
|
||||
|
||||
SELECT sumIf(n, 1) FROM remote('127.0.0.{1,2}', view(SELECT cast(* AS Nullable(UInt8)) AS n FROM system.numbers limit 10))
|
@ -181,3 +181,4 @@
|
||||
01561_mann_whitney_scipy
|
||||
01601_custom_tld
|
||||
01636_nullable_fuzz2
|
||||
01639_distributed_sync_insert_zero_rows
|
||||
|