Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

Commit 1a63972a4b: Merge remote-tracking branch 'origin/master' into bnc/config-max-num-threads
@ -85,7 +85,8 @@ static const size_t signal_pipe_buf_size =
    + sizeof(ucontext_t)
    + sizeof(StackTrace)
    + sizeof(UInt32)
    + max_query_id_size + 1; /// query_id + varint encoded length
    + max_query_id_size + 1 /// query_id + varint encoded length
    + sizeof(void*);

using signal_function = void(int, siginfo_t*, void*);

@ -135,6 +136,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
    DB::writePODBinary(stack_trace, out);
    DB::writeBinary(UInt32(getThreadId()), out);
    DB::writeStringBinary(query_id, out);
    DB::writePODBinary(DB::current_thread, out);

    out.next();

@ -218,16 +220,18 @@ public:
    StackTrace stack_trace(NoCapture{});
    UInt32 thread_num;
    std::string query_id;
    DB::ThreadStatus * thread_ptr{};

    DB::readPODBinary(info, in);
    DB::readPODBinary(context, in);
    DB::readPODBinary(stack_trace, in);
    DB::readBinary(thread_num, in);
    DB::readBinary(query_id, in);
    DB::readPODBinary(thread_ptr, in);

    /// This allows to receive more signals if failure happens inside onFault function.
    /// Example: segfault while symbolizing stack trace.
    std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id); }).detach();
    std::thread([=, this] { onFault(sig, info, context, stack_trace, thread_num, query_id, thread_ptr); }).detach();
        }
    }
}

@ -248,8 +252,19 @@ private:
    const ucontext_t & context,
    const StackTrace & stack_trace,
    UInt32 thread_num,
    const std::string & query_id) const
    const std::string & query_id,
    DB::ThreadStatus * thread_ptr) const
{
    DB::ThreadStatus thread_status;

    /// Send logs from this thread to client if possible.
    /// It will allow client to see failure messages directly.
    if (thread_ptr)
    {
        if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
            DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
    }

    LOG_FATAL(log, "########################################");

    if (query_id.empty())

@ -280,6 +295,10 @@ private:
    /// Write symbolized stack trace line by line for better grep-ability.
    stack_trace.toStringEveryLine([&](const std::string & s) { LOG_FATAL(log, s); });

    /// When everything is done, we will try to send these error messages to client.
    if (thread_ptr)
        thread_ptr->onFatalError();
}
};
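Editorial aside: the hunks above size a pipe buffer so that one fault record can be written atomically from the signal handler and consumed by a listener thread. Below is a minimal, self-contained sketch of that pattern, assuming illustrative names (`FaultRecord`, `signal_pipe_fd`, the field set); it is not the daemon's real layout or API.

``` cpp
// Sketch of "write a fixed-size record from a signal handler, read it in a
// listener thread". Illustrative only; field names and sizes are assumptions.
#include <chrono>
#include <climits>
#include <cstring>
#include <signal.h>
#include <thread>
#include <unistd.h>

struct FaultRecord
{
    int sig;                 // which signal fired
    siginfo_t info;          // kernel-provided details
    pid_t sender_pid;        // stand-in for writeBinary(UInt32(getThreadId()))
    char query_id[64];       // stand-in for writeStringBinary(query_id)
};

static int signal_pipe_fd[2];

// The record must fit in one atomic pipe write, which is why the real code
// sums the sizes of every field into signal_pipe_buf_size.
static_assert(sizeof(FaultRecord) <= PIPE_BUF, "record must fit in one atomic write");

static void faultHandler(int sig, siginfo_t * info, void *)
{
    FaultRecord rec{};
    rec.sig = sig;
    rec.info = *info;
    rec.sender_pid = ::getpid();                 // illustrative stand-in only
    ::strncpy(rec.query_id, "unknown", sizeof(rec.query_id) - 1);
    ::write(signal_pipe_fd[1], &rec, sizeof(rec));   // async-signal-safe
}

static void listener()
{
    FaultRecord rec{};
    while (::read(signal_pipe_fd[0], &rec, sizeof(rec)) == sizeof(rec))
    {
        // Heavy work (symbolizing the stack trace, logging, notifying the
        // client) happens here, outside the signal handler, like onFault().
    }
}

int main()
{
    ::pipe(signal_pipe_fd);
    std::thread(listener).detach();

    struct sigaction sa{};
    sa.sa_sigaction = faultHandler;
    sa.sa_flags = SA_SIGINFO;
    ::sigaction(SIGUSR1, &sa, nullptr);

    ::raise(SIGUSR1);                            // trigger one record for the demo
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
```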
@ -1,9 +1,9 @@
# This strings autochanged from release_lib.sh:
SET(VERSION_REVISION 54435)
SET(VERSION_REVISION 54436)
SET(VERSION_MAJOR 20)
SET(VERSION_MINOR 5)
SET(VERSION_MINOR 6)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 91df18a906dcffdbee6816e5389df6c65f86e35f)
SET(VERSION_DESCRIBE v20.5.1.1-prestable)
SET(VERSION_STRING 20.5.1.1)
SET(VERSION_GITHASH efc57fb063b3fb4df968d916720ec4d4ced4642e)
SET(VERSION_DESCRIBE v20.6.1.1-prestable)
SET(VERSION_STRING 20.6.1.1)
# end of autochange
@ -18,7 +18,7 @@ message(STATUS "Default libraries: ${DEFAULT_LIBS}")
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})

# glibc-compatibility library relies to fixed version of libc headers
# glibc-compatibility library relies to constant version of libc headers
# (because minor changes in function attributes between different glibc versions will introduce incompatibilities)
# This is for x86_64. For other architectures we have separate toolchains.
if (ARCH_AMD64 AND NOT_UNBUNDLED)
debian/changelog (vendored)
@ -1,5 +1,5 @@
clickhouse (20.5.1.1) unstable; urgency=low
clickhouse (20.6.1.1) unstable; urgency=low

  * Modified source code

 -- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 28 Apr 2020 20:12:13 +0300
 -- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 22 Jun 2020 20:40:23 +0300
@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.5.1.*
ARG version=20.6.1.*

RUN apt-get update \
    && apt-get install --yes --no-install-recommends \

@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.5.1.*
ARG version=20.6.1.*
ARG gosu_ver=1.10

RUN apt-get update \

@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
ARG version=20.5.1.*
ARG version=20.6.1.*

RUN apt-get update && \
    apt-get install -y apt-transport-https dirmngr && \
@ -7,3 +7,7 @@ services:
      POSTGRES_PASSWORD: mysecretpassword
    ports:
      - 5432:5432
    networks:
      default:
        aliases:
          - postgre-sql.local
@ -5,4 +5,11 @@ toc_priority: 25
toc_title: hidden
---

# ClickHouse Engines

There are two key engine kinds in ClickHouse:

- [Table engines](table-engines/index.md)
- [Database engines](database-engines/index.md)

{## [Original article](https://clickhouse.tech/docs/en/engines/) ##}
@ -19,7 +19,7 @@ The table engine (type of table) determines:

### MergeTree {#mergetree}

The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, and other features not supported in other engines.
The most universal and functional table engines for high-load tasks. The property shared by these engines is quick data insertion with subsequent background data processing. `MergeTree` family engines support data replication (with [Replicated\*](../../engines/table-engines/mergetree-family/replication.md#table_engines-replication) versions of engines), partitioning, secondary data-skipping indexes, and other features not supported in other engines.

Engines in the family:

@ -80,4 +80,4 @@ To select data from a virtual column, you must specify its name in the `SELECT`

If you create a table with a column that has the same name as one of the table virtual columns, the virtual column becomes inaccessible. We don’t recommend doing this. To help avoid conflicts, virtual column names are usually prefixed with an underscore.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/) <!--hide-->
@ -1,58 +0,0 @@
---
toc_priority: 78
toc_title: General Questions
---

# General Questions {#general-questions}

## Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}

We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.

These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.

Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface.

## What If I Have a Problem with Encodings When Using Oracle Through ODBC? {#oracle-odbc-encodings}

If you use Oracle through the ODBC driver as a source of external dictionaries, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).

**Example**

``` sql
NLS_LANG=RUSSIAN_RUSSIA.UTF8
```

## How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file}

### Using INTO OUTFILE Clause {#using-into-outfile-clause}

Add an [INTO OUTFILE](../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file'
```

By default, ClickHouse uses the [TabSeparated](../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../interfaces/formats.md), use the [FORMAT clause](../sql-reference/statements/select/format.md#format-clause).

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
```

### Using a File-Engine Table {#using-a-file-engine-table}

See [File](../engines/table-engines/special/file.md).

### Using Command-Line Redirection {#using-command-line-redirection}

``` sql
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
```

See [clickhouse-client](../interfaces/cli.md).

{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##}
docs/en/faq/general/dbms-naming.md (new file)
@ -0,0 +1,11 @@
---
toc_hidden: true
toc_priority: 10
---

# What Does “ClickHouse” Mean? {#what-does-clickhouse-mean}

It’s a combination of “**Click**stream” and “Data ware**house**”. It comes from the original use case at Yandex.Metrica, where ClickHouse was supposed to keep records of all clicks by people from all over the Internet, and it still does the job. You can read more about this use case on the [ClickHouse history](../../introduction/history.md) page.

!!! info "Fun fact"
    Many years after ClickHouse got its name, this approach of combining two words that are meaningful on their own has been highlighted as the best way to name a database in [research by Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), an Associate Professor of Databases at Carnegie Mellon University. ClickHouse shared his “best database name of all time” award with Postgres.
docs/en/faq/general/index.md (new file)
@ -0,0 +1,18 @@
---
toc_hidden_folder: true
toc_priority: 1
toc_title: General
---

# General Questions About ClickHouse {#general-questions}

Questions:

- [What does “ClickHouse” mean?](../../faq/general/dbms-naming.md)
- [What does “Не тормозит” mean?](../../faq/general/ne-tormozit.md)
- [Why not use something like MapReduce?](../../faq/general/mapreduce.md)

!!! info "Don’t see what you were looking for?"
    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.

{## [Original article](https://clickhouse.tech/docs/en/faq/general/) ##}
docs/en/faq/general/mapreduce.md (new file)
@ -0,0 +1,12 @@
---
toc_hidden: true
toc_priority: 20
---

# Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}

We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Yandex uses its in-house solution, YT.

These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.

Most MapReduce implementations allow you to execute arbitrary code on a cluster. But a declarative query language is better suited to OLAP to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala or Shark (outdated) for Spark, as well as Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface.
docs/en/faq/general/ne-tormozit.md (new file)
@ -0,0 +1,24 @@
---
toc_hidden: true
toc_priority: 11
---

# What Does “Не тормозит” Mean? {#what-does-ne-tormozit-mean}

This question usually arises when people see official ClickHouse t-shirts. They have large words **“ClickHouse не тормозит”** on the front.

Before ClickHouse became open-source, it had been developed as an in-house storage system by the largest Russian IT company, [Yandex](https://yandex.com/company/). That’s why it initially got its slogan in Russian, which is “не тормозит”. After the open-source release we first produced some of those t-shirts for events in Russia and it was a no-brainer to use the slogan as-is.

One of the following batches of those t-shirts was supposed to be given away at events outside of Russia and we tried to make an English version of the slogan. Unfortunately, the Russian language is kind of elegant in terms of expressing stuff, space on a t-shirt is limited, and we failed to come up with a good enough translation (most options appeared to be either long or inaccurate), so we decided to keep the slogan in Russian even on t-shirts produced for international events. It appeared to be a great decision because people all over the world get positively surprised and curious when they see it.

So, what does it mean? Here are some ways to translate *“не тормозит”*:

- If you translate it literally, it’d be something like *“ClickHouse doesn’t press the brake pedal”*.
- If you want to express it as close as possible to how it sounds to a Russian person with an IT background, it’d be something like *“If your larger system lags, it’s not because it uses ClickHouse”*.
- Shorter, but not so precise versions could be *“ClickHouse is not slow”*, *“ClickHouse doesn’t lag”* or just *“ClickHouse is fast”*.

If you haven’t seen one of those t-shirts in person, you can check them out online in many ClickHouse-related videos. For example, this one:

![iframe](https://www.youtube.com/embed/bSyQahMVZ7w)

P.S. These t-shirts are not for sale, they are given away for free at most [ClickHouse Meetups](https://clickhouse.tech/#meet), usually for best questions or other forms of active participation.
@ -2,7 +2,16 @@
toc_folder_title: F.A.Q.
toc_hidden: true
toc_priority: 76
toc_title: hidden
---

# ClickHouse F.A.Q {#clickhouse-f-a-q}

This section of the documentation is a place to collect answers to ClickHouse-related questions that arise often.

Categories:

- [General](../faq/general/index.md)
- [Operations](../faq/operations/index.md)
- [Integration](../faq/integration/index.md)

{## [Original article](https://clickhouse.tech/docs/en/faq) ##}
docs/en/faq/integration/file-export.md (new file)
@ -0,0 +1,36 @@
---
toc_hidden: true
toc_priority: 10
---

# How Do I Export Data from ClickHouse to a File? {#how-to-export-to-file}

## Using INTO OUTFILE Clause {#using-into-outfile-clause}

Add an [INTO OUTFILE](../../sql-reference/statements/select/into-outfile.md#into-outfile-clause) clause to your query.

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file'
```

By default, ClickHouse uses the [TabSeparated](../../interfaces/formats.md#tabseparated) format for output data. To select the [data format](../../interfaces/formats.md), use the [FORMAT clause](../../sql-reference/statements/select/format.md#format-clause).

For example:

``` sql
SELECT * FROM table INTO OUTFILE 'file' FORMAT CSV
```

## Using a File-Engine Table {#using-a-file-engine-table}

See [File](../../engines/table-engines/special/file.md) table engine.

## Using Command-Line Redirection {#using-command-line-redirection}

``` sql
$ clickhouse-client --query "SELECT * from table" --format FormatName > result.txt
```

See [clickhouse-client](../../interfaces/cli.md).
docs/en/faq/integration/index.md (new file)
@ -0,0 +1,17 @@
---
toc_hidden_folder: true
toc_priority: 3
toc_title: Integration
---

# Questions About Integrating ClickHouse with Other Systems {#question-about-integrating-clickhouse-and-other-systems}

Questions:

- [How do I export data from ClickHouse to a file?](../../faq/integration/file-export.md)
- [What if I have a problem with encodings when connecting to Oracle via ODBC?](../../faq/integration/oracle-odbc.md)

!!! info "Don’t see what you were looking for?"
    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.

{## [Original article](https://clickhouse.tech/docs/en/faq/integration/) ##}
docs/en/faq/integration/oracle-odbc.md (new file)
@ -0,0 +1,14 @@
---
toc_hidden: true
toc_priority: 20
---

# What If I Have a Problem with Encodings When Using Oracle Via ODBC? {#oracle-odbc-encodings}

If you use Oracle as a source of ClickHouse external dictionaries via the Oracle ODBC driver, you need to set the correct value for the `NLS_LANG` environment variable in `/etc/default/clickhouse`. For more information, see the [Oracle NLS\_LANG FAQ](https://www.oracle.com/technetwork/products/globalization/nls-lang-099431.html).

**Example**

``` sql
NLS_LANG=RUSSIAN_RUSSIA.UTF8
```
docs/en/faq/operations/index.md (new file)
@ -0,0 +1,16 @@
---
toc_hidden_folder: true
toc_priority: 2
toc_title: Operations
---

# Questions About Operating ClickHouse Servers and Clusters {#question-about-operating-clickhouse-servers-and-clusters}

Questions:

- [Which ClickHouse version to use in production?](../../faq/operations/production.md)

!!! info "Don’t see what you were looking for?"
    Check out [other F.A.Q. categories](../../faq/index.md) or browse around main documentation articles found in the left sidebar.

{## [Original article](https://clickhouse.tech/docs/en/faq/production/) ##}
docs/en/faq/operations/production.md (new file)
@ -0,0 +1,69 @@
---
toc_hidden: true
toc_priority: 10
---

# Which ClickHouse Version to Use in Production? {#which-clickhouse-version-to-use-in-production}

First of all, let’s discuss why people ask this question in the first place. There are two key reasons:

1. ClickHouse is developed at a pretty high velocity, and there are usually 10+ stable releases per year. That makes for a wide range of releases to choose from, which is not so trivial a choice.
2. Some users want to avoid spending time figuring out which version works best for their use case and just follow someone else’s advice.

The second reason is more fundamental, so we’ll start with it and then get back to navigating through various ClickHouse releases.

## Which ClickHouse Version Do You Recommend? {#which-clickhouse-version-do-you-recommend}

It’s tempting to hire consultants or trust some known experts to get rid of responsibility for your production environment. You install some specific ClickHouse version that someone else recommended; now if there’s some issue with it, it’s not your fault, it’s someone else’s. This line of reasoning is a big trap. No external person knows better than you what’s going on in your company’s production environment.

So how do you properly choose which ClickHouse version to upgrade to? Or how do you choose your first ClickHouse version? First of all, you need to invest in setting up a **realistic pre-production environment**. In an ideal world, it could be a completely identical shadow copy, but that’s usually expensive.

Here are some key points for getting reasonable fidelity in a pre-production environment with not so high costs:

- The pre-production environment needs to run a set of queries as close as possible to what you intend to run in production:
    - Don’t make it read-only with some frozen data.
    - Don’t make it write-only with just copying data without building some typical reports.
    - Don’t wipe it clean instead of applying schema migrations.
- Use a sample of real production data and queries. Try to choose a sample that’s still representative and makes `SELECT` queries return reasonable results. Use obfuscation if your data is sensitive and internal policies don’t allow it to leave the production environment.
- Make sure that pre-production is covered by your monitoring and alerting software the same way your production environment is.
- If your production spans multiple datacenters or regions, make sure your pre-production does the same.
- If your production uses complex features like replication, distributed tables, or cascading materialized views, make sure they are configured similarly in pre-production.
- There’s a trade-off between using roughly the same number of servers or VMs in pre-production as in production but of a smaller size, or using far fewer of them but of the same size. The first option might catch extra network-related issues, while the latter is easier to manage.

The second area to invest in is **automated testing infrastructure**. Don’t assume that if some kind of query has executed successfully once, it’ll continue to do so forever. It’s OK to have some unit tests where ClickHouse is mocked, but make sure your product has a reasonable set of automated tests that are run against real ClickHouse and check that all important use cases are still working as expected.

An extra step forward could be contributing those automated tests to [ClickHouse’s open-source test infrastructure](https://github.com/ClickHouse/ClickHouse/tree/master/tests) that’s continuously used in its day-to-day development. It definitely will take some additional time and effort to learn [how to run it](../../development/tests.md) and then how to adapt your tests to this framework, but it’ll pay off by ensuring that ClickHouse releases are already tested against them when they are announced stable, instead of repeatedly losing time on reporting the issue after the fact and then waiting for a bugfix to be implemented, backported and released. Some companies even have contributing such tests to the infrastructure they use as an internal policy; most notably, it’s called the [Beyonce’s Rule](https://www.oreilly.com/library/view/software-engineering-at/9781492082781/ch01.html#policies_that_scale_well) at Google.

When you have your pre-production environment and testing infrastructure in place, choosing the best version is straightforward:

1. Routinely run your automated tests against new ClickHouse releases. You can do it even for ClickHouse releases that are marked as `testing`, but going forward to the next steps with them is not recommended.
2. Deploy the ClickHouse release that passed the tests to pre-production and check that all processes are running as expected.
3. Report any issues you discovered to [ClickHouse GitHub Issues](https://github.com/ClickHouse/ClickHouse/issues).
4. If there were no major issues, it should be safe to start deploying the ClickHouse release to your production environment. Investing in gradual release automation that implements an approach similar to [canary releases](https://martinfowler.com/bliki/CanaryRelease.html) or [green-blue deployments](https://martinfowler.com/bliki/BlueGreenDeployment.html) might further reduce the risk of issues in production.

As you might have noticed, there’s nothing specific to ClickHouse in the approach described above; people do that for any piece of infrastructure they rely on if they take their production environment seriously.

## How to Choose Between ClickHouse Releases? {#how-to-choose-between-clickhouse-releases}

If you look into the contents of the ClickHouse package repository, you’ll see four kinds of packages:

1. `testing`
2. `prestable`
3. `stable`
4. `lts` (long-term support)

As was mentioned earlier, `testing` is good mostly for noticing issues early; running such releases in production is not recommended because they are not tested as thoroughly as the other kinds of packages.

`prestable` is a release candidate which generally looks promising and is likely to be announced as `stable` soon. You can try them out in pre-production and report issues if you see any.

For production use, there are two key options: `stable` and `lts`. Here is some guidance on how to choose between them:

- `stable` is the kind of package we recommend by default. They are released roughly monthly (and thus provide new features with reasonable delay), and the three latest stable releases are supported in terms of diagnostics and backporting of bugfixes.
- `lts` are released twice a year and are supported for a year after their initial release. You might prefer them over `stable` in the following cases:
    - Your company has some internal policies that don’t allow for frequent upgrades or using non-LTS software.
    - You are using ClickHouse in some secondary products that either don’t require any complex ClickHouse features or don’t have enough resources to keep it updated.

Many teams who initially thought that `lts` is the way to go often switch to `stable` anyway because of some recent feature that’s important for their product.

!!! warning "Important"
    One more thing to keep in mind when upgrading ClickHouse: we’re always keeping an eye on compatibility across releases, but sometimes it’s not reasonable to keep it and some minor details might change. So make sure you check the [changelog](../../whats-new/changelog/index.md) before upgrading to see if there are any notes about backward-incompatible changes.
@ -821,6 +821,10 @@ ClickHouse supports the following algorithms of choosing replicas:
- [First or random](#load_balancing-first_or_random)
- [Round robin](#load_balancing-round_robin)

See also:

- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)

### Random (by Default) {#load_balancing-random}

``` sql

@ -1170,8 +1174,10 @@ Controls how fast errors in distributed tables are zeroed. If a replica is unava

See also:

- [load\_balancing](#load_balancing-round_robin)
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)

## distributed\_replica\_error\_cap {#settings-distributed_replica_error_cap}

@ -1182,8 +1188,24 @@ Error count of each replica is capped at this value, preventing a single replica

See also:

- [load\_balancing](#load_balancing-round_robin)
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)
- [distributed\_replica\_max\_ignored\_errors](#settings-distributed_replica_max_ignored_errors)

## distributed\_replica\_max\_ignored\_errors {#settings-distributed_replica_max_ignored_errors}

- Type: unsigned int
- Default value: 0

Number of errors that will be ignored while choosing replicas (according to `load_balancing` algorithm).

See also:

- [load\_balancing](#load_balancing-round_robin)
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
- [distributed\_replica\_error\_cap](#settings-distributed_replica_error_cap)
- [distributed\_replica\_error\_half\_life](#settings-distributed_replica_error_half_life)
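Editorial aside, not part of the documentation page above: the new setting's effect is simply to subtract its value from each replica's accumulated error count, clamped at zero, before the `load_balancing` algorithm compares replicas. A tiny hypothetical sketch of that arithmetic, with made-up error counts:

``` cpp
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical accumulated error counts for three replicas.
    std::vector<uint64_t> error_counts{3, 1, 0};
    const uint64_t max_ignored_errors = 2;   // the setting's value

    for (uint64_t errors : error_counts)
    {
        // Errors up to the threshold are ignored; the remainder is what
        // the load_balancing algorithm actually compares.
        uint64_t effective = errors > max_ignored_errors ? errors - max_ignored_errors : 0;
        std::cout << errors << " raw -> " << effective << " effective\n";
    }
    // Prints 3->1, 1->0, 0->0: the last two replicas look equally healthy
    // despite one of them having a single past error.
}
```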
## distributed\_directory\_monitor\_sleep\_time\_ms {#distributed_directory_monitor_sleep_time_ms}
@ -9,6 +9,7 @@ The following aggregate functions are supported:
- [`min`](../../sql-reference/aggregate-functions/reference/min.md#agg_function-min)
- [`max`](../../sql-reference/aggregate-functions/reference/max.md#agg_function-max)
- [`sum`](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum)
- [`sumWithOverflow`](../../sql-reference/aggregate-functions/reference/sumwithoverflow.md#sumwithoverflowx)
- [`groupBitAnd`](../../sql-reference/aggregate-functions/reference/groupbitand.md#groupbitand)
- [`groupBitOr`](../../sql-reference/aggregate-functions/reference/groupbitor.md#groupbitor)
- [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor)
@ -206,7 +206,7 @@ Setting fields:

ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it’s necessary to set the table name according to the table name case in the database.

If you have problems with encodings when using Oracle, see the corresponding [FAQ](../../../faq/general.md#oracle-odbc-encodings) article.
If you have problems with encodings when using Oracle, see the corresponding [F.A.Q.](../../../faq/integration/oracle-odbc.md) item.

### Known Vulnerability of the ODBC Dictionary Functionality {#known-vulnerability-of-the-odbc-dictionary-functionality}
@ -276,7 +276,7 @@ $ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&
### Example {#primer}

``` bash
$ curl -sS "<address>?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
$ curl -sS "http://localhost:8123/?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
```

## Predefined HTTP interface {#predefined_http_interface}
|
||||
tag.attrs['width'] = '640'
|
||||
if not tag.attrs.get('height'):
|
||||
tag.attrs['height'] = '320'
|
||||
if tag.name == 'iframe':
|
||||
tag.name = 'amp-iframe'
|
||||
tag.attrs['layout'] = 'responsive'
|
||||
del tag.attrs['alt']
|
||||
del tag.attrs['allowfullscreen']
|
||||
if not tag.attrs.get('width'):
|
||||
tag.attrs['width'] = '640'
|
||||
if not tag.attrs.get('height'):
|
||||
tag.attrs['height'] = '320'
|
||||
elif tag.name == 'a':
|
||||
href = tag.attrs.get('href')
|
||||
if href:
|
||||
|
@ -1,5 +1,6 @@
import collections
import datetime
import hashlib
import logging
import os

@ -39,13 +40,17 @@ def build_nav_entry(root, args):
        title = meta.get('toc_folder_title', 'hidden')
        prio = meta.get('toc_priority', 9999)
        logging.debug(f'Nav entry: {prio}, {title}, {path}')
        if not content.strip():
        if meta.get('toc_hidden') or not content.strip():
            title = 'hidden'
        if title == 'hidden':
            title = 'hidden-' + hashlib.sha1(content.encode('utf-8')).hexdigest()
        if args.nav_limit and len(result_items) >= args.nav_limit:
            break
        result_items.append((prio, title, path))
    result_items = sorted(result_items, key=lambda x: (x[0], x[1]))
    result = collections.OrderedDict([(item[1], item[2]) for item in result_items])
    if index_meta.get('toc_hidden_folder'):
        current_title += '|hidden-folder'
    return index_meta.get('toc_priority', 10000), current_title, result
@ -123,7 +123,7 @@ private:
};
bool is_interactive = true; /// Use either interactive line editing interface or batch mode.
bool need_render_progress = true; /// Render query execution progress.
bool send_logs = false; /// send_logs_level passed, do not use previous cursor position, to avoid overlaps with logs
bool has_received_logs = false; /// We have received some logs, do not use previous cursor position, to avoid overlaps with logs
bool echo_queries = false; /// Print queries before execution in batch mode.
bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode.
bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode.

@ -911,8 +911,6 @@ private:

    connection->forceConnected(connection_parameters.timeouts);

    send_logs = context.getSettingsRef().send_logs_level != LogsLevel::none;

    ASTPtr input_function;
    if (insert && insert->select)
        insert->tryFindInputFunction(input_function);

@ -1521,6 +1519,7 @@ private:

    void onLogData(Block & block)
    {
        has_received_logs = true;
        initLogsOutputStream();
        logs_out_stream->write(block);
        logs_out_stream->flush();

@ -1556,7 +1555,7 @@ private:
    void clearProgress()
    {
        written_progress_chars = 0;
        if (!send_logs)
        if (!has_received_logs)
            std::cerr << "\r" CLEAR_TO_END_OF_LINE;
    }

@ -1584,7 +1583,7 @@ private:

    const char * indicator = indicators[increment % 8];

    if (!send_logs && written_progress_chars)
    if (!has_received_logs && written_progress_chars)
        message << '\r';

    size_t prefix_size = message.count();

@ -1638,7 +1637,7 @@ private:

    message << CLEAR_TO_END_OF_LINE;

    if (send_logs)
    if (has_received_logs)
        message << '\n';

    ++increment;
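Editorial aside: the `send_logs` to `has_received_logs` rename above changes when the client may rewind the cursor with `\r`. A toy sketch of that behaviour follows; it is an illustration under assumed names, not the client's actual rendering code.

``` cpp
#include <chrono>
#include <iostream>
#include <thread>

// Toy progress renderer: redraw the progress line in place until some other
// output (a "server log" line) has been interleaved, then stop rewinding so
// the log line is not overwritten.
int main()
{
    bool has_received_logs = false;

    for (int percent = 0; percent <= 100; percent += 25)
    {
        if (!has_received_logs)
            std::cerr << '\r';          // safe to overwrite our own previous line
        std::cerr << "Progress: " << percent << "%" << std::flush;
        if (has_received_logs)
            std::cerr << '\n';          // append instead of rewinding

        if (percent == 50)
        {
            std::cerr << "\n<server log line arrives here>\n";
            has_received_logs = true;   // analogue of onLogData() setting the flag
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
    std::cerr << '\n';
}
```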
@ -65,7 +65,11 @@ std::string validateODBCConnectionString(const std::string & connection_string)
    else
        throw Exception("ODBC connection string parameter name doesn't begin with valid identifier character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);

    while (pos < end && isWordCharASCII(*pos))
    /// Additionally allow dash and dot symbols in names.
    /// Strictly speaking, the name with that characters should be escaped.
    /// But some ODBC drivers (e.g.) Postgres don't like escaping.

    while (pos < end && (isWordCharASCII(*pos) || *pos == '-' || *pos == '.'))
        ++pos;

    return std::string(begin, pos);

@ -213,7 +217,11 @@ std::string validateODBCConnectionString(const std::string & connection_string)

    auto write_value = [&](const std::string & value)
    {
        if (std::all_of(value.begin(), value.end(), isWordCharASCII))
        /// Additionally allow dash and dot symbols - for hostnames.
        /// Strictly speaking, hostname with that characters should be escaped.
        /// But some ODBC drivers (e.g.) Postgres don't like escaping.

        if (std::all_of(value.begin(), value.end(), [](char c) { return isWordCharASCII(c) || c == '.' || c == '-'; }))
            write_plain_value(value);
        else
            write_escaped_value(value);
|
||||
break;
|
||||
}
|
||||
|
||||
return Base::get(try_get_entry, get_priority);
|
||||
UInt64 max_ignored_errors = settings ? settings->distributed_replica_max_ignored_errors.value : 0;
|
||||
bool fallback_to_stale_replicas = settings ? settings->fallback_to_stale_replicas_for_distributed_queries.value : true;
|
||||
|
||||
return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
|
||||
}
|
||||
|
||||
ConnectionPoolWithFailover::Status ConnectionPoolWithFailover::getStatus() const
|
||||
@ -206,9 +209,12 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
|
||||
break;
|
||||
}
|
||||
|
||||
bool fallback_to_stale_replicas = settings ? bool(settings->fallback_to_stale_replicas_for_distributed_queries) : true;
|
||||
UInt64 max_ignored_errors = settings ? settings->distributed_replica_max_ignored_errors.value : 0;
|
||||
bool fallback_to_stale_replicas = settings ? settings->fallback_to_stale_replicas_for_distributed_queries.value : true;
|
||||
|
||||
return Base::getMany(min_entries, max_entries, max_tries, try_get_entry, get_priority, fallback_to_stale_replicas);
|
||||
return Base::getMany(min_entries, max_entries, max_tries,
|
||||
max_ignored_errors, fallback_to_stale_replicas,
|
||||
try_get_entry, get_priority);
|
||||
}
|
||||
|
||||
ConnectionPoolWithFailover::TryResult
|
||||
|
@ -72,6 +72,13 @@ void CurrentThread::attachInternalTextLogsQueue(const std::shared_ptr<InternalTe
    current_thread->attachInternalTextLogsQueue(logs_queue, client_logs_level);
}

void CurrentThread::setFatalErrorCallback(std::function<void()> callback)
{
    if (unlikely(!current_thread))
        return;
    current_thread->setFatalErrorCallback(callback);
}

std::shared_ptr<InternalTextLogsQueue> CurrentThread::getInternalTextLogsQueue()
{
    /// NOTE: this method could be called at early server startup stage
@ -46,6 +46,8 @@ public:
        LogsLevel client_logs_level);
    static std::shared_ptr<InternalTextLogsQueue> getInternalTextLogsQueue();

    static void setFatalErrorCallback(std::function<void()> callback);

    /// Makes system calls to update ProfileEvents that contain info from rusage and taskstats
    static void updatePerformanceCounters();
@ -100,28 +100,28 @@ public:
    /// this functor. The pools with lower result value will be tried first.
    using GetPriorityFunc = std::function<size_t(size_t index)>;

    /// Returns a single connection.
    Entry get(const TryGetEntryFunc & try_get_entry, const GetPriorityFunc & get_priority = GetPriorityFunc());


    /// Returns at least min_entries and at most max_entries connections (at most one connection per nested pool).
    /// The method will throw if it is unable to get min_entries alive connections or
    /// if fallback_to_stale_replicas is false and it is unable to get min_entries connections to up-to-date replicas.
    std::vector<TryResult> getMany(
        size_t min_entries, size_t max_entries, size_t max_tries,
        size_t max_ignored_errors,
        bool fallback_to_stale_replicas,
        const TryGetEntryFunc & try_get_entry,
        const GetPriorityFunc & get_priority = GetPriorityFunc(),
        bool fallback_to_stale_replicas = true);

    void reportError(const Entry & entry);
        const GetPriorityFunc & get_priority = GetPriorityFunc());

protected:
    struct PoolState;

    using PoolStates = std::vector<PoolState>;

    /// Returns a single connection.
    Entry get(size_t max_ignored_errors, bool fallback_to_stale_replicas,
        const TryGetEntryFunc & try_get_entry, const GetPriorityFunc & get_priority = GetPriorityFunc());

    /// This function returns a copy of pool states to avoid race conditions when modifying shared pool states.
    PoolStates updatePoolStates();
    PoolStates updatePoolStates(size_t max_ignored_errors);
    PoolStates getPoolStates() const;

    NestedPools nested_pools;

@ -139,9 +139,13 @@ protected:

template <typename TNestedPool>
typename TNestedPool::Entry
PoolWithFailoverBase<TNestedPool>::get(const TryGetEntryFunc & try_get_entry, const GetPriorityFunc & get_priority)
PoolWithFailoverBase<TNestedPool>::get(size_t max_ignored_errors, bool fallback_to_stale_replicas,
    const TryGetEntryFunc & try_get_entry, const GetPriorityFunc & get_priority)
{
    std::vector<TryResult> results = getMany(1, 1, 1, try_get_entry, get_priority);
    std::vector<TryResult> results = getMany(
        1 /* min entries */, 1 /* max entries */, 1 /* max tries */,
        max_ignored_errors, fallback_to_stale_replicas,
        try_get_entry, get_priority);
    if (results.empty() || results[0].entry.isNull())
        throw DB::Exception(
            "PoolWithFailoverBase::getMany() returned less than min_entries entries.",

@ -153,12 +157,13 @@ template <typename TNestedPool>
std::vector<typename PoolWithFailoverBase<TNestedPool>::TryResult>
PoolWithFailoverBase<TNestedPool>::getMany(
    size_t min_entries, size_t max_entries, size_t max_tries,
    size_t max_ignored_errors,
    bool fallback_to_stale_replicas,
    const TryGetEntryFunc & try_get_entry,
    const GetPriorityFunc & get_priority,
    bool fallback_to_stale_replicas)
    const GetPriorityFunc & get_priority)
{
    /// Update random numbers and error counts.
    PoolStates pool_states = updatePoolStates();
    PoolStates pool_states = updatePoolStates(max_ignored_errors);
    if (get_priority)
    {
        for (size_t i = 0; i < pool_states.size(); ++i)

@ -295,22 +300,6 @@ PoolWithFailoverBase<TNestedPool>::getMany(
    return try_results;
}

template <typename TNestedPool>
void PoolWithFailoverBase<TNestedPool>::reportError(const Entry & entry)
{
    for (size_t i = 0; i < nested_pools.size(); ++i)
    {
        if (nested_pools[i]->contains(entry))
        {
            std::lock_guard lock(pool_states_mutex);
            auto & pool_state = shared_pool_states[i];
            pool_state.error_count = std::min(max_error_cap, pool_state.error_count + 1);
            return;
        }
    }
    throw DB::Exception("Can't find pool to report error", DB::ErrorCodes::LOGICAL_ERROR);
}

template <typename TNestedPool>
struct PoolWithFailoverBase<TNestedPool>::PoolState
{

@ -335,7 +324,7 @@ private:

template <typename TNestedPool>
typename PoolWithFailoverBase<TNestedPool>::PoolStates
PoolWithFailoverBase<TNestedPool>::updatePoolStates()
PoolWithFailoverBase<TNestedPool>::updatePoolStates(size_t max_ignored_errors)
{
    PoolStates result;
    result.reserve(nested_pools.size());

@ -354,14 +343,17 @@ PoolWithFailoverBase<TNestedPool>::updatePoolStates()

    if (delta >= 0)
    {
        const UInt64 MAX_BITS = sizeof(UInt64) * CHAR_BIT;
        size_t shift_amount = MAX_BITS;
        /// Divide error counts by 2 every decrease_error_period seconds.
        size_t shift_amount = delta / decrease_error_period;
        if (decrease_error_period)
            shift_amount = delta / decrease_error_period;
        /// Update time but don't do it more often than once a period.
        /// Else if the function is called often enough, error count will never decrease.
        if (shift_amount)
            last_error_decrease_time = current_time;

        if (shift_amount >= sizeof(UInt64) * CHAR_BIT)
        if (shift_amount >= MAX_BITS)
        {
            for (auto & state : shared_pool_states)
                state.error_count = 0;

@ -378,6 +370,11 @@ PoolWithFailoverBase<TNestedPool>::updatePoolStates()

        result.assign(shared_pool_states.begin(), shared_pool_states.end());
    }

    /// distributed_replica_max_ignored_errors
    for (auto & state : result)
        state.error_count = std::max<UInt64>(0, state.error_count - max_ignored_errors);

    return result;
}
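Editorial aside: the updated `updatePoolStates()` halves each replica's error count once per `decrease_error_period` and then subtracts `max_ignored_errors` before replicas are compared. A hypothetical, self-contained sketch of that arithmetic (not the template itself):

``` cpp
#include <climits>
#include <cstdint>
#include <iostream>

// Sketch of the error-count bookkeeping above: counts decay with a half-life
// of decrease_error_period seconds, then max_ignored_errors is subtracted.
uint64_t effectiveErrors(uint64_t error_count,
                         uint64_t seconds_since_last_decrease,
                         uint64_t decrease_error_period,
                         uint64_t max_ignored_errors)
{
    // Divide the error count by 2 for every elapsed period.
    uint64_t shift_amount = decrease_error_period
        ? seconds_since_last_decrease / decrease_error_period
        : sizeof(uint64_t) * CHAR_BIT;            // period 0: forget errors at once
    if (shift_amount >= sizeof(uint64_t) * CHAR_BIT)
        error_count = 0;
    else
        error_count >>= shift_amount;

    // distributed_replica_max_ignored_errors: pretend up to N errors never happened.
    return error_count > max_ignored_errors ? error_count - max_ignored_errors : 0;
}

int main()
{
    // 8 accumulated errors, two half-life periods elapsed, 1 error ignored -> 1,
    // which is the value the load-balancing comparison would actually see.
    std::cout << effectiveErrors(8, 120, 60, 1) << '\n';
}
```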
@ -98,4 +98,15 @@ void ThreadStatus::attachInternalTextLogsQueue(const InternalTextLogsQueuePtr &
    thread_group->client_logs_level = client_logs_level;
}

void ThreadStatus::setFatalErrorCallback(std::function<void()> callback)
{
    fatal_error_callback = std::move(callback);
}

void ThreadStatus::onFatalError()
{
    if (fatal_error_callback)
        fatal_error_callback();
}

}
@ -145,6 +145,10 @@ public:
    void attachInternalTextLogsQueue(const InternalTextLogsQueuePtr & logs_queue,
        LogsLevel client_logs_level);

    /// Callback that is used to trigger sending fatal error messages to client.
    void setFatalErrorCallback(std::function<void()> callback);
    void onFatalError();

    /// Sets query context for current thread and its thread group
    /// NOTE: query_context have to be alive until detachQuery() is called
    void attachQueryContext(Context & query_context);

@ -200,6 +204,9 @@ protected:
    std::unique_ptr<RUsageCounters> last_rusage;
    std::unique_ptr<TasksStatsCounters> taskstats;

    /// Is used to send logs from logs_queue to client in case of fatal errors.
    std::function<void()> fatal_error_callback;

private:
    void setupState(const ThreadGroupStatusPtr & thread_group_);
};
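Editorial aside: the fatal-error callback threaded through `ThreadStatus`, `CurrentThread` and `TCPHandler` boils down to a per-thread `std::function` that the connection handler sets and the fault path invokes. A minimal illustrative model follows; the class name is an assumption, not the real type.

``` cpp
#include <functional>
#include <iostream>
#include <utility>

// Minimal model of the callback plumbing added above: a per-thread status
// object stores a std::function that the fatal-error path invokes so buffered
// logs can be flushed to the client.
class ThreadStatusSketch
{
public:
    void setFatalErrorCallback(std::function<void()> callback)
    {
        fatal_error_callback = std::move(callback);
    }

    void onFatalError()
    {
        if (fatal_error_callback)
            fatal_error_callback();
    }

private:
    std::function<void()> fatal_error_callback;
};

int main()
{
    ThreadStatusSketch status;

    // The connection handler registers "send whatever logs we have" ...
    status.setFatalErrorCallback([] { std::cout << "flushing logs to client\n"; });

    // ... and the fault-handling path triggers it when something goes badly wrong.
    status.onFatalError();
}
```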
@ -316,7 +316,7 @@ struct Settings : public SettingsCollection<Settings>
    M(SettingBool, log_profile_events, true, "Log query performance statistics into the query_log and query_thread_log.", 0) \
    M(SettingBool, log_query_settings, true, "Log query settings into the query_log.", 0) \
    M(SettingBool, log_query_threads, true, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \
    M(SettingLogsLevel, send_logs_level, LogsLevel::none, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'none'", 0) \
    M(SettingLogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \
    M(SettingBool, enable_optimize_predicate_expression, 1, "If it is set to true, optimize predicates to subqueries.", 0) \
    M(SettingBool, enable_optimize_predicate_expression_to_final_subquery, 1, "Allow push predicate to final subquery.", 0) \
    \

@ -348,6 +348,7 @@ struct Settings : public SettingsCollection<Settings>
    \
    M(SettingSeconds, distributed_replica_error_half_life, DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD, "Time period reduces replica error counter by 2 times.", 0) \
    M(SettingUInt64, distributed_replica_error_cap, DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT, "Max number of errors per replica, prevents piling up an incredible amount of errors if replica was offline for some time and allows it to be reconsidered in a shorter amount of time.", 0) \
    M(SettingUInt64, distributed_replica_max_ignored_errors, 0, "Number of errors that will be ignored while choosing replicas", 0) \
    \
    M(SettingBool, allow_experimental_live_view, false, "Enable LIVE VIEW. Not mature enough.", 0) \
    M(SettingSeconds, live_view_heartbeat_interval, DEFAULT_LIVE_VIEW_HEARTBEAT_INTERVAL_SEC, "The heartbeat interval in seconds to indicate live query is alive.", 0) \
@ -542,6 +542,7 @@ IMPLEMENT_SETTING_ENUM(FormatSettings::DateTimeInputFormat, DATE_TIME_INPUT_FORM

#define LOGS_LEVEL_LIST_OF_NAMES(M) \
    M(none, "none") \
    M(fatal, "fatal") \
    M(error, "error") \
    M(warning, "warning") \
    M(information, "information") \
@ -302,6 +302,7 @@ using SettingDateTimeInputFormat = SettingEnum<FormatSettings::DateTimeInputForm
enum class LogsLevel
{
    none = 0, /// Disable
    fatal,
    error,
    warning,
    information,
@ -30,7 +30,9 @@ namespace ErrorCodes
    extern const int LOGICAL_ERROR;
}

static const std::vector<String> supported_functions{"any", "anyLast", "min", "max", "sum", "groupBitAnd", "groupBitOr", "groupBitXor", "sumMap", "groupArrayArray", "groupUniqArrayArray"};
static const std::vector<String> supported_functions{"any", "anyLast", "min",
    "max", "sum", "sumWithOverflow", "groupBitAnd", "groupBitOr", "groupBitXor",
    "sumMap", "groupArrayArray", "groupUniqArrayArray"};


String DataTypeCustomSimpleAggregateFunction::getName() const
@ -65,6 +65,21 @@ void registerDataTypeNumbers(DataTypeFactory & factory)
    factory.registerAlias("DOUBLE", "Float64", DataTypeFactory::CaseInsensitive);

    factory.registerAlias("DOUBLE PRECISION", "Float64", DataTypeFactory::CaseInsensitive);

    /// MySQL
    factory.registerAlias("TINYINT SIGNED", "Int8", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("INT1 SIGNED", "Int8", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("SMALLINT SIGNED", "Int16", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("INT SIGNED", "Int32", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("INTEGER SIGNED", "Int32", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("BIGINT SIGNED", "Int64", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("TINYINT UNSIGNED", "UInt8", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("INT1 UNSIGNED", "UInt8", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("SMALLINT UNSIGNED", "UInt16", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("INT UNSIGNED", "UInt32", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("INTEGER UNSIGNED", "UInt32", DataTypeFactory::CaseInsensitive);
    factory.registerAlias("BIGINT UNSIGNED", "UInt64", DataTypeFactory::CaseInsensitive);

}

}
@ -210,13 +210,11 @@ void TranslateQualifiedNamesMatcher::visit(ASTExpressionList & node, const ASTPt
            if (tables_with_columns.empty())
                throw Exception("An asterisk cannot be replaced with empty columns.", ErrorCodes::LOGICAL_ERROR);
            has_asterisk = true;
            break;
        }
        else if (const auto * qa = child->as<ASTQualifiedAsterisk>())
        {
            visit(*qa, child, data); /// check if it's OK before rewrite
            has_asterisk = true;
            break;
        }
    }
@ -55,6 +55,14 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
        if (ParserKeyword("PRECISION").ignore(pos))
            type_name_suffix = "PRECISION";
    }
    else if (type_name_upper.find("INT") != std::string::npos)
    {
        /// Support SIGNED and UNSIGNED integer type modifiers for compatibility with MySQL
        if (ParserKeyword("SIGNED").ignore(pos))
            type_name_suffix = "SIGNED";
        else if (ParserKeyword("UNSIGNED").ignore(pos))
            type_name_suffix = "UNSIGNED";
    }

    if (!type_name_suffix.empty())
        type_name = type_name_upper + " " + type_name_suffix;
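Editorial aside: together with the `registerAlias` calls earlier in this commit, the parser change lets MySQL-style names such as `INT UNSIGNED` resolve to ClickHouse types. A self-contained toy model of that mapping follows (a plain case-insensitive table, not the real `DataTypeFactory` API):

``` cpp
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>
#include <unordered_map>

// Toy model: the parser glues "INT" + "UNSIGNED" into one type name, and an
// alias table (standing in for DataTypeFactory::registerAlias with
// CaseInsensitive matching) maps it to a ClickHouse type name.
static std::string toUpper(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
    return s;
}

int main()
{
    const std::unordered_map<std::string, std::string> aliases{
        {"TINYINT SIGNED", "Int8"},   {"TINYINT UNSIGNED", "UInt8"},
        {"SMALLINT SIGNED", "Int16"}, {"SMALLINT UNSIGNED", "UInt16"},
        {"INT SIGNED", "Int32"},      {"INT UNSIGNED", "UInt32"},
        {"BIGINT SIGNED", "Int64"},   {"BIGINT UNSIGNED", "UInt64"},
    };

    // What the parser would produce for a MySQL-style column definition.
    std::string type_name = toUpper("int") + " " + "UNSIGNED";

    auto it = aliases.find(toUpper(type_name));
    std::cout << type_name << " -> " << (it != aliases.end() ? it->second : "unknown") << '\n';
    // Prints: INT UNSIGNED -> UInt32
}
```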
@ -232,15 +232,12 @@ HTTPHandler::HTTPHandler(IServer & server_, const std::string & name)


void HTTPHandler::processQuery(
    Context & context,
    Poco::Net::HTTPServerRequest & request,
    HTMLForm & params,
    Poco::Net::HTTPServerResponse & response,
    Output & used_output)
{
    Context context = server.context();

    CurrentThread::QueryScope query_scope(context);

    LOG_TRACE(log, "Request URI: {}", request.getURI());

    std::istream & istr = request.stream();

@ -683,6 +680,11 @@ void HTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
    setThreadName("HTTPHandler");
    ThreadStatus thread_status;

    /// Should be initialized before anything,
    /// For correct memory accounting.
    Context context = server.context();
    CurrentThread::QueryScope query_scope(context);

    Output used_output;

    /// In case of exception, send stack trace to client.

@ -706,7 +708,7 @@ void HTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
        throw Exception("The Transfer-Encoding is not chunked and there is no Content-Length header for POST request", ErrorCodes::HTTP_LENGTH_REQUIRED);
    }

    processQuery(request, params, response, used_output);
    processQuery(context, request, params, response, used_output);
    LOG_INFO(log, "Done processing query");
}
catch (...)
@ -72,6 +72,7 @@ private:

    /// Also initializes 'used_output'.
    void processQuery(
        Context & context,
        Poco::Net::HTTPServerRequest & request,
        HTMLForm & params,
        Poco::Net::HTTPServerResponse & response,
@ -189,6 +189,7 @@ void TCPHandler::runImpl()
state.logs_queue = std::make_shared<InternalTextLogsQueue>();
state.logs_queue->max_priority = Poco::Logger::parseLevel(client_logs_level.toString());
CurrentThread::attachInternalTextLogsQueue(state.logs_queue, client_logs_level);
CurrentThread::setFatalErrorCallback([this]{ sendLogs(); });
}
query_context->setExternalTablesInitializer([&connection_settings, this] (Context & context)
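Note (editorial addition): the fatal-error callback registered above is what lets a crashing server flush its last log messages to the connected client. A hedged SQL illustration of the client-side setting involved; the SQL tests further down in this diff switch from 'none' to 'fatal' for the same reason:

    -- Ask the server to forward only logs of 'fatal' severity over the connection.
    SET send_logs_level = 'fatal';
    SELECT 1;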
@ -728,6 +728,10 @@ void AlterCommands::apply(StorageInMemoryMetadata & metadata, const Context & co
metadata_copy.primary_key.definition_ast = nullptr;
}
/// Changes in columns may lead to changes in secondary indices
for (auto & index : metadata_copy.secondary_indices)
index.recalculateWithNewColumns(metadata_copy.columns, context);
/// Changes in columns may lead to changes in TTL expressions.
auto column_ttl_asts = metadata_copy.columns.getColumnTTLs();
for (const auto & [name, ast] : column_ttl_asts)
@ -289,7 +289,7 @@ void IStorage::check(const Block & block, bool need_all) const
void IStorage::setColumns(ColumnsDescription columns_)
{
if (columns_.getOrdinary().empty())
if (columns_.getAllPhysical().empty())
throw Exception("Empty list of columns passed", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);
metadata.columns = std::move(columns_);
}
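Note (editorial addition): a hedged reading of the relaxed check above is that a table no longer needs ordinary columns, only at least one physical column (ordinary or MATERIALIZED). A minimal SQL sketch under that assumption; it is not taken from this diff:

    -- Presumably accepted after this change: the only physical column is MATERIALIZED.
    CREATE TABLE only_materialized (m UInt8 MATERIALIZED 1) ENGINE = Memory;
    -- Presumably still rejected, since ALIAS columns are not physical:
    -- CREATE TABLE only_alias (a UInt8 ALIAS 1) ENGINE = Memory;  -- 'Empty list of columns passed'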
@ -117,6 +117,10 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast
return result;
}
void IndexDescription::recalculateWithNewColumns(const ColumnsDescription & new_columns, const Context & context)
{
*this = getIndexFromAST(definition_ast, new_columns, context);
}
bool IndicesDescription::has(const String & name) const
{
@ -154,6 +158,7 @@ IndicesDescription IndicesDescription::parse(const String & str, const ColumnsDe
return result;
}
ExpressionActionsPtr IndicesDescription::getSingleExpressionForIndices(const ColumnsDescription & columns, const Context & context) const
{
ASTPtr combined_expr_list = std::make_shared<ASTExpressionList>();
@ -55,6 +55,10 @@ struct IndexDescription
/// unintentionally share AST variables and modify them.
IndexDescription(const IndexDescription & other);
IndexDescription & operator=(const IndexDescription & other);
/// Recalculate index with new columns because index expression may change
/// if something changes in columns.
void recalculateWithNewColumns(const ColumnsDescription & new_columns, const Context & context);
};
/// All secondary indices in storage
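Note (editorial addition): to make recalculateWithNewColumns concrete, a short SQL sketch mirroring the integration test later in this diff; after an ALTER changes the column, the index expression defined on it is rebuilt against the new column definition:

    CREATE TABLE t_with_index
    (
        id UInt64,
        value UInt64,
        INDEX id_index id TYPE minmax GRANULARITY 1
    ) ENGINE = MergeTree() ORDER BY tuple();

    -- The secondary index defined on `id` is recalculated with the new column.
    ALTER TABLE t_with_index MODIFY COLUMN id UInt64 CODEC(Delta, LZ4);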
@ -320,7 +320,6 @@ void MergeTreeBaseSelectProcessor::executePrewhereActions(Block & block, const P
else
{
auto & ctn = block.getByName(prewhere_info->prewhere_column_name);
ctn.type = std::make_shared<DataTypeUInt8>();
ctn.column = ctn.type->createColumnConst(block.rows(), 1u)->convertToFullColumnIfConst();
}
@ -4,9 +4,11 @@ const char * auto_contributors[] {
"243f6a8885a308d313198a2e037",
"821008736@qq.com",
"Akazz",
"Albert Kidrachev",
"Alberto",
"Aleksandra (Ася)",
"Aleksei Levushkin",
"Aleksey",
"Aleksey Akulovich",
"Alex Bocharov",
"Alex Krash",
@ -32,12 +34,15 @@ const char * auto_contributors[] {
"Alexander Sapin",
"Alexander Tokmakov",
"Alexander Tretiakov",
"Alexandr Kondratev",
"Alexandr Krasheninnikov",
"Alexandr Orlov",
"Alexei Averchenko",
"Alexey",
"Alexey Arno",
"Alexey Dushechkin",
"Alexey Elymanov",
"Alexey Ilyukhov",
"Alexey Milovidov",
"Alexey Tronov",
"Alexey Vasiliev",
@ -53,18 +58,22 @@ const char * auto_contributors[] {
"AndreevDm",
"Andrei Bodrov",
"Andrei Chulkov",
"Andrei Nekrashevich",
"Andrew Grigorev",
"Andrew Onyshchuk",
"Andrey",
"Andrey Chulkov",
"Andrey Dudin",
"Andrey Kadochnikov",
"Andrey Konchagin",
"Andrey Konyaev",
"Andrey M",
"Andrey Mironov",
"Andrey Skobtsov",
"Andrey Urusov",
"Andy Yang",
"Anton Kobzev",
"Anton Okhitin",
"Anton Okulov",
"Anton Patsev",
"Anton Popov",
@ -74,7 +83,9 @@ const char * auto_contributors[] {
"Anton Zhabolenko",
"Arsen Hakobyan",
"Artem Andreenko",
"Artem Gavrilov",
"Artem Konovalov",
"Artem Streltsov",
"Artem Zuikov",
"Artemeey",
"Artemkin Pavel",
@ -90,6 +101,7 @@ const char * auto_contributors[] {
"BanyRule",
"Baudouin Giard",
"BayoNet",
"Bharat Nallan",
"Big Elephant",
"BlahGeek",
"Bogdan",
@ -101,6 +113,7 @@ const char * auto_contributors[] {
"Brett Hoerner",
"Bulat Gaifullin",
"Carbyn",
"Chao Wang",
"Chen Yufei",
"Ciprian Hacman",
"Clement Rodriguez",
@ -119,6 +132,7 @@ const char * auto_contributors[] {
"Denis Zhuravlev",
"Derek Perkins",
"Ding Xiang Fei",
"Dmitriev Mikhail",
"Dmitrii Kovalkov",
"Dmitrii Raev",
"Dmitry",
@ -132,6 +146,7 @@ const char * auto_contributors[] {
"Dmitry Rubashkin",
"Dmitry S..ky / skype: dvska-at-skype",
"Doge",
"DoomzD",
"Dr. Strange Looker",
"Eldar Zaitov",
"Elena Baskakova",
@ -141,17 +156,21 @@ const char * auto_contributors[] {
"Ernest Poletaev",
"Eugene Klimov",
"Eugene Konkov",
"Evgenia Sudarikova",
"Evgenii Pravda",
"Evgeniy Gatov",
"Evgeniy Udodov",
"Evgeny Konkov",
"Ewout",
"Fabian Stäber",
"Fadi Hadzh",
"Fan()",
"FawnD2",
"FeehanG",
"Filipe Caixeta",
"Flowyi",
"Francisco Barón",
"Frank Zhao",
"Fruit of Eden",
"Gagan Arneja",
"Gary Dotzler",
@ -196,6 +215,7 @@ const char * auto_contributors[] {
"Ivan Kushnarenko",
"Ivan Lezhankin",
"Ivan Remen",
"Ivan Starkov",
"Ivan Zhukov",
"JaosnHsieh",
"Jason",
@ -216,6 +236,7 @@ const char * auto_contributors[] {
"Konstantin Malanchev",
"Konstantin Podshumok",
"Korviakov Andrey",
"Kozlov Ivan",
"Kruglov Pavel",
"Leonardo Cecchi",
"Leopold Schabel",
@ -246,6 +267,7 @@ const char * auto_contributors[] {
"Max Akhmedov",
"Max Vetrov",
"Maxim Akhmedov",
"Maxim Babenko",
"Maxim Fedotov",
"Maxim Fridental",
"Maxim Khrisanfov",
@ -263,6 +285,7 @@ const char * auto_contributors[] {
"Michael Kolupaev",
"Michael Monashev",
"Michael Razuvaev",
"Michael Smitasin",
"Michal Lisowski",
"Mihail Fandyushin",
"Mikahil Nacharov",
@ -271,12 +294,14 @@ const char * auto_contributors[] {
"Mikhail Fandyushin",
"Mikhail Filimonov",
"Mikhail Korotov",
"Mikhail Malafeev",
"Mikhail Nacharov",
"Mikhail Salosin",
"Mikhail Surin",
"Mikhail f. Shiryaev",
"Milad Arabi",
"Mohammad Hossein Sekhavat",
"MovElb",
"Murat Kabilov",
"NIKITA MIKHAILOV",
"Narek Galstyan",
@ -289,8 +314,10 @@ const char * auto_contributors[] {
"Nikita Lapkov",
"Nikita Mikhailov",
"Nikita Mikhaylov",
"Nikita Orlov",
"Nikita Vasilev",
"Nikolai Kochetov",
"Nikolai Sorokin",
"Nikolay Degterinsky",
"Nikolay Kirsh",
"Nikolay Semyachkin",
@ -305,6 +332,7 @@ const char * auto_contributors[] {
"Olga Khvostikova",
"Orivej Desh",
"Oskar Wojciski",
"Paramtamtam",
"Pavel",
"Pavel Kartaviy",
"Pavel Kartavyy",
@ -315,8 +343,10 @@ const char * auto_contributors[] {
"Pavlo Bashynskiy",
"Pawel Rog",
"Persiyanov Dmitriy Andreevich",
"Pervakov Grigorii",
"Pervakov Grigory",
"Philippe Ombredanne",
"Potya",
"Pradeep Chhetri",
"Quid37",
"Rafael David Tinoco",
@ -324,6 +354,7 @@ const char * auto_contributors[] {
"Ravengg",
"Reilee",
"Reto Kromer",
"Ri",
"Roman Lipovsky",
"Roman Nikolaev",
"Roman Nozdrin",
@ -331,10 +362,12 @@ const char * auto_contributors[] {
"Roman Tsisyk",
"Ruslan",
"Ruzal Ibragimov",
"S.M.A. Djawadi",
"Sabyanin Maxim",
"SaltTan",
"Sami Kerola",
"Samuel Chou",
"Saulius Valatka",
"Serge Rider",
"Sergei Bocharov",
"Sergei Semin",
@ -364,10 +397,12 @@ const char * auto_contributors[] {
"Sébastien Launay",
"TAC",
"TCeason",
"Tagir Kuskarov",
"Tangaev",
"Tema Novikov",
"The-Alchemist",
"Tobias Adamson",
"Tom Bombadil",
"Tsarkova Anastasia",
"Ubuntu",
"Ubus",
@ -377,6 +412,7 @@ const char * auto_contributors[] {
"Vadim Skipin",
"VadimPE",
"Valera Ryaboshapko",
"Vasily Morozov",
"Vasily Nemkov",
"Vasily Okunev",
"Vasily Vasilkov",
@ -396,7 +432,9 @@ const char * auto_contributors[] {
"Vivien Maisonneuve",
"Vlad Arkhipov",
"Vladimir",
"Vladimir Bunchuk",
"Vladimir Chebotarev",
"Vladimir Golovchenko",
"Vladimir Goncharov",
"Vladimir Kolobaev",
"Vladimir Kopysov",
@ -405,6 +443,7 @@ const char * auto_contributors[] {
"Vladislav Rassokhin",
"Vladislav Smirnov",
"Vojtech Splichal",
"Volodymyr Kuznetsov",
"Vsevolod Orlov",
"Vxider",
"Vyacheslav Alipov",
@ -416,6 +455,7 @@ const char * auto_contributors[] {
"Yegor Andreenko",
"Yiğit Konur",
"Yohann Jardin",
"Yuntao Wu",
"Yuri Dyachenko",
"Yurii Vlasenko",
"Yuriy",
@ -423,6 +463,7 @@ const char * auto_contributors[] {
"Yury Karpovich",
"Yury Stankevich",
"Zhichang Yu",
"Zhipeng",
"abdrakhmanov",
"abyss7",
"achimbab",
@ -448,6 +489,8 @@ const char * auto_contributors[] {
"benamazing",
"bgranvea",
"blazerer",
"bluebirddm",
"bobrovskij artemij",
"bseng",
"cekc",
"champtar",
@ -462,6 +505,7 @@ const char * auto_contributors[] {
"decaseal",
"dependabot-preview[bot]",
"dependabot[bot]",
"dgrr",
"dimarub2000",
"dinosaur",
"dmitrii",
@ -471,20 +515,28 @@ const char * auto_contributors[] {
"egatov",
"elBroom",
"elenaspb2019",
"emironyuk",
"evtan",
"exprmntr",
"ezhaka",
"f1yegor",
"favstovol",
"felixoid",
"fenglv",
"fessmage",
"filimonov",
"flow",
"foxxmary",
"frank",
"franklee",
"fredchenbj",
"g-arslan",
"ggerogery",
"giordyb",
"glockbender",
"hao.he",
"hcz",
"hexiaoting",
"hotid",
"igor",
"igor.lapko",
@ -500,6 +552,7 @@ const char * auto_contributors[] {
"kreuzerkrieg",
"ks1322",
"kshvakov",
"kssenii",
"l",
"lalex",
"leozhang",
@ -513,11 +566,14 @@ const char * auto_contributors[] {
"liuyimin",
"liyang",
"lomberts",
"long2ice",
"luc1ph3r",
"madianjun",
"maiha",
"malkfilipp",
"maqroll",
"maxim",
"maxim-babenko",
"maxkuzn",
"maxulan",
"memo",
@ -527,8 +583,10 @@ const char * auto_contributors[] {
"mfridental",
"miha-g",
"millb",
"mnkonkova",
"morty",
"moscas",
"myrrc",
"nagorny",
"never lee",
"nicelulu",
@ -543,6 +601,7 @@ const char * auto_contributors[] {
"palasonicq",
"peshkurov",
"philip.han",
"potya",
"proller",
"pufit",
"pyos",
@ -562,6 +621,7 @@ const char * auto_contributors[] {
"simon-says",
"spyros87",
"stavrolia",
"stepenhu",
"sundy",
"sundy-li",
"sundyli",
@ -577,9 +637,11 @@ const char * auto_contributors[] {
"velom",
"vicdashkov",
"vinity",
"vitstn",
"vxider",
"vzakaznikov",
"wangchao",
"xPoSx",
"yonesko",
"zamulla",
"zhang2014",
@ -600,7 +662,9 @@ const char * auto_contributors[] {
"小路",
"张健",
"张风啸",
"极客青年",
"谢磊",
"黄朝晖",
"黄璞",
"박현우",
nullptr};
@ -271,6 +271,10 @@ def run_tests_array(all_tests_with_params):
if stderr:
print(stderr.encode('utf-8'))
# Stop on fatal errors like segmentation fault. They are sent to the client via logs.
if ' <Fatal> ' in stderr:
SERVER_DIED = True
if args.stop and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) and not 'Received exception from server' in stderr:
SERVER_DIED = True
@ -732,6 +732,10 @@ class ClickHouseInstance:
self.ipv6_address = ipv6_address
self.with_installed_binary = with_installed_binary
def is_built_with_thread_sanitizer(self):
build_opts = self.query("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
return "-fsanitize=thread" in build_opts
# Connects to the instance via clickhouse-client, sends a query (1st argument) and returns the answer
def query(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, ignore_error=False):
return self.client.query(sql, stdin, timeout, settings, user, password, ignore_error)
@ -7,15 +7,11 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
macros={"shard": 0, "replica": 1} )
main_configs=['configs/logs_config.xml'])
node2 = cluster.add_instance('node2',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
macros={"shard": 0, "replica": 2} )
main_configs=['configs/logs_config.xml'])
@pytest.fixture(scope="module")
@ -32,23 +28,34 @@ def test_alter_codec_pk(started_cluster):
try:
name = "test_alter_codec_pk"
node1.query("""
CREATE TABLE {name} (id UInt64) Engine=MergeTree() ORDER BY id
CREATE TABLE {name} (id UInt64, value UInt64) Engine=MergeTree() ORDER BY id
""".format(name=name))
node1.query("INSERT INTO {name} SELECT number, number * number from numbers(100)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)".format(name=name))
assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4950\n"
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt32 CODEC(Delta, LZ4)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4)".format(name=name))
node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name))
assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4953\n"
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4)".format(name=name))
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format(name=name))
node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name))
assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4956\n"
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64".format(name=name))
with pytest.raises(QueryRuntimeException):
@ -62,9 +69,11 @@ def test_alter_codec_index(started_cluster):
try:
name = "test_alter_codec_index"
node1.query("""
CREATE TABLE {name} (`id` UInt64, INDEX id_index id TYPE minmax GRANULARITY 1) Engine=MergeTree() ORDER BY tuple()
CREATE TABLE {name} (`id` UInt64, value UInt64, INDEX id_index id TYPE minmax GRANULARITY 1) Engine=MergeTree() ORDER BY tuple()
""".format(name=name))
node1.query("INSERT INTO {name} SELECT number, number * number from numbers(100)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)".format(name=name))
@ -73,11 +82,18 @@ def test_alter_codec_index(started_cluster):
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4)".format(name=name))
node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name))
assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4953\n"
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4)".format(name=name))
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format(name=name))
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format(name=name))
node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name))
assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4956\n"
node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64".format(name=name))
@ -153,6 +153,7 @@ def get_dict(source, layout, fields, suffix_name=''):
dictionary.generate_config()
return dictionary
def setup_module(module):
global DICTIONARIES
global cluster
@ -210,8 +211,42 @@ def get_dictionaries(fold, total_folds, all_dicts):
return all_dicts[fold * chunk_len : (fold + 1) * chunk_len]
def remove_mysql_dicts():
"""
We have a false-positive race condition in our OpenSSL version.
MySQL dictionaries use OpenSSL, so to prevent a known failure we
disable tests for these dictionaries.
Read of size 8 at 0x7b3c00005dd0 by thread T61 (mutexes: write M1010349240585225536):
#0 EVP_CIPHER_mode <null> (clickhouse+0x13b2223b)
#1 do_ssl3_write <null> (clickhouse+0x13a137bc)
#2 ssl3_write_bytes <null> (clickhouse+0x13a12387)
#3 ssl3_write <null> (clickhouse+0x139db0e6)
#4 ssl_write_internal <null> (clickhouse+0x139eddce)
#5 SSL_write <null> (clickhouse+0x139edf20)
#6 ma_tls_write <null> (clickhouse+0x139c7557)
#7 ma_pvio_tls_write <null> (clickhouse+0x139a8f59)
#8 ma_pvio_write <null> (clickhouse+0x139a8488)
#9 ma_net_real_write <null> (clickhouse+0x139a4e2c)
#10 ma_net_write_command <null> (clickhouse+0x139a546d)
#11 mthd_my_send_cmd <null> (clickhouse+0x13992546)
#12 mysql_close_slow_part <null> (clickhouse+0x13999afd)
#13 mysql_close <null> (clickhouse+0x13999071)
#14 mysqlxx::Connection::~Connection() <null> (clickhouse+0x1370f814)
#15 mysqlxx::Pool::~Pool() <null> (clickhouse+0x13715a7b)
TODO: remove this when OpenSSL is fixed or the thread sanitizer report is suppressed
"""
global DICTIONARIES
DICTIONARIES = [d for d in DICTIONARIES if not d.name.startswith("MySQL")]
@pytest.mark.parametrize("fold", list(range(10)))
def test_simple_dictionaries(started_cluster, fold):
if node.is_built_with_thread_sanitizer():
remove_mysql_dicts()
fields = FIELDS["simple"]
values = VALUES["simple"]
data = [Row(fields, vals) for vals in values]
@ -259,6 +294,10 @@ def test_simple_dictionaries(started_cluster, fold):
@pytest.mark.parametrize("fold", list(range(10)))
def test_complex_dictionaries(started_cluster, fold):
if node.is_built_with_thread_sanitizer():
remove_mysql_dicts()
fields = FIELDS["complex"]
values = VALUES["complex"]
data = [Row(fields, vals) for vals in values]
@ -292,6 +331,9 @@ def test_complex_dictionaries(started_cluster, fold):
@pytest.mark.parametrize("fold", list(range(10)))
def test_ranged_dictionaries(started_cluster, fold):
if node.is_built_with_thread_sanitizer():
remove_mysql_dicts()
fields = FIELDS["ranged"]
values = VALUES["ranged"]
data = [Row(fields, vals) for vals in values]
@ -380,7 +422,7 @@ def test_key_value_complex_dictionaries(started_cluster, fold):
values = VALUES["complex"]
data = [Row(fields, vals) for vals in values]
all_complex_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "complex"]
all_complex_dicts = [d for d in DICTIONARIES_KV if d.structure.layout.layout_type == "complex"]
complex_dicts = get_dictionaries(fold, 10, all_complex_dicts)
for dct in complex_dicts:
dct.load_data(data)
@ -16,8 +16,23 @@ n3 = cluster.add_instance('n3', main_configs=['configs/remote_servers.xml'])
nodes = len(cluster.instances)
queries = nodes*5
def create_tables():
def bootstrap():
for n in cluster.instances.values():
# At startup, server loads configuration files.
#
# However ConfigReloader does not know about already loaded files
# (files is empty()), hence it will always reload the configuration
# just after server starts (+ 2 seconds, reload timeout).
#
# And on configuration reload the clusters will be re-created, so some
# internal stuff will be reset:
# - error_count
# - last_used (round_robin)
#
# And if the reload happens during the round_robin test it will start
# querying from the beginning, so let's issue a config reload just after
# start to avoid a reload in the middle of the test execution.
n.query('SYSTEM RELOAD CONFIG')
n.query('DROP TABLE IF EXISTS data')
n.query('DROP TABLE IF EXISTS dist')
n.query('CREATE TABLE data (key Int) Engine=Memory()')
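Note (editorial addition): a hedged SQL sketch of what bootstrap() effectively runs on every node, per the comment above; the cluster name matches the one queried later in the test:

    -- Trigger the pending config reload up front so it cannot reset
    -- error_count / round-robin state in the middle of the test.
    SYSTEM RELOAD CONFIG;
    SELECT host_name, errors_count FROM system.clusters WHERE cluster = 'replicas_cluster';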
@ -36,7 +51,7 @@ def make_uuid():
def start_cluster():
try:
cluster.start()
create_tables()
bootstrap()
yield cluster
finally:
cluster.shutdown()
@ -112,3 +127,39 @@ def test_load_balancing_round_robin():
unique_nodes.add(get_node(n1, settings={'load_balancing': 'round_robin'}))
assert len(unique_nodes) == nodes, unique_nodes
assert unique_nodes == set(['n1', 'n2', 'n3'])
def test_distributed_replica_max_ignored_errors():
settings = {
'load_balancing': 'in_order',
'prefer_localhost_replica': 0,
'connect_timeout': 2,
'receive_timeout': 2,
'send_timeout': 2,
'idle_connection_timeout': 2,
'tcp_keep_alive_timeout': 2,
'distributed_replica_max_ignored_errors': 0,
'distributed_replica_error_half_life': 60,
}
# initiate connection (if started only this test)
n2.query('SELECT * FROM dist', settings=settings)
cluster.pause_container('n1')
# n1 paused -- skipping, and increment error_count for n1
# but the query succeeds, no need in query_and_get_error()
n2.query('SELECT * FROM dist', settings=settings)
# XXX: due to config reloading we need second time (sigh)
n2.query('SELECT * FROM dist', settings=settings)
# check error_count for n1
assert int(n2.query("""
SELECT errors_count FROM system.clusters
WHERE cluster = 'replicas_cluster' AND host_name = 'n1'
""", settings=settings)) == 1
cluster.unpause_container('n1')
# still n2
assert get_node(n2, settings=settings) == 'n2'
# now n1
settings['distributed_replica_max_ignored_errors'] = 1
assert get_node(n2, settings=settings) == 'n1'
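Note (editorial addition): the test above exercises the distributed_replica_max_ignored_errors setting; a hedged standalone SQL illustration of the same idea, using the test's Distributed table name:

    -- With the default of 0 an erroring replica is skipped; raising the threshold
    -- lets the load balancer keep preferring it despite accumulated errors.
    SELECT * FROM dist
    SETTINGS load_balancing = 'in_order', distributed_replica_max_ignored_errors = 1;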
@ -81,7 +81,10 @@ class TestLiveViewOverDistributedSuite:
client1.expect(prompt)
client1.send(select_query)
client1.expect('"node1",0,0\r\n.*"node1",1,1\r\n.*"node2",0,10\r\n.*"node2",1,11\r\n')
client1.expect('"node1",0,0')
client1.expect('"node1",1,1')
client1.expect('"node2",0,10')
client1.expect('"node2",1,11')
client1.expect(prompt)
client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 2, 3)")
@ -90,7 +93,13 @@ class TestLiveViewOverDistributedSuite:
client2.expect(prompt)
time.sleep(2)
client1.send(select_query)
client1.expect('"node1",0,0\r\n.*"node1",1,1\r\n.*"node1",1,3\r\n.*"node1",2,3\r\n.*"node1",3,3\r\n.*"node2",0,10\r\n.*"node2",1,11\r\n')
client1.expect('"node1",0,0')
client1.expect('"node1",1,1')
client1.expect('"node1",1,3')
client1.expect('"node1",2,3')
client1.expect('"node1",3,3')
client1.expect('"node2",0,10')
client1.expect('"node2",1,11')
client1.expect(prompt)
def test_distributed_over_live_view_order_by_key(self, started_cluster, node, source):
@ -110,7 +119,10 @@ class TestLiveViewOverDistributedSuite:
client1.expect(prompt)
client1.send(select_query)
client1.expect('"node1",0,0\r\n"node2",0,10\r\n"node1",1,1\r\n.*"node2",1,11\r\n')
client1.expect('"node1",0,0')
client1.expect('"node2",0,10')
client1.expect('"node1",1,1')
client1.expect('"node2",1,11')
client1.expect(prompt)
client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 2, 3)")
@ -119,7 +131,13 @@ class TestLiveViewOverDistributedSuite:
client2.expect(prompt)
time.sleep(2)
client1.send(select_query)
client1.expect('"node1",0,0\r\n.*"node2",0,10.*\r\n"node1",1,1\r\n.*"node1",1,3\r\n.*"node2",1,11\r\n.*"node1",2,3\r\n.*"node1",3,3\r\n')
client1.expect('"node1",0,0')
client1.expect('"node2",0,10')
client1.expect('"node1",1,1')
client1.expect('"node1",1,3')
client1.expect('"node2",1,11')
client1.expect('"node1",2,3')
client1.expect('"node1",3,3')
client1.expect(prompt)
def test_distributed_over_live_view_group_by_node(self, started_cluster, node, source):
@ -139,14 +157,16 @@ class TestLiveViewOverDistributedSuite:
client1.expect(prompt)
client1.send(select_query)
client1.expect('"node1",1\r\n"node2",21\r\n')
client1.expect('"node1",1')
client1.expect('"node2",21')
client1.expect(prompt)
client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)")
client2.expect(prompt)
time.sleep(2)
client1.send(select_query)
client1.expect('"node1",3\r\n.*"node2",21\r\n')
client1.expect('"node1",3')
client1.expect('"node2",21')
client1.expect(prompt)
client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)")
@ -155,7 +175,8 @@ class TestLiveViewOverDistributedSuite:
client2.expect(prompt)
time.sleep(2)
client1.send(select_query)
client1.expect('"node1",12\r\n.*"node2",21\r\n')
client1.expect('"node1",12')
client1.expect('"node2",21')
client1.expect(prompt)
def test_distributed_over_live_view_group_by_key(self, started_cluster, node, source):
@ -175,21 +196,27 @@ class TestLiveViewOverDistributedSuite:
client1.expect(prompt)
client1.send(select_query)
client1.expect("0,10\r\n1,12\r\n")
client1.expect('0,10')
client1.expect('1,12')
client1.expect(prompt)
client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)")
client2.expect(prompt)
time.sleep(2)
client1.send(select_query)
client1.expect("0,10\r\n1,12\r\n2,2\r\n")
client1.expect('0,10')
client1.expect('1,12')
client1.expect('2,2')
client1.expect(prompt)
client2.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)")
client2.expect(prompt)
time.sleep(2)
client1.send(select_query)
client1.expect("0,10\r\n.*1,15\r\n.*2,2\r\n.*3,3\r\n")
client1.expect('0,10')
client1.expect('1,15')
client1.expect('2,2')
client1.expect('3,3')
client1.expect(prompt)
def test_distributed_over_live_view_sum(self, started_cluster, node, source):
@ -111,15 +111,28 @@ node2\t1\t11
client1.expect(prompt)
client1.send("WATCH lv FORMAT CSV")
client1.expect('"node1",0,0,1\r\n.*"node1",1,1,1\r\n.*"node2",0,10,1\r\n.*"node2",1,11,1\r\n')
client1.expect('"node1",0,0,1')
client1.expect('"node1",1,1,1')
client1.expect('"node2",0,10,1')
client1.expect('"node2",1,11,1')
client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)")
client2.expect(prompt)
client1.expect('"node1",0,0,2\r\n.*"node1",1,1,2\r\n.*"node1",2,2,2\r\n.*"node2",0,10,2\r\n.*"node2",1,11,2\r\n')
client1.expect('"node1",0,0,2')
client1.expect('"node1",1,1,2')
client1.expect('"node1",2,2,2')
client1.expect('"node2",0,10,2')
client1.expect('"node2",1,11,2')
client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node3', 3, 3)")
client2.expect(prompt)
client1.expect('"node1",0,0,3\r\n.*"node1",0,3,3\r\n.*"node1",1,1,3\r\n.*"node1",2,2,3\r\n.*"node2",0,10,3\r\n.*"node2",1,11,3\r\n.*"node3",3,3,3\r\n')
client1.expect('"node1",0,0,3')
client1.expect('"node1",0,3,3')
client1.expect('"node1",1,1,3')
client1.expect('"node1",2,2,3')
client1.expect('"node2",0,10,3')
client1.expect('"node2",1,11,3')
client1.expect('"node3",3,3,3')
def test_watch_live_view_order_by_key(self, started_cluster, node, source):
log = sys.stdout
@ -141,15 +154,28 @@ node2\t1\t11
client1.expect(prompt)
client1.send("WATCH lv FORMAT CSV")
client1.expect('"node1",0,0,1\r\n.*"node2",0,10,1\r\n.*"node1",1,1,1\r\n.*"node2",1,11,1\r\n')
client1.expect('"node1",0,0,1')
client1.expect('"node2",0,10,1')
client1.expect('"node1",1,1,1')
client1.expect('"node2",1,11,1')
client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)")
client2.expect(prompt)
client1.expect('"node1",0,0,2\r\n.*"node2",0,10,2\r\n.*"node1",1,1,2\r\n.*"node2",1,11,2\r\n.*"node1",2,2,2\r\n')
client1.expect('"node1",0,0,2')
client1.expect('"node2",0,10,2')
client1.expect('"node1",1,1,2')
client1.expect('"node2",1,11,2')
client1.expect('"node1",2,2,2')
client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node3', 3, 3)")
client2.expect(prompt)
client1.expect('"node1",0,0,3\r\n.*"node1",0,3,3\r\n.*"node2",0,10,3\r\n.*"node1",1,1,3\r\n.*"node2",1,11,3\r\n.*"node1",2,2,3\r\n.*"node3",3,3,3\r\n')
client1.expect('"node1",0,0,3')
client1.expect('"node1",0,3,3')
client1.expect('"node2",0,10,3')
client1.expect('"node1",1,1,3')
client1.expect('"node2",1,11,3')
client1.expect('"node1",2,2,3')
client1.expect('"node3",3,3,3')
def test_watch_live_view_group_by_node(self, started_cluster, node, source):
log = sys.stdout
@ -171,15 +197,19 @@ node2\t1\t11
client1.expect(prompt)
client1.send("WATCH lv FORMAT CSV")
client1.expect('"node1",1,1\r\n.*"node2",21,1\r\n')
client1.expect('"node1",1,1')
client1.expect('"node2",21,1')
client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)")
client2.expect(prompt)
client1.expect('"node1",3,2\r\n.*"node2",21,2\r\n')
client1.expect('"node1",3,2')
client1.expect('"node2",21,2')
client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node3', 3, 3)")
client2.expect(prompt)
client1.expect('"node1",6,3\r\n.*"node2",21,3\r\n.*"node3",3,3\r\n')
client1.expect('"node1",6,3')
client1.expect('"node2",21,3')
client1.expect('"node3",3,3')
def test_watch_live_view_group_by_key(self, started_cluster, node, source):
log = sys.stdout
@ -201,15 +231,21 @@ node2\t1\t11
client1.expect(prompt)
client1.send("WATCH lv FORMAT CSV")
client1.expect("0,10,1\r\n.*1,12,1\r\n")
client1.expect('0,10,1')
client1.expect('1,12,1')
client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)")
client2.expect(prompt)
client1.expect("0,10,2\r\n.*1,12,2\r\n.*2,2,2\r\n")
client1.expect('0,10,2')
client1.expect('1,12,2')
client1.expect('2,2,2')
client2.send("INSERT INTO distributed_table VALUES ('node1', 0, 3), ('node1', 3, 3)")
client2.expect(prompt)
client1.expect("0,13,3\r\n.*1,12,3\r\n.*2,2,3\r\n.*3,3,3\r\n")
client1.expect('0,13,3')
client1.expect('1,12,3')
client1.expect('2,2,3')
client1.expect('3,3,3')
def test_watch_live_view_sum(self, started_cluster, node, source):
@ -222,11 +222,16 @@ def test_postgres_odbc_hached_dictionary_no_tty_pipe_overflow(started_cluster):
def test_postgres_insert(started_cluster):
conn = get_postgres_conn()
conn.cursor().execute("truncate table clickhouse.test_table")
node1.query("create table pg_insert (column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;', 'clickhouse', 'test_table')")
# Also test with Servername containing '.' and '-' symbols (defined in
# postgres .yml file). This is needed to check parsing, validation and
# reconstruction of connection string.
node1.query("create table pg_insert (column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')")
node1.query("insert into pg_insert values (1, 'hello'), (2, 'world')")
assert node1.query("select * from pg_insert") == '1\thello\n2\tworld\n'
node1.query("insert into table function odbc('DSN=postgresql_odbc;', 'clickhouse', 'test_table') format CSV 3,test")
node1.query("insert into table function odbc('DSN=postgresql_odbc;', 'clickhouse', 'test_table') select number, 's' || toString(number) from numbers (4, 7)")
node1.query("insert into table function odbc('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table') select number, 's' || toString(number) from numbers (4, 7)")
assert node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n"
assert node1.query("select sum(n), count(n) from (select (*,).1 as n from (select * from odbc('DSN=postgresql_odbc;', 'clickhouse', 'test_table')))") == "55\t10\n"
@ -1,14 +1,13 @@
<test>
<query>SELECT max(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000000)</query>
<query>SELECT max(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000)</query>
<query>SELECT min(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000000)</query>
<query>SELECT min(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000)</query>
<query>SELECT sum(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000000)</query>
<query>SELECT sum(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000)</query>
<query>SELECT min(-1 + (((-2 + (number + -3)) + -4) + -5)) FROM numbers(500000000)</query>
<query>SELECT min(-1 + (((-2 + (number + -3)) + -4) + -5)) FROM numbers(500000)</query>
<query>SELECT max(-1 + (((-2 + (number + -3)) + -4) + -5)) FROM numbers(500000000)</query>
<query>SELECT max(((((number) * 10) * -2) * 3) * 2) + min(((((number) * 10) * -2) * 3) * 2) FROM numbers(500000000)</query>
<query>SELECT max(-1 + (((-2 + (number + -3)) + -4) + -5)) FROM numbers(500000)</query>
<query>SELECT max(((((number) * 10) * -2) * 3) * 2) + min(((((number) * 10) * -2) * 3) * 2) FROM numbers(500000)</query>
</test>
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT * FROM system.numbers LIMIT 3;
SELECT sys_num.number FROM system.numbers AS sys_num WHERE number > 2 LIMIT 2;
@ -2,7 +2,7 @@ CREATE DATABASE IF NOT EXISTS test2_00158;
DROP TABLE IF EXISTS test2_00158.mt_buffer_00158;
DROP TABLE IF EXISTS test2_00158.mt_00158;
CREATE TABLE test2_00158.mt_buffer_00158 (d Date DEFAULT today(), x UInt64) ENGINE = Buffer(test2_00158, mt_00158, 16, 100, 100, 1000000, 1000000, 1000000000, 1000000000);
SET send_logs_level = 'none'; -- Suppress "Destination table test2.mt doesn't exist. Block of data is discarded."
SET send_logs_level = 'fatal'; -- Suppress "Destination table test2.mt doesn't exist. Block of data is discarded."
INSERT INTO test2_00158.mt_buffer_00158 (x) SELECT number AS x FROM system.numbers LIMIT 100000;
INSERT INTO test2_00158.mt_buffer_00158 (x) SELECT number AS x FROM system.numbers LIMIT 1000000;
DROP TABLE IF EXISTS test2_00158.mt_buffer_00158;
@ -1,3 +1,3 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT count() FROM remote('{127,1}.0.0.{2,3}', system.one) SETTINGS skip_unavailable_shards = 1;
SELECT count() FROM remote('{1,127}.0.0.{2,3}', system.one) SETTINGS skip_unavailable_shards = 1;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT (SELECT (SELECT (SELECT (SELECT (SELECT count() FROM (SELECT * FROM system.numbers LIMIT 10)))))) = (SELECT 10), ((SELECT 1, 'Hello', [1, 2]).3)[1];
SELECT toUInt64((SELECT 9)) IN (SELECT number FROM system.numbers LIMIT 10);
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
select 1 = position('', '');
select 1 = position('abc', '');
select 0 = position('', 'abc');
@ -2,7 +2,7 @@ DROP TEMPORARY TABLE IF EXISTS temp_tab;
CREATE TEMPORARY TABLE temp_tab (number UInt64);
INSERT INTO temp_tab SELECT number FROM system.numbers LIMIT 1;
SELECT number FROM temp_tab;
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
EXISTS TEMPORARY TABLE temp_tab;
DROP TABLE temp_tab;
EXISTS TEMPORARY TABLE temp_tab;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS sum_map;
CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = Log;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET any_join_distinct_right_table_keys = 1;
SET joined_subquery_requires_alias = 0;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT countMerge(x) AS y FROM ( SELECT countState() * 2 AS x FROM ( SELECT 1 ));
SELECT countMerge(x) AS y FROM ( SELECT countState() * 0 AS x FROM ( SELECT 1 UNION ALL SELECT 2));
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT if(); -- { serverError 42 }
SELECT if(1); -- { serverError 42 }
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET max_block_size = 0;
SELECT number FROM system.numbers; -- { serverError 12 }
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS mergetree_00698;
CREATE TABLE mergetree_00698 (k UInt32, `n.x` Array(UInt64), `n.y` Array(UInt64)) ENGINE = MergeTree ORDER BY k;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS Issue_2231_Invalid_Nested_Columns_Size;
CREATE TABLE Issue_2231_Invalid_Nested_Columns_Size (
@ -1,7 +1,7 @@
DROP TABLE IF EXISTS a1;
DROP TABLE IF EXISTS a2;
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
CREATE TABLE a1(a UInt8, b UInt8) ENGINE=Memory;
CREATE TABLE a2(a UInt8, b UInt8) ENGINE=Memory;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS add_aggregate;
CREATE TABLE add_aggregate(a UInt32, b UInt32) ENGINE = Memory;
@ -37,7 +37,7 @@ ENGINE = MergeTree
PARTITION BY toDate(created_at)
ORDER BY (created_at, id0, id1);
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
ALTER TABLE uuid MODIFY COLUMN id0 UUID; -- { serverError 524 }
ALTER TABLE uuid MODIFY COLUMN id1 UUID; -- { serverError 524 }
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET allow_ddl = 0;
CREATE DATABASE some_db; -- { serverError 392 }
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS test_local_1;
DROP TABLE IF EXISTS test_local_2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT formatDateTime(); -- { serverError 42 }
SELECT formatDateTime('not a datetime', 'IGNORED'); -- { serverError 43 }
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
drop table if exists t1_00729;
create table t1_00729 (id UInt64, val Array(String),nid UInt64, eDate Date)ENGINE = MergeTree(eDate, (id, eDate), 8192);
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT base64Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val);
SELECT base64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val);
SELECT base64Decode(base64Encode('foo')) = 'foo', base64Encode(base64Decode('Zm9v')) == 'Zm9v';
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS quorum1;
DROP TABLE IF EXISTS quorum2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS quorum1;
DROP TABLE IF EXISTS quorum2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS quorum1;
DROP TABLE IF EXISTS quorum2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS quorum1;
DROP TABLE IF EXISTS quorum2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS quorum1;
DROP TABLE IF EXISTS quorum2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS quorum1;
DROP TABLE IF EXISTS quorum2;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT 'value vs value';
@ -1,3 +1,3 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET join_default_strictness = '';
SELECT * FROM system.one INNER JOIN (SELECT number AS k FROM system.numbers) js2 ON dummy = k; -- { serverError 417 }
@ -1,6 +1,6 @@
-- check ALTER MODIFY COLUMN with partitions
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS alter_column;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS old_style;
CREATE TABLE old_style(d Date, x UInt32) ENGINE MergeTree(d, x, 8192);
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS old_style;
CREATE TABLE old_style(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/old_style', 'r1', d, x, 8192);
@ -11,5 +11,5 @@ SELECT reverse(NULL);
SELECT reverse([]);
SELECT reverse([[[[]]]]);
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SELECT '[RE7', ( SELECT '\0' ) AS riwwq, ( SELECT reverse([( SELECT bitTestAll(NULL) ) , ( SELECT '\0' ) AS ddfweeuy]) ) AS xuvv, '', ( SELECT * FROM file() ) AS wqgdswyc, ( SELECT * FROM file() ); -- { serverError 42 }
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
select today() < 2018-11-14; -- { serverError 43 }
select toDate('2018-01-01') < '2018-11-14';
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
select lcase('FOO');
select ucase('foo');
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
DROP TABLE IF EXISTS alter_compression_codec;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET allow_suspicious_codecs = 1;
DROP TABLE IF EXISTS compression_codec;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET allow_suspicious_codecs = 1;
-- copy-paste for storage log
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET joined_subquery_requires_alias = 0;
DROP TABLE IF EXISTS delta_codec_synthetic;
@ -1,4 +1,4 @@
SET send_logs_level = 'none';
SET send_logs_level = 'fatal';
SET allow_suspicious_codecs = 1;
DROP TABLE IF EXISTS delta_codec_for_alter;
Some files were not shown because too many files have changed in this diff.