mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 09:32:01 +00:00)

commit 9d46547daa: Merge branch 'master' into session-log-support-sslcertificate-auth
.github/workflows/nightly.yml (vendored, 55 changed lines)
@@ -122,3 +122,58 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
  SonarCloud:
    runs-on: [self-hosted, builder]
    env:
      SONAR_SCANNER_VERSION: 4.7.0.2747
      SONAR_SERVER_URL: "https://sonarcloud.io"
      BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed
      CC: clang-15
      CXX: clang++-15
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
          submodules: true
      - name: Set up JDK 11
        uses: actions/setup-java@v1
        with:
          java-version: 11
      - name: Download and set up sonar-scanner
        env:
          SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux.zip
        run: |
          mkdir -p "$HOME/.sonar"
          curl -sSLo "$HOME/.sonar/sonar-scanner.zip" "${{ env.SONAR_SCANNER_DOWNLOAD_URL }}"
          unzip -o "$HOME/.sonar/sonar-scanner.zip" -d "$HOME/.sonar/"
          echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux/bin" >> "$GITHUB_PATH"
      - name: Download and set up build-wrapper
        env:
          BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip
        run: |
          curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}"
          unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/"
          echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH"
      - name: Set Up Build Tools
        run: |
          sudo apt-get update
          sudo apt-get install -yq git cmake ccache python3 ninja-build
          sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
      - name: Run build-wrapper
        run: |
          mkdir build
          cd build
          cmake ..
          cd ..
          build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/
      - name: Run sonar-scanner
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
        run: |
          sonar-scanner \
            --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \
            --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \
            --define sonar.projectKey="ClickHouse_ClickHouse" \
            --define sonar.organization="clickhouse-java" \
            --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql"
@@ -5,7 +5,7 @@ ClickHouse® is an open-source column-oriented database management system that a

## Useful Links

* [Official website](https://clickhouse.com/) has a quick high-level overview of ClickHouse on the main page.
* [ClickHouse Cloud](https://clickhouse.com/cloud) ClickHouse as a service, built by the creators and maintainers.
* [ClickHouse Cloud](https://clickhouse.cloud) ClickHouse as a service, built by the creators and maintainers.
* [Tutorial](https://clickhouse.com/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
@@ -16,5 +16,6 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

## Upcoming events
* [**v22.10 Release Webinar**](https://clickhouse.com/company/events/v22-10-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
* [**Introducing ClickHouse Cloud**](https://clickhouse.com/company/events/cloud-beta) Introducing ClickHouse as a service, built by creators and maintainers of the fastest OLAP database on earth. Join Tanya Bragin for a detailed walkthrough of ClickHouse Cloud capabilities, as well as a peek behind the curtain to understand the unique architecture that makes our service tick.
* [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
* [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
* [**AWS re:Invent**](https://clickhouse.com/company/events/aws-reinvent) Core members of the ClickHouse team -- including 2 of our founders -- will be at re:Invent from November 29 to December 3. We are available on the show floor, but are also determining interest in holding an event during the time there.
contrib/cctz (vendored, 2 changed lines)
@@ -1 +1 @@
Subproject commit 7a454c25c7d16053bcd327cdd16329212a08fa4a
Subproject commit 5c8528fb35e89ee0b3a7157490423fba0d4dd7b5
@@ -21,6 +21,9 @@ set (LLVM_INCLUDE_DIRS
    "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm/include"
)
set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
# NOTE: You should not remove this line since otherwise it will use default 20,
# and llvm cannot be compiled with bundled libcxx and 20 standard.
set (CMAKE_CXX_STANDARD 14)

# This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
set (REQUIRED_LLVM_LIBRARIES
docs/changelogs/v22.3.14.18-lts.md (new file, 26 lines)
@@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.3.14.18-lts (642946f61b2) FIXME as compared to v22.3.13.80-lts (e2708b01fba)

#### Bug Fix
* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).

#### Build/Testing/Packaging Improvement
* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
docs/changelogs/v22.3.14.23-lts.md (new file, 29 lines)
@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.3.14.23-lts (74956bfee4d) FIXME as compared to v22.3.13.80-lts (e2708b01fba)

#### Improvement
* Backported in [#42527](https://github.com/ClickHouse/ClickHouse/issues/42527): Fix issue with passing MySQL timeouts for MySQL database engine and MySQL table function. Closes [#34168](https://github.com/ClickHouse/ClickHouse/issues/34168)?notification_referrer_id=NT_kwDOAzsV57MzMDMxNjAzNTY5OjU0MjAzODc5. [#40751](https://github.com/ClickHouse/ClickHouse/pull/40751) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Bug Fix
* Backported in [#42432](https://github.com/ClickHouse/ClickHouse/issues/42432): - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).

#### Build/Testing/Packaging Improvement
* Backported in [#42328](https://github.com/ClickHouse/ClickHouse/issues/42328): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
* Backported in [#42358](https://github.com/ClickHouse/ClickHouse/issues/42358): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#42298](https://github.com/ClickHouse/ClickHouse/issues/42298): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#42592](https://github.com/ClickHouse/ClickHouse/issues/42592): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@@ -139,7 +139,7 @@ The following settings can be specified in configuration file for given endpoint
- `use_environment_credentials` — If set to `true`, S3 client will try to obtain credentials from environment variables and [Amazon EC2](https://en.wikipedia.org/wiki/Amazon_Elastic_Compute_Cloud) metadata for given endpoint. Optional, default value is `false`.
- `region` — Specifies S3 region name. Optional.
- `use_insecure_imds_request` — If set to `true`, S3 client will use insecure IMDS request while obtaining credentials from Amazon EC2 metadata. Optional, default value is `false`.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be speficied multiple times.
- `header` — Adds specified HTTP header to a request to given endpoint. Optional, can be specified multiple times.
- `server_side_encryption_customer_key_base64` — If specified, required headers for accessing S3 objects with SSE-C encryption will be set. Optional.
- `max_single_read_retries` — The maximum number of attempts during single read. Default value is `4`. Optional.
@@ -1244,7 +1244,7 @@ Result:
└──────────────────────────┘
```

When there are two arguments: first is an [Integer](../../sql-reference/data-types/int-uint.md) or [DateTime](../../sql-reference/data-types/datetime.md), second is a constant format string — it acts in the same way as [formatDateTime](#formatdatetime) and return [String](../../sql-reference/data-types/string.md#string) type.
When there are two or three arguments, the first an [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string and the third an optional constant time zone string — it acts in the same way as [formatDateTime](#formatdatetime) and return [String](../../sql-reference/data-types/string.md#string) type.

For example:
@@ -107,7 +107,7 @@ ALTER TABLE visits RENAME COLUMN webBrowser TO browser
CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name
```

Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to set the partition expression](partition.md#how-to-set-partition-expression).

If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.
@@ -204,8 +204,9 @@ It is used if it is necessary to add or update a column with a complicated expre
Syntax:

```sql
ALTER TABLE table MATERIALIZE COLUMN col;
ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE COLUMN col [IN PARTITION partition | IN PARTITION ID 'partition_id'];
```
- If you specify a PARTITION, a column will be materialized with only the specified partition.

**Example**
@@ -39,7 +39,7 @@ ALTER TABLE mt DETACH PARTITION '2020-11-21';
ALTER TABLE mt DETACH PART 'all_2_2_0';
```

Read about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it.
@@ -53,7 +53,7 @@ ALTER TABLE table_name [ON CLUSTER cluster] DROP PARTITION|PART partition_expr

Deletes the specified partition from the table. This query tags the partition as inactive and deletes data completely, approximately in 10 minutes.

Read about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

The query is replicated – it deletes data on all replicas.
@@ -71,7 +71,7 @@ ALTER TABLE table_name [ON CLUSTER cluster] DROP DETACHED PARTITION|PART partiti
```

Removes the specified part or all parts of the specified partition from `detached`.
Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read more about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

## ATTACH PARTITION\|PART
@@ -86,7 +86,7 @@ ALTER TABLE visits ATTACH PARTITION 201901;
ALTER TABLE visits ATTACH PART 201901_2_2_0;
```

Read more about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Read more about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

This query is replicated. The replica-initiator checks whether there is data in the `detached` directory.
If data exists, the query checks its integrity. If everything is correct, the query adds the data to the table.
@@ -166,7 +166,7 @@ This query creates a local backup of a specified partition. If the `PARTITION` c
The entire backup process is performed without stopping the server.
:::

Note that for old-styled tables you can specify the prefix of the partition name (for example, `2019`) - then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in a section [How to specify the partition expression](#alter-how-to-specify-part-expr).
Note that for old-styled tables you can specify the prefix of the partition name (for example, `2019`) - then the query creates the backup for all the corresponding partitions. Read about setting the partition expression in a section [How to set the partition expression](#how-to-set-partition-expression).

At the time of execution, for a data snapshot, the query creates hardlinks to a table data. Hardlinks are placed in the directory `/var/lib/clickhouse/shadow/N/...`, where:
@@ -22,7 +22,7 @@ The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/me
When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `2`) or on current replica (if the [replication_alter_partitions_sync](../../operations/settings/settings.md#replication-alter-partitions-sync) setting is set to `1`).

- If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](../../sql-reference/statements/alter/index.md#alter-how-to-specify-part-expr).
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](alter/partition.md#how-to-set-partition-expression).
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also merge is forced even if concurrent merges are performed.
- If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine.
@@ -1126,8 +1126,7 @@ SELECT FROM_UNIXTIME(423543535);
└──────────────────────────┘
```

When there are two arguments: the first of type [Integer](../../sql-reference/data-types/int-uint.md) or [DateTime](../../sql-reference/data-types/datetime.md) and the second a constant format string — the function works in the same way as [formatDateTime](#formatdatetime) and returns a value of type [String](../../sql-reference/data-types/string.md#string).

When there are two or three arguments: the first of type [Integer](../../sql-reference/data-types/int-uint.md), [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md), the second a constant format string and the third a constant time zone string — the function works in the same way as [formatDateTime](#formatdatetime) and returns a value of type [String](../../sql-reference/data-types/string.md#string).

Query:
@@ -10,6 +10,7 @@
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/thread_local_rng.h>
|
||||
#include <Common/OvercommitTracker.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
#include "config.h"
|
||||
@@ -86,6 +87,8 @@ inline std::string_view toDescription(OvercommitResult result)
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event QueryMemoryLimitExceeded;
|
||||
extern const Event MemoryAllocatorPurge;
|
||||
extern const Event MemoryAllocatorPurgeTimeMicroseconds;
|
||||
}
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
@@ -229,7 +232,10 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
|
||||
{
|
||||
if (free_memory_in_allocator_arenas.exchange(-current_free_memory_in_allocator_arenas) > 0)
|
||||
{
|
||||
Stopwatch watch;
|
||||
mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
|
||||
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
|
||||
ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, watch.elapsedMicroseconds());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -432,7 +438,7 @@ void MemoryTracker::reset()
|
||||
|
||||
void MemoryTracker::setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_)
|
||||
{
|
||||
Int64 new_amount = rss_; // - free_memory_in_allocator_arenas_;
|
||||
Int64 new_amount = rss_;
|
||||
total_memory_tracker.amount.store(new_amount, std::memory_order_relaxed);
|
||||
free_memory_in_allocator_arenas.store(free_memory_in_allocator_arenas_, std::memory_order_relaxed);
|
||||
|
||||
|
@@ -229,6 +229,8 @@ The server successfully detected this situation and will download merged part fr
|
||||
M(UserTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in user space. This include time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
|
||||
M(SystemTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel space. This include time CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
|
||||
M(MemoryOvercommitWaitTimeMicroseconds, "Total time spent in waiting for memory to be freed in OvercommitTracker.") \
|
||||
M(MemoryAllocatorPurge, "Total number of times memory allocator purge was requested") \
|
||||
M(MemoryAllocatorPurgeTimeMicroseconds, "Total number of times memory allocator purge was requested") \
|
||||
M(SoftPageFaults, "The number of soft page faults in query execution threads. Soft page fault usually means a miss in the memory allocator cache which required a new memory mapping from the OS and subsequent allocation of a page of physical memory.") \
|
||||
M(HardPageFaults, "The number of hard page faults in query execution threads. High values indicate either that you forgot to turn off swap on your server, or eviction of memory pages of the ClickHouse binary during very high memory pressure, or successful usage of the 'mmap' read method for the tables data.") \
|
||||
\
|
||||
|
@@ -3,7 +3,6 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/Stopwatch.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <cmath>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
@@ -21,63 +20,56 @@ namespace ErrorCodes
|
||||
/// Just 10^9.
|
||||
static constexpr auto NS = 1000000000UL;
|
||||
|
||||
/// Tracking window. Actually the size is not really important. We just want to avoid
|
||||
/// throttles when there are no actions for a long period time.
|
||||
static const double window_ns = 1ULL * NS;
|
||||
static const size_t default_burst_seconds = 1;
|
||||
|
||||
Throttler::Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent_)
|
||||
: max_speed(max_speed_)
|
||||
, max_burst(max_speed_ * default_burst_seconds)
|
||||
, limit_exceeded_exception_message("")
|
||||
, tokens(max_burst)
|
||||
, parent(parent_)
|
||||
{}
|
||||
|
||||
Throttler::Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_,
|
||||
const std::shared_ptr<Throttler> & parent_)
|
||||
: max_speed(max_speed_)
|
||||
, max_burst(max_speed_ * default_burst_seconds)
|
||||
, limit(limit_)
|
||||
, limit_exceeded_exception_message(limit_exceeded_exception_message_)
|
||||
, tokens(max_burst)
|
||||
, parent(parent_)
|
||||
{}
|
||||
|
||||
void Throttler::add(size_t amount)
|
||||
{
|
||||
size_t new_count;
|
||||
/// This outer variable is always equal to smoothed_speed.
|
||||
/// We use to avoid race condition.
|
||||
double current_speed = 0;
|
||||
|
||||
// Values obtained under lock to be checked after release
|
||||
size_t count_value;
|
||||
double tokens_value;
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
auto now = clock_gettime_ns_adjusted(prev_ns);
|
||||
/// If prev_ns is equal to zero (first `add` call) we known nothing about speed
|
||||
/// and don't track anything.
|
||||
if (max_speed && prev_ns != 0)
|
||||
if (max_speed)
|
||||
{
|
||||
/// Time spent to process the amount of bytes
|
||||
double time_spent = now - prev_ns;
|
||||
|
||||
/// The speed in bytes per second is equal to amount / time_spent in seconds
|
||||
auto new_speed = amount / (time_spent / NS);
|
||||
|
||||
/// We want to make old values of speed less important for our smoothed value
|
||||
/// so we decay it's value with coef.
|
||||
auto decay_coeff = std::pow(0.5, time_spent / window_ns);
|
||||
|
||||
/// Weighted average between previous and new speed
|
||||
smoothed_speed = smoothed_speed * decay_coeff + (1 - decay_coeff) * new_speed;
|
||||
current_speed = smoothed_speed;
|
||||
double delta_seconds = prev_ns ? static_cast<double>(now - prev_ns) / NS : 0;
|
||||
tokens = std::min<double>(tokens + max_speed * delta_seconds - amount, max_burst);
|
||||
}
|
||||
|
||||
count += amount;
|
||||
new_count = count;
|
||||
count_value = count;
|
||||
tokens_value = tokens;
|
||||
prev_ns = now;
|
||||
}
|
||||
|
||||
if (limit && new_count > limit)
|
||||
if (limit && count_value > limit)
|
||||
throw Exception(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED);
|
||||
|
||||
if (max_speed && current_speed > max_speed)
|
||||
/// Wait unless there is positive amount of tokens - throttling
|
||||
if (max_speed && tokens_value < 0)
|
||||
{
|
||||
/// If we was too fast then we have to sleep until our smoothed speed became <= max_speed
|
||||
int64_t sleep_time = static_cast<int64_t>(-window_ns * std::log2(max_speed / current_speed));
|
||||
|
||||
if (sleep_time > 0)
|
||||
{
|
||||
accumulated_sleep += sleep_time;
|
||||
|
||||
sleepForNanoseconds(sleep_time);
|
||||
|
||||
accumulated_sleep -= sleep_time;
|
||||
|
||||
ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time / 1000UL);
|
||||
}
|
||||
int64_t sleep_time = static_cast<int64_t>(-tokens_value / max_speed * NS);
|
||||
accumulated_sleep += sleep_time;
|
||||
sleepForNanoseconds(sleep_time);
|
||||
accumulated_sleep -= sleep_time;
|
||||
ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time / 1000UL);
|
||||
}
|
||||
|
||||
if (parent)
|
||||
@@ -89,9 +81,9 @@ void Throttler::reset()
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
count = 0;
|
||||
accumulated_sleep = 0;
|
||||
smoothed_speed = 0;
|
||||
tokens = max_burst;
|
||||
prev_ns = 0;
|
||||
// NOTE: do not zero `accumulated_sleep` to avoid races
|
||||
}
|
||||
|
||||
bool Throttler::isThrottling() const
|
||||
|
@@ -10,25 +10,26 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/** Allows you to limit the speed of something (in entities per second) using sleep.
|
||||
* Specifics of work:
|
||||
* Tracks exponentially (pow of 1/2) smoothed speed with hardcoded window.
|
||||
* See more comments in .cpp file.
|
||||
*
|
||||
* Also allows you to set a limit on the maximum number of entities. If exceeded, an exception will be thrown.
|
||||
/** Allows you to limit the speed of something (in tokens per second) using sleep.
|
||||
* Implemented using Token Bucket Throttling algorithm.
|
||||
* Also allows you to set a limit on the maximum number of tokens. If exceeded, an exception will be thrown.
|
||||
*/
|
||||
class Throttler
|
||||
{
|
||||
public:
|
||||
explicit Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent_) {}
|
||||
Throttler(size_t max_speed_, size_t max_burst_, const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), max_burst(max_burst_), limit_exceeded_exception_message(""), tokens(max_burst), parent(parent_) {}
|
||||
|
||||
explicit Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent_ = nullptr);
|
||||
|
||||
Throttler(size_t max_speed_, size_t max_burst_, size_t limit_, const char * limit_exceeded_exception_message_,
|
||||
const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), max_burst(max_burst_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), tokens(max_burst), parent(parent_) {}
|
||||
|
||||
Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_,
|
||||
const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {}
|
||||
const std::shared_ptr<Throttler> & parent_ = nullptr);
|
||||
|
||||
/// Calculates the smoothed speed, sleeps if required and throws exception on
|
||||
/// limit overflow.
|
||||
/// Use `amount` tokens, sleeps if required or throws exception on limit overflow.
|
||||
void add(size_t amount);
|
||||
|
||||
/// Not thread safe
|
||||
@@ -45,15 +46,14 @@ public:
|
||||
|
||||
private:
|
||||
size_t count{0};
|
||||
const size_t max_speed{0};
|
||||
const uint64_t limit{0}; /// 0 - not limited.
|
||||
const size_t max_speed{0}; /// in tokens per second.
|
||||
const size_t max_burst{0}; /// in tokens.
|
||||
const uint64_t limit{0}; /// 0 - not limited.
|
||||
const char * limit_exceeded_exception_message = nullptr;
|
||||
std::mutex mutex;
|
||||
std::atomic<uint64_t> accumulated_sleep{0};
|
||||
/// Smoothed value of current speed. Updated in `add` method.
|
||||
double smoothed_speed{0};
|
||||
/// previous `add` call time (in nanoseconds)
|
||||
uint64_t prev_ns{0};
|
||||
std::atomic<uint64_t> accumulated_sleep{0}; // Accumulated sleep time over all waiting threads
|
||||
double tokens{0}; /// Amount of tokens available in token bucket. Updated in `add` method.
|
||||
uint64_t prev_ns{0}; /// Previous `add` call time (in nanoseconds).
|
||||
|
||||
/// Used to implement a hierarchy of throttlers
|
||||
std::shared_ptr<Throttler> parent;
|
||||
|
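The rewritten Throttler above replaces the smoothed-speed heuristic with token bucket throttling: the bucket refills at `max_speed` tokens per second up to `max_burst`, each `add(amount)` spends `amount` tokens, and a negative balance is paid off by sleeping for `-tokens / max_speed` seconds. A minimal standalone sketch of that update rule is shown below; it is not the ClickHouse class itself, and the chrono-based clock and the `TokenBucket` name are illustrative only.

```cpp
#include <algorithm>
#include <chrono>
#include <thread>

/// Minimal token-bucket limiter sketch (illustrative, not the ClickHouse Throttler).
class TokenBucket
{
public:
    TokenBucket(double max_speed_, double max_burst_)
        : max_speed(max_speed_), max_burst(max_burst_), tokens(max_burst_) {}

    void add(double amount)
    {
        auto now = std::chrono::steady_clock::now();
        double delta_seconds = 0.0;
        if (prev.time_since_epoch().count() != 0)
            delta_seconds = std::chrono::duration<double>(now - prev).count();

        /// Refill at max_speed, spend `amount`, never store more than max_burst
        /// (mirrors the single-line update in Throttler::add above).
        tokens = std::min(tokens + max_speed * delta_seconds - amount, max_burst);
        prev = now;

        /// A negative balance means we went faster than max_speed: sleep until it is repaid.
        if (max_speed > 0 && tokens < 0)
            std::this_thread::sleep_for(std::chrono::duration<double>(-tokens / max_speed));
    }

private:
    const double max_speed;   /// tokens (e.g. bytes) per second
    const double max_burst;   /// maximum stored tokens
    double tokens;            /// current balance, may go negative
    std::chrono::steady_clock::time_point prev{};
};
```

With the default burst of one second (`max_burst = max_speed * default_burst_seconds` in the constructors above), a short spike of up to one second's worth of data passes without sleeping, while sustained traffic converges to `max_speed`.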
@@ -55,6 +55,9 @@ bool MetadataStorageFromStaticFilesWebServer::exists(const std::string & path) c
|
||||
path,
|
||||
[](const auto & file, const std::string & path_) { return file.first < path_; }
|
||||
);
|
||||
if (it == object_storage.files.end())
|
||||
return false;
|
||||
|
||||
if (startsWith(it->first, path)
|
||||
|| (it != object_storage.files.begin() && startsWith(std::prev(it)->first, path)))
|
||||
return true;
|
||||
|
@@ -168,10 +168,7 @@ public:
|
||||
inline ResultValueType apply(const size_t i) const
|
||||
{
|
||||
const auto a = !!vec[i];
|
||||
if constexpr (Op::isSaturable())
|
||||
return Op::isSaturatedValue(a) ? a : Op::apply(a, next.apply(i));
|
||||
else
|
||||
return Op::apply(a, next.apply(i));
|
||||
return Op::apply(a, next.apply(i));
|
||||
}
|
||||
|
||||
private:
|
||||
|
@@ -1,5 +1,6 @@
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDate32.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeDateTime64.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
@@ -45,6 +46,7 @@ template <> struct ActionValueTypeMap<DataTypeUInt32> { using ActionValueTyp
|
||||
template <> struct ActionValueTypeMap<DataTypeInt64> { using ActionValueType = UInt32; };
|
||||
template <> struct ActionValueTypeMap<DataTypeUInt64> { using ActionValueType = UInt32; };
|
||||
template <> struct ActionValueTypeMap<DataTypeDate> { using ActionValueType = UInt16; };
|
||||
template <> struct ActionValueTypeMap<DataTypeDate32> { using ActionValueType = Int32; };
|
||||
template <> struct ActionValueTypeMap<DataTypeDateTime> { using ActionValueType = UInt32; };
|
||||
// TODO(vnemkov): to add sub-second format instruction, make that DateTime64 and do some math in Action<T>.
|
||||
template <> struct ActionValueTypeMap<DataTypeDateTime64> { using ActionValueType = Int64; };
|
||||
@@ -315,44 +317,39 @@ public:
|
||||
if constexpr (support_integer)
|
||||
{
|
||||
if (arguments.size() != 1 && arguments.size() != 2 && arguments.size() != 3)
|
||||
throw Exception(
|
||||
"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
|
||||
+ ", should be 1, 2 or 3",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Number of arguments for function {} doesn't match: passed {}, should be 1, 2 or 3",
|
||||
getName(), arguments.size());
|
||||
if (arguments.size() == 1 && !isInteger(arguments[0].type))
|
||||
throw Exception(
|
||||
"Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName()
|
||||
+ " when arguments size is 1. Should be integer",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDateTime64(arguments[0].type)))
|
||||
throw Exception(
|
||||
"Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName()
|
||||
+ " when arguments size is 2 or 3. Should be a integer or a date with time",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of first argument of function {} when arguments size is 1. Should be integer",
|
||||
arguments[0].type->getName(), getName());
|
||||
if (arguments.size() > 1 && !(isInteger(arguments[0].type) || isDate(arguments[0].type) || isDateTime(arguments[0].type) || isDate32(arguments[0].type) || isDateTime64(arguments[0].type)))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of first argument of function {} when arguments size is 2 or 3. Should be a integer or a date with time",
|
||||
arguments[0].type->getName(), getName());
|
||||
}
|
||||
else
|
||||
{
|
||||
if (arguments.size() != 2 && arguments.size() != 3)
|
||||
throw Exception(
|
||||
"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
|
||||
+ ", should be 2 or 3",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type))
|
||||
throw Exception(
|
||||
"Illegal type " + arguments[0].type->getName() + " of 1 argument of function " + getName()
|
||||
+ ". Should be a date or a date with time",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Number of arguments for function {} doesn't match: passed {}, should be 2 or 3",
|
||||
getName(), arguments.size());
|
||||
if (!isDate(arguments[0].type) && !isDateTime(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime64(arguments[0].type))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of first argument of function {}. Should be a date or a date with time",
|
||||
arguments[0].type->getName(), getName());
|
||||
}
|
||||
|
||||
if (arguments.size() == 2 && !WhichDataType(arguments[1].type).isString())
|
||||
throw Exception(
|
||||
"Illegal type " + arguments[1].type->getName() + " of 2 argument of function " + getName() + ". Must be String.",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of second argument of function {}. Must be String.",
|
||||
arguments[1].type->getName(), getName());
|
||||
|
||||
if (arguments.size() == 3 && !WhichDataType(arguments[2].type).isString())
|
||||
throw Exception(
|
||||
"Illegal type " + arguments[2].type->getName() + " of 3 argument of function " + getName() + ". Must be String.",
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of third argument of function {}. Must be String.",
|
||||
arguments[2].type->getName(), getName());
|
||||
|
||||
if (arguments.size() == 1)
|
||||
return std::make_shared<DataTypeDateTime>();
|
||||
@@ -373,10 +370,9 @@ public:
|
||||
return true;
|
||||
}))
|
||||
{
|
||||
throw Exception(
|
||||
"Illegal column " + arguments[0].column->getName() + " of function " + getName()
|
||||
+ ", must be Integer or DateTime when arguments size is 1.",
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64 when arguments size is 1.",
|
||||
arguments[0].column->getName(), getName());
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -385,32 +381,31 @@ public:
|
||||
{
|
||||
using FromDataType = std::decay_t<decltype(type)>;
|
||||
if (!(res = executeType<FromDataType>(arguments, result_type)))
|
||||
throw Exception(
|
||||
"Illegal column " + arguments[0].column->getName() + " of function " + getName()
|
||||
+ ", must be Integer or DateTime.",
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of function {}, must be Integer, Date, Date32, DateTime or DateTime64.",
|
||||
arguments[0].column->getName(), getName());
|
||||
return true;
|
||||
}))
|
||||
{
|
||||
if (!((res = executeType<DataTypeDate>(arguments, result_type))
|
||||
|| (res = executeType<DataTypeDate32>(arguments, result_type))
|
||||
|| (res = executeType<DataTypeDateTime>(arguments, result_type))
|
||||
|| (res = executeType<DataTypeDateTime64>(arguments, result_type))))
|
||||
throw Exception(
|
||||
"Illegal column " + arguments[0].column->getName() + " of function " + getName()
|
||||
+ ", must be Integer or DateTime.",
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of function {}, must be Integer or DateTime.",
|
||||
arguments[0].column->getName(), getName());
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!((res = executeType<DataTypeDate>(arguments, result_type))
|
||||
|| (res = executeType<DataTypeDate32>(arguments, result_type))
|
||||
|| (res = executeType<DataTypeDateTime>(arguments, result_type))
|
||||
|| (res = executeType<DataTypeDateTime64>(arguments, result_type))))
|
||||
throw Exception(
|
||||
"Illegal column " + arguments[0].column->getName() + " of function " + getName()
|
||||
+ ", must be Date or DateTime.",
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of function {}, must be Date or DateTime.",
|
||||
arguments[0].column->getName(), getName());
|
||||
}
|
||||
|
||||
return res;
|
||||
@@ -425,10 +420,9 @@ public:
|
||||
|
||||
const ColumnConst * pattern_column = checkAndGetColumnConst<ColumnString>(arguments[1].column.get());
|
||||
if (!pattern_column)
|
||||
throw Exception("Illegal column " + arguments[1].column->getName()
|
||||
+ " of second ('format') argument of function " + getName()
|
||||
+ ". Must be constant string.",
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Illegal column {} of second ('format') argument of function {}. Must be constant string.",
|
||||
arguments[1].column->getName(), getName());
|
||||
|
||||
String pattern = pattern_column->getValue<String>();
|
||||
|
||||
@@ -712,12 +706,14 @@ public:
|
||||
// Unimplemented
|
||||
case 'U': [[fallthrough]];
|
||||
case 'W':
|
||||
throw Exception("Wrong pattern '" + pattern + "', symbol '" + *pos + " is not implemented ' for function " + getName(),
|
||||
ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
|
||||
"Wrong pattern '{}', symbol '{}' is not implemented for function {}",
|
||||
pattern, *pos, getName());
|
||||
|
||||
default:
|
||||
throw Exception(
|
||||
"Wrong pattern '" + pattern + "', unexpected symbol '" + *pos + "' for function " + getName(), ErrorCodes::ILLEGAL_COLUMN);
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
|
||||
"Wrong pattern '{}', unexpected symbol '{}' for function {}",
|
||||
pattern, *pos, getName());
|
||||
}
|
||||
|
||||
++pos;
|
||||
|
@@ -703,19 +703,26 @@ void AsynchronousMetrics::update(TimePoint update_time)
|
||||
Int64 free_memory_in_allocator_arenas = 0;
|
||||
|
||||
#if USE_JEMALLOC
|
||||
/// This is a memory which is kept by allocator.
|
||||
/// Will subsract it from RSS to decrease memory drift.
|
||||
/// According to jemalloc man, pdirty is:
|
||||
///
|
||||
/// Number of pages within unused extents that are potentially
|
||||
/// dirty, and for which madvise() or similar has not been called.
|
||||
///
|
||||
/// So they will be subtracted from RSS to make accounting more
|
||||
/// accurate, since those pages are not really RSS but a memory
|
||||
/// that can be used at anytime via jemalloc.
|
||||
free_memory_in_allocator_arenas = je_malloc_pdirty * getPageSize();
|
||||
#endif
|
||||
|
||||
Int64 difference = rss - free_memory_in_allocator_arenas - amount;
|
||||
Int64 difference = rss - amount;
|
||||
|
||||
/// Log only if difference is high. This is for convenience. The threshold is arbitrary.
|
||||
if (difference >= 1048576 || difference <= -1048576)
|
||||
LOG_TRACE(log,
|
||||
"MemoryTracking: was {}, peak {}, will set to {} (RSS), difference: {}",
|
||||
"MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}",
|
||||
ReadableSize(amount),
|
||||
ReadableSize(peak),
|
||||
ReadableSize(free_memory_in_allocator_arenas),
|
||||
ReadableSize(rss),
|
||||
ReadableSize(difference));
|
||||
|
||||
|
@@ -295,11 +295,11 @@ namespace
|
||||
}
|
||||
|
||||
|
||||
bool parseHosts(IParserBase::Pos & pos, Expected & expected, const String & prefix, AllowedClientHosts & hosts)
|
||||
bool parseHosts(IParserBase::Pos & pos, Expected & expected, std::string_view prefix, AllowedClientHosts & hosts)
|
||||
{
|
||||
return IParserBase::wrapParseImpl(pos, [&]
|
||||
{
|
||||
if (!prefix.empty() && !ParserKeyword{prefix.c_str()}.ignore(pos, expected))
|
||||
if (!prefix.empty() && !ParserKeyword{prefix}.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!ParserKeyword{"HOST"}.ignore(pos, expected))
|
||||
@@ -492,7 +492,6 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
|
||||
|
||||
if (alter)
|
||||
{
|
||||
String maybe_new_name;
|
||||
if (!new_name && (names->size() == 1) && parseRenameTo(pos, expected, new_name))
|
||||
continue;
|
||||
|
||||
|
@@ -561,13 +561,10 @@ public:
|
||||
|
||||
virtual bool getResult(ASTPtr & node)
|
||||
{
|
||||
if (elements.size() == 1)
|
||||
{
|
||||
node = std::move(elements[0]);
|
||||
return true;
|
||||
}
|
||||
if (!finished)
|
||||
return false;
|
||||
|
||||
return false;
|
||||
return getResultImpl(node);
|
||||
}
|
||||
|
||||
virtual bool parse(IParser::Pos & /*pos*/, Expected & /*expected*/, Action & /*action*/) = 0;
|
||||
@@ -746,6 +743,17 @@ public:
|
||||
Checkpoint current_checkpoint = Checkpoint::None;
|
||||
|
||||
protected:
|
||||
virtual bool getResultImpl(ASTPtr & node)
|
||||
{
|
||||
if (elements.size() == 1)
|
||||
{
|
||||
node = std::move(elements[0]);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
std::vector<Operator> operators;
|
||||
ASTs operands;
|
||||
ASTs elements;
|
||||
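The change above is a small template-method refactor: the public `getResult` in the base `Layer` now performs the shared `finished` check once and delegates node construction to a new protected virtual `getResultImpl`, which the individual layers (round brackets, array, extract, substring, position, trim, dateAdd, dateDiff, view) override instead of duplicating the check. A condensed sketch of the shape of this refactor, with ClickHouse's AST types replaced by `std::string` purely for illustration:

```cpp
#include <string>
#include <utility>
#include <vector>

struct Layer
{
    virtual ~Layer() = default;

    /// Common entry point: the "is parsing finished?" check lives here once.
    virtual bool getResult(std::string & node)
    {
        if (!finished)
            return false;
        return getResultImpl(node);
    }

protected:
    /// Default behaviour: a single collected element becomes the result node.
    virtual bool getResultImpl(std::string & node)
    {
        if (elements.size() == 1)
        {
            node = std::move(elements[0]);
            return true;
        }
        return false;
    }

    bool finished = false;             /// set by the parsing loop once the layer is complete
    std::vector<std::string> elements; /// collected operands, stands in for ASTs
};

/// Analogous to ArrayLayer in the diff: always wraps all elements into one node.
struct ArrayLikeLayer : Layer
{
protected:
    bool getResultImpl(std::string & node) override
    {
        node = "array(" + std::to_string(elements.size()) + " elements)"; /// stands in for makeASTFunction("array", ...)
        return true;
    }
};
```

One layer in the diff still overrides `getResult` itself because it must call `mergeElement` before producing a node; it then falls back to `Layer::getResultImpl`.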
@@ -766,17 +774,12 @@ public:
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
/// We can exit the main cycle outside the parse() function,
|
||||
/// so we need to merge the element here
|
||||
/// so we need to merge the element here.
|
||||
/// Because of this 'finished' flag can also not be set.
|
||||
if (!mergeElement())
|
||||
return false;
|
||||
|
||||
if (elements.size() == 1)
|
||||
{
|
||||
node = std::move(elements[0]);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
return Layer::getResultImpl(node);
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & /*expected*/, Action & /*action*/) override
|
||||
@@ -1029,17 +1032,6 @@ private:
|
||||
class RoundBracketsLayer : public Layer
|
||||
{
|
||||
public:
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
// Round brackets can mean priority operator as well as function tuple()
|
||||
if (!is_tuple && elements.size() == 1)
|
||||
node = std::move(elements[0]);
|
||||
else
|
||||
node = makeASTFunction("tuple", std::move(elements));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
if (ParserToken(TokenType::Comma).ignore(pos, expected))
|
||||
@@ -1069,6 +1061,19 @@ public:
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
// Round brackets can mean priority operator as well as function tuple()
|
||||
if (!is_tuple && elements.size() == 1)
|
||||
node = std::move(elements[0]);
|
||||
else
|
||||
node = makeASTFunction("tuple", std::move(elements));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
bool is_tuple = false;
|
||||
};
|
||||
@@ -1077,16 +1082,17 @@ private:
|
||||
class ArrayLayer : public LayerWithSeparator<TokenType::Comma, TokenType::ClosingSquareBracket>
|
||||
{
|
||||
public:
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction("array", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
return LayerWithSeparator::parse(pos, expected, action);
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction("array", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
/// Layer for arrayElement square brackets operator
|
||||
@@ -1206,23 +1212,6 @@ class ExtractLayer : public LayerWithSeparator<TokenType::Comma, TokenType::Clos
|
||||
public:
|
||||
ExtractLayer() : LayerWithSeparator(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
if (state == 2)
|
||||
{
|
||||
if (elements.empty())
|
||||
return false;
|
||||
|
||||
node = makeASTFunction(interval_kind.toNameOfFunctionExtractTimePart(), elements[0]);
|
||||
}
|
||||
else
|
||||
{
|
||||
node = makeASTFunction("extract", std::move(elements));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
/// extract(haystack, pattern) or EXTRACT(DAY FROM Date)
|
||||
@@ -1268,6 +1257,25 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
if (state == 2)
|
||||
{
|
||||
if (elements.empty())
|
||||
return false;
|
||||
|
||||
node = makeASTFunction(interval_kind.toNameOfFunctionExtractTimePart(), elements[0]);
|
||||
}
|
||||
else
|
||||
{
|
||||
node = makeASTFunction("extract", std::move(elements));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
private:
|
||||
IntervalKind interval_kind;
|
||||
};
|
||||
@@ -1277,12 +1285,6 @@ class SubstringLayer : public Layer
|
||||
public:
|
||||
SubstringLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction("substring", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
/// Either SUBSTRING(expr FROM start [FOR length]) or SUBSTRING(expr, start, length)
|
||||
@@ -1332,6 +1334,13 @@ public:
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction("substring", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
class PositionLayer : public Layer
|
||||
@@ -1339,15 +1348,6 @@ class PositionLayer : public Layer
|
||||
public:
|
||||
PositionLayer() : Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
if (state == 2)
|
||||
std::swap(elements[1], elements[0]);
|
||||
|
||||
node = makeASTFunction("position", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
/// position(haystack, needle[, start_pos]) or position(needle IN haystack)
|
||||
@@ -1402,6 +1402,16 @@ public:
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
if (state == 2)
|
||||
std::swap(elements[1], elements[0]);
|
||||
|
||||
node = makeASTFunction("position", std::move(elements));
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
class ExistsLayer : public Layer
|
||||
@@ -1436,12 +1446,6 @@ public:
|
||||
TrimLayer(bool trim_left_, bool trim_right_)
|
||||
: Layer(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true), trim_left(trim_left_), trim_right(trim_right_) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction(function_name, std::move(elements));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
/// Handles all possible TRIM/LTRIM/RTRIM call variants
|
||||
@@ -1583,6 +1587,14 @@ public:
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
node = makeASTFunction(function_name, std::move(elements));
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
bool trim_left;
|
||||
bool trim_right;
|
||||
@@ -1598,23 +1610,6 @@ public:
|
||||
explicit DateAddLayer(const char * function_name_)
|
||||
: LayerWithSeparator(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true), function_name(function_name_) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
if (parsed_interval_kind)
|
||||
{
|
||||
if (elements.size() < 2)
|
||||
return false;
|
||||
|
||||
elements[0] = makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), elements[0]);
|
||||
node = makeASTFunction(function_name, elements[1], elements[0]);
|
||||
}
|
||||
else
|
||||
node = makeASTFunction(function_name, std::move(elements));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
/// DATEADD(YEAR, 1, date) or DATEADD(INTERVAL 1 YEAR, date);
|
||||
@@ -1644,6 +1639,23 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
if (parsed_interval_kind)
|
||||
{
|
||||
if (elements.size() < 2)
|
||||
return false;
|
||||
|
||||
elements[0] = makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), elements[0]);
|
||||
node = makeASTFunction(function_name, elements[1], elements[0]);
|
||||
}
|
||||
else
|
||||
node = makeASTFunction(function_name, std::move(elements));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
IntervalKind interval_kind;
|
||||
const char * function_name;
|
||||
@@ -1655,24 +1667,6 @@ class DateDiffLayer : public LayerWithSeparator<TokenType::Comma, TokenType::Clo
|
||||
public:
|
||||
DateDiffLayer() : LayerWithSeparator(/*allow_alias*/ true, /*allow_alias_without_as_keyword*/ true) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
if (parsed_interval_kind)
|
||||
{
|
||||
if (elements.size() == 2)
|
||||
node = makeASTFunction("dateDiff", std::make_shared<ASTLiteral>(interval_kind.toDateDiffUnit()), elements[0], elements[1]);
|
||||
else if (elements.size() == 3)
|
||||
node = makeASTFunction("dateDiff", std::make_shared<ASTLiteral>(interval_kind.toDateDiffUnit()), elements[0], elements[1], elements[2]);
|
||||
else
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
node = makeASTFunction("dateDiff", std::move(elements));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & action) override
|
||||
{
|
||||
/// 0. Try to parse interval_kind (-> 1)
|
||||
@@ -1699,6 +1693,25 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
if (parsed_interval_kind)
|
||||
{
|
||||
if (elements.size() == 2)
|
||||
node = makeASTFunction("dateDiff", std::make_shared<ASTLiteral>(interval_kind.toDateDiffUnit()), elements[0], elements[1]);
|
||||
else if (elements.size() == 3)
|
||||
node = makeASTFunction("dateDiff", std::make_shared<ASTLiteral>(interval_kind.toDateDiffUnit()), elements[0], elements[1], elements[2]);
|
||||
else
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
node = makeASTFunction("dateDiff", std::move(elements));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
IntervalKind interval_kind;
|
||||
bool parsed_interval_kind = false;
|
||||
@@ -1882,16 +1895,6 @@ class ViewLayer : public Layer
|
||||
public:
|
||||
explicit ViewLayer(bool if_permitted_) : if_permitted(if_permitted_) {}
|
||||
|
||||
bool getResult(ASTPtr & node) override
|
||||
{
|
||||
if (if_permitted)
|
||||
node = makeASTFunction("viewIfPermitted", std::move(elements));
|
||||
else
|
||||
node = makeASTFunction("view", std::move(elements));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parse(IParser::Pos & pos, Expected & expected, Action & /*action*/) override
|
||||
{
|
||||
/// view(SELECT ...)
|
||||
@@ -1948,6 +1951,17 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
bool getResultImpl(ASTPtr & node) override
|
||||
{
|
||||
if (if_permitted)
|
||||
node = makeASTFunction("viewIfPermitted", std::move(elements));
|
||||
else
|
||||
node = makeASTFunction("view", std::move(elements));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
bool if_permitted;
|
||||
};
|
||||
|
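For reference, the call forms handled by these layers look like this in SQL. This is an illustrative sketch, not part of the commit; the rewrites noted in the trailing comments follow getResultImpl above, roughly:

SELECT DATEADD(YEAR, 1, toDate('2022-01-01'));                     -- rewritten to function_name(date, toIntervalYear(1))
SELECT DATEADD(INTERVAL 1 YEAR, toDate('2022-01-01'));             -- same result, interval given explicitly
SELECT DATEDIFF(DAY, toDate('2022-01-01'), toDate('2022-02-01'));  -- rewritten to dateDiff('day', start, end)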

@ -71,7 +71,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
        }
    }

    /// Add direct edges form output ports.
    /// Add direct edges from output ports.
    auto & outputs = from->getOutputs();
    auto from_output = nodes[node]->direct_edges.size();


@ -86,6 +86,88 @@ String extractFixedPrefixFromLikePattern(const String & like_pattern)
    return fixed_prefix;
}

/// for "^prefix..." string it returns "prefix"
static String extractFixedPrefixFromRegularExpression(const String & regexp)
{
    if (regexp.size() <= 1 || regexp[0] != '^')
        return {};

    String fixed_prefix;
    const char * begin = regexp.data() + 1;
    const char * pos = begin;
    const char * end = regexp.data() + regexp.size();

    while (pos != end)
    {
        switch (*pos)
        {
            case '\0':
                pos = end;
                break;

            case '\\':
            {
                ++pos;
                if (pos == end)
                    break;

                switch (*pos)
                {
                    case '|':
                    case '(':
                    case ')':
                    case '^':
                    case '$':
                    case '.':
                    case '[':
                    case '?':
                    case '*':
                    case '+':
                    case '{':
                        fixed_prefix += *pos;
                        break;
                    default:
                        /// all other escape sequences are not supported
                        pos = end;
                        break;
                }

                ++pos;
                break;
            }

            /// non-trivial cases
            case '|':
                fixed_prefix.clear();
                [[fallthrough]];
            case '(':
            case '[':
            case '^':
            case '$':
            case '.':
            case '+':
                pos = end;
                break;

            /// Quantifiers that allow a zero number of occurrences.
            case '{':
            case '?':
            case '*':
                if (!fixed_prefix.empty())
                    fixed_prefix.pop_back();

                pos = end;
                break;
            default:
                fixed_prefix += *pos;
                pos++;
                break;
        }
    }

    return fixed_prefix;
}


/** For a given string, get a minimum string that is strictly greater than all strings with this prefix,
  * or return an empty string if there are no such strings.
@ -581,6 +663,27 @@ const KeyCondition::AtomMap KeyCondition::atom_map
            return true;
        }
    },
    {
        "match",
        [] (RPNElement & out, const Field & value)
        {
            if (value.getType() != Field::Types::String)
                return false;

            String prefix = extractFixedPrefixFromRegularExpression(value.get<const String &>());
            if (prefix.empty())
                return false;

            String right_bound = firstStringThatIsGreaterThanAllStringsWithPrefix(prefix);

            out.function = RPNElement::FUNCTION_IN_RANGE;
            out.range = !right_bound.empty()
                ? Range(prefix, true, right_bound, false)
                : Range::createLeftBounded(prefix, true);

            return true;
        }
    },
    {
        "isNotNull",
        [] (RPNElement & out, const Field &)
@ -1738,7 +1841,7 @@ bool KeyCondition::tryParseAtomFromAST(const Tree & node, ContextPtr context, Bl
        else if (func_name == "in" || func_name == "notIn" ||
            func_name == "like" || func_name == "notLike" ||
            func_name == "ilike" || func_name == "notIlike" ||
            func_name == "startsWith")
            func_name == "startsWith" || func_name == "match")
        {
            /// "const IN data_column" doesn't make sense (unlike "data_column IN const")
            return false;
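To make the effect of the new "match" atom concrete, here is a minimal illustrative sketch (not part of this commit; table and column names are hypothetical). For an anchored pattern the extracted fixed prefix is turned into a key range, roughly s >= prefix AND s < firstStringThatIsGreaterThanAllStringsWithPrefix(prefix):

CREATE TABLE regexp_prefix_demo (s String) ENGINE = MergeTree ORDER BY s;

-- Can use the primary key: the anchored pattern yields the fixed prefix 'abc'.
SELECT count() FROM regexp_prefix_demo WHERE match(s, '^abc.*');

-- Cannot use the primary key: no fixed prefix can be extracted
-- (pattern is not anchored, or it starts with an alternation/group).
SELECT count() FROM regexp_prefix_demo WHERE match(s, 'abc$');
SELECT count() FROM regexp_prefix_demo WHERE match(s, '^(abc|abd)');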

@ -2199,6 +2199,7 @@ void MergeTreeData::dropAllData()

        LOG_TRACE(log, "dropAllData: removing all data parts from memory.");
        data_parts_indexes.clear();
        all_data_dropped = true;
    }
    catch (...)
    {
@ -3142,7 +3143,7 @@ void MergeTreeData::removePartsInRangeFromWorkingSet(MergeTreeTransaction * txn,
    removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(txn, drop_range, lock);
}

MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(
MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(
    MergeTreeTransaction * txn, const MergeTreePartInfo & drop_range, DataPartsLock & lock)
{
    DataPartsVector parts_to_remove;
@ -3220,15 +3221,20 @@ MergeTreeData::DataPartsVector MergeTreeData::removePartsInRangeFromWorkingSetAn
    /// FIXME refactor removePartsFromWorkingSet(...), do not remove parts twice
    removePartsFromWorkingSet(txn, parts_to_remove, clear_without_timeout, lock);

    /// Since we can return parts in Deleting state, we have to use a wrapper that restricts access to such parts.
    PartsToRemoveFromZooKeeper parts_to_remove_from_zookeeper;
    for (auto & part : parts_to_remove)
        parts_to_remove_from_zookeeper.emplace_back(std::move(part));

    for (auto & part : inactive_parts_to_remove_immediately)
    {
        if (!drop_range.contains(part->info))
            continue;
        part->remove_time.store(0, std::memory_order_relaxed);
        parts_to_remove.push_back(std::move(part));
        parts_to_remove_from_zookeeper.emplace_back(std::move(part), /* was_active */ false);
    }

    return parts_to_remove;
    return parts_to_remove_from_zookeeper;
}

void MergeTreeData::restoreAndActivatePart(const DataPartPtr & part, DataPartsLock * acquired_lock)
@ -5176,9 +5182,27 @@ void MergeTreeData::Transaction::rollback()
            buf << ".";
        LOG_DEBUG(data.log, "Undoing transaction.{}", buf.str());

        data.removePartsFromWorkingSet(txn,
            DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()),
            /* clear_without_timeout = */ true);
        auto lock = data.lockParts();

        if (data.data_parts_indexes.empty())
        {
            /// Table was dropped concurrently and all parts (including PreActive parts) were cleared, so there's nothing to rollback
            if (!data.all_data_dropped)
            {
                Strings part_names;
                for (const auto & part : precommitted_parts)
                    part_names.emplace_back(part->name);
                throw Exception(ErrorCodes::LOGICAL_ERROR, "There are some PreActive parts ({}) to rollback, "
                    "but data parts set is empty and table {} was not dropped. It's a bug",
                    fmt::join(part_names, ", "), data.getStorageID().getNameForLogs());
            }
        }
        else
        {
            data.removePartsFromWorkingSet(txn,
                DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()),
                /* clear_without_timeout = */ true, &lock);
        }
    }

    clear();

@ -584,10 +584,33 @@ public:
    /// Used in REPLACE PARTITION command.
    void removePartsInRangeFromWorkingSet(MergeTreeTransaction * txn, const MergeTreePartInfo & drop_range, DataPartsLock & lock);

    /// This wrapper is required to restrict access to parts in Deleting state
    class PartToRemoveFromZooKeeper
    {
        DataPartPtr part;
        bool was_active;

    public:
        explicit PartToRemoveFromZooKeeper(DataPartPtr && part_, bool was_active_ = true)
            : part(std::move(part_)), was_active(was_active_)
        {
        }

        /// It's safe to get name of any part
        const String & getPartName() const { return part->name; }

        DataPartPtr getPartIfItWasActive() const
        {
            return was_active ? part : nullptr;
        }
    };

    using PartsToRemoveFromZooKeeper = std::vector<PartToRemoveFromZooKeeper>;

    /// Same as above, but also returns list of parts to remove from ZooKeeper.
    /// It includes parts that have been just removed by this method
    /// and Outdated parts covered by drop_range that were removed earlier for any reason.
    DataPartsVector removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(
    PartsToRemoveFromZooKeeper removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(
        MergeTreeTransaction * txn, const MergeTreePartInfo & drop_range, DataPartsLock & lock);

    /// Restores Outdated part and adds it to working set
@ -640,6 +663,9 @@ public:
    /// Deletes the data directory and flushes the uncompressed blocks cache and the marks cache.
    void dropAllData();

    /// This flag is for hardening and assertions.
    bool all_data_dropped = false;

    /// Drop data directories if they are empty. It is safe to call this method if table creation was unsuccessful.
    void dropIfEmpty();

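In short, parts returned by this method may already be in Deleting state: callers can always read the name via getPartName(), but anything that needs the part object itself is expected to go through getPartIfItWasActive() first, which is exactly what the executeDropRange hunk below does.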

@ -1827,7 +1827,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry)
    /// Therefore, we use all data parts.

    auto metadata_snapshot = getInMemoryMetadataPtr();
    DataPartsVector parts_to_remove;
    PartsToRemoveFromZooKeeper parts_to_remove;
    {
        auto data_parts_lock = lockParts();
        parts_to_remove = removePartsInRangeFromWorkingSetAndGetPartsToRemoveFromZooKeeper(NO_TRANSACTION_RAW, drop_range_info, data_parts_lock);
@ -1849,8 +1849,11 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry)
        /// If DETACH clone parts to detached/ directory
        for (const auto & part : parts_to_remove)
        {
            LOG_INFO(log, "Detaching {}", part->getDataPartStorage().getPartDirectory());
            part->makeCloneInDetached("", metadata_snapshot);
            if (auto part_to_detach = part.getPartIfItWasActive())
            {
                LOG_INFO(log, "Detaching {}", part_to_detach->getDataPartStorage().getPartDirectory());
                part_to_detach->makeCloneInDetached("", metadata_snapshot);
            }
        }
    }

@ -1941,7 +1944,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)

    PartDescriptions all_parts;
    PartDescriptions parts_to_add;
    DataPartsVector parts_to_remove;
    PartsToRemoveFromZooKeeper parts_to_remove;

    auto table_lock_holder_dst_table = lockForShare(
        RWLockImpl::NO_QUERY, getSettings()->lock_acquire_timeout_for_background_operations);
@ -1972,7 +1975,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
        String parts_to_remove_str;
        for (const auto & part : parts_to_remove)
        {
            parts_to_remove_str += part->name;
            parts_to_remove_str += part.getPartName();
            parts_to_remove_str += " ";
        }
        LOG_TRACE(log, "Replacing {} parts {}with empty set", parts_to_remove.size(), parts_to_remove_str);
@ -2248,7 +2251,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
        String parts_to_remove_str;
        for (const auto & part : parts_to_remove)
        {
            parts_to_remove_str += part->name;
            parts_to_remove_str += part.getPartName();
            parts_to_remove_str += " ";
        }
        LOG_TRACE(log, "Replacing {} parts {}with {} parts {}", parts_to_remove.size(), parts_to_remove_str,
@ -6230,11 +6233,11 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK()
}


void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries)
void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(PartsToRemoveFromZooKeeper & parts, size_t max_retries)
{
    Strings part_names_to_remove;
    for (const auto & part : parts)
        part_names_to_remove.emplace_back(part->name);
        part_names_to_remove.emplace_back(part.getPartName());

    return removePartsFromZooKeeperWithRetries(part_names_to_remove, max_retries);
}
@ -6561,7 +6564,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(
        if (replace)
            clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block);

        DataPartsVector parts_to_remove;
        PartsToRemoveFromZooKeeper parts_to_remove;
        Coordination::Responses op_results;

        try
@ -6797,7 +6800,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta

        clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block);

        DataPartsVector parts_to_remove;
        PartsToRemoveFromZooKeeper parts_to_remove;
        Coordination::Responses op_results;

        try

@ -549,7 +549,7 @@ private:

    /// Remove parts from ZooKeeper, throw exception if unable to do so after max_retries.
    void removePartsFromZooKeeperWithRetries(const Strings & part_names, size_t max_retries = 5);
    void removePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries = 5);
    void removePartsFromZooKeeperWithRetries(PartsToRemoveFromZooKeeper & parts, size_t max_retries = 5);

    /// Removes a part from ZooKeeper and adds a task to the queue to download it. It is supposed to do this with broken parts.
    void removePartAndEnqueueFetch(const String & part_name);

@ -129,6 +129,9 @@ def test_incorrect_usage(cluster):
    result = node2.query_and_get_error("TRUNCATE TABLE test0")
    assert "Table is read-only" in result

    result = node2.query_and_get_error("OPTIMIZE TABLE test0 FINAL")
    assert "Only read-only operations are supported" in result

    node2.query("DROP TABLE test0 SYNC")


@ -1,33 +1,34 @@
20
20 20
02 02
01/02/18 01/02/18
2 2
2018-01-02 2018-01-02
22 00
02
01/02/18
2
2018-01-02
22
02
10
10 12
11
12
001
366
01
33
\n
AM
001 001
366 366
01 01
33 00
\n \n
AM AM
AM
PM
22:33
44
\t
22:33:44
1 7
01 01 53 52
1 0
18
2018
%
no formatting pattern
22:33 00:00
44 00
\t \t
22:33:44 00:00:00
1 7 1 7
01 01 53 52 01 01 53 52
1 0 1 0
18 18
2018 2018
% %
no formatting pattern no formatting pattern
2018-01-01 00:00:00
1927-01-01 00:00:00
2018-01-01 01:00:00 2018-01-01 04:00:00
+0000
-1100

@ -8,38 +8,44 @@ SELECT formatDateTime(now(), 'unescaped %'); -- { serverError 36 }
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError 48 }
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%W'); -- { serverError 48 }

SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%e');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C'), formatDateTime(toDate32('2018-01-02'), '%C');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d'), formatDateTime(toDate32('2018-01-02'), '%d');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D'), formatDateTime(toDate32('2018-01-02'), '%D');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%e'), formatDateTime(toDate32('2018-01-02'), '%e');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F'), formatDateTime(toDate32('2018-01-02'), '%F');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H'), formatDateTime(toDate32('2018-01-02'), '%H');
SELECT formatDateTime(toDateTime('2018-01-02 02:33:44'), '%H');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I'), formatDateTime(toDate32('2018-01-02'), '%I');
SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%I');
SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%I');
SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j');
SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%n');
SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p');
SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j'), formatDateTime(toDate32('2018-01-01'), '%j');
SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j'), formatDateTime(toDate32('2000-12-31'), '%j');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m'), formatDateTime(toDate32('2018-01-02'), '%m');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'), formatDateTime(toDate32('2018-01-02'), '%M');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%n'), formatDateTime(toDate32('2018-01-02'), '%n');
SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p'), formatDateTime(toDateTime('2018-01-02'), '%p');
SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%p');
SELECT formatDateTime(toDateTime('2018-01-02 12:33:44'), '%p');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T');
SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R'), formatDateTime(toDate32('2018-01-02'), '%R');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S'), formatDateTime(toDate32('2018-01-02'), '%S');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t'), formatDateTime(toDate32('2018-01-02'), '%t');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T'), formatDateTime(toDate32('2018-01-02'), '%T');
SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u'),
    formatDateTime(toDate32('2018-01-01'), '%u'), formatDateTime(toDate32('2018-01-07'), '%u');
SELECT formatDateTime(toDateTime('1996-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1996-12-31 22:33:44'), '%V'),
    formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V');
SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern');
    formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V'),
    formatDateTime(toDate32('1996-01-01'), '%V'), formatDateTime(toDate32('1996-12-31'), '%V'),
    formatDateTime(toDate32('1999-01-01'), '%V'), formatDateTime(toDate32('1999-12-31'), '%V');
SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w'),
    formatDateTime(toDate32('2018-01-01'), '%w'), formatDateTime(toDate32('2018-01-07'), '%w');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y'), formatDateTime(toDate32('2018-01-02'), '%y');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y'), formatDateTime(toDate32('2018-01-02'), '%Y');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'), formatDateTime(toDate32('2018-01-02'), '%%');
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'), formatDateTime(toDate32('2018-01-02'), 'no formatting pattern');

SELECT formatDateTime(toDate('2018-01-01'), '%F %T');
SELECT formatDateTime(toDate32('1927-01-01'), '%F %T');

SELECT
    formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'UTC'),
    formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'Asia/Istanbul');
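Judging by the updated reference output above, the point of this test change is that formatDateTime (and FROM_UNIXTIME below) now accept Date32 arguments, with time-of-day fields rendered as zeros. A minimal illustration, assuming a server that includes this change:

SELECT formatDateTime(toDate32('2018-01-02'), '%F %T');  -- expected: 2018-01-02 00:00:00
SELECT formatDateTime(toDate32('2018-01-02'), '%H');     -- expected: 00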

@ -1,7 +1,7 @@
-- Tags: shard

-- Limit to 10 MB/sec
SET max_network_bandwidth = 10000000;
-- Limit to 100 KB/sec
SET max_network_bandwidth = 100000;

-- Lower max_block_size, so we can start throttling sooner. Otherwise query will be executed too quickly.
SET max_block_size = 100;
@ -11,7 +11,7 @@ CREATE TEMPORARY TABLE times (t DateTime);
-- rand64 is incompressible data. Each number will take 8 bytes of bandwidth.
-- This query should execute in no less than 1.6 seconds if throttled.
INSERT INTO times SELECT now();
SELECT sum(ignore(*)) FROM (SELECT rand64() FROM remote('127.0.0.{2,3}', numbers(2000000)));
SELECT sum(ignore(*)) FROM (SELECT rand64() FROM remote('127.0.0.{2,3}', numbers(20000)));
INSERT INTO times SELECT now();

SELECT max(t) - min(t) >= 1 FROM times;
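The arithmetic behind the new limits, presumably: each of the two remote shards now sends 20000 rows of rand64(), i.e. about 20000 * 8 = 160 KB of incompressible data, and at 100 KB/sec that takes at least 1.6 seconds per connection, which is why the final check only requires max(t) - min(t) >= 1 second.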

@ -5,25 +5,25 @@
11
1970-01-15
1970-01-15 06:52:36
20
20 20
02 02
01/02/18 01/02/18
2 2
2018-01-02 2018-01-02
22 00
02
01/02/18
2
2018-01-02
22
02
10
10 12
11
12
001
366
01
33
\n
AM
001 001
366 366
01 01
33 00
\n \n
AM AM
AM
PM
22:33
44
\t
22:33:44
22:33 00:00
44 00
\t \t
22:33:44 00:00:00

@ -5,25 +5,25 @@ SELECT FROM_UNIXTIME(5345345, '%C', 'UTC');
SELECT FROM_UNIXTIME(645123, '%H', 'UTC');
SELECT FROM_UNIXTIME(1232456, '%Y-%m-%d', 'UTC');
SELECT FROM_UNIXTIME(1234356, '%Y-%m-%d %R:%S', 'UTC');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C'), FROM_UNIXTIME(toDate32('2018-01-02'), '%C');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d'), FROM_UNIXTIME(toDate32('2018-01-02'), '%d');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D'), FROM_UNIXTIME(toDate32('2018-01-02'), '%D');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e'), FROM_UNIXTIME(toDate32('2018-01-02'), '%e');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F'), FROM_UNIXTIME(toDate32('2018-01-02'), '%F');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H'), FROM_UNIXTIME(toDate32('2018-01-02'), '%H');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 02:33:44'), '%H');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I'), FROM_UNIXTIME(toDate32('2018-01-02'), '%I');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%I');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%I');
SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j');
SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p');
SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2018-01-01'), '%j');
SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2000-12-31'), '%j');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m'), FROM_UNIXTIME(toDate32('2018-01-02'), '%m');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M'), FROM_UNIXTIME(toDate32('2018-01-02'), '%M');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n'), FROM_UNIXTIME(toDate32('2018-01-02'), '%n');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p'), FROM_UNIXTIME(toDate32('2018-01-02'), '%p');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%p');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 12:33:44'), '%p');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R'), FROM_UNIXTIME(toDate32('2018-01-02'), '%R');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S'), FROM_UNIXTIME(toDate32('2018-01-02'), '%S');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t'), FROM_UNIXTIME(toDate32('2018-01-02'), '%t');
SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T'), FROM_UNIXTIME(toDate32('2018-01-02'), '%T');

@ -0,0 +1,5 @@
4
1
3
4
4
9
tests/queries/0_stateless/02462_match_regexp_pk.sql
Normal file
@ -0,0 +1,9 @@
CREATE TABLE mt_match_pk (v String) ENGINE = MergeTree ORDER BY v SETTINGS index_granularity = 1;
INSERT INTO mt_match_pk VALUES ('a'), ('aaa'), ('aba'), ('bac'), ('acccca');

SET force_primary_key = 1;
SELECT count() FROM mt_match_pk WHERE match(v, '^a');
SELECT count() FROM mt_match_pk WHERE match(v, '^ab');
SELECT count() FROM mt_match_pk WHERE match(v, '^a.');
SELECT count() FROM mt_match_pk WHERE match(v, '^ab*');
SELECT count() FROM mt_match_pk WHERE match(v, '^ac?');
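With force_primary_key = 1 these queries only pass because each anchored pattern yields a usable fixed prefix, and the reference output above (4, 1, 3, 4, 4) follows from the prefix logic added to KeyCondition: '^ab*' and '^ac?' end in quantifiers that allow zero occurrences, so the last character is dropped and both degrade to the prefix 'a', matching the same 4 rows as '^a'.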

@ -0,0 +1 @@
EXPLAIN AST ALTER user WITH a; -- { clientError SYNTAX_ERROR }

@ -0,0 +1 @@
CREATE DATABASE conv_mian ENGINE QALL(COLUMNS('|T.D'),¸mp} -- { clientError SYNTAX_ERROR }

@ -36,6 +36,7 @@ v22.4.5.9-stable 2022-05-06
v22.4.4.7-stable 2022-04-29
v22.4.3.3-stable 2022-04-26
v22.4.2.1-stable 2022-04-22
v22.3.14.23-lts 2022-10-28
v22.3.13.80-lts 2022-09-30
v22.3.12.19-lts 2022-08-29
v22.3.11.12-lts 2022-08-10