mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-10 01:25:21 +00:00

Merge branch 'master' into fix-ibm

This commit is contained in:
commit 28e03f5781
@@ -23,6 +23,7 @@ curl https://clickhouse.com/ | sh

## Upcoming Events

* [**ClickHouse Spring Meetup in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/292517734) - April 26 - It's spring, and it's time to meet again in the city! Talks include: "Building a domain specific query language on top of Clickhouse", "A Galaxy of Information", "Our Journey to ClickHouse Cloud from Redshift", and a ClickHouse update!
* [**v23.4 Release Webinar**](https://clickhouse.com/company/events/v23-4-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-04) - April 26 - 23.4 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/292892466) - May 16 - Save the date! ClickHouse is coming back to Berlin. We’re excited to announce an upcoming ClickHouse Meetup that you won’t want to miss. Join us as we gather to discuss the latest in the world of ClickHouse and share user stories.

## Recent Recordings

* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
@@ -3,13 +3,29 @@

#include <Poco/Util/LayeredConfiguration.h>
#include <Poco/Util/MapConfiguration.h>


void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority)
void argsToConfig(const Poco::Util::Application::ArgVec & argv,
    Poco::Util::LayeredConfiguration & config,
    int priority,
    const std::unordered_set<std::string> * alias_names)
{
    /// Parsing all args and converting to config layer
    /// Test: -- --1=1 --1=2 --3 5 7 8 -9 10 -11=12 14= 15== --16==17 --=18 --19= --20 21 22 --23 --24 25 --26 -27 28 ---29=30 -- ----31 32 --33 3-4
    Poco::AutoPtr<Poco::Util::MapConfiguration> map_config = new Poco::Util::MapConfiguration;
    std::string key;

    auto add_arg = [&map_config, &alias_names](const std::string & k, const std::string & v)
    {
        map_config->setString(k, v);

        if (alias_names && !alias_names->contains(k))
        {
            std::string alias_key = k;
            std::replace(alias_key.begin(), alias_key.end(), '-', '_');
            if (alias_names->contains(alias_key))
                map_config->setString(alias_key, v);
        }
    };

    for (const auto & arg : argv)
    {
        auto key_start = arg.find_first_not_of('-');

@@ -19,7 +35,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye

        // old saved '--key', will set to some true value "1"
        if (!key.empty() && pos_minus != std::string::npos && pos_minus < key_start)
        {
            map_config->setString(key, "1");
            add_arg(key, "1");
            key = "";
        }

@@ -29,7 +45,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye

        {
            if (pos_minus == std::string::npos || pos_minus > key_start)
            {
                map_config->setString(key, arg);
                add_arg(key, arg);
            }
            key = "";
        }

@@ -55,7 +71,7 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::Laye

            if (arg.size() > pos_eq)
                value = arg.substr(pos_eq + 1);

            map_config->setString(key, value);
            add_arg(key, value);
            key = "";
        }

@@ -1,6 +1,8 @@

#pragma once

#include <Poco/Util/Application.h>
#include <string>
#include <unordered_set>

namespace Poco::Util
{

@@ -8,4 +10,7 @@ class LayeredConfiguration; // NOLINT(cppcoreguidelines-virtual-class-destructor

}

/// Import extra command line arguments to configuration. These are command line arguments after --.
void argsToConfig(const Poco::Util::Application::ArgVec & argv, Poco::Util::LayeredConfiguration & config, int priority);
void argsToConfig(const Poco::Util::Application::ArgVec & argv,
    Poco::Util::LayeredConfiguration & config,
    int priority,
    const std::unordered_set<std::string> * registered_alias_names = nullptr);
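For illustration, a minimal sketch of how the new `registered_alias_names` parameter is meant to be used (hypothetical caller and option names, not part of this commit). When an extra argument is spelled with dashes and its underscore spelling is a registered alias, the value is stored under both keys:

```cpp
#include <string>
#include <unordered_set>
#include <Poco/Util/Application.h>
#include <Poco/Util/LayeredConfiguration.h>
// ... plus the header above that declares argsToConfig()

void applyExtraArgs(const Poco::Util::Application::ArgVec & extra_args,
                    Poco::Util::LayeredConfiguration & config)
{
    /// Hypothetical set of long option names registered by the program.
    std::unordered_set<std::string> alias_names{"max_threads", "log_level"};

    /// With this set, "--max-threads=8" sets both "max-threads" and "max_threads".
    argsToConfig(extra_args, config, /*priority=*/ 100, &alias_names);
}
```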
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \

    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.3.1.2823"
ARG VERSION="23.3.2.37"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.

@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \

# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.3.1.2823"
ARG VERSION="23.3.2.37"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.

@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.3.1.2823"
ARG VERSION="23.3.2.37"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
22 docs/changelogs/v22.8.17.17-lts.md Normal file

@@ -0,0 +1,22 @@

---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.17.17-lts (df7f2ef0b41) FIXME as compared to v22.8.16.32-lts (7c4be737bd0)

#### Improvement
* Backported in [#48157](https://github.com/ClickHouse/ClickHouse/issues/48157): Fixed `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).

#### Build/Testing/Packaging Improvement
* Backported in [#48957](https://github.com/ClickHouse/ClickHouse/issues/48957): After the recent update, `dockerd` requires `--tlsverify=false` together with an explicitly specified http port. [#48924](https://github.com/ClickHouse/ClickHouse/pull/48924) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Remove a feature [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible segfault in cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a bug in Keeper where a node was sometimes not created with scheme `auth` in its ACL. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
28 docs/changelogs/v23.1.7.30-stable.md Normal file

@@ -0,0 +1,28 @@

---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.1.7.30-stable (c94dba6e023) FIXME as compared to v23.1.6.42-stable (783ddf67991)

#### Improvement
* Backported in [#48161](https://github.com/ClickHouse/ClickHouse/issues/48161): Fixed `UNKNOWN_TABLE` exception when attaching to a materialized view that has dependent tables that are not available. This might be useful when trying to restore state from a backup. [#47975](https://github.com/ClickHouse/ClickHouse/pull/47975) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).

#### Build/Testing/Packaging Improvement
* Backported in [#48585](https://github.com/ClickHouse/ClickHouse/issues/48585): Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#48958](https://github.com/ClickHouse/ClickHouse/issues/48958): After the recent update, `dockerd` requires `--tlsverify=false` together with an explicitly specified http port. [#48924](https://github.com/ClickHouse/ClickHouse/pull/48924) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix race in grace hash join with limit [#47153](https://github.com/ClickHouse/ClickHouse/pull/47153) ([Vladimir C](https://github.com/vdimir)).
* Fix explain graph with projection [#47473](https://github.com/ClickHouse/ClickHouse/pull/47473) ([flynn](https://github.com/ucasfl)).
* Fix crash in polygonsSymDifferenceCartesian [#47702](https://github.com/ClickHouse/ClickHouse/pull/47702) ([pufit](https://github.com/pufit)).
* Remove a feature [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix a ClickHouse startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix possible segfault in cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix a bug in Keeper where a node was sometimes not created with scheme `auth` in its ACL. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix IPv4 comparable with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
35 docs/changelogs/v23.3.2.37-lts.md Normal file

@@ -0,0 +1,35 @@

---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.2.37-lts (1b144bcd101) FIXME as compared to v23.3.1.2823-lts (46e85357ce2)

#### Improvement
* Backported in [#48459](https://github.com/ClickHouse/ClickHouse/issues/48459): Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using the setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#48842](https://github.com/ClickHouse/ClickHouse/issues/48842): Fix some MySQL-related settings not being handled with a MySQL dictionary source plus a named collection. Closes [#48402](https://github.com/ClickHouse/ClickHouse/issues/48402). [#48759](https://github.com/ClickHouse/ClickHouse/pull/48759) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#49035](https://github.com/ClickHouse/ClickHouse/issues/49035): Add fallback to password authentication when authentication with an SSL user certificate has failed. Closes [#48974](https://github.com/ClickHouse/ClickHouse/issues/48974). [#48989](https://github.com/ClickHouse/ClickHouse/pull/48989) ([Nikolay Degterinsky](https://github.com/evillique)).

#### Build/Testing/Packaging Improvement
* Backported in [#48589](https://github.com/ClickHouse/ClickHouse/issues/48589): Update time zones. The following were updated: Africa/Cairo, Africa/Casablanca, Africa/El_Aaiun, America/Bogota, America/Cambridge_Bay, America/Ciudad_Juarez, America/Godthab, America/Inuvik, America/Iqaluit, America/Nuuk, America/Ojinaga, America/Pangnirtung, America/Rankin_Inlet, America/Resolute, America/Whitehorse, America/Yellowknife, Asia/Gaza, Asia/Hebron, Asia/Kuala_Lumpur, Asia/Singapore, Canada/Yukon, Egypt, Europe/Kirov, Europe/Volgograd, Singapore. [#48572](https://github.com/ClickHouse/ClickHouse/pull/48572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#48960](https://github.com/ClickHouse/ClickHouse/issues/48960): After the recent update, `dockerd` requires `--tlsverify=false` together with an explicitly specified http port. [#48924](https://github.com/ClickHouse/ClickHouse/pull/48924) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Remove a feature [#48195](https://github.com/ClickHouse/ClickHouse/pull/48195) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix CPU usage in RabbitMQ (worsened in 23.2 after [#44404](https://github.com/ClickHouse/ClickHouse/issues/44404)) [#48311](https://github.com/ClickHouse/ClickHouse/pull/48311) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix ThreadPool for DistributedSink and use StrongTypedef for CurrentMetrics/ProfileEvents/StatusInfo to avoid further errors [#48314](https://github.com/ClickHouse/ClickHouse/pull/48314) ([Azat Khuzhin](https://github.com/azat)).
* Reset downloader for cache file segment in TemporaryFileStream [#48386](https://github.com/ClickHouse/ClickHouse/pull/48386) ([Vladimir C](https://github.com/vdimir)).
* Fix a ClickHouse startup error when loading a distributed table that depends on a dictionary [#48419](https://github.com/ClickHouse/ClickHouse/pull/48419) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix possible segfault in cache [#48469](https://github.com/ClickHouse/ClickHouse/pull/48469) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix nested map for keys of IP and UUID types [#48556](https://github.com/ClickHouse/ClickHouse/pull/48556) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix a bug in Keeper where a node was sometimes not created with scheme `auth` in its ACL. [#48595](https://github.com/ClickHouse/ClickHouse/pull/48595) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Fix IPv4 comparable with UInt [#48611](https://github.com/ClickHouse/ClickHouse/pull/48611) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Batch fix for projections analysis with analyzer. [#48357](https://github.com/ClickHouse/ClickHouse/pull/48357) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a confusing warning about interserver mode [#48793](https://github.com/ClickHouse/ClickHouse/pull/48793) ([Alexander Tokmakov](https://github.com/tavplubix)).
@@ -90,7 +90,7 @@ Process 1 stopped

## Visual Studio Code integration

- (CodeLLDB extension)[https://github.com/vadimcn/vscode-lldb] is required for visual debugging, the (Command Variable)[https://github.com/rioj7/command-variable] extension can help dynamic launches if using (cmake variants)[https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md].
- [CodeLLDB extension](https://github.com/vadimcn/vscode-lldb) is required for visual debugging; the [Command Variable](https://github.com/rioj7/command-variable) extension can help with dynamic launches when using [cmake variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your LLVM installation, e.g. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`
- Launcher:
```json
@@ -2241,7 +2241,7 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam

- [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
- [input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types during schema inference for the Arrow format. Default value - `false`.
- [output_format_arrow_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_fixed_string_as_fixed_byte_array) - use Arrow FIXED_SIZE_BINARY type instead of Binary/String for FixedString columns. Default value - `true`.
- [output_format_arrow_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_compression_method) - compression method used in output Arrow format. Default value - `none`.
- [output_format_arrow_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_compression_method) - compression method used in output Arrow format. Default value - `lz4_frame`.

## ArrowStream {#data-format-arrow-stream}

@@ -1123,7 +1123,7 @@ Could be used for throttling speed when replicating the data to add or replace n

The timeout in milliseconds for connecting to a remote server for a Distributed table engine, if the ‘shard’ and ‘replica’ sections are used in the cluster definition.
If unsuccessful, several attempts are made to connect to various replicas.

Default value: 50.
Default value: 1000.

## connection_pool_max_wait_ms {#connection-pool-max-wait-ms}

@@ -4102,7 +4102,7 @@ Enabled by default.

If we can't establish connection with replica after this timeout in hedged requests, we start working with the next replica without cancelling connection to the previous.
Timeout value is in milliseconds.

Default value: `100`.
Default value: `50`.

## receive_data_timeout {#receive_data_timeout}

@@ -67,6 +67,7 @@

#include <Storages/ColumnsDescription.h>

#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <iostream>
#include <filesystem>
#include <map>

@@ -2405,6 +2406,54 @@ struct TransparentStringHash

    }
};

/*
 * This functor is used to parse command line arguments and replace dashes with underscores,
 * allowing options to be specified using either dashes or underscores.
 */
class OptionsAliasParser
{
public:
    explicit OptionsAliasParser(const boost::program_options::options_description & options)
    {
        options_names.reserve(options.options().size());
        for (const auto & option : options.options())
            options_names.insert(option->long_name());
    }

    /*
     * Parses arguments by replacing dashes with underscores, and matches the resulting name with known options.
     * Implements boost::program_options::ext_parser logic.
     */
    std::pair<std::string, std::string> operator()(const std::string & token) const
    {
        if (token.find("--") != 0)
            return {};
        std::string arg = token.substr(2);

        // divide the token by '=' to separate key and value if the options style is long_allow_adjacent
        auto pos_eq = arg.find('=');
        std::string key = arg.substr(0, pos_eq);

        if (options_names.contains(key))
            // the option does not require any changes, because it is already correct
            return {};

        std::replace(key.begin(), key.end(), '-', '_');
        if (!options_names.contains(key))
            // after replacing '-' with '_' the argument is still unknown
            return {};

        std::string value;
        if (pos_eq != std::string::npos && pos_eq < arg.size())
            value = arg.substr(pos_eq + 1);

        return {key, value};
    }

private:
    std::unordered_set<std::string> options_names;
};

}

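For illustration (hypothetical option names, not part of this commit), here is what the extra parser returns for a few tokens when `max_threads` and `log_level` are the registered long names:

```cpp
// Assuming options_description registers the long names "max_threads" and "log_level".
OptionsAliasParser alias_parser(options_description);

auto a = alias_parser("--max-threads=8"); // {"max_threads", "8"}: dash spelling rewritten to a known name
auto b = alias_parser("--max_threads=8"); // {}: already a known name, left to the regular parser
auto c = alias_parser("--no-such-flag");  // {}: still unknown after '-' -> '_', left to other parsers
auto d = alias_parser("-x");              // {}: not a "--" token
```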
@@ -2455,7 +2504,10 @@ void ClientBase::parseAndCheckOptions(OptionsDescription & options_description,

    }

    /// Parse main commandline options.
    auto parser = po::command_line_parser(arguments).options(options_description.main_description.value()).allow_unregistered();
    auto parser = po::command_line_parser(arguments)
                      .options(options_description.main_description.value())
                      .extra_parser(OptionsAliasParser(options_description.main_description.value()))
                      .allow_unregistered();
    po::parsed_options parsed = parser.run();

    /// Check unrecognized options without positional options.

@@ -2497,6 +2549,19 @@ void ClientBase::init(int argc, char ** argv)

    readArguments(argc, argv, common_arguments, external_tables_arguments, hosts_and_ports_arguments);

    /// Support for Unicode dashes
    /// Interpret Unicode dashes as the default double-hyphen
    for (auto & arg : common_arguments)
    {
        // replace em dash (U+2014)
        boost::replace_all(arg, "—", "--");
        // replace en dash (U+2013)
        boost::replace_all(arg, "–", "--");
        // replace mathematical minus (U+2212)
        boost::replace_all(arg, "−", "--");
    }

    po::variables_map options;
    OptionsDescription options_description;
    options_description.main_description.emplace(createOptionsDescription("Main options", terminal_width));

@@ -2670,7 +2735,14 @@ void ClientBase::init(int argc, char ** argv)

    profile_events.delay_ms = options["profile-events-delay-ms"].as<UInt64>();

    processOptions(options_description, options, external_tables_arguments, hosts_and_ports_arguments);
    argsToConfig(common_arguments, config(), 100);
    {
        std::unordered_set<std::string> alias_names;
        alias_names.reserve(options_description.main_description->options().size());
        for (const auto & option : options_description.main_description->options())
            alias_names.insert(option->long_name());
        argsToConfig(common_arguments, config(), 100, &alias_names);
    }

    clearPasswordFromCommandLine(argc, argv);

    /// Limit on total memory usage
@@ -497,7 +497,16 @@ The server successfully detected this situation and will download merged part fr

    M(MergeTreeAllRangesAnnouncementsSent, "The number of announcement sent from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \
    M(ReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the remote server side.") \
    M(MergeTreeReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \
    M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.")
    M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \
    \
    M(LogTest, "Number of log messages with level Test") \
    M(LogTrace, "Number of log messages with level Trace") \
    M(LogDebug, "Number of log messages with level Debug") \
    M(LogInfo, "Number of log messages with level Info") \
    M(LogWarning, "Number of log messages with level Warning") \
    M(LogError, "Number of log messages with level Error") \
    M(LogFatal, "Number of log messages with level Fatal") \


namespace ProfileEvents
{

@@ -612,6 +621,21 @@ void Counters::incrementNoTrace(Event event, Count amount)

    } while (current != nullptr);
}

void incrementForLogMessage(Poco::Message::Priority priority)
{
    switch (priority)
    {
        case Poco::Message::PRIO_TEST: increment(LogTest); break;
        case Poco::Message::PRIO_TRACE: increment(LogTrace); break;
        case Poco::Message::PRIO_DEBUG: increment(LogDebug); break;
        case Poco::Message::PRIO_INFORMATION: increment(LogInfo); break;
        case Poco::Message::PRIO_WARNING: increment(LogWarning); break;
        case Poco::Message::PRIO_ERROR: increment(LogError); break;
        case Poco::Message::PRIO_FATAL: increment(LogFatal); break;
        default: break;
    }
}

CountersIncrement::CountersIncrement(Counters::Snapshot const & snapshot)
{
    init();
@@ -3,10 +3,12 @@

#include <Common/VariableContext.h>
#include <base/types.h>
#include <base/strong_typedef.h>
#include <Poco/Message.h>
#include <atomic>
#include <memory>
#include <cstddef>


/** Implements global counters for various events happening in the application
  * - for high level profiling.
  * See .cpp for list of events.

@@ -111,6 +113,9 @@ namespace ProfileEvents

    /// and never sends profile event to trace log.
    void incrementNoTrace(Event event, Count amount = 1);

    /// Increment a counter for log messages.
    void incrementForLogMessage(Poco::Message::Priority priority);

    /// Get name of event by identifier. Returns statically allocated string.
    const char * getName(Event event);

@@ -6,6 +6,7 @@

#include <Poco/Logger.h>
#include <Poco/Message.h>
#include <Common/CurrentThread.h>
#include <Common/ProfileEvents.h>
#include <Common/LoggingFormatStringHelpers.h>

namespace Poco { class Logger; }

@@ -26,15 +27,15 @@ namespace

/// Logs a message to a specified logger with that level.
/// If more than one argument is provided,
/// the first argument is interpreted as template with {}-substitutions
/// and the latter arguments treat as values to substitute.
/// If only one argument is provided, it is threat as message without substitutions.
/// the first argument is interpreted as a template with {}-substitutions
/// and the latter arguments are treated as values to substitute.
/// If only one argument is provided, it is treated as a message without substitutions.

#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
{ \
    auto _logger = ::getLogger(logger); \
    const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \
        (DB::CurrentThread::get().getClientLogsLevel() >= (priority)); \
    if (_is_clients_log || _logger->is((PRIORITY))) \
    { \
        std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \

@@ -46,9 +47,10 @@ namespace

            file_function += "; "; \
            file_function += __PRETTY_FUNCTION__; \
            Poco::Message poco_message(_logger->name(), formatted_message, \
                (PRIORITY), file_function.c_str(), __LINE__, tryGetStaticFormatString(LOG_IMPL_FIRST_ARG(__VA_ARGS__))); \
            _channel->log(poco_message); \
        } \
        ProfileEvents::incrementForLogMessage(PRIORITY); \
    } \
} while (false)

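For context, a hypothetical call site: any of the usual `LOG_*` wrappers around `LOG_IMPL` now also bumps the matching ProfileEvents counter (here `LogDebug`) whenever the message passes the level check:

```cpp
/// Besides writing to the log channel, this now increments
/// ProfileEvents::LogDebug once per emitted message.
LOG_DEBUG(log, "Loaded {} parts from disk", parts.size());
```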
@@ -53,12 +53,12 @@ class IColumn;

    M(UInt64, max_query_size, DBMS_DEFAULT_MAX_QUERY_SIZE, "The maximum number of bytes of a query string parsed by the SQL parser. Data in the VALUES clause of INSERT queries is processed by a separate stream parser (that consumes O(1) RAM) and not affected by this restriction.", 0) \
    M(UInt64, interactive_delay, 100000, "The interval in microseconds to check if the request is cancelled, and to send progress info.", 0) \
    M(Seconds, connect_timeout, DBMS_DEFAULT_CONNECT_TIMEOUT_SEC, "Connection timeout if there are no replicas.", 0) \
    M(Milliseconds, connect_timeout_with_failover_ms, 50, "Connection timeout for selecting first healthy replica.", 0) \
    M(Milliseconds, connect_timeout_with_failover_secure_ms, 100, "Connection timeout for selecting first healthy replica (for secure connections).", 0) \
    M(Milliseconds, connect_timeout_with_failover_ms, 1000, "Connection timeout for selecting first healthy replica.", 0) \
    M(Milliseconds, connect_timeout_with_failover_secure_ms, 1000, "Connection timeout for selecting first healthy replica (for secure connections).", 0) \
    M(Seconds, receive_timeout, DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC, "Timeout for receiving data from network, in seconds. If no bytes were received in this interval, exception is thrown. If you set this setting on client, the 'send_timeout' for the socket will be also set on the corresponding connection end on the server.", 0) \
    M(Seconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "Timeout for sending data to network, in seconds. If the client needs to send some data but is not able to send any bytes in this interval, an exception is thrown. If you set this setting on the client, the 'receive_timeout' for the socket will also be set on the corresponding connection end on the server.", 0) \
    M(Seconds, tcp_keep_alive_timeout, 290 /* less than DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC */, "The time in seconds the connection needs to remain idle before TCP starts sending keepalive probes", 0) \
    M(Milliseconds, hedged_connection_timeout_ms, 100, "Connection timeout for establishing connection with replica for Hedged requests", 0) \
    M(Milliseconds, hedged_connection_timeout_ms, 50, "Connection timeout for establishing connection with replica for Hedged requests", 0) \
    M(Milliseconds, receive_data_timeout_ms, 2000, "Connection timeout for receiving first packet of data or packet with positive progress from replica", 0) \
    M(Bool, use_hedged_requests, true, "Use hedged requests for distributed queries", 0) \
    M(Bool, allow_changing_replica_until_first_data_packet, false, "Allow HedgedConnections to change replica until receiving first data packet", 0) \

@@ -80,6 +80,9 @@ namespace SettingsChangesHistory

/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
    {"23.4", {{"connect_timeout_with_failover_ms", 50, 1000, "Increase default connect timeout because of async connect"},
              {"connect_timeout_with_failover_secure_ms", 100, 1000, "Increase default secure connect timeout because of async connect"},
              {"hedged_connection_timeout_ms", 100, 50, "Start new connection in hedged requests after 50 ms instead of 100 to correspond with previous connect timeout"}}},
    {"23.3", {{"output_format_parquet_version", "1.0", "2.latest", "Use latest Parquet format version for output format"},
              {"input_format_json_ignore_unknown_keys_in_named_tuple", false, true, "Improve parsing JSON objects as named tuples"},
              {"input_format_native_allow_types_conversion", false, true, "Allow types conversion in Native input format"},
@@ -654,7 +654,8 @@ void CachedOnDiskReadBufferFromFile::predownload(FileSegmentPtr & file_segment)

    bytes_to_predownload = 0;

    chassert(file_segment->state() == FileSegment::State::PARTIALLY_DOWNLOADED_NO_CONTINUATION);
    chassert(file_segment->state() == FileSegment::State::PARTIALLY_DOWNLOADED_NO_CONTINUATION
        || file_segment->state() == FileSegment::State::SKIP_CACHE);
    LOG_TEST(log, "Bypassing cache because for {}", file_segment->getInfoForLog());

    read_type = ReadType::REMOTE_FS_READ_BYPASS_CACHE;
@@ -158,8 +158,9 @@ public:

    Int64 ms = 0;
    memcpy(reinterpret_cast<UInt8 *>(&ms) + 2, buffer, 6);

    if constexpr (std::endian::native == std::endian::little)
        std::reverse(reinterpret_cast<UInt8 *>(&ms), reinterpret_cast<UInt8 *>(&ms) + sizeof(Int64));
#    if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    ms = std::byteswap(ms);
#    endif

    return DecimalUtils::decimalFromComponents<DateTime64>(ms / intExp10(DATETIME_SCALE), ms % intExp10(DATETIME_SCALE), DATETIME_SCALE);
}
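A standalone sketch of the byte manipulation above (illustrative, not from this commit): the 6 payload bytes are copied into the middle of an 8-byte value so that, after a full byte swap on little-endian hosts, they land in the low-order positions — equivalent to reading a 48-bit big-endian integer:

```cpp
#include <bit>
#include <cstdint>
#include <cstring>

// Reads a 48-bit big-endian integer (e.g. a millisecond timestamp) from `buffer`.
int64_t readInt48BigEndian(const uint8_t * buffer)
{
    int64_t value = 0;
    // The offset of 2 leaves the two high-order source bytes zero.
    std::memcpy(reinterpret_cast<uint8_t *>(&value) + 2, buffer, 6);
    if constexpr (std::endian::native == std::endian::little)
        value = std::byteswap(value); // C++23; reversing the bytes by hand also works
    return value;
}
```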
@@ -123,6 +123,9 @@ public:

    }

    auto set = column_set->getData();
    if (!set)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Not-ready Set passed as the second argument for function '{}'", getName());

    auto set_types = set->getDataTypes();

    if (tuple && set_types.size() != 1 && set_types.size() == tuple->tupleSize())
@@ -1540,8 +1540,8 @@ void skipToUnescapedNextLineOrEOF(ReadBuffer & buf);

/// Skip to next character after next \0. If no \0 in stream, skip to end.
void skipNullTerminated(ReadBuffer & buf);

/** This function just copies the data from buffer's internal position (in.position())
  * to current position (from arguments) into memory.
/** This function just copies the data from buffer's position (in.position())
  * to current position (from arguments) appending into memory.
  */
void saveUpToPosition(ReadBuffer & in, Memory<Allocator<false>> & memory, char * current);

@@ -940,7 +940,8 @@ bool ExpressionActions::checkColumnIsAlwaysFalse(const String & column_name) con

    // Constant ColumnSet cannot be empty, so we only need to check non-constant ones.
    if (const auto * column_set = checkAndGetColumn<const ColumnSet>(action.node->column.get()))
    {
        if (column_set->getData()->isCreated() && column_set->getData()->getTotalRowCount() == 0)
        auto set = column_set->getData();
        if (set && set->isCreated() && set->getTotalRowCount() == 0)
            return true;
    }
}
@@ -150,7 +150,8 @@ FilterDAGInfoPtr generateFilterActions(

    for (const auto & column_str : prerequisite_columns)
    {
        ParserExpression expr_parser;
        expr_list->children.push_back(parseQuery(expr_parser, column_str, 0, context->getSettingsRef().max_parser_depth));
        /// We should add back quotes around column name as it can contain dots.
        expr_list->children.push_back(parseQuery(expr_parser, backQuoteIfNeed(column_str), 0, context->getSettingsRef().max_parser_depth));
    }

    select_ast->setExpression(ASTSelectQuery::Expression::TABLES, std::make_shared<ASTTablesInSelectQuery>());
@@ -1248,10 +1248,17 @@ void TCPHandler::receiveHello()

    Poco::Net::SecureStreamSocket secure_socket(socket());
    if (secure_socket.havePeerCertificate())
    {
        session->authenticate(
            SSLCertificateCredentials{user, secure_socket.peerCertificate().commonName()},
            getClientAddress(client_info));
        return;
        try
        {
            session->authenticate(
                SSLCertificateCredentials{user, secure_socket.peerCertificate().commonName()},
                getClientAddress(client_info));
            return;
        }
        catch (...)
        {
            tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication");
        }
    }
}
#endif
@@ -743,9 +743,16 @@ KeyCondition::KeyCondition(

    , single_point(single_point_)
    , strict(strict_)
{
    size_t key_index = 0;
    for (const auto & name : key_column_names)
    {
        if (!key_columns.contains(name))
        {
            key_columns[name] = key_columns.size();
            key_indices.push_back(key_index);
        }
        ++key_index;
    }

    auto filter_node = buildFilterNode(query, additional_filter_asts);

@@ -808,9 +815,16 @@ KeyCondition::KeyCondition(

    , single_point(single_point_)
    , strict(strict_)
{
    size_t key_index = 0;
    for (const auto & name : key_column_names)
    {
        if (!key_columns.contains(name))
        {
            key_columns[name] = key_columns.size();
            key_indices.push_back(key_index);
        }
        ++key_index;
    }

    if (!filter_dag)
    {

@@ -2561,25 +2575,6 @@ bool KeyCondition::alwaysFalse() const

    return rpn_stack[0] == 0;
}

size_t KeyCondition::getMaxKeyColumn() const
{
    size_t res = 0;
    for (const auto & element : rpn)
    {
        if (element.function == RPNElement::FUNCTION_NOT_IN_RANGE
            || element.function == RPNElement::FUNCTION_IN_RANGE
            || element.function == RPNElement::FUNCTION_IS_NULL
            || element.function == RPNElement::FUNCTION_IS_NOT_NULL
            || element.function == RPNElement::FUNCTION_IN_SET
            || element.function == RPNElement::FUNCTION_NOT_IN_SET)
        {
            if (element.key_column > res)
                res = element.key_column;
        }
    }
    return res;
}

bool KeyCondition::hasMonotonicFunctionsChain() const
{
    for (const auto & element : rpn)
@@ -286,9 +286,6 @@ public:

    bool alwaysFalse() const;

    /// Get the maximum number of the key element used in the condition.
    size_t getMaxKeyColumn() const;

    bool hasMonotonicFunctionsChain() const;

    /// Impose an additional condition: the value in the column `column` must be in the range `range`.

@@ -297,6 +294,9 @@ public:

    String toString() const;

    /// Get the key indices of key names used in the condition.
    const std::vector<size_t> & getKeyIndices() const { return key_indices; }

    /// Condition description for EXPLAIN query.
    struct Description
    {

@@ -478,6 +478,8 @@ private:

    RPN rpn;

    ColumnIndices key_columns;
    std::vector<size_t> key_indices;

    /// Expression which is used for key condition.
    const ExpressionActionsPtr key_expr;
    /// All intermediate columns are used to calculate key_expr.
@@ -1430,18 +1430,21 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(

        return res;
    }

    size_t used_key_size = key_condition.getMaxKeyColumn() + 1;
    const String & part_name = part->isProjectionPart() ? fmt::format("{}.{}", part->name, part->getParentPart()->name) : part->name;
    const auto & primary_key = metadata_snapshot->getPrimaryKey();
    auto index_columns = std::make_shared<ColumnsWithTypeAndName>();
    const auto & key_indices = key_condition.getKeyIndices();
    DataTypes key_types;
    for (size_t i : key_indices)
    {
        index_columns->emplace_back(ColumnWithTypeAndName{index[i], primary_key.data_types[i], primary_key.column_names[i]});
        key_types.emplace_back(primary_key.data_types[i]);
    }

    std::function<void(size_t, size_t, FieldRef &)> create_field_ref;
    /// If there are no monotonic functions, there is no need to save block reference.
    /// Passing explicit field to FieldRef allows to optimize ranges and shows better performance.
    const auto & primary_key = metadata_snapshot->getPrimaryKey();
    std::function<void(size_t, size_t, FieldRef &)> create_field_ref;
    if (key_condition.hasMonotonicFunctionsChain())
    {
        auto index_columns = std::make_shared<ColumnsWithTypeAndName>();
        for (size_t i = 0; i < used_key_size; ++i)
            index_columns->emplace_back(ColumnWithTypeAndName{index[i], primary_key.data_types[i], primary_key.column_names[i]});

        create_field_ref = [index_columns](size_t row, size_t column, FieldRef & field)
        {

@@ -1453,9 +1456,9 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(

    }
    else
    {
        create_field_ref = [&index](size_t row, size_t column, FieldRef & field)
        create_field_ref = [index_columns](size_t row, size_t column, FieldRef & field)
        {
            index[column]->get(row, field);
            (*index_columns)[column].column->get(row, field);
            // NULL_LAST
            if (field.isNull())
                field = POSITIVE_INFINITY;

@@ -1463,6 +1466,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(

    }

    /// NOTE Creating temporary Field objects to pass to KeyCondition.
    size_t used_key_size = key_indices.size();
    std::vector<FieldRef> index_left(used_key_size);
    std::vector<FieldRef> index_right(used_key_size);

@@ -1487,10 +1491,10 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(

            create_field_ref(range.end, i, index_right[i]);
        }
    }
    return key_condition.mayBeTrueInRange(
        used_key_size, index_left.data(), index_right.data(), primary_key.data_types);
    return key_condition.mayBeTrueInRange(used_key_size, index_left.data(), index_right.data(), key_types);
};

const String & part_name = part->isProjectionPart() ? fmt::format("{}.{}", part->name, part->getParentPart()->name) : part->name;
if (!key_condition.matchesExactContinuousRange())
{
    // Do exclusion search, where we drop ranges that do not match
@@ -771,7 +771,7 @@ public:

        configuration_.request_settings,
        std::nullopt,
        DBMS_DEFAULT_BUFFER_SIZE,
        threadPoolCallbackRunner<void>(IOThreadPool::get(), "S3ParallelRead"),
        threadPoolCallbackRunner<void>(IOThreadPool::get(), "S3ParallelWrite"),
        context->getWriteSettings()),
    compression_method,
    3);
@@ -314,7 +314,7 @@ static void extractPathImpl(const ActionsDAG::Node & node, Paths & res, ContextP

        return;

    auto set = column_set->getData();
    if (!set->isCreated())
    if (!set || !set->isCreated())
        return;

    if (!set->hasExplicitSetElements())
@@ -189,6 +189,11 @@ namespace

using ReplaceFunctionNowVisitor = InDepthNodeVisitor<OneTypeMatcher<ReplaceFunctionNowData>, true>;

inline UInt32 now()
{
    return static_cast<UInt32>(Poco::Timestamp().epochMicroseconds() / 1000000);
}

class ToIdentifierMatcher
{
public:

@@ -1020,7 +1025,7 @@ void StorageWindowView::threadFuncFireProc()

    std::lock_guard lock(fire_signal_mutex);
    /// TODO: consider using time_t instead (for every timestamp in this class)
    UInt32 timestamp_now = static_cast<UInt32>(std::time(nullptr));
    UInt32 timestamp_now = now();

    while (next_fire_signal <= timestamp_now)
    {

@@ -1195,7 +1200,7 @@ StorageWindowView::StorageWindowView(

    target_table_id = has_inner_target_table ? StorageID(table_id_.database_name, generateTargetTableName(table_id_)) : query.to_table_id;

    if (is_proctime)
        next_fire_signal = getWindowUpperBound(static_cast<UInt32>(std::time(nullptr)));
        next_fire_signal = getWindowUpperBound(now());

    std::exchange(has_inner_table, true);
    if (!attach_)

@@ -1464,7 +1469,7 @@ void StorageWindowView::writeIntoWindowView(

        column.type = std::make_shared<DataTypeDateTime>();
    else
        column.type = std::make_shared<DataTypeDateTime>(timezone);
    column.column = column.type->createColumnConst(0, Field(std::time(nullptr)));
    column.column = column.type->createColumnConst(0, Field(now()));

    auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
    auto adding_column_actions = std::make_shared<ExpressionActions>(
@@ -8,6 +8,7 @@

#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTSubquery.h>
#include <Parsers/parseQuery.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/StorageExecutable.h>

@@ -25,7 +26,8 @@ namespace ErrorCodes

{
    extern const int LOGICAL_ERROR;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
    extern const int UNSUPPORTED_METHOD;
    extern const int BAD_ARGUMENTS;
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

std::vector<size_t> TableFunctionExecutable::skipAnalysisForArguments(const QueryTreeNodePtr & query_node_table_function, ContextPtr) const

@@ -61,6 +63,21 @@ void TableFunctionExecutable::parseArguments(const ASTPtr & ast_function, Contex

            "Table function '{}' requires minimum 3 arguments: script_name, format, structure, [input_query...]",
            getName());

    auto check_argument = [&](size_t i, const std::string & argument_name)
    {
        if (!args[i]->as<ASTIdentifier>() &&
            !args[i]->as<ASTLiteral>() &&
            !args[i]->as<ASTQueryParameter>() &&
            !args[i]->as<ASTSubquery>())
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                "Illegal type of argument '{}' for table function '{}': must be an identifier or string literal",
                argument_name, getName());
    };

    check_argument(0, "script_name");
    check_argument(1, "format");
    check_argument(2, "structure");

    for (size_t i = 0; i <= 2; ++i)
        args[i] = evaluateConstantExpressionOrIdentifierAsLiteral(args[i], context);

@@ -83,15 +100,18 @@ void TableFunctionExecutable::parseArguments(const ASTPtr & ast_function, Contex

        }
        else
        {
            ASTPtr query = args[i]->children.at(0);
            if (query->as<ASTSelectWithUnionQuery>())
            ASTPtr query;
            if (!args[i]->children.empty())
                query = args[i]->children.at(0);

            if (query && query->as<ASTSelectWithUnionQuery>())
            {
                input_queries.emplace_back(std::move(query));
            }
            else
            {
                throw Exception(
                    ErrorCodes::UNSUPPORTED_METHOD,
                    ErrorCodes::BAD_ARGUMENTS,
                    "Table function '{}' argument is invalid {}",
                    getName(),
                    args[i]->formatForErrorMessage());
@@ -180,6 +180,8 @@ def check_pr_description(pr_info: PRInfo) -> Tuple[str, str]:

            entry = " ".join(entry_lines)
            # Don't accept changelog entries like '...'.
            entry = re.sub(r"[#>*_.\- ]", "", entry)
            # Don't accept changelog entries like 'Close #12345'.
            entry = re.sub(r"^[\w\-\s]{0,10}#?\d{5,6}\.?$", "", entry)
        else:
            i += 1

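To illustrate the new pattern (an illustrative check written in C++, not from this commit): entries that are essentially just an issue reference match the pattern and are blanked out, while a real sentence survives:

```cpp
#include <cassert>
#include <regex>

int main()
{
    // Same pattern as the Python check: up to 10 word/space/dash characters,
    // an optional '#', a 5-6 digit issue number, and an optional trailing dot.
    const std::regex junk(R"(^[\w\-\s]{0,10}#?\d{5,6}\.?$)");

    assert(std::regex_match("Close #12345", junk));                    // rejected as junk
    assert(std::regex_match("Fixes #48595.", junk));                   // rejected as junk
    assert(!std::regex_match("Fix possible segfault in cache", junk)); // kept
}
```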
@@ -1,243 +0,0 @@

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance(
    "node1",
    with_zookeeper=True,
    image="yandex/clickhouse-server",
    tag="19.4.5.35",
    stay_alive=True,
    with_installed_binary=True,
)
node2 = cluster.add_instance(
    "node2",
    with_zookeeper=True,
    image="yandex/clickhouse-server",
    tag="19.4.5.35",
    stay_alive=True,
    with_installed_binary=True,
)
node3 = cluster.add_instance(
    "node3",
    with_zookeeper=True,
    image="yandex/clickhouse-server",
    tag="19.4.5.35",
    stay_alive=True,
    with_installed_binary=True,
)
node4 = cluster.add_instance("node4")


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()


def test_backup_from_old_version(started_cluster):
    node1.query(
        "CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()"
    )

    node1.query("INSERT INTO source_table VALUES(1, '1')")

    assert node1.query("SELECT COUNT() FROM source_table") == "1\n"

    node1.query("ALTER TABLE source_table ADD COLUMN Y String")

    node1.query("ALTER TABLE source_table FREEZE PARTITION tuple();")

    # We don't want to wait for the old version to finish properly, just terminate it
    node1.restart_with_latest_version(fix_metadata=True, signal=9)

    node1.query(
        "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table1', '1') ORDER BY tuple()"
    )

    node1.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')")

    assert node1.query("SELECT COUNT() FROM dest_table") == "1\n"

    node1.exec_in_container(
        ["find", "/var/lib/clickhouse/shadow/1/data/default/source_table"]
    )
    node1.exec_in_container(
        [
            "cp",
            "-r",
            "/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/",
            "/var/lib/clickhouse/data/default/dest_table/detached",
        ]
    )

    assert node1.query("SELECT COUNT() FROM dest_table") == "1\n"

    node1.query("ALTER TABLE dest_table ATTACH PARTITION tuple()")

    assert node1.query("SELECT sum(A) FROM dest_table") == "3\n"

    node1.query("ALTER TABLE dest_table DETACH PARTITION tuple()")

    node1.query("ALTER TABLE dest_table ATTACH PARTITION tuple()")

    assert node1.query("SELECT sum(A) FROM dest_table") == "3\n"

    assert node1.query("CHECK TABLE dest_table") == "1\n"

    node1.query("DROP TABLE source_table")
    node1.query("DROP TABLE dest_table")


def test_backup_from_old_version_setting(started_cluster):
    node2.query(
        "CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()"
    )

    node2.query("INSERT INTO source_table VALUES(1, '1')")

    assert node2.query("SELECT COUNT() FROM source_table") == "1\n"

    node2.query("ALTER TABLE source_table ADD COLUMN Y String")

    node2.query("ALTER TABLE source_table FREEZE PARTITION tuple();")

    # We don't want to wait for the old version to finish properly, just terminate it
    node2.restart_with_latest_version(fix_metadata=True, signal=9)

    node2.query(
        "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table2', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1"
    )

    node2.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')")

    assert node2.query("SELECT COUNT() FROM dest_table") == "1\n"

    node2.exec_in_container(
        [
            "cp",
            "-r",
            "/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/",
            "/var/lib/clickhouse/data/default/dest_table/detached",
        ]
    )

    assert node2.query("SELECT COUNT() FROM dest_table") == "1\n"

    node2.query("ALTER TABLE dest_table ATTACH PARTITION tuple()")

    assert node2.query("SELECT sum(A) FROM dest_table") == "3\n"

    node2.query("ALTER TABLE dest_table DETACH PARTITION tuple()")

    node2.query("ALTER TABLE dest_table ATTACH PARTITION tuple()")

    assert node2.query("SELECT sum(A) FROM dest_table") == "3\n"

    assert node2.query("CHECK TABLE dest_table") == "1\n"

    node2.query("DROP TABLE source_table")
    node2.query("DROP TABLE dest_table")


def test_backup_from_old_version_config(started_cluster):
    node3.query(
        "CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()"
    )

    node3.query("INSERT INTO source_table VALUES(1, '1')")

    assert node3.query("SELECT COUNT() FROM source_table") == "1\n"

    node3.query("ALTER TABLE source_table ADD COLUMN Y String")

    node3.query("ALTER TABLE source_table FREEZE PARTITION tuple();")

    def callback(n):
        n.replace_config(
            "/etc/clickhouse-server/merge_tree_settings.xml",
            "<clickhouse><merge_tree><enable_mixed_granularity_parts>1</enable_mixed_granularity_parts></merge_tree></clickhouse>",
        )

    # We don't want to wait for the old version to finish properly, just terminate it
    node3.restart_with_latest_version(
        callback_onstop=callback, fix_metadata=True, signal=9
    )

    node3.query(
        "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table3', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1"
    )

    node3.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')")

    assert node3.query("SELECT COUNT() FROM dest_table") == "1\n"

    node3.exec_in_container(
        [
            "cp",
            "-r",
            "/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/",
            "/var/lib/clickhouse/data/default/dest_table/detached",
        ]
    )

    assert node3.query("SELECT COUNT() FROM dest_table") == "1\n"

    node3.query("ALTER TABLE dest_table ATTACH PARTITION tuple()")

    assert node3.query("SELECT sum(A) FROM dest_table") == "3\n"

    node3.query("ALTER TABLE dest_table DETACH PARTITION tuple()")

    node3.query("ALTER TABLE dest_table ATTACH PARTITION tuple()")

    assert node3.query("SELECT sum(A) FROM dest_table") == "3\n"

    assert node3.query("CHECK TABLE dest_table") == "1\n"

    node3.query("DROP TABLE source_table")
    node3.query("DROP TABLE dest_table")


def test_backup_and_alter(started_cluster):
    node4.query(
        "CREATE DATABASE test ENGINE=Ordinary",
        settings={"allow_deprecated_database_ordinary": 1},
    )  # Different path in shadow/ with Atomic

    node4.query(
        "CREATE TABLE test.backup_table(A Int64, B String, C Date) Engine = MergeTree order by tuple()"
    )

    node4.query("INSERT INTO test.backup_table VALUES(2, '2', toDate('2019-10-01'))")

    node4.query("ALTER TABLE test.backup_table FREEZE PARTITION tuple();")

    node4.query("ALTER TABLE test.backup_table DROP COLUMN C")

    node4.query("ALTER TABLE test.backup_table MODIFY COLUMN B UInt64")

    node4.query("ALTER TABLE test.backup_table DROP PARTITION tuple()")

    node4.exec_in_container(
        [
            "cp",
            "-r",
            "/var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/",
            "/var/lib/clickhouse/data/test/backup_table/detached",
        ]
    )

    node4.query("ALTER TABLE test.backup_table ATTACH PARTITION tuple()")

    assert node4.query("SELECT sum(A) FROM test.backup_table") == "2\n"
    assert node4.query("SELECT B + 2 FROM test.backup_table") == "4\n"

    node4.query("DROP TABLE test.backup_table")
    node4.query("DROP DATABASE test")
@ -38,6 +38,116 @@ def started_cluster():
        cluster.shutdown()


config = """<clickhouse>
    <openSSL>
        <client>
            <verificationMode>none</verificationMode>

            <certificateFile>{certificateFile}</certificateFile>
            <privateKeyFile>{privateKeyFile}</privateKeyFile>
            <caConfig>{caConfig}</caConfig>

            <invalidCertificateHandler>
                <name>AcceptCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>
</clickhouse>"""


def execute_query_native(node, query, user, cert_name, password=None):
    config_path = f"{SCRIPT_DIR}/configs/client.xml"

    formatted = config.format(
        certificateFile=f"{SCRIPT_DIR}/certs/{cert_name}-cert.pem",
        privateKeyFile=f"{SCRIPT_DIR}/certs/{cert_name}-key.pem",
        caConfig=f"{SCRIPT_DIR}/certs/ca-cert.pem",
    )

    with open(config_path, "w") as file:
        file.write(formatted)

    client = Client(
        node.ip_address,
        9440,
        command=cluster.client_bin_path,
        secure=True,
        config=config_path,
    )

    # Remove the temporary client config whether the query succeeds or raises.
    try:
        return client.query(query, user=user, password=password)
    finally:
        remove(config_path)


def test_native():
    assert (
        execute_query_native(
            instance, "SELECT currentUser()", user="john", cert_name="client1"
        )
        == "john\n"
    )
    assert (
        execute_query_native(
            instance, "SELECT currentUser()", user="lucy", cert_name="client2"
        )
        == "lucy\n"
    )
    assert (
        execute_query_native(
            instance, "SELECT currentUser()", user="lucy", cert_name="client3"
        )
        == "lucy\n"
    )


def test_native_wrong_cert():
    # Wrong certificate: different user's certificate
    with pytest.raises(Exception) as err:
        execute_query_native(
            instance, "SELECT currentUser()", user="john", cert_name="client2"
        )
    assert "AUTHENTICATION_FAILED" in str(err.value)

    # Wrong certificate: self-signed certificate.
    # In this case clickhouse-client itself will throw an error
    with pytest.raises(Exception) as err:
        execute_query_native(
            instance, "SELECT currentUser()", user="john", cert_name="wrong"
        )
    assert "UNKNOWN_CA" in str(err.value)


def test_native_fallback_to_password():
    # Unrelated certificate, correct password
    assert (
        execute_query_native(
            instance,
            "SELECT currentUser()",
            user="jane",
            cert_name="client2",
            password="qwe123",
        )
        == "jane\n"
    )

    # Unrelated certificate, wrong password
    with pytest.raises(Exception) as err:
        execute_query_native(
            instance,
            "SELECT currentUser()",
            user="jane",
            cert_name="client2",
            password="wrong",
        )
    assert "AUTHENTICATION_FAILED" in str(err.value)


def get_ssl_context(cert_name):
    context = WrapSSLContextWithSNI(SSL_HOST, ssl.PROTOCOL_TLS_CLIENT)
    context.load_verify_locations(cafile=f"{SCRIPT_DIR}/certs/ca-cert.pem")
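For context, the users exercised above (john, lucy) authenticate by the common name of their client certificate; the test wires this up in its users config. A hypothetical SQL equivalent, with CN values assumed for illustration only:

# Sketch: map certificate CNs to users. The CN strings are assumptions;
# the test's users config does the equivalent declaratively.
instance.query(
    "CREATE USER IF NOT EXISTS john IDENTIFIED WITH ssl_certificate CN 'client1'"
)
instance.query(
    "CREATE USER IF NOT EXISTS lucy IDENTIFIED WITH ssl_certificate CN 'client2'"
)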
@ -69,53 +179,6 @@ def execute_query_https(
    return response.decode("utf-8")


config = """<clickhouse>
    <openSSL>
        <client>
            <verificationMode>none</verificationMode>

            <certificateFile>{certificateFile}</certificateFile>
            <privateKeyFile>{privateKeyFile}</privateKeyFile>
            <caConfig>{caConfig}</caConfig>

            <invalidCertificateHandler>
                <name>AcceptCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>
</clickhouse>"""


def execute_query_native(node, query, user, cert_name):
    config_path = f"{SCRIPT_DIR}/configs/client.xml"

    formatted = config.format(
        certificateFile=f"{SCRIPT_DIR}/certs/{cert_name}-cert.pem",
        privateKeyFile=f"{SCRIPT_DIR}/certs/{cert_name}-key.pem",
        caConfig=f"{SCRIPT_DIR}/certs/ca-cert.pem",
    )

    file = open(config_path, "w")
    file.write(formatted)
    file.close()

    client = Client(
        node.ip_address,
        9440,
        command=cluster.client_bin_path,
        secure=True,
        config=config_path,
    )

    try:
        result = client.query(query, user=user)
        remove(config_path)
        return result
    except:
        remove(config_path)
        raise


def test_https():
    assert (
        execute_query_https("SELECT currentUser()", user="john", cert_name="client1")
@ -131,27 +194,6 @@ def test_https():
    )


def test_native():
    assert (
        execute_query_native(
            instance, "SELECT currentUser()", user="john", cert_name="client1"
        )
        == "john\n"
    )
    assert (
        execute_query_native(
            instance, "SELECT currentUser()", user="lucy", cert_name="client2"
        )
        == "lucy\n"
    )
    assert (
        execute_query_native(
            instance, "SELECT currentUser()", user="lucy", cert_name="client3"
        )
        == "lucy\n"
    )


def test_https_wrong_cert():
    # Wrong certificate: different user's certificate
    with pytest.raises(Exception) as err:
@ -178,23 +220,6 @@ def test_https_wrong_cert():
    )


def test_native_wrong_cert():
    # Wrong certificate: different user's certificate
    with pytest.raises(Exception) as err:
        execute_query_native(
            instance, "SELECT currentUser()", user="john", cert_name="client2"
        )
    assert "AUTHENTICATION_FAILED" in str(err.value)

    # Wrong certificate: self-signed certificate.
    # In this case clickhouse-client itself will throw an error
    with pytest.raises(Exception) as err:
        execute_query_native(
            instance, "SELECT currentUser()", user="john", cert_name="wrong"
        )
    assert "UNKNOWN_CA" in str(err.value)


def test_https_non_ssl_auth():
    # Users with non-SSL authentication are allowed; in this case we can skip sending a client certificate entirely (because "verificationMode" is set to "relaxed").
    # assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False) == "peter\n"
@ -6,3 +6,4 @@ FROM executable(\'\', \'JSON\', \'data String\', SETTINGS max_command_execution_
--------------------
SELECT data
FROM executable(\'\', \'JSON\', \'data String\', SETTINGS max_command_execution_time = 100, command_read_timeout = 1)
--------------------
@ -3,3 +3,7 @@ SELECT '--------------------';
EXPLAIN SYNTAX SELECT * from executable('', 'JSON', 'data String', SETTINGS max_command_execution_time=100);
SELECT '--------------------';
EXPLAIN SYNTAX SELECT * from executable('', 'JSON', 'data String', SETTINGS max_command_execution_time=100, command_read_timeout=1);
SELECT '--------------------';

SELECT * from executable('JSON', 'data String', SETTINGS max_command_execution_time=100); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT * from executable('JSON', 'data String', 'TEST', 'TEST'); -- { serverError BAD_ARGUMENTS }
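The two negative cases above only exercise argument validation: the first argument of executable() must be a script name, not a type. For context, a minimal user script such a call could invoke might look like the sketch below; the file name is hypothetical, the script would live in the server's user_scripts directory, and JSONEachRow is assumed as the exchange format:

#!/usr/bin/env python3
# rows.py - hypothetical script for:
#   SELECT * FROM executable('rows.py', 'JSONEachRow', 'data String')
import json
import sys

for i in range(3):
    # Emit one JSON object per line, matching the declared structure `data String`.
    sys.stdout.write(json.dumps({"data": f"row {i}"}) + "\n")
sys.stdout.flush()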
@ -90,16 +90,16 @@ ORDER BY (coverage, situation_name, NAME_toe, NAME_cockroach);

insert into test select * from generateRandom() limit 10;

with dissonance as (
    Select cast(toStartOfInterval(coverage, INTERVAL 1 day) as Date) as flour, count() as regulation
    from test
    group by flour having flour >= toDate(now())-100
),
cheetah as (
    Select flour, regulation from dissonance
    union distinct
    Select toDate(now())-1, ifnull((select regulation from dissonance where flour = toDate(now())-1),0) as regulation
)
Select flour, regulation from cheetah order by flour with fill step 1 limit 100 format Null;

drop table test;
@ -0,0 +1 @@
200
99
tests/queries/0_stateless/02540_duplicate_primary_key2.sql
Normal file
@ -0,0 +1,99 @@
drop table if exists test;

set allow_suspicious_low_cardinality_types = 1;

CREATE TABLE test
(
    `timestamp` DateTime,
    `latitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)),
    `longitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)),
    `xxxx1` LowCardinality(UInt8),
    `xxxx2` LowCardinality(Nullable(Int16)),
    `xxxx3` LowCardinality(Nullable(Int16)),
    `xxxx4` Nullable(Int32),
    `xxxx5` LowCardinality(Nullable(Int32)),
    `xxxx6` Nullable(Int32),
    `xxxx7` Nullable(Int32),
    `xxxx8` LowCardinality(Int32),
    `xxxx9` LowCardinality(Nullable(Int16)),
    `xxxx10` LowCardinality(Nullable(Int16)),
    `xxxx11` LowCardinality(Nullable(Int16)),
    `xxxx12` LowCardinality(String),
    `xxxx13` Nullable(Float32),
    `xxxx14` LowCardinality(String),
    `xxxx15` LowCardinality(Nullable(String)),
    `xxxx16` LowCardinality(String),
    `xxxx17` LowCardinality(String),
    `xxxx18` FixedString(19),
    `xxxx19` FixedString(17),
    `xxxx20` LowCardinality(UInt8),
    `xxxx21` LowCardinality(Nullable(Int16)),
    `xxxx22` LowCardinality(Nullable(Int16)),
    `xxxx23` LowCardinality(Nullable(Int16)),
    `xxxx24` LowCardinality(Nullable(Int16)),
    `xxxx25` LowCardinality(Nullable(Int16)),
    `xxxx26` LowCardinality(Nullable(Int16)),
    `xxxx27` Nullable(Float32),
    `xxxx28` LowCardinality(Nullable(String)),
    `xxxx29` LowCardinality(String),
    `xxxx30` LowCardinality(String),
    `xxxx31` LowCardinality(Nullable(String)),
    `xxxx32` UInt64,
    PROJECTION cumsum_projection_simple
    (
        SELECT
            xxxx1,
            toStartOfInterval(timestamp, toIntervalMonth(1)),
            toStartOfWeek(timestamp, 8),
            toStartOfInterval(timestamp, toIntervalDay(1)),
            xxxx17,
            xxxx16,
            xxxx14,
            xxxx9,
            xxxx10,
            xxxx21,
            xxxx22,
            xxxx11,
            sum(multiIf(xxxx21 IS NULL, 0, 1)),
            sum(multiIf(xxxx22 IS NULL, 0, 1)),
            sum(multiIf(xxxx23 IS NULL, 0, 1)),
            max(toStartOfInterval(timestamp, toIntervalDay(1))),
            max(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')),
            min(toStartOfInterval(timestamp, toIntervalDay(1))),
            min(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')),
            count(),
            sum(1),
            COUNTDistinct(xxxx16),
            COUNTDistinct(xxxx31),
            COUNTDistinct(xxxx14),
            COUNTDistinct(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'))
        GROUP BY
            xxxx1,
            toStartOfInterval(timestamp, toIntervalMonth(1)),
            toStartOfWeek(timestamp, 8),
            toStartOfInterval(timestamp, toIntervalDay(1)),
            xxxx1,
            toStartOfInterval(timestamp, toIntervalMonth(1)),
            toStartOfWeek(timestamp, 8),
            toStartOfInterval(timestamp, toIntervalDay(1)),
            xxxx17,
            xxxx16,
            xxxx14,
            xxxx9,
            xxxx10,
            xxxx21,
            xxxx22,
            xxxx11
    )
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(timestamp)
ORDER BY (xxxx17, xxxx14, xxxx16, toStartOfDay(timestamp), left(xxxx19, 10), timestamp);

INSERT INTO test SELECT * replace 1 as xxxx16 replace 1 as xxxx1 replace '2022-02-02 01:00:00' as timestamp replace 'Airtel' as xxxx14 FROM generateRandom() LIMIT 100;
INSERT INTO test SELECT * replace 1 as xxxx16 replace 1 as xxxx1 replace '2022-02-02 01:00:00' as timestamp replace 'BSNL' as xxxx14 FROM generateRandom() LIMIT 100;
INSERT INTO test SELECT * replace 1 as xxxx16 replace 1 as xxxx1 replace '2022-02-02 01:00:00' as timestamp replace 'xxx' as xxxx14 FROM generateRandom() LIMIT 100;

select sum(1) from test where toStartOfInterval(timestamp, INTERVAL 1 day) >= TIMESTAMP '2022-02-01 01:00:00' and xxxx14 in ('Airtel', 'BSNL') and xxxx1 = 1 GROUP BY xxxx16;

drop table test;
@ -0,0 +1,17 @@
Test 1: Check that you can specify options with dashes, not underscores
Test 1.1: Check option from config - server_logs_file
1
OK
1
OK
1
OK
Test 1.2: Check some option from Settings.h - allow_deprecated_syntax_for_merge_tree
0
Test 2: check that unicode dashes are handled correctly
Test 2.1: check em-dash support
1
Test 2.2: check en-dash support
1
Test 2.3: check mathematical minus support
1
52
tests/queries/0_stateless/02718_cli_dashed_options_parsing.sh
Executable file
@ -0,0 +1,52 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

file_name=${CLICKHOUSE_TEST_UNIQUE_NAME}
file_name_1=${file_name}_1
file_name_2=${file_name}_2
file_name_3=${file_name}_3

#################
echo "Test 1: Check that you can specify options with dashes, not underscores"

[[ -e $file_name_1 ]] && rm $file_name_1
[[ -e $file_name_2 ]] && rm $file_name_2
[[ -e $file_name_3 ]] && rm $file_name_3

echo "Test 1.1: Check option from config - server_logs_file"

$CLICKHOUSE_LOCAL --log-level=debug --server-logs-file=$file_name_1 -q "SELECT 1;" 2> /dev/null
[[ -e $file_name_1 ]] && echo OK
$CLICKHOUSE_LOCAL --log-level=debug --server-logs-file $file_name_2 -q "SELECT 1;" 2> /dev/null
[[ -e $file_name_2 ]] && echo OK
$CLICKHOUSE_LOCAL --log-level=debug --server_logs_file $file_name_3 -q "SELECT 1;" 2> /dev/null
[[ -e $file_name_3 ]] && echo OK

echo "Test 1.2: Check some option from Settings.h - allow_deprecated_syntax_for_merge_tree"

$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test";
$CLICKHOUSE_CLIENT --allow-deprecated-syntax-for-merge-tree=1 --query="CREATE TABLE test (d Date, s String) ENGINE = MergeTree(d, s, 8192)";
$CLICKHOUSE_CLIENT --query="DROP TABLE test";
echo $?

#################
echo "Test 2: check that unicode dashes are handled correctly"

echo "Test 2.1: check em-dash support"
# Unicode code: U+2014
$CLICKHOUSE_LOCAL —query "SELECT 1";

echo "Test 2.2: check en-dash support"
# Unicode code: U+2013
$CLICKHOUSE_LOCAL –query "SELECT 1";

echo "Test 2.3: check mathematical minus support"
# Unicode code: U+2212
$CLICKHOUSE_LOCAL −query "SELECT 1";

rm $file_name_1
rm $file_name_2
rm $file_name_3
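What the script checks, modeled in a few lines of Python (an illustration of the observable behavior, not the actual client code): leading dashes are stripped, inner dashes map onto the canonical underscore spelling, and the em-dash, en-dash, and mathematical minus are treated like an ordinary dash:

def normalize_option(name: str) -> str:
    # U+2014 em-dash, U+2013 en-dash, U+2212 minus sign
    for dash in ("\u2014", "\u2013", "\u2212"):
        name = name.replace(dash, "-")
    return name.lstrip("-").replace("-", "_")

assert normalize_option("--server-logs-file") == "server_logs_file"
assert normalize_option("\u2014query") == "query"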
@ -0,0 +1 @@
2024-01-01 Hello World
@ -0,0 +1,6 @@
CREATE table if not exists table_with_dot_column (date Date, regular_column String, `other_column.2` String) ENGINE = MergeTree() ORDER BY date;
INSERT INTO table_with_dot_column select '2020-01-01', 'Hello', 'World';
INSERT INTO table_with_dot_column select '2024-01-01', 'Hello', 'World';
CREATE ROW POLICY IF NOT EXISTS row_policy ON table_with_dot_column USING toDate(date) >= today() - 30 TO ALL;
SELECT * FROM table_with_dot_column;
DROP TABLE table_with_dot_column;
@ -0,0 +1,2 @@
0
1
5
tests/queries/0_stateless/02722_log_profile_events.sql
Normal file
@ -0,0 +1,5 @@
-- There are no fatal errors:
SELECT count() FROM system.events WHERE event = 'LogFatal';

-- It counts the trace log messages:
SELECT count() > 0 FROM system.events WHERE event = 'LogTrace';
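To eyeball these per-level log counters on a running server, a query over the HTTP interface works; the sketch below assumes a local server on the default port 8123 with no authentication:

import urllib.parse
import urllib.request

# List the log-message profile events (the test above checks LogFatal and LogTrace).
query = "SELECT event, value FROM system.events WHERE event LIKE 'Log%' FORMAT TSV"
url = "http://localhost:8123/?query=" + urllib.parse.quote(query)
with urllib.request.urlopen(url) as response:
    print(response.read().decode())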
@ -67,6 +67,7 @@ EXTERN_TYPES_EXCLUDES=(
ProfileEvents::Counters
ProfileEvents::end
ProfileEvents::increment
ProfileEvents::incrementForLogMessage
ProfileEvents::getName
ProfileEvents::Type
ProfileEvents::TypeEnum
@ -1,9 +1,11 @@
v23.3.2.37-lts 2023-04-22
v23.3.1.2823-lts 2023-03-31
v23.2.5.46-stable 2023-04-03
v23.2.4.12-stable 2023-03-10
v23.2.3.17-stable 2023-03-06
v23.2.2.20-stable 2023-03-01
v23.2.1.2537-stable 2023-02-23
v23.1.7.30-stable 2023-04-22
v23.1.6.42-stable 2023-04-03
v23.1.5.24-stable 2023-03-10
v23.1.4.58-stable 2023-03-01
@ -36,6 +38,7 @@ v22.9.4.32-stable 2022-10-26
v22.9.3.18-stable 2022-09-30
v22.9.2.7-stable 2022-09-23
v22.9.1.2603-stable 2022-09-22
v22.8.17.17-lts 2023-04-22
v22.8.16.32-lts 2023-04-04
v22.8.15.23-lts 2023-03-10
v22.8.14.53-lts 2023-02-27