Merge branch 'master' into turn_on_s3_tests

commit d85c3ec69e
Author: mergify[bot]
Date: 2022-05-31 11:58:16 +00:00 (committed by GitHub)
30 changed files with 508 additions and 188 deletions


@@ -215,8 +215,8 @@ jobs:
 fetch-depth: 0 # For a proper version and performance artifacts
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -259,8 +259,8 @@ jobs:
 fetch-depth: 0 # For a proper version and performance artifacts
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -305,8 +305,8 @@ jobs:
 fetch-depth: 0 # otherwise we will have no info about contributors
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -350,8 +350,8 @@ jobs:
 # uses: actions/checkout@v2
 # - name: Build
 # run: |
-# git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-# git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+# git -C "$GITHUB_WORKSPACE" submodule sync
+# git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 # sudo rm -fr "$TEMP_PATH"
 # mkdir -p "$TEMP_PATH"
 # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -395,8 +395,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -440,8 +440,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -485,8 +485,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -530,8 +530,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -575,8 +575,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -623,8 +623,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -668,8 +668,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -715,8 +715,8 @@ jobs:
 fetch-depth: 0 # otherwise we will have no info about contributors
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -762,8 +762,8 @@ jobs:
 fetch-depth: 0 # otherwise we will have no info about contributors
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -809,8 +809,8 @@ jobs:
 fetch-depth: 0 # otherwise we will have no info about contributors
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -856,8 +856,8 @@ jobs:
 fetch-depth: 0 # otherwise we will have no info about contributors
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -903,8 +903,8 @@ jobs:
 fetch-depth: 0 # otherwise we will have no info about contributors
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"


@@ -277,8 +277,8 @@ jobs:
 fetch-depth: 0 # for performance artifact
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -322,8 +322,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -367,8 +367,8 @@ jobs:
 # uses: actions/checkout@v2
 # - name: Build
 # run: |
-# git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-# git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+# git -C "$GITHUB_WORKSPACE" submodule sync
+# git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 # sudo rm -fr "$TEMP_PATH"
 # mkdir -p "$TEMP_PATH"
 # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -414,8 +414,8 @@ jobs:
 fetch-depth: 0 # for performance artifact
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -459,8 +459,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -504,8 +504,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -549,8 +549,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -594,8 +594,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -639,8 +639,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -687,8 +687,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -732,8 +732,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -777,8 +777,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -822,8 +822,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -867,8 +867,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -912,8 +912,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
@@ -957,8 +957,8 @@ jobs:
 uses: actions/checkout@v2
 - name: Build
 run: |
-git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+git -C "$GITHUB_WORKSPACE" submodule sync
+git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
 sudo rm -fr "$TEMP_PATH"
 mkdir -p "$TEMP_PATH"
 cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"

.gitmodules

@@ -79,10 +79,10 @@
 	url = https://github.com/ClickHouse/snappy.git
 [submodule "contrib/cppkafka"]
 	path = contrib/cppkafka
-	url = https://github.com/mfontanini/cppkafka.git
+	url = https://github.com/ClickHouse/cppkafka.git
 [submodule "contrib/brotli"]
 	path = contrib/brotli
-	url = https://github.com/google/brotli.git
+	url = https://github.com/ClickHouse/brotli.git
 [submodule "contrib/h3"]
 	path = contrib/h3
 	url = https://github.com/ClickHouse/h3
@@ -144,7 +144,7 @@
 	ignore = untracked
 [submodule "contrib/msgpack-c"]
 	path = contrib/msgpack-c
-	url = https://github.com/msgpack/msgpack-c
+	url = https://github.com/ClickHouse/msgpack-c
 [submodule "contrib/libcpuid"]
 	path = contrib/libcpuid
 	url = https://github.com/ClickHouse/libcpuid.git


@@ -5,6 +5,11 @@ if (NOT ENABLE_AMQPCPP)
     return()
 endif()
 
+if (NOT TARGET ch_contrib::uv)
+    message(STATUS "Not using AMQP-CPP because libuv is disabled")
+    return()
+endif()
+
 set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP")
 set (SRCS

contrib/arrow

@@ -1 +1 @@
-Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
+Subproject commit 6f274b737c66a6c39bab0d3bdf6cf7d139ef06f5

contrib/brotli

@@ -1 +1 @@
-Subproject commit 63be8a99401992075c23e99f7c84de1c653e39e2
+Subproject commit 5bd78768449751a78d4b4c646b0612917986f5b1


@@ -5,6 +5,11 @@ if (NOT ENABLE_CASSANDRA)
     return()
 endif()
 
+if (NOT TARGET ch_contrib::uv)
+    message(STATUS "Not using cassandra because libuv is disabled")
+    return()
+endif()
+
 if (APPLE)
     set(CMAKE_MACOSX_RPATH ON)
 endif()

contrib/cppkafka

@@ -1 +1 @@
-Subproject commit 5a119f689f8a4d90d10a9635e7ee2bee5c127de1
+Subproject commit 64bd67db12b9c705e9127439a5b05b351d9df7da

contrib/msgpack-c

@@ -1 +1 @@
-Subproject commit 46684265d50b5d1b062d4c5c428ba08462844b1d
+Subproject commit 790b3fe58ebded7a8bd130782ef28bec5784c248

contrib/rapidjson

@@ -1 +1 @@
-Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa
+Subproject commit b571bd5c1a3b1fc931d77ae36932537a3c9018c3

contrib/snappy

@@ -1 +1 @@
-Subproject commit fb057edfed820212076239fd32cb2ff23e9016bf
+Subproject commit 3786173af204d21da97180977ad6ab4321138b3d


@@ -9,6 +9,8 @@
 #include <IO/WriteHelpers.h>
 #include <boost/algorithm/string/predicate.hpp>
+#include <cmath>
 
 namespace DB
 {
@@ -16,6 +18,7 @@ namespace ErrorCodes
 {
     extern const int SIZE_OF_FIXED_STRING_DOESNT_MATCH;
     extern const int CANNOT_PARSE_BOOL;
+    extern const int CANNOT_PARSE_NUMBER;
 }
@@ -176,27 +179,75 @@ UInt64 SettingFieldMaxThreads::getAuto()
     return getNumberOfPhysicalCPUCores();
 }
 
+namespace
+{
+    Poco::Timespan::TimeDiff float64AsSecondsToTimespan(Float64 d)
+    {
+        if (d != 0.0 && !std::isnormal(d))
+            throw Exception(
+                ErrorCodes::CANNOT_PARSE_NUMBER, "A setting's value in seconds must be a normal floating point number or zero. Got {}", d);
+        return static_cast<Poco::Timespan::TimeDiff>(d * 1000000);
+    }
+}
+
-template <SettingFieldTimespanUnit unit_>
-SettingFieldTimespan<unit_>::SettingFieldTimespan(const Field & f) : SettingFieldTimespan(fieldToNumber<UInt64>(f))
+template <>
+SettingFieldSeconds::SettingFieldTimespan(const Field & f) : SettingFieldTimespan(float64AsSecondsToTimespan(fieldToNumber<Float64>(f)))
 {
 }
 
+template <>
+SettingFieldMilliseconds::SettingFieldTimespan(const Field & f) : SettingFieldTimespan(fieldToNumber<UInt64>(f))
+{
+}
+
-template <SettingFieldTimespanUnit unit_>
-SettingFieldTimespan<unit_> & SettingFieldTimespan<unit_>::operator=(const Field & f)
+template <>
+SettingFieldTimespan<SettingFieldTimespanUnit::Second> & SettingFieldSeconds::operator=(const Field & f)
 {
+    *this = Poco::Timespan{float64AsSecondsToTimespan(fieldToNumber<Float64>(f))};
+    return *this;
+}
+
+template <>
+SettingFieldTimespan<SettingFieldTimespanUnit::Millisecond> & SettingFieldMilliseconds::operator=(const Field & f)
+{
     *this = fieldToNumber<UInt64>(f);
     return *this;
 }
 
-template <SettingFieldTimespanUnit unit_>
-String SettingFieldTimespan<unit_>::toString() const
+template <>
+String SettingFieldSeconds::toString() const
 {
+    return ::DB::toString(static_cast<Float64>(value.totalMicroseconds()) / microseconds_per_unit);
+}
+
+template <>
+String SettingFieldMilliseconds::toString() const
+{
     return ::DB::toString(operator UInt64());
 }
 
-template <SettingFieldTimespanUnit unit_>
-void SettingFieldTimespan<unit_>::parseFromString(const String & str)
+template <>
+SettingFieldSeconds::operator Field() const
+{
+    return static_cast<Float64>(value.totalMicroseconds()) / microseconds_per_unit;
+}
+
+template <>
+SettingFieldMilliseconds::operator Field() const
+{
+    return operator UInt64();
+}
+
+template <>
+void SettingFieldSeconds::parseFromString(const String & str)
 {
+    Float64 n = parse<Float64>(str.data(), str.size());
+    *this = Poco::Timespan{static_cast<Poco::Timespan::TimeDiff>(n * microseconds_per_unit)};
+}
+
+template <>
+void SettingFieldMilliseconds::parseFromString(const String & str)
+{
     *this = stringToNumber<UInt64>(str);
 }
@@ -204,6 +255,13 @@ void SettingFieldTimespan<unit_>::parseFromString(const String & str)
 template <SettingFieldTimespanUnit unit_>
 void SettingFieldTimespan<unit_>::writeBinary(WriteBuffer & out) const
 {
+    /// Note that this returns an UInt64 (for both seconds and milliseconds units) for compatibility reasons as the value
+    /// for seconds used to be an integer (now a Float64)
+    /// This method is only used to communicate with clients or servers older than DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS
+    /// in which the value was passed as binary (as a UInt64)
+    /// Later versions pass the setting values as String (using toString() and parseFromString()) and there passing "1.2" will
+    /// lead to `1` on releases with integer seconds or `1.2` on more recent releases
+    /// See https://github.com/ClickHouse/ClickHouse/issues/36940 for more details
     auto num_units = operator UInt64();
     writeVarUInt(num_units, out);
 }
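
The comment above is the heart of the change: seconds-valued settings are now carried as Float64, while the legacy binary protocol still ships whole units as UInt64. A minimal standalone sketch of the two serialization paths (illustrative names and code only, not the ClickHouse implementation):

// Sketch: why a 1.2 s timeout survives the string path but not the old binary path.
// ClickHouse actually stores the value in a Poco::Timespan; plain integers are used here.
#include <cstdint>
#include <iostream>

int main()
{
    const int64_t microseconds_per_unit = 1'000'000; // the "seconds" unit
    const int64_t value_us = 1'200'000;              // a 1.2 s timeout stored as microseconds

    // Old binary serialization: whole units only, so 1.2 s degrades to 1 s.
    uint64_t binary_units = static_cast<uint64_t>(value_us / microseconds_per_unit);

    // New string serialization: the fractional part survives the round trip.
    double string_units = static_cast<double>(value_us) / microseconds_per_unit;

    std::cout << "binary (old clients): " << binary_units << '\n'; // 1
    std::cout << "string (new clients): " << string_units << '\n'; // 1.2
}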


@@ -124,7 +124,7 @@ struct SettingFieldTimespan
     operator std::chrono::duration<Rep, Period>() const { return std::chrono::duration_cast<std::chrono::duration<Rep, Period>>(std::chrono::microseconds(value.totalMicroseconds())); } /// NOLINT
 
     explicit operator UInt64() const { return value.totalMicroseconds() / microseconds_per_unit; }
-    explicit operator Field() const { return operator UInt64(); }
+    explicit operator Field() const;
 
     Poco::Timespan::TimeDiff totalMicroseconds() const { return value.totalMicroseconds(); }
     Poco::Timespan::TimeDiff totalMilliseconds() const { return value.totalMilliseconds(); }


@@ -0,0 +1,12 @@
#include "FunctionShowCertificate.h"

#include <Functions/FunctionFactory.h>

namespace DB
{

void registerFunctionShowCertificate(FunctionFactory & factory)
{
    factory.registerFunction<FunctionShowCertificate>();
}

}


@@ -0,0 +1,189 @@
#pragma once

#include <Common/config.h>
#include <Columns/ColumnMap.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnString.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/IFunction.h>
#include <Interpreters/Context.h>

#if USE_SSL
    #include <openssl/x509v3.h>
    #include "Poco/Net/SSLManager.h"
    #include "Poco/Crypto/X509Certificate.h"
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int SUPPORT_IS_DISABLED;
}

// showCertificate()
class FunctionShowCertificate : public IFunction
{
public:
    static constexpr auto name = "showCertificate";

    static FunctionPtr create(ContextPtr)
    {
#if !defined(USE_SSL) || USE_SSL == 0
        throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support is disabled");
#endif
        return std::make_shared<FunctionShowCertificate>();
    }

    String getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 0; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo &) const override { return true; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName &) const override
    {
        return std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeString>());
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr &, size_t input_rows_count) const override
    {
        MutableColumnPtr keys = DataTypeString().createColumn();
        MutableColumnPtr values = DataTypeString().createColumn();
        MutableColumnPtr offsets = DataTypeNumber<IColumn::Offset>().createColumn();

        if (input_rows_count)
        {
#if USE_SSL
            if (const X509 * cert = SSL_CTX_get0_certificate(Poco::Net::SSLManager::instance().defaultServerContext()->sslContext()))
            {
                BIO * b = BIO_new(BIO_s_mem());
                SCOPE_EXIT(
                {
                    BIO_free(b);
                });

                keys->insert("version");
                values->insert(std::to_string(X509_get_version(cert) + 1));

                {
                    char buf[1024] = {0};
                    const ASN1_INTEGER * sn = cert->cert_info->serialNumber;
                    BIGNUM * bnsn = ASN1_INTEGER_to_BN(sn, nullptr);
                    SCOPE_EXIT(
                    {
                        BN_free(bnsn);
                    });
                    if (BN_print(b, bnsn) > 0 && BIO_read(b, buf, sizeof(buf)) > 0)
                    {
                        keys->insert("serial_number");
                        values->insert(buf);
                    }
                }

                {
                    const ASN1_BIT_STRING * sig = nullptr;
                    const X509_ALGOR * al = nullptr;
                    char buf[1024] = {0};
                    X509_get0_signature(&sig, &al, cert);
                    if (al)
                    {
                        OBJ_obj2txt(buf, sizeof(buf), al->algorithm, 0);
                        keys->insert("signature_algo");
                        values->insert(buf);
                    }
                }

                char * issuer = X509_NAME_oneline(cert->cert_info->issuer, nullptr, 0);
                if (issuer)
                {
                    SCOPE_EXIT(
                    {
                        OPENSSL_free(issuer);
                    });
                    keys->insert("issuer");
                    values->insert(issuer);
                }

                {
                    char buf[1024] = {0};
                    if (ASN1_TIME_print(b, X509_get_notBefore(cert)) && BIO_read(b, buf, sizeof(buf)) > 0)
                    {
                        keys->insert("not_before");
                        values->insert(buf);
                    }
                }

                {
                    char buf[1024] = {0};
                    if (ASN1_TIME_print(b, X509_get_notAfter(cert)) && BIO_read(b, buf, sizeof(buf)) > 0)
                    {
                        keys->insert("not_after");
                        values->insert(buf);
                    }
                }

                char * subject = X509_NAME_oneline(cert->cert_info->subject, nullptr, 0);
                if (subject)
                {
                    SCOPE_EXIT(
                    {
                        OPENSSL_free(subject);
                    });
                    keys->insert("subject");
                    values->insert(subject);
                }

                if (X509_PUBKEY * pkey = X509_get_X509_PUBKEY(cert))
                {
                    char buf[1024] = {0};
                    ASN1_OBJECT * ppkalg = nullptr;
                    const unsigned char * pk = nullptr;
                    int ppklen = 0;
                    X509_ALGOR * pa = nullptr;
                    if (X509_PUBKEY_get0_param(&ppkalg, &pk, &ppklen, &pa, pkey) &&
                        i2a_ASN1_OBJECT(b, ppkalg) > 0 && BIO_read(b, buf, sizeof(buf)) > 0)
                    {
                        keys->insert("pkey_algo");
                        values->insert(buf);
                    }
                }
            }
            offsets->insert(keys->size());
#endif
        }

        size_t sz = keys->size();

        if (sz && input_rows_count > 1)
        {
            keys->reserve(sz * input_rows_count);
            values->reserve(sz * input_rows_count);
            offsets->reserve(input_rows_count);
        }

        for (size_t i = 1; i < input_rows_count; ++i)
        {
            for (size_t j = 0; j < sz; ++j)
            {
                keys->insertFrom(*keys, j);
                values->insertFrom(*values, j);
            }
            offsets->insert(keys->size());
        }

        auto nested_column = ColumnArray::create(
            ColumnTuple::create(Columns{std::move(keys), std::move(values)}), std::move(offsets));

        return ColumnMap::create(nested_column);
    }
};

}
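
executeImpl above computes the certificate's key/value pairs once for row 0, then replicates them for the remaining rows while appending a running end offset per row: the layout trick behind a constant Map column. A standalone sketch of that replication pattern using plain STL containers instead of ClickHouse columns (all names illustrative):

// Build row 0 once, then copy its pairs for each further row and record
// where every row ends; the offsets drive the Array/Map column layout.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> keys{"version", "issuer"};
    std::vector<std::string> values{"3", "CN=example"};   // fake certificate fields
    std::vector<std::size_t> offsets{keys.size()};        // row 0 ends after 2 pairs

    const std::size_t input_rows_count = 3;
    const std::size_t sz = keys.size();
    for (std::size_t i = 1; i < input_rows_count; ++i)
    {
        for (std::size_t j = 0; j < sz; ++j)              // copy row 0's pairs
        {
            keys.push_back(keys[j]);
            values.push_back(values[j]);
        }
        offsets.push_back(keys.size());                   // row i ends here
    }

    for (std::size_t o : offsets)
        std::cout << o << ' ';                            // prints: 2 4 6
    std::cout << '\n';
}

Replicating the first row is cheap and correct here because the server certificate is a per-process constant, so every row of the block carries the same map.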


@@ -68,7 +68,7 @@ void registerFunctionEncrypt(FunctionFactory & factory);
 void registerFunctionDecrypt(FunctionFactory & factory);
 void registerFunctionAESEncryptMysql(FunctionFactory & factory);
 void registerFunctionAESDecryptMysql(FunctionFactory & factory);
+void registerFunctionShowCertificate(FunctionFactory &);
 #endif
 
 void registerFunctions()
@@ -135,6 +135,7 @@ void registerFunctions()
     registerFunctionDecrypt(factory);
     registerFunctionAESEncryptMysql(factory);
     registerFunctionAESDecryptMysql(factory);
+    registerFunctionShowCertificate(factory);
 #endif
     registerFunctionTid(factory);
     registerFunctionLogTrace(factory);


@@ -81,20 +81,18 @@ namespace ErrorCodes
 namespace
 {
     /// Fetch all window info and replace tumble or hop node names with windowID
-    struct FetchQueryInfoMatcher
+    struct WindowFunctionMatcher
     {
-        using Visitor = InDepthNodeVisitor<FetchQueryInfoMatcher, true>;
+        using Visitor = InDepthNodeVisitor<WindowFunctionMatcher, true>;
         using TypeToVisit = ASTFunction;
 
         struct Data
         {
             ASTPtr window_function;
-            String window_id_name;
-            String window_id_alias;
             String serialized_window_function;
-            String timestamp_column_name;
             bool is_tumble = false;
             bool is_hop = false;
+            bool check_duplicate_window = false;
         };
 
         static bool needChildVisit(ASTPtr &, const ASTPtr &) { return true; }
@@ -111,18 +109,17 @@
                 temp_node->setAlias("");
                 if (!data.window_function)
                 {
-                    data.serialized_window_function = serializeAST(*temp_node);
+                    if (data.check_duplicate_window)
+                        data.serialized_window_function = serializeAST(*temp_node);
                     t->name = "windowID";
-                    data.window_id_name = t->getColumnName();
-                    data.window_id_alias = t->alias;
                     data.window_function = t->clone();
                    data.window_function->setAlias("");
-                    data.timestamp_column_name = t->arguments->children[0]->getColumnName();
                 }
                 else
                 {
-                    if (serializeAST(*temp_node) != data.serialized_window_function)
-                        throw Exception("WINDOW VIEW only support ONE TIME WINDOW FUNCTION", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
+                    if (data.check_duplicate_window && serializeAST(*temp_node) != data.serialized_window_function)
+                        throw Exception(
+                            "WINDOW VIEW only support ONE TIME WINDOW FUNCTION", ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
                     t->name = "windowID";
                 }
             }
@@ -190,24 +187,6 @@
     using ReplaceFunctionNowVisitor = InDepthNodeVisitor<OneTypeMatcher<ReplaceFunctionNowData>, true>;
 
-    struct ReplaceFunctionWindowMatcher
-    {
-        using Visitor = InDepthNodeVisitor<ReplaceFunctionWindowMatcher, true>;
-
-        struct Data{};
-
-        static bool needChildVisit(ASTPtr &, const ASTPtr &) { return true; }
-
-        static void visit(ASTPtr & ast, Data &)
-        {
-            if (auto * t = ast->as<ASTFunction>())
-            {
-                if (t->name == "hop" || t->name == "tumble")
-                    t->name = "windowID";
-            }
-        }
-    };
-
     class ToIdentifierMatcher
     {
     public:
@@ -267,7 +246,7 @@
             {
                 if (auto * t = ast->as<ASTIdentifier>())
                 {
-                    ast = std::make_shared<ASTIdentifier>(t->shortName());
+                    t->setShortName(t->shortName());
                 }
             }
         };
@@ -420,7 +399,7 @@ ASTPtr StorageWindowView::getCleanupQuery()
     ASTPtr function_less;
     function_less= makeASTFunction(
         "less",
-        std::make_shared<ASTIdentifier>(inner_window_id_column_name),
+        std::make_shared<ASTIdentifier>(window_id_name),
         std::make_shared<ASTLiteral>(getCleanupBound()));
 
     auto alter_query = std::make_shared<ASTAlterQuery>();
@@ -535,7 +514,7 @@ std::pair<BlocksPtr, Block> StorageWindowView::getNewBlocks(UInt32 watermark)
     {
         /// SELECT * FROM inner_table WHERE window_id_name == w_end
         /// (because we fire at the end of windows)
-        filter_function = makeASTFunction("equals", std::make_shared<ASTIdentifier>(inner_window_id_column_name), std::make_shared<ASTLiteral>(watermark));
+        filter_function = makeASTFunction("equals", std::make_shared<ASTIdentifier>(window_id_name), std::make_shared<ASTLiteral>(watermark));
     }
     else
     {
@@ -554,7 +533,7 @@ std::pair<BlocksPtr, Block> StorageWindowView::getNewBlocks(UInt32 watermark)
             func_array ->arguments->children.push_back(std::make_shared<ASTLiteral>(w_end));
             w_end = addTime(w_end, window_kind, -slice_num_units, *time_zone);
         }
-        filter_function = makeASTFunction("has", func_array, std::make_shared<ASTIdentifier>(inner_window_id_column_name));
+        filter_function = makeASTFunction("has", func_array, std::make_shared<ASTIdentifier>(window_id_name));
     }
 
     auto syntax_result = TreeRewriter(getContext()).analyze(filter_function, builder.getHeader().getNamesAndTypesList());
@@ -569,7 +548,7 @@ std::pair<BlocksPtr, Block> StorageWindowView::getNewBlocks(UInt32 watermark)
     /// Adding window column
     DataTypes window_column_type{std::make_shared<DataTypeDateTime>(), std::make_shared<DataTypeDateTime>()};
     ColumnWithTypeAndName column;
-    column.name = inner_window_column_name;
+    column.name = window_column_name;
     column.type = std::make_shared<DataTypeTuple>(std::move(window_column_type));
     column.column = column.type->createColumnConst(0, Tuple{w_start, watermark});
     auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
@@ -582,7 +561,7 @@ std::pair<BlocksPtr, Block> StorageWindowView::getNewBlocks(UInt32 watermark)
     /// Removing window id column
     auto new_header = builder.getHeader();
-    new_header.erase(inner_window_id_column_name);
+    new_header.erase(window_id_name);
     auto convert_actions_dag = ActionsDAG::makeConvertingActions(
         builder.getHeader().getColumnsWithTypeAndName(),
         new_header.getColumnsWithTypeAndName(),
@@ -736,15 +715,14 @@ ASTPtr StorageWindowView::getSourceTableSelectQuery()
 {
     auto query = select_query->clone();
 
     DropTableIdentifierMatcher::Data drop_table_identifier_data;
-    DropTableIdentifierMatcher::Visitor drop_table_identifier_visitor(drop_table_identifier_data);
-    drop_table_identifier_visitor.visit(query);
+    DropTableIdentifierMatcher::Visitor(drop_table_identifier_data).visit(query);
 
-    FetchQueryInfoMatcher::Data query_info_data;
-    FetchQueryInfoMatcher::Visitor(query_info_data).visit(query);
+    WindowFunctionMatcher::Data query_info_data;
+    WindowFunctionMatcher::Visitor(query_info_data).visit(query);
 
     auto order_by = std::make_shared<ASTExpressionList>();
     auto order_by_elem = std::make_shared<ASTOrderByElement>();
-    order_by_elem->children.push_back(std::make_shared<ASTIdentifier>(query_info_data.timestamp_column_name));
+    order_by_elem->children.push_back(std::make_shared<ASTIdentifier>(timestamp_column_name));
     order_by_elem->direction = 1;
     order_by->children.push_back(order_by_elem);
     modified_select.setExpression(ASTSelectQuery::Expression::ORDER_BY, std::move(order_by));
@@ -778,7 +756,7 @@ ASTPtr StorageWindowView::getInnerTableCreateQuery(const ASTPtr & inner_query, c
         = InterpreterSelectQuery(inner_select_query, getContext(), SelectQueryOptions(QueryProcessingStage::WithMergeableState))
               .getSampleBlock();
 
-    auto columns_list = std::make_shared<ASTExpressionList>();
+    ASTPtr columns_list = InterpreterCreateQuery::formatColumns(t_sample_block.getNamesAndTypesList());
 
     if (is_time_column_func_now)
     {
@@ -786,31 +764,8 @@ ASTPtr StorageWindowView::getInnerTableCreateQuery(const ASTPtr & inner_query, c
         column_window->name = window_id_name;
         column_window->type = std::make_shared<ASTIdentifier>("UInt32");
         columns_list->children.push_back(column_window);
-        inner_window_id_column_name = window_id_name;
     }
 
-    for (const auto & column : t_sample_block.getColumnsWithTypeAndName())
-    {
-        ParserIdentifierWithOptionalParameters parser;
-        String sql = column.type->getName();
-        ASTPtr ast = parseQuery(parser, sql.data(), sql.data() + sql.size(), "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
-        auto column_dec = std::make_shared<ASTColumnDeclaration>();
-        column_dec->name = column.name;
-        column_dec->type = ast;
-        columns_list->children.push_back(column_dec);
-        if (!is_time_column_func_now && inner_window_id_column_name.empty() && startsWith(column.name, "windowID"))
-        {
-            inner_window_id_column_name = column.name;
-        }
-    }
-
-    if (inner_window_id_column_name.empty())
-        throw Exception(
-            "The first argument of time window function should not be a constant value.",
-            ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
-
-    inner_window_column_name = std::regex_replace(inner_window_id_column_name, std::regex("windowID"), is_tumble ? "tumble" : "hop");
-
     ToIdentifierMatcher::Data query_data;
     query_data.window_id_name = window_id_name;
     query_data.window_id_alias = window_id_alias;
@@ -818,8 +773,8 @@ ASTPtr StorageWindowView::getInnerTableCreateQuery(const ASTPtr & inner_query, c
     ReplaceFunctionNowData time_now_data;
     ReplaceFunctionNowVisitor time_now_visitor(time_now_data);
-    ReplaceFunctionWindowMatcher::Data func_hop_data;
-    ReplaceFunctionWindowMatcher::Visitor func_window_visitor(func_hop_data);
+    WindowFunctionMatcher::Data window_data;
+    WindowFunctionMatcher::Visitor window_visitor(window_data);
 
     DropTableIdentifierMatcher::Data drop_table_identifier_data;
     DropTableIdentifierMatcher::Visitor drop_table_identifier_visitor(drop_table_identifier_data);
@@ -836,7 +791,7 @@ ASTPtr StorageWindowView::getInnerTableCreateQuery(const ASTPtr & inner_query, c
         }
         drop_table_identifier_visitor.visit(node);
         /// tumble/hop -> windowID
-        func_window_visitor.visit(node);
+        window_visitor.visit(node);
         to_identifier_visitor.visit(node);
         node->setAlias("");
         return node;
@@ -1315,6 +1270,8 @@ ASTPtr StorageWindowView::initInnerQuery(ASTSelectQuery query, ContextPtr contex
     if (is_time_column_func_now)
         window_id_name = func_now_data.window_id_name;
 
+    window_column_name = std::regex_replace(window_id_name, std::regex("windowID"), is_tumble ? "tumble" : "hop");
+
     /// Parse final query (same as mergeable query but has tumble/hop instead of windowID)
     final_query = mergeable_query->clone();
     ReplaceWindowIdMatcher::Data final_query_data;
@@ -1331,16 +1288,15 @@ ASTPtr StorageWindowView::innerQueryParser(const ASTSelectQuery & query)
     // Parse stage mergeable
     ASTPtr result = query.clone();
-    FetchQueryInfoMatcher::Data query_info_data;
-    FetchQueryInfoMatcher::Visitor(query_info_data).visit(result);
+    WindowFunctionMatcher::Data query_info_data;
+    query_info_data.check_duplicate_window = true;
+    WindowFunctionMatcher::Visitor(query_info_data).visit(result);
 
     if (!query_info_data.is_tumble && !query_info_data.is_hop)
         throw Exception(ErrorCodes::INCORRECT_QUERY,
                         "TIME WINDOW FUNCTION is not specified for {}", getName());
 
-    window_id_name = query_info_data.window_id_name;
-    window_id_alias = query_info_data.window_id_alias;
-    timestamp_column_name = query_info_data.timestamp_column_name;
     is_tumble = query_info_data.is_tumble;
 
     // Parse time window function
@@ -1350,6 +1306,14 @@
         arguments.at(1), window_kind, window_num_units,
         "Illegal type of second argument of function " + window_function.name + " should be Interval");
 
+    window_id_alias = window_function.alias;
+    if (auto * node = arguments[0]->as<ASTIdentifier>())
+        timestamp_column_name = node->shortName();
+
+    DropTableIdentifierMatcher::Data drop_identifier_data;
+    DropTableIdentifierMatcher::Visitor(drop_identifier_data).visit(query_info_data.window_function);
+    window_id_name = window_function.getColumnName();
+
     slide_kind = window_kind;
     slide_num_units = window_num_units;
@@ -1614,31 +1578,6 @@ void StorageWindowView::writeIntoWindowView(
 void StorageWindowView::startup()
 {
-    if (is_time_column_func_now)
-        inner_window_id_column_name = window_id_name;
-    else
-    {
-        Aliases aliases;
-        QueryAliasesVisitor(aliases).visit(mergeable_query);
-        auto inner_query_normalized = mergeable_query->clone();
-        QueryNormalizer::Data normalizer_data(aliases, {}, false, getContext()->getSettingsRef(), false);
-        QueryNormalizer(normalizer_data).visit(inner_query_normalized);
-        auto inner_select_query = std::static_pointer_cast<ASTSelectQuery>(inner_query_normalized);
-        auto t_sample_block
-            = InterpreterSelectQuery(inner_select_query, getContext(), SelectQueryOptions(QueryProcessingStage::WithMergeableState))
-                  .getSampleBlock();
-        for (const auto & column : t_sample_block.getColumnsWithTypeAndName())
-        {
-            if (startsWith(column.name, "windowID"))
-            {
-                inner_window_id_column_name = column.name;
-                break;
-            }
-        }
-    }
-
-    inner_window_column_name = std::regex_replace(inner_window_id_column_name, std::regex("windowID"), is_tumble ? "tumble" : "hop");
-
     DatabaseCatalog::instance().addDependency(select_table_id, getStorageID());
 
     // Start the working thread


@@ -238,8 +238,7 @@ private:
     Int64 slide_num_units;
     String window_id_name;
     String window_id_alias;
-    String inner_window_column_name;
-    String inner_window_id_column_name;
+    String window_column_name;
     String timestamp_column_name;
 
     StorageID select_table_id = StorageID::createEmpty();


@@ -87,6 +87,11 @@ def reset_policies():
     copy_policy_xml("normal_filters.xml")
     for current_node in nodes:
         current_node.query("DROP POLICY IF EXISTS pA, pB ON mydb.filtered_table1")
+        current_node.query("DROP POLICY IF EXISTS pC ON mydb.other_table")
+        current_node.query("DROP POLICY IF EXISTS all_data ON dist_tbl, local_tbl")
+        current_node.query(
+            "DROP POLICY IF EXISTS role1_data ON dist_tbl, local_tbl"
+        )
 
 
 def test_smoke():


@@ -10,7 +10,7 @@ CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp,
 ||---FUNCTION---
 CREATE TABLE test_01047.`.inner.wv`\n(\n `plus(a, b)` Int64,\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPRIMARY KEY `windowID(timestamp, toIntervalSecond(\'1\'))`\nORDER BY (`windowID(timestamp, toIntervalSecond(\'1\'))`, `plus(a, b)`)\nSETTINGS index_granularity = 8192
 ||---PARTITION---
-CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(____timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPARTITION BY `windowID(____timestamp, toIntervalSecond(\'1\'))`\nORDER BY `windowID(____timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
+CREATE TABLE test_01047.`.inner.wv`\n(\n `count(a)` AggregateFunction(count, Int32),\n `windowID(____timestamp, toIntervalSecond(\'1\'))` UInt32\n)\nENGINE = AggregatingMergeTree\nPARTITION BY `windowID(____timestamp, toIntervalSecond(\'1\'))`\nORDER BY `windowID(____timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
 ||---JOIN---
-CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32),\n `count(mt_2.b)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
+CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32),\n `count(mt_2.b)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
@@ -26,7 +26,7 @@ CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp,
 ||---FUNCTION---
 CREATE TABLE test_01047.`.inner.wv`\n(\n `plus(a, b)` Int64,\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPRIMARY KEY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nORDER BY (`windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`, `plus(a, b)`)\nSETTINGS index_granularity = 8192
 ||---PARTITION---
-CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(____timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPARTITION BY `windowID(____timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nORDER BY `windowID(____timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nSETTINGS index_granularity = 8192
+CREATE TABLE test_01047.`.inner.wv`\n(\n `count(a)` AggregateFunction(count, Int32),\n `windowID(____timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32\n)\nENGINE = AggregatingMergeTree\nPARTITION BY `windowID(____timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nORDER BY `windowID(____timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nSETTINGS index_granularity = 8192
 ||---JOIN---
-CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32),\n `count(mt_2.b)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nSETTINGS index_granularity = 8192
+CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32),\n `count(mt_2.b)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nSETTINGS index_granularity = 8192


@@ -0,0 +1,7 @@
1 1 1990-01-01 12:00:05
1 2 1990-01-01 12:00:05
1 3 1990-01-01 12:00:05
1 4 1990-01-01 12:00:10
1 5 1990-01-01 12:00:10
1 6 1990-01-01 12:00:15
1 7 1990-01-01 12:00:15


@@ -0,0 +1,31 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS wv;
CREATE TABLE mt(a Int32, market Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple();
CREATE WINDOW VIEW wv INNER ENGINE AggregatingMergeTree ORDER BY tuple(tumble(timestamp, INTERVAL '5' SECOND, 'US/Samoa'), market) ENGINE Memory WATERMARK=ASCENDING AS SELECT count(mt.a) AS count, market, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(mt.timestamp, INTERVAL '5' SECOND, 'US/Samoa') AS wid, market;
INSERT INTO mt VALUES (1, 1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));
INSERT INTO mt VALUES (1, 2, toDateTime('1990/01/01 12:00:01', 'US/Samoa'));
INSERT INTO mt VALUES (1, 3, toDateTime('1990/01/01 12:00:02', 'US/Samoa'));
INSERT INTO mt VALUES (1, 4, toDateTime('1990/01/01 12:00:05', 'US/Samoa'));
INSERT INTO mt VALUES (1, 5, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));
INSERT INTO mt VALUES (1, 6, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));
INSERT INTO mt VALUES (1, 7, toDateTime('1990/01/01 12:00:11', 'US/Samoa'));
INSERT INTO mt VALUES (1, 8, toDateTime('1990/01/01 12:00:30', 'US/Samoa'));
EOF
while true; do
    $CLICKHOUSE_CLIENT --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"


@@ -1,11 +1,11 @@
 -- Tags: no-fasttest
 
-SET min_execution_speed = 100000000000, timeout_before_checking_execution_speed = 0.1;
+SET min_execution_speed = 100000000000, timeout_before_checking_execution_speed = 0;
 SELECT count() FROM system.numbers; -- { serverError 160 }
 SELECT 'Ok (1)';
 SET min_execution_speed = 0;
 
-SET min_execution_speed_bytes = 800000000000, timeout_before_checking_execution_speed = 0.1;
+SET min_execution_speed_bytes = 800000000000, timeout_before_checking_execution_speed = 0;
 SELECT count() FROM system.numbers; -- { serverError 160 }
 SELECT 'Ok (2)';
 SET min_execution_speed_bytes = 0;


@@ -1,7 +1,7 @@
 -- Tags: distributed
 
 SET max_execution_speed = 1000000;
-SET timeout_before_checking_execution_speed = 0.001;
+SET timeout_before_checking_execution_speed = 0;
 SET max_block_size = 100;
 SET log_queries=1;


@@ -0,0 +1,6 @@
1
1
1
1
1
1


@@ -0,0 +1,11 @@
SELECT 1 SETTINGS max_execution_time=NaN; -- { serverError 72 }
SELECT 1 SETTINGS max_execution_time=Infinity; -- { serverError 72 };
SELECT 1 SETTINGS max_execution_time=-Infinity; -- { serverError 72 };
-- Ok values
SELECT 1 SETTINGS max_execution_time=-0.5;
SELECT 1 SETTINGS max_execution_time=0.5;
SELECT 1 SETTINGS max_execution_time=-1;
SELECT 1 SETTINGS max_execution_time=0.0;
SELECT 1 SETTINGS max_execution_time=-0.0;
SELECT 1 SETTINGS max_execution_time=10;
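
The NaN and Infinity rejections above come from the new float64AsSecondsToTimespan guard in SettingsFields.cpp, which accepts only zero or a normal floating-point value (error 72 is CANNOT_PARSE_NUMBER). Note that std::isnormal(0.0) is false, which is why the guard carves out zero explicitly. A minimal sketch of that predicate (assumed equivalent behavior, not the actual ClickHouse code):

// accepted() mirrors the guard `d == 0.0 || std::isnormal(d)`:
// NaN, infinities and subnormals are rejected, zero is special-cased.
#include <cmath>
#include <iostream>

static bool accepted(double d)
{
    return d == 0.0 || std::isnormal(d);
}

int main()
{
    std::cout << accepted(0.0) << '\n';      // 1: zero explicitly allowed
    std::cout << accepted(-0.5) << '\n';     // 1: normal value (sign is not checked here)
    std::cout << accepted(NAN) << '\n';      // 0: rejected -> CANNOT_PARSE_NUMBER
    std::cout << accepted(INFINITY) << '\n'; // 0: rejected
    std::cout << accepted(1e-320) << '\n';   // 0: subnormal, rejected
}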


@@ -0,0 +1,8 @@
TCP CLIENT
maximum: 1.1
TCP CLIENT WITH SETTINGS IN QUERY
maximum: 1.1
HTTP CLIENT
maximum: 1.1
TABLE: system.settings
max_execution_time 30.5 1


@@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Tags: long
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
set -e -o pipefail
MAX_TIMEOUT=1.1 # Use 1.1 because using 0.x truncates to 0 in older releases
function check_output() {
    MAXTIME_USED=$(echo "$1" | grep -Eo "maximum: [0-9]+\.[0-9]+" | head -n1 || true)
    if [ "${MAXTIME_USED}" != "maximum: ${MAX_TIMEOUT}" ];
    then
        echo "'$MAXTIME_USED' is not equal to 'maximum: ${MAX_TIMEOUT}'"
        echo "OUTPUT: $1"
    else
        echo "$MAXTIME_USED"
    fi
}
# TCP CLIENT
echo "TCP CLIENT"
OUTPUT=$($CLICKHOUSE_CLIENT --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true)
check_output "${OUTPUT}"
echo "TCP CLIENT WITH SETTINGS IN QUERY"
OUTPUT=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true)
check_output "${OUTPUT}"
# HTTP CLIENT
echo "HTTP CLIENT"
OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=$MAX_TIMEOUT" -d \
"SELECT count() FROM system.numbers" || true)
check_output "${OUTPUT}"
# CHECK system.settings
echo "TABLE: system.settings"
echo "SELECT name, value, changed from system.settings where name = 'max_execution_time'" | clickhouse-client --max_execution_time 30.5


@@ -1,4 +1,4 @@
-SET max_execution_speed = 4000000, timeout_before_checking_execution_speed = 0.001;
+SET max_execution_speed = 4000000, timeout_before_checking_execution_speed = 0;
 
 CREATE TEMPORARY TABLE times (t DateTime);


@@ -340,3 +340,6 @@ fi
 
 # Forbid files that differ only by character case
 find $ROOT_PATH | sort -f | uniq -i -c | awk '{ if ($1 > 1) print }'
+
+# Forbid recursive submodules
+find $ROOT_PATH/contrib -name '.gitmodules' -size +0 | xargs cat | grep -P '.' && echo "Recursive submodules are forbidden."