Merge branch 'master' into curl_update

Commit 92ca58fbb2 by Alexey Milovidov, 2023-02-10 07:28:56 +03:00, committed by GitHub.
43 changed files with 494 additions and 187 deletions

contrib/libxml2 (vendored)

@ -1 +1 @@
Subproject commit 7846b0a677f8d3ce72486125fa281e92ac9970e8
Subproject commit f507d167f1755b7eaea09fb1a44d29aab828b6d1


@ -24,7 +24,6 @@ set(SRCS
"${LIBXML2_SOURCE_DIR}/xinclude.c"
"${LIBXML2_SOURCE_DIR}/nanohttp.c"
"${LIBXML2_SOURCE_DIR}/nanoftp.c"
"${LIBXML2_SOURCE_DIR}/DOCBparser.c"
"${LIBXML2_SOURCE_DIR}/catalog.c"
"${LIBXML2_SOURCE_DIR}/globals.c"
"${LIBXML2_SOURCE_DIR}/threads.c"
@ -36,7 +35,6 @@ set(SRCS
"${LIBXML2_SOURCE_DIR}/xmlschemastypes.c"
"${LIBXML2_SOURCE_DIR}/xmlunicode.c"
"${LIBXML2_SOURCE_DIR}/triostr.c"
#"${LIBXML2_SOURCE_DIR}/trio.c"
"${LIBXML2_SOURCE_DIR}/xmlreader.c"
"${LIBXML2_SOURCE_DIR}/relaxng.c"
"${LIBXML2_SOURCE_DIR}/dict.c"


@ -1,6 +1,6 @@
/*
* Summary: compile-time version informations
* Description: compile-time version informations for the XML library
* Summary: compile-time version information
* Description: compile-time version information for the XML library
*
* Copy: See Copyright for the status of this software.
*
@ -29,28 +29,28 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
*
* the version string like "1.2.3"
*/
#define LIBXML_DOTTED_VERSION "2.9.8"
#define LIBXML_DOTTED_VERSION "2.10.3"
/**
* LIBXML_VERSION:
*
* the version number: 1.2.3 value is 10203
*/
#define LIBXML_VERSION 20908
#define LIBXML_VERSION 21003
/**
* LIBXML_VERSION_STRING:
*
* the version number string, 1.2.3 value is "10203"
*/
#define LIBXML_VERSION_STRING "20908"
#define LIBXML_VERSION_STRING "21003"
/**
* LIBXML_VERSION_EXTRA:
*
* extra version information, used to show a CVS compilation
* extra version information, used to show a git commit description
*/
#define LIBXML_VERSION_EXTRA "-GITv2.9.9-rc2-1-g6fc04d71"
#define LIBXML_VERSION_EXTRA ""
/**
* LIBXML_TEST_VERSION:
@ -58,7 +58,7 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
* Macro to check that the libxml version in use is compatible with
* the version the software has been compiled against
*/
#define LIBXML_TEST_VERSION xmlCheckVersion(20908);
#define LIBXML_TEST_VERSION xmlCheckVersion(21003);
#ifndef VMS
#if 0
@ -90,7 +90,9 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
*
* Whether the thread support is configured in
*/
#define LIBXML_THREAD_ENABLED 1
#if 1
#define LIBXML_THREAD_ENABLED
#endif
/**
* LIBXML_THREAD_ALLOC_ENABLED:
@ -169,7 +171,7 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
*
* Whether the FTP support is configured in
*/
#if 1
#if 0
#define LIBXML_FTP_ENABLED
#endif
@ -205,7 +207,7 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
*
* Whether the deprecated APIs are compiled in for compatibility
*/
#if 1
#if 0
#define LIBXML_LEGACY_ENABLED
#endif
@ -227,15 +229,6 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
#define LIBXML_CATALOG_ENABLED
#endif
/**
* LIBXML_DOCB_ENABLED:
*
* Whether the SGML Docbook support is configured in
*/
#if 1
#define LIBXML_DOCB_ENABLED
#endif
/**
* LIBXML_XPATH_ENABLED:
*
@ -254,6 +247,15 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
#define LIBXML_XPTR_ENABLED
#endif
/**
* LIBXML_XPTR_LOCS_ENABLED:
*
* Whether support for XPointer locations is configured in
*/
#if 0
#define LIBXML_XPTR_LOCS_ENABLED
#endif
/**
* LIBXML_XINCLUDE_ENABLED:
*
@ -268,7 +270,7 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
*
* Whether iconv support is available
*/
#if 0
#if 1
#define LIBXML_ICONV_ENABLED
#endif
@ -348,8 +350,10 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
* LIBXML_EXPR_ENABLED:
*
* Whether the formal expressions interfaces are compiled in
*
* This code is unused and disabled unconditionally for now.
*/
#if 1
#if 0
#define LIBXML_EXPR_ENABLED
#endif
@ -452,6 +456,15 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
# define LIBXML_ATTR_FORMAT(fmt,args)
#endif
#ifndef XML_DEPRECATED
# ifdef IN_LIBXML
# define XML_DEPRECATED
# else
/* Available since at least GCC 3.1 */
# define XML_DEPRECATED __attribute__((deprecated))
# endif
#endif
#else /* ! __GNUC__ */
/**
* ATTRIBUTE_UNUSED:
@ -471,6 +484,15 @@ XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
* Macro used to indicate to GCC the parameter are printf like
*/
#define LIBXML_ATTR_FORMAT(fmt,args)
/**
* XML_DEPRECATED:
*
* Macro used to indicate that a function, variable, type or struct member
* is deprecated.
*/
#ifndef XML_DEPRECATED
#define XML_DEPRECATED
#endif
#endif /* __GNUC__ */
#ifdef __cplusplus
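For context on the version bump above: LIBXML_TEST_VERSION expands to a call to xmlCheckVersion with the compile-time version number (now 21003), letting a program built against these headers detect an incompatible libxml2 runtime at startup. A minimal sketch of a caller, assuming only the standard libxml2 headers (not code from this repository):

#include <libxml/parser.h>
#include <libxml/xmlversion.h>

int main()
{
    // Compares the compile-time LIBXML_VERSION (21003 after this update) with
    // the version of the libxml2 library actually linked in; on a mismatch
    // libxml2 reports the problem through its error handling.
    LIBXML_TEST_VERSION

    xmlCleanupParser();
    return 0;
}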


@ -16,7 +16,6 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
gnupg \
locales \
wget \
yasm \
tzdata \
&& apt-get clean


@ -22,3 +22,8 @@ List of supported integrations:
- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md)
- [SQLite](../../../engines/table-engines/integrations/sqlite.md)
- [Hive](../../../engines/table-engines/integrations/hive.md)
- [ExternalDistributed](../../../engines/table-engines/integrations/ExternalDistributed.md)
- [MaterializedPostgreSQL](../../../engines/table-engines/integrations/materialized-postgresql.md)
- [NATS](../../../engines/table-engines/integrations/nats.md)
- [DeltaLake](../../../engines/table-engines/integrations/deltalake.md)
- [Hudi](../../../engines/table-engines/integrations/hudi.md)


@ -683,6 +683,11 @@ Example:
## JSONColumns {#jsoncolumns}
:::tip
The output of the JSONColumns* formats provides the ClickHouse field name and then the content of each row of the table for that field;
visually, the data is rotated 90 degrees to the left.
:::
In this format, all data is represented as a single JSON Object.
Note that JSONColumns output format buffers all data in memory to output it as a single block and it can lead to high memory consumption.
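For illustration, a minimal sketch of the shape of JSONColumns output (column names and values are made up, not taken from the documentation page): a single object whose keys are column names and whose values are arrays holding that column's value for every row, which is why the data reads as rotated 90 degrees.

{
    "name": ["a", "b", "c"],
    "value": [1, 2, 3]
}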


@ -26,7 +26,6 @@
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/UseSSL.h>
#include <QueryPipeline/RemoteQueryExecutor.h>
#include <Interpreters/Context.h>


@ -1,7 +1,7 @@
#include "ClusterCopierApp.h"
#include <Common/StatusFile.h>
#include <Common/TerminalSize.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Formats/registerFormats.h>
#include <Common/scope_guard_safe.h>
#include <unistd.h>


@ -1,6 +1,6 @@
#include "LibraryBridgeHelper.h"
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
namespace DB
{
@ -12,7 +12,7 @@ LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_)
, http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value)
, bridge_host(config.getString("library_bridge.host", DEFAULT_HOST))
, bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT))
, http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_))
, http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), {context_->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}))
{
}


@ -12,7 +12,7 @@
#include <Common/BridgeProtocolVersion.h>
#include <Common/ShellCommand.h>
#include <Common/logger_useful.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <base/range.h>
#include <BridgeHelper/IBridgeHelper.h>
@ -98,7 +98,7 @@ protected:
{
try
{
ReadWriteBufferFromHTTP buf(getPingURI(), Poco::Net::HTTPRequest::HTTP_GET, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);
ReadWriteBufferFromHTTP buf(getPingURI(), Poco::Net::HTTPRequest::HTTP_GET, {}, getHTTPTimeouts(), credentials);
return checkString(PING_OK_ANSWER, buf);
}
catch (...)
@ -161,6 +161,10 @@ private:
Poco::Net::HTTPBasicCredentials credentials{};
ConnectionTimeouts getHTTPTimeouts()
{
return ConnectionTimeouts::getHTTPTimeouts(getContext()->getSettingsRef(), {getContext()->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0});
}
protected:
using URLParams = std::vector<std::pair<std::string, std::string>>;
@ -195,7 +199,7 @@ protected:
uri.addQueryParameter("connection_string", getConnectionString());
uri.addQueryParameter("use_connection_pooling", toString(use_connection_pooling));
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, getHTTPTimeouts(), credentials);
bool res;
readBoolText(res, buf);
@ -217,7 +221,7 @@ protected:
uri.addQueryParameter("connection_string", getConnectionString());
uri.addQueryParameter("use_connection_pooling", toString(use_connection_pooling));
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, getHTTPTimeouts(), credentials);
std::string character;
readStringBinary(character, buf);


@ -342,14 +342,13 @@ set_source_files_properties(
PROPERTIES COMPILE_FLAGS "-mwaitpkg")
endif ()
target_link_libraries(common PUBLIC ch_contrib::re2_st)
target_link_libraries(common PUBLIC ch_contrib::re2)
target_link_libraries(clickhouse_common_io
PUBLIC
boost::program_options
boost::system
ch_contrib::cityhash
ch_contrib::re2
ch_contrib::re2_st
ch_contrib::zlib
pcg_random
Poco::Foundation


@ -173,14 +173,12 @@ void registerCodecDeflateQpl(CompressionCodecFactory & factory);
/// Keeper use only general-purpose codecs, so we don't need these special codecs
/// in standalone build
#ifndef KEEPER_STANDALONE_BUILD
void registerCodecDelta(CompressionCodecFactory & factory);
void registerCodecT64(CompressionCodecFactory & factory);
void registerCodecDoubleDelta(CompressionCodecFactory & factory);
void registerCodecGorilla(CompressionCodecFactory & factory);
void registerCodecEncrypted(CompressionCodecFactory & factory);
void registerCodecFPC(CompressionCodecFactory & factory);
#endif
CompressionCodecFactory::CompressionCodecFactory()


@ -52,6 +52,8 @@
/// the number is unmotivated
#define DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT 15
#define DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT 10
#define DBMS_DEFAULT_PATH "/var/lib/clickhouse/"
/// Actually, there may be multiple acquisitions of different locks for a given table within one query.


@ -54,7 +54,6 @@ namespace
return applyVisitor(FieldVisitorConvertToNumber<T>(), f);
}
#ifndef KEEPER_STANDALONE_BUILD
Map stringToMap(const String & str)
{
/// Allow empty string as an empty map
@ -71,7 +70,7 @@ namespace
return (*column)[0].safeGet<Map>();
}
Map fieldToMap(const Field & f)
[[maybe_unused]] Map fieldToMap(const Field & f)
{
if (f.getType() == Field::Types::String)
{
@ -82,7 +81,6 @@ namespace
return f.safeGet<const Map &>();
}
#endif
}
@ -327,6 +325,13 @@ void SettingFieldString::readBinary(ReadBuffer & in)
*this = std::move(str);
}
/// Unbeautiful workaround for clickhouse-keeper standalone build ("-DBUILD_STANDALONE_KEEPER=1").
/// In this build, we don't build and link library dbms (to which SettingsField.cpp belongs) but
/// only build SettingsField.cpp. Further dependencies, e.g. DataTypeString and DataTypeMap below,
/// require building of further files for clickhouse-keeper. To keep dependencies slim, we don't do
/// that. The linker does not complain only because clickhouse-keeper does not call any of below
/// functions. A cleaner alternative would be more modular libraries, e.g. one for data types, which
/// could then be linked by the server and the linker.
#ifndef KEEPER_STANDALONE_BUILD
SettingFieldMap::SettingFieldMap(const Field & f) : value(fieldToMap(f)) {}


@ -239,8 +239,6 @@ struct SettingFieldString
void readBinary(ReadBuffer & in);
};
#ifndef KEEPER_STANDALONE_BUILD
struct SettingFieldMap
{
public:
@ -264,8 +262,6 @@ public:
void readBinary(ReadBuffer & in);
};
#endif
struct SettingFieldChar
{
public:


@ -100,7 +100,7 @@ inline UInt32 getDecimalScale(const DataTypeDecimal<T> & data_type)
template <typename FromDataType, typename ToDataType, typename ReturnType = void>
requires (IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>)
inline ReturnType convertDecimalsImpl(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to, typename ToDataType::FieldType& result)
inline ReturnType convertDecimalsImpl(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to, typename ToDataType::FieldType & result)
{
using FromFieldType = typename FromDataType::FieldType;
using ToFieldType = typename ToDataType::FieldType;
@ -121,8 +121,14 @@ inline ReturnType convertDecimalsImpl(const typename FromDataType::FieldType & v
return ReturnType(false);
}
}
else if (scale_to == scale_from)
{
converted_value = value.value;
}
else
{
converted_value = value.value / DecimalUtils::scaleMultiplier<MaxNativeType>(scale_from - scale_to);
}
if constexpr (sizeof(FromFieldType) > sizeof(ToFieldType))
{
@ -155,7 +161,7 @@ inline typename ToDataType::FieldType convertDecimals(const typename FromDataTyp
template <typename FromDataType, typename ToDataType>
requires (IsDataTypeDecimal<FromDataType> && IsDataTypeDecimal<ToDataType>)
inline bool tryConvertDecimals(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to, typename ToDataType::FieldType& result)
inline bool tryConvertDecimals(const typename FromDataType::FieldType & value, UInt32 scale_from, UInt32 scale_to, typename ToDataType::FieldType & result)
{
return convertDecimalsImpl<FromDataType, ToDataType, bool>(value, scale_from, scale_to, result);
}
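The idea behind the new branch above: a Decimal with scale s stores value * 10^s as an integer, so rescaling from scale_from to scale_to multiplies or divides by 10^|scale_to - scale_from|, and when the scales already match the raw value can simply be copied. A self-contained sketch of that arithmetic on plain integers (a stand-in for the ClickHouse Decimal native types, with rounding and overflow checks omitted):

#include <cstdint>
#include <iostream>

// 10^n as an integer (n is small here; no overflow handling in this sketch).
static int64_t pow10_int(uint32_t n)
{
    int64_t p = 1;
    while (n--)
        p *= 10;
    return p;
}

// Rescale a decimal stored as value * 10^scale_from into scale_to.
static int64_t rescale(int64_t raw, uint32_t scale_from, uint32_t scale_to)
{
    if (scale_to > scale_from)
        return raw * pow10_int(scale_to - scale_from);
    if (scale_to == scale_from)
        return raw;                        // fast path added by this change
    return raw / pow10_int(scale_from - scale_to);
}

int main()
{
    std::cout << rescale(12345, 2, 4) << '\n'; // 123.45 at scale 4 -> 1234500
    std::cout << rescale(12345, 2, 2) << '\n'; // same scale -> 12345 unchanged
    std::cout << rescale(12345, 2, 0) << '\n'; // truncated to 123
}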


@ -1,7 +1,6 @@
#include "HTTPDictionarySource.h"
#include <Formats/formatBlock.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <IO/WriteBufferFromOStream.h>
#include <IO/WriteBufferFromString.h>
@ -39,7 +38,7 @@ HTTPDictionarySource::HTTPDictionarySource(
, configuration(configuration_)
, sample_block(sample_block_)
, context(context_)
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context))
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), {context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}))
{
credentials.setUsername(credentials_.getUsername());
credentials.setPassword(credentials_.getPassword());
@ -52,7 +51,7 @@ HTTPDictionarySource::HTTPDictionarySource(const HTTPDictionarySource & other)
, configuration(other.configuration)
, sample_block(other.sample_block)
, context(Context::createCopy(other.context))
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context))
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), {context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}))
{
credentials.setUsername(other.credentials.getUsername());
credentials.setPassword(other.credentials.getPassword());


@ -4,7 +4,7 @@
#include <DataTypes/DataTypeString.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <IO/WriteHelpers.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/Context.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Util/AbstractConfiguration.h>
@ -76,7 +76,7 @@ XDBCDictionarySource::XDBCDictionarySource(
, load_all_query(query_builder.composeLoadAllQuery())
, bridge_helper(bridge_)
, bridge_url(bridge_helper->getMainURI())
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context_))
, timeouts(ConnectionTimeouts::getHTTPTimeouts(context_->getSettingsRef(), {context_->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}))
{
auto url_params = bridge_helper->getURLParams(max_block_size);
for (const auto & [name, value] : url_params)


@ -40,11 +40,15 @@ void WebObjectStorage::initialize(const String & uri_path) const
try
{
Poco::Net::HTTPBasicCredentials credentials{};
ReadWriteBufferFromHTTP metadata_buf(
Poco::URI(fs::path(uri_path) / ".index"),
Poco::Net::HTTPRequest::HTTP_GET,
ReadWriteBufferFromHTTP::OutStreamCallback(),
ConnectionTimeouts::getHTTPTimeouts(getContext()),
ConnectionTimeouts::getHTTPTimeouts(
getContext()->getSettingsRef(),
{getContext()->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}),
credentials,
/* max_redirects= */ 0,
/* buffer_size_= */ DBMS_DEFAULT_BUFFER_SIZE,


@ -377,7 +377,7 @@ struct ToDateTransform32Or64
static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone)
{
// since converting to Date, no need in values outside of default LUT range.
return (from < DATE_LUT_MAX_DAY_NUM)
return (from <= DATE_LUT_MAX_DAY_NUM)
? from
: time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)));
}
@ -394,7 +394,7 @@ struct ToDateTransform32Or64Signed
/// The function should be monotonic (better for query optimizations), so we saturate instead of overflow.
if (from < 0)
return 0;
return (from < DATE_LUT_MAX_DAY_NUM)
return (from <= DATE_LUT_MAX_DAY_NUM)
? static_cast<ToType>(from)
: time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)));
}
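The change above relaxes the cutoff from '<' to '<=': DATE_LUT_MAX_DAY_NUM is 65535, the last representable Date (2149-06-06 in the test added below), so 65535 is now taken directly as a day number instead of falling through to the seconds-since-epoch branch. A simplified, self-contained sketch of the saturating conversion, using plain UTC day arithmetic instead of the real time-zone LUT:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Stand-in for the value from ClickHouse's date LUT: day number of 2149-06-06.
constexpr uint64_t DATE_LUT_MAX_DAY_NUM = 65535;

// Small values are already day numbers; larger values are interpreted as
// seconds since the Unix epoch, clamped to 0xFFFFFFFF (the real code uses
// time_zone.toDayNum(), so the boundary can shift by a day per time zone).
uint16_t toDateDayNum(uint64_t from)
{
    if (from <= DATE_LUT_MAX_DAY_NUM)   // was '<' before this change
        return static_cast<uint16_t>(from);
    const uint64_t seconds = std::min<uint64_t>(from, 0xFFFFFFFFULL);
    return static_cast<uint16_t>(std::min<uint64_t>(seconds / 86400, DATE_LUT_MAX_DAY_NUM));
}

int main()
{
    std::cout << toDateDayNum(65535) << '\n'; // now 2149-06-06; previously took the seconds branch
    std::cout << toDateDayNum(65536) << '\n'; // ~18 hours after the epoch -> day 0 (1970-01-01 in UTC)
}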


@ -0,0 +1,126 @@
#include <IO/ConnectionTimeouts.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Interpreters/Context.h>
namespace DB
{
ConnectionTimeouts::ConnectionTimeouts(
Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_)
: connection_timeout(connection_timeout_)
, send_timeout(send_timeout_)
, receive_timeout(receive_timeout_)
, tcp_keep_alive_timeout(0)
, http_keep_alive_timeout(0)
, secure_connection_timeout(connection_timeout)
, hedged_connection_timeout(receive_timeout_)
, receive_data_timeout(receive_timeout_)
{
}
ConnectionTimeouts::ConnectionTimeouts(
Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_)
: connection_timeout(connection_timeout_)
, send_timeout(send_timeout_)
, receive_timeout(receive_timeout_)
, tcp_keep_alive_timeout(tcp_keep_alive_timeout_)
, http_keep_alive_timeout(0)
, secure_connection_timeout(connection_timeout)
, hedged_connection_timeout(receive_timeout_)
, receive_data_timeout(receive_timeout_)
{
}
ConnectionTimeouts::ConnectionTimeouts(
Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_,
Poco::Timespan http_keep_alive_timeout_)
: connection_timeout(connection_timeout_)
, send_timeout(send_timeout_)
, receive_timeout(receive_timeout_)
, tcp_keep_alive_timeout(tcp_keep_alive_timeout_)
, http_keep_alive_timeout(http_keep_alive_timeout_)
, secure_connection_timeout(connection_timeout)
, hedged_connection_timeout(receive_timeout_)
, receive_data_timeout(receive_timeout_)
{
}
ConnectionTimeouts::ConnectionTimeouts(
Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_,
Poco::Timespan http_keep_alive_timeout_,
Poco::Timespan secure_connection_timeout_,
Poco::Timespan receive_hello_timeout_,
Poco::Timespan receive_data_timeout_)
: connection_timeout(connection_timeout_)
, send_timeout(send_timeout_)
, receive_timeout(receive_timeout_)
, tcp_keep_alive_timeout(tcp_keep_alive_timeout_)
, http_keep_alive_timeout(http_keep_alive_timeout_)
, secure_connection_timeout(secure_connection_timeout_)
, hedged_connection_timeout(receive_hello_timeout_)
, receive_data_timeout(receive_data_timeout_)
{
}
Poco::Timespan ConnectionTimeouts::saturate(Poco::Timespan timespan, Poco::Timespan limit)
{
if (limit.totalMicroseconds() == 0)
return timespan;
else
return (timespan > limit) ? limit : timespan;
}
ConnectionTimeouts ConnectionTimeouts::getSaturated(Poco::Timespan limit) const
{
return ConnectionTimeouts(saturate(connection_timeout, limit),
saturate(send_timeout, limit),
saturate(receive_timeout, limit),
saturate(tcp_keep_alive_timeout, limit),
saturate(http_keep_alive_timeout, limit),
saturate(secure_connection_timeout, limit),
saturate(hedged_connection_timeout, limit),
saturate(receive_data_timeout, limit));
}
/// Timeouts for the case when we have just single attempt to connect.
ConnectionTimeouts ConnectionTimeouts::getTCPTimeoutsWithoutFailover(const Settings & settings)
{
return ConnectionTimeouts(settings.connect_timeout, settings.send_timeout, settings.receive_timeout, settings.tcp_keep_alive_timeout);
}
/// Timeouts for the case when we will try many addresses in a loop.
ConnectionTimeouts ConnectionTimeouts::getTCPTimeoutsWithFailover(const Settings & settings)
{
return ConnectionTimeouts(
settings.connect_timeout_with_failover_ms,
settings.send_timeout,
settings.receive_timeout,
settings.tcp_keep_alive_timeout,
0,
settings.connect_timeout_with_failover_secure_ms,
settings.hedged_connection_timeout_ms,
settings.receive_data_timeout_ms);
}
ConnectionTimeouts ConnectionTimeouts::getHTTPTimeouts(const Settings & settings, Poco::Timespan http_keep_alive_timeout)
{
return ConnectionTimeouts(
settings.http_connection_timeout,
settings.http_send_timeout,
settings.http_receive_timeout,
settings.tcp_keep_alive_timeout,
http_keep_alive_timeout);
}
}
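The factory helpers formerly inlined in ConnectionTimeoutsContext.h (deleted below) now live in this new ConnectionTimeouts.cpp. The saturate()/getSaturated() pair clamps each timeout to an optional limit, where a zero limit means "do not clamp". A tiny stand-alone illustration of the same rule on raw microsecond counts (plain integers instead of Poco::Timespan, to keep the sketch dependency-free):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Same clamping rule as ConnectionTimeouts::saturate: a zero limit is "no limit".
uint64_t saturate_us(uint64_t timeout_us, uint64_t limit_us)
{
    if (limit_us == 0)
        return timeout_us;
    return std::min(timeout_us, limit_us);
}

int main()
{
    std::cout << saturate_us(5'000'000, 1'000'000) << '\n'; // clamped to 1 second
    std::cout << saturate_us(5'000'000, 0) << '\n';         // zero limit: left unchanged
}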


@ -30,47 +30,18 @@ struct ConnectionTimeouts
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),
tcp_keep_alive_timeout(0),
http_keep_alive_timeout(0),
secure_connection_timeout(connection_timeout),
hedged_connection_timeout(receive_timeout_),
receive_data_timeout(receive_timeout_)
{
}
Poco::Timespan receive_timeout_);
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),
tcp_keep_alive_timeout(tcp_keep_alive_timeout_),
http_keep_alive_timeout(0),
secure_connection_timeout(connection_timeout),
hedged_connection_timeout(receive_timeout_),
receive_data_timeout(receive_timeout_)
{
}
Poco::Timespan tcp_keep_alive_timeout_);
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_,
Poco::Timespan http_keep_alive_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),
tcp_keep_alive_timeout(tcp_keep_alive_timeout_),
http_keep_alive_timeout(http_keep_alive_timeout_),
secure_connection_timeout(connection_timeout),
hedged_connection_timeout(receive_timeout_),
receive_data_timeout(receive_timeout_)
{
}
Poco::Timespan http_keep_alive_timeout_);
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
@ -79,43 +50,17 @@ struct ConnectionTimeouts
Poco::Timespan http_keep_alive_timeout_,
Poco::Timespan secure_connection_timeout_,
Poco::Timespan receive_hello_timeout_,
Poco::Timespan receive_data_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),
tcp_keep_alive_timeout(tcp_keep_alive_timeout_),
http_keep_alive_timeout(http_keep_alive_timeout_),
secure_connection_timeout(secure_connection_timeout_),
hedged_connection_timeout(receive_hello_timeout_),
receive_data_timeout(receive_data_timeout_)
{
}
Poco::Timespan receive_data_timeout_);
static Poco::Timespan saturate(Poco::Timespan timespan, Poco::Timespan limit)
{
if (limit.totalMicroseconds() == 0)
return timespan;
else
return (timespan > limit) ? limit : timespan;
}
ConnectionTimeouts getSaturated(Poco::Timespan limit) const
{
return ConnectionTimeouts(saturate(connection_timeout, limit),
saturate(send_timeout, limit),
saturate(receive_timeout, limit),
saturate(tcp_keep_alive_timeout, limit),
saturate(http_keep_alive_timeout, limit),
saturate(secure_connection_timeout, limit),
saturate(hedged_connection_timeout, limit),
saturate(receive_data_timeout, limit));
}
static Poco::Timespan saturate(Poco::Timespan timespan, Poco::Timespan limit);
ConnectionTimeouts getSaturated(Poco::Timespan limit) const;
/// Timeouts for the case when we have just single attempt to connect.
static ConnectionTimeouts getTCPTimeoutsWithoutFailover(const Settings & settings);
/// Timeouts for the case when we will try many addresses in a loop.
static ConnectionTimeouts getTCPTimeoutsWithFailover(const Settings & settings);
static ConnectionTimeouts getHTTPTimeouts(ContextPtr context);
static ConnectionTimeouts getHTTPTimeouts(const Settings & settings, Poco::Timespan http_keep_alive_timeout);
};
}


@ -1,38 +0,0 @@
#pragma once
#include <IO/ConnectionTimeouts.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Interpreters/Context.h>
namespace DB
{
/// Timeouts for the case when we have just single attempt to connect.
inline ConnectionTimeouts ConnectionTimeouts::getTCPTimeoutsWithoutFailover(const Settings & settings)
{
return ConnectionTimeouts(settings.connect_timeout, settings.send_timeout, settings.receive_timeout, settings.tcp_keep_alive_timeout);
}
/// Timeouts for the case when we will try many addresses in a loop.
inline ConnectionTimeouts ConnectionTimeouts::getTCPTimeoutsWithFailover(const Settings & settings)
{
return ConnectionTimeouts(
settings.connect_timeout_with_failover_ms,
settings.send_timeout,
settings.receive_timeout,
settings.tcp_keep_alive_timeout,
0,
settings.connect_timeout_with_failover_secure_ms,
settings.hedged_connection_timeout_ms,
settings.receive_data_timeout_ms);
}
inline ConnectionTimeouts ConnectionTimeouts::getHTTPTimeouts(ContextPtr context)
{
const auto & settings = context->getSettingsRef();
const auto & config = context->getConfigRef();
Poco::Timespan http_keep_alive_timeout{config.getUInt("keep_alive_timeout", 10), 0};
return ConnectionTimeouts(settings.http_connection_timeout, settings.http_send_timeout, settings.http_receive_timeout, settings.tcp_keep_alive_timeout, http_keep_alive_timeout);
}
}


@ -7,7 +7,7 @@
#include <Common/ProfileEvents.h>
#include <Common/checkStackSize.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/RequiredSourceColumnsVisitor.h>
#include <Interpreters/TranslateQualifiedNamesVisitor.h>


@ -12,7 +12,6 @@
#include <Processors/Transforms/ReadFromMergeTreeDependencyTransform.h>
#include <Interpreters/ActionsDAG.h>
#include <Interpreters/InterpreterSelectQuery.h>
#include <IO/ConnectionTimeoutsContext.h>
#include "Common/logger_useful.h"
#include <Common/checkStackSize.h>
#include <Core/QueryProcessingStage.h>


@ -21,7 +21,7 @@
#include <Interpreters/Cluster.h>
#include <Interpreters/Context.h>
#include <Interpreters/InternalTextLogsQueue.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Client/MultiplexedConnections.h>
#include <Client/HedgedConnections.h>
#include <Storages/MergeTree/MergeTreeDataPartUUID.h>


@ -20,7 +20,6 @@
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CheckingCompressedReadBuffer.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/Operators.h>
#include <Disks/IDisk.h>
#include <boost/algorithm/string/find_iterator.hpp>


@ -13,7 +13,7 @@
#include <Compression/CompressedWriteBuffer.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Formats/NativeWriter.h>
#include <Processors/Sinks/RemoteSink.h>
#include <Processors/Executors/PushingPipelineExecutor.h>


@ -8,7 +8,7 @@
#include <Client/Connection.h>
#include <Core/QueryProcessingStage.h>
#include <DataTypes/DataTypeString.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/Context.h>
#include <Interpreters/getHeaderForProcessingStage.h>
#include <Interpreters/SelectQueryOptions.h>


@ -81,7 +81,7 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <memory>
#include <filesystem>


@ -69,7 +69,6 @@
#include <IO/ReadBufferFromString.h>
#include <IO/Operators.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <Interpreters/ClusterProxy/executeQuery.h>
#include <Interpreters/ClusterProxy/SelectStreamFactory.h>
@ -233,6 +232,11 @@ zkutil::ZooKeeperPtr StorageReplicatedMergeTree::getZooKeeperAndAssertNotReadonl
return res;
}
static ConnectionTimeouts getHTTPTimeouts(ContextPtr context)
{
return ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), {context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0});
}
static MergeTreePartInfo makeDummyDropRangeForMovePartitionOrAttachPartitionFrom(const String & partition_id)
{
/// NOTE We don't have special log entry type for MOVE PARTITION/ATTACH PARTITION FROM,
@ -2356,7 +2360,6 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
return true;
}
void StorageReplicatedMergeTree::executeClonePartFromShard(const LogEntry & entry)
{
auto zookeeper = getZooKeeper();
@ -2385,7 +2388,7 @@ void StorageReplicatedMergeTree::executeClonePartFromShard(const LogEntry & entr
auto metadata_snapshot = getInMemoryMetadataPtr();
String source_replica_path = entry.source_shard + "/replicas/" + replica;
ReplicatedMergeTreeAddress address(getZooKeeper()->get(source_replica_path + "/host"));
auto timeouts = ConnectionTimeouts::getHTTPTimeouts(getContext());
auto timeouts = getHTTPTimeouts(getContext());
auto credentials = getContext()->getInterserverCredentials();
String interserver_scheme = getContext()->getInterserverScheme();
@ -3614,7 +3617,7 @@ void StorageReplicatedMergeTree::stopBeingLeader()
ConnectionTimeouts StorageReplicatedMergeTree::getFetchPartHTTPTimeouts(ContextPtr local_context)
{
auto timeouts = ConnectionTimeouts::getHTTPTimeouts(local_context);
auto timeouts = getHTTPTimeouts(local_context);
auto settings = getSettings();
if (settings->replicated_fetches_http_connection_timeout.changed)
@ -4261,7 +4264,7 @@ MutableDataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart(
std::function<MutableDataPartPtr()> get_part;
ReplicatedMergeTreeAddress address(zookeeper->get(fs::path(source_replica_path) / "host"));
auto timeouts = ConnectionTimeouts::getHTTPTimeouts(getContext());
auto timeouts = getHTTPTimeouts(getContext());
auto credentials = getContext()->getInterserverCredentials();
String interserver_scheme = getContext()->getInterserverScheme();


@ -8,7 +8,7 @@
#include "Client/Connection.h"
#include "Core/QueryProcessingStage.h"
#include <DataTypes/DataTypeString.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/WriteBufferFromS3.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>


@ -13,7 +13,6 @@
#include <Parsers/ASTIdentifier.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/IOThreadPool.h>
#include <IO/ParallelReadBuffer.h>
#include <IO/WriteBufferFromHTTP.h>
@ -82,6 +81,10 @@ static bool urlWithGlobs(const String & uri)
return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos;
}
static ConnectionTimeouts getHTTPTimeouts(ContextPtr context)
{
return ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), {context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0});
}
IStorageURLBase::IStorageURLBase(
const String & uri_,
@ -632,7 +635,7 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData(
{},
Poco::Net::HTTPRequest::HTTP_GET,
{},
ConnectionTimeouts::getHTTPTimeouts(context),
getHTTPTimeouts(context),
compression_method,
credentials,
headers,
@ -716,7 +719,7 @@ Pipe IStorageURLBase::read(
local_context,
columns_description,
max_block_size,
ConnectionTimeouts::getHTTPTimeouts(local_context),
getHTTPTimeouts(local_context),
compression_method,
download_threads,
headers,
@ -740,7 +743,7 @@ Pipe IStorageURLBase::read(
local_context,
columns_description,
max_block_size,
ConnectionTimeouts::getHTTPTimeouts(local_context),
getHTTPTimeouts(local_context),
compression_method,
max_download_threads,
headers,
@ -775,6 +778,7 @@ Pipe StorageURLWithFailover::read(
auto uri_info = std::make_shared<StorageURLSource::URIInfo>();
uri_info->uri_list_to_read.emplace_back(uri_options);
auto pipe = Pipe(std::make_shared<StorageURLSource>(
uri_info,
getReadMethod(),
@ -786,7 +790,7 @@ Pipe StorageURLWithFailover::read(
local_context,
columns_description,
max_block_size,
ConnectionTimeouts::getHTTPTimeouts(local_context),
getHTTPTimeouts(local_context),
compression_method,
local_context->getSettingsRef().max_download_threads,
headers,
@ -815,7 +819,7 @@ SinkToStoragePtr IStorageURLBase::write(const ASTPtr & query, const StorageMetad
format_settings,
metadata_snapshot->getSampleBlock(),
context,
ConnectionTimeouts::getHTTPTimeouts(context),
getHTTPTimeouts(context),
compression_method,
http_method);
}
@ -827,7 +831,7 @@ SinkToStoragePtr IStorageURLBase::write(const ASTPtr & query, const StorageMetad
format_settings,
metadata_snapshot->getSampleBlock(),
context,
ConnectionTimeouts::getHTTPTimeouts(context),
getHTTPTimeouts(context),
compression_method,
http_method);
}
@ -896,7 +900,7 @@ std::optional<time_t> IStorageURLBase::getLastModificationTime(
Poco::URI(url),
Poco::Net::HTTPRequest::HTTP_GET,
{},
ConnectionTimeouts::getHTTPTimeouts(context),
getHTTPTimeouts(context),
credentials,
settings.max_http_get_redirects,
settings.max_read_buffer_size,


@ -5,7 +5,7 @@
#include <Storages/checkAndGetLiteralArgument.h>
#include <Formats/FormatFactory.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/Context.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Parsers/ASTLiteral.h>
@ -130,13 +130,16 @@ SinkToStoragePtr StorageXDBC::write(const ASTPtr & /* query */, const StorageMet
request_uri.addQueryParameter("format_name", format_name);
request_uri.addQueryParameter("sample_block", metadata_snapshot->getSampleBlock().getNamesAndTypesList().toString());
return std::make_shared<StorageURLSink>(
request_uri.toString(),
format_name,
getFormatSettings(local_context),
metadata_snapshot->getSampleBlock(),
local_context,
ConnectionTimeouts::getHTTPTimeouts(local_context),
ConnectionTimeouts::getHTTPTimeouts(
local_context->getSettingsRef(),
{local_context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}),
compression_method);
}


@ -2,7 +2,7 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <IO/ConnectionTimeouts.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
@ -76,7 +76,14 @@ ColumnsDescription ITableFunctionXDBC::getActualTableStructure(ContextPtr contex
columns_info_uri.addQueryParameter("external_table_functions_use_nulls", toString(use_nulls));
Poco::Net::HTTPBasicCredentials credentials{};
ReadWriteBufferFromHTTP buf(columns_info_uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(context), credentials);
ReadWriteBufferFromHTTP buf(
columns_info_uri,
Poco::Net::HTTPRequest::HTTP_POST,
{},
ConnectionTimeouts::getHTTPTimeouts(
context->getSettingsRef(),
{context->getConfigRef().getUInt("keep_alive_timeout", DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT), 0}),
credentials);
std::string columns_info;
readStringBinary(columns_info, buf);


@ -167,7 +167,7 @@ def test_smoke():
def test_smoke_parallel():
threads = []
for _ in range(100):
for _ in range(50):
threads.append(SafeThread(target=execute_smoke_query))
for thread in threads:
thread.start()
@ -178,7 +178,7 @@ def test_smoke_parallel():
def test_smoke_parallel_dict_reload():
threads = []
for _ in range(100):
for _ in range(90):
threads.append(SafeThread(target=execute_reload_dictionary_slow_dict_3))
for thread in threads:
thread.start()


@ -251,7 +251,7 @@ def test_restore_another_bucket_path(cluster, db_atomic, zero_copy):
node_another_bucket = cluster.instances["node_another_bucket"]
create_restore_file(node_another_bucket, bucket="root")
node_another_bucket.restart_clickhouse()
node_another_bucket.restart_clickhouse(stop_start_wait_sec=120)
create_table(
node_another_bucket, "test", schema, attach=True, db_atomic=db_atomic, uuid=uuid
)


@ -0,0 +1,4 @@
<test>
<query>SELECT count() FROM numbers(20000000) WHERE NOT ignore(toDateTime64(rand(), 0))</query>
<query>SELECT count() FROM numbers(20000000) WHERE NOT ignore(toDateTime64(rand(), 3))</query>
</test>


@ -0,0 +1,15 @@
runtime messages 0.001
runtime exceptions 0.05
messages shorter than 10 10
messages shorter than 16 40
exceptions shorter than 30 125
noisy messages 0.3
noisy Trace messages 0.16
noisy Debug messages 0.09
noisy Info messages 0.05
noisy Warning messages 0.01
noisy Error messages 0.02
no Fatal messages 0
number of too noisy messages 3
number of noisy messages 10
incorrect patterns 15


@ -0,0 +1,64 @@
-- Tags: no-parallel, no-fasttest
-- no-parallel because we want to run this test when most of the other tests already passed
-- If this test fails, see the "Top patterns of log messages" diagnostics in the end of run.log
system flush logs;
drop table if exists logs;
create view logs as select * from system.text_log where now() - toIntervalMinute(120) < event_time;
-- Check that we don't have too many messages formatted with fmt::runtime or strings concatenation.
-- 0.001 threshold should be always enough, the value was about 0.00025
select 'runtime messages', max2(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.001) from logs;
-- Check the same for exceptions. The value was 0.03
select 'runtime exceptions', max2(coalesce(sum(length(message_format_string) = 0) / countOrNull(), 0), 0.05) from logs where message like '%DB::Exception%';
-- Check that we don't have too many short meaningless message patterns.
select 'messages shorter than 10', max2(countDistinctOrDefault(message_format_string), 10) from logs where length(message_format_string) < 10;
-- Same as above. Feel free to update the threshold or remove this query if really necessary
select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_string), 40) from logs where length(message_format_string) < 16;
-- Same as above, but exceptions must be more informative. Feel free to update the threshold or remove this query if really necessary
select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 125) from logs where length(message_format_string) < 30 and message ilike '%DB::Exception%';
-- Avoid too noisy messages: top 1 message frequency must be less than 30%. We should reduce the threshold
select 'noisy messages', max2((select count() from logs group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.30);
-- Same as above, but excluding Test level (actually finds top 1 Trace message)
with ('Access granted: {}{}', '{} -> {}') as frequent_in_tests
select 'noisy Trace messages', max2((select count() from logs where level!='Test' and message_format_string not in frequent_in_tests
group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.16);
-- Same as above for Debug
select 'noisy Debug messages', max2((select count() from logs where level <= 'Debug' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.09);
-- Same as above for Info
select 'noisy Info messages', max2((select count() from logs where level <= 'Information' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.05);
-- Same as above for Warning
with ('Not enabled four letter command {}') as frequent_in_tests
select 'noisy Warning messages', max2((select countOrDefault() from logs where level = 'Warning' and message_format_string not in frequent_in_tests
group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.01);
-- Same as above for Error
select 'noisy Error messages', max2((select countOrDefault() from logs where level = 'Error' group by message_format_string order by count() desc limit 1) / (select count() from logs), 0.02);
select 'no Fatal messages', count() from logs where level = 'Fatal';
-- Avoid too noisy messages: limit the number of messages with high frequency
select 'number of too noisy messages', max2(count(), 3) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.10);
select 'number of noisy messages', max2(count(), 10) from (select count() / (select count() from logs) as freq, message_format_string from logs group by message_format_string having freq > 0.05);
-- Each message matches its pattern (returns 0 rows)
-- FIXME maybe we should make it stricter ('Code:%Exception: '||s||'%'), but it's not easy because of addMessage
select 'incorrect patterns', max2(countDistinct(message_format_string), 15) from (
select message_format_string, any(message) as any_message from logs
where message not like (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') as s)
and message not like ('%Exception: '||s||'%') group by message_format_string
) where any_message not like '%Poco::Exception%';
drop table logs;


@ -1,4 +1,4 @@
-- Tags: no-parallel
-- Tags: no-parallel, no-random-merge-tree-settings
set allow_deprecated_syntax_for_merge_tree=1;
set optimize_on_insert = 0;


@ -0,0 +1,36 @@
2149-06-06 65535
2149-06-06 toUInt16(65535)
2149-06-06 toInt32(65535)
2149-06-06 toUInt32(65535)
2149-06-06 toDate(65535)
2149-06-06 CAST(65535 as UInt16)
2149-06-06 CAST(65535 as Int32)
2149-06-06 CAST(65535 as UInt32)
2149-06-06 CAST(65535 as Date)
2149-06-05 65534
2149-06-05 toUInt16(65534)
2149-06-05 toInt32(65534)
2149-06-05 toUInt32(65534)
2149-06-05 toDate(65534)
2149-06-05 CAST(65534 as UInt16)
2149-06-05 CAST(65534 as Int32)
2149-06-05 CAST(65534 as UInt32)
2149-06-05 CAST(65534 as Date)
1970-01-01 0
1970-01-01 toUInt16(0)
1970-01-01 toInt32(0)
1970-01-01 toUInt32(0)
1970-01-01 toDate(0)
1970-01-01 CAST(0 as UInt16)
1970-01-01 CAST(0 as Int32)
1970-01-01 CAST(0 as UInt32)
1970-01-01 CAST(0 as Date)
1 65536
1 toInt32(65536)
1 toUInt32(65536)
1 toDate(65536)
1 CAST(65536 as Int32)
1 CAST(65536 as UInt32)
1 CAST(65536 as Date)
1970-01-01 toUInt16(65536)
1970-01-01 CAST(65536 as UInt16)


@ -0,0 +1,99 @@
DROP TABLE IF EXISTS 02540_date;
CREATE TABLE 02540_date (txt String, x Date) engine=Memory;
-- Date: Supported range of values: [1970-01-01, 2149-06-06].
-- ^----closed interval---^
INSERT INTO 02540_date VALUES('65535', 65535);
INSERT INTO 02540_date VALUES('toUInt16(65535)', toUInt16(65535)); -- #43370 weird one -> used to be 1970-01-01
INSERT INTO 02540_date VALUES('toInt32(65535)', toInt32(65535));
INSERT INTO 02540_date VALUES('toUInt32(65535)', toUInt32(65535));
INSERT INTO 02540_date VALUES('toDate(65535)', toDate(65535));
INSERT INTO 02540_date VALUES('CAST(65535 as UInt16)', CAST(65535 as UInt16));
INSERT INTO 02540_date VALUES('CAST(65535 as Int32)', CAST(65535 as Int32));
INSERT INTO 02540_date VALUES('CAST(65535 as UInt32)', CAST(65535 as UInt32));
INSERT INTO 02540_date VALUES('CAST(65535 as Date)', CAST(65535 as Date));
INSERT INTO 02540_date VALUES('65534', 65534);
INSERT INTO 02540_date VALUES('toUInt16(65534)', toUInt16(65534));
INSERT INTO 02540_date VALUES('toInt32(65534)', toInt32(65534));
INSERT INTO 02540_date VALUES('toUInt32(65534)', toUInt32(65534));
INSERT INTO 02540_date VALUES('toDate(65534)', toDate(65534));
INSERT INTO 02540_date VALUES('CAST(65534 as UInt16)', CAST(65534 as UInt16));
INSERT INTO 02540_date VALUES('CAST(65534 as Int32)', CAST(65534 as Int32));
INSERT INTO 02540_date VALUES('CAST(65534 as UInt32)', CAST(65534 as UInt32));
INSERT INTO 02540_date VALUES('CAST(65534 as Date)', CAST(65534 as Date));
INSERT INTO 02540_date VALUES('0', 0);
INSERT INTO 02540_date VALUES('toUInt16(0)', toUInt16(0));
INSERT INTO 02540_date VALUES('toInt32(0)', toInt32(0));
INSERT INTO 02540_date VALUES('toUInt32(0)', toUInt32(0));
INSERT INTO 02540_date VALUES('toDate(0)', toDate(0));
INSERT INTO 02540_date VALUES('CAST(0 as UInt16)', CAST(0 as UInt16));
INSERT INTO 02540_date VALUES('CAST(0 as Int32)', CAST(0 as Int32));
INSERT INTO 02540_date VALUES('CAST(0 as UInt32)', CAST(0 as UInt32));
INSERT INTO 02540_date VALUES('CAST(0 as Date)', CAST(0 as Date));
-- 65536 will be done using the TZ settings (comments in #45914)
-- We can expect either 1970-01-01 or 1970-01-02
-- time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)))
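-- (Illustration, not part of the original test: 65536 interpreted as seconds is 1970-01-01 18:12:16 UTC,
-- so zones at or behind UTC stay on 1970-01-01 while zones at roughly UTC+05:48 or later roll over to 1970-01-02.)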
INSERT INTO 02540_date VALUES('65536', 65536);
INSERT INTO 02540_date VALUES('toUInt16(65536)', toUInt16(65536)); -- Narrowing conversion 65536 ==> 0
INSERT INTO 02540_date VALUES('toInt32(65536)', toInt32(65536));
INSERT INTO 02540_date VALUES('toUInt32(65536)', toUInt32(65536));
INSERT INTO 02540_date VALUES('toDate(65536)', toDate(65536));
INSERT INTO 02540_date VALUES('CAST(65536 as UInt16)', CAST(65536 as UInt16)); -- Narrowing conversion 65536 ==> 0
INSERT INTO 02540_date VALUES('CAST(65536 as Int32)', CAST(65536 as Int32));
INSERT INTO 02540_date VALUES('CAST(65536 as UInt32)', CAST(65536 as UInt32));
INSERT INTO 02540_date VALUES('CAST(65536 as Date)', CAST(65536 as Date));
SELECT x, txt FROM 02540_date WHERE txt == '65535';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as UInt16)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as Int32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as UInt32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as Date)';
SELECT x, txt FROM 02540_date WHERE txt == '65534';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as UInt16)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as Int32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as UInt32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as Date)';
SELECT x, txt FROM 02540_date WHERE txt == '0';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(0)';
SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(0)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(0)';
SELECT x, txt FROM 02540_date WHERE txt == 'toDate(0)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as UInt16)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as Int32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as UInt32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as Date)';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == '65536';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'toInt32(65536)';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'toUInt32(65536)';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'toDate(65536)';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'CAST(65536 as Int32)';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'CAST(65536 as UInt32)';
SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'CAST(65536 as Date)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65536)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65536 as UInt16)';