Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)

Merge branch 'master' into hashed-dictionary-complex-key-update-field-initial-load-fix

Commit e0b74f2f85
@@ -159,17 +159,12 @@ void IBridge::initialize(Application & self)
     if (port > 0xFFFF)
         throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);

-    http_timeout = config().getUInt("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
+    http_timeout = config().getUInt64("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
     max_server_connections = config().getUInt("max-server-connections", 1024);
-    keep_alive_timeout = config().getUInt("keep-alive-timeout", 10);
+    keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10);

     initializeTerminationAndSignalProcessing();

 #if USE_ODBC
     if (bridgeName() == "ODBCBridge")
         Poco::Data::ODBC::Connector::registerConnector();
 #endif

     ServerApplication::initialize(self); // NOLINT
 }
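The `getUInt` → `getUInt64` switches read the timeouts into a 64-bit field instead of a 32-bit one. A minimal sketch of the difference using Poco's configuration API (the key and value here are made up; the overload names are Poco's):

```cpp
#include <Poco/AutoPtr.h>
#include <Poco/Util/MapConfiguration.h>
#include <iostream>

int main()
{
    Poco::AutoPtr<Poco::Util::MapConfiguration> config = new Poco::Util::MapConfiguration;
    config->setString("http-timeout", "8589934592"); // 2^33: does not fit in 32 bits

    // getUInt64() parses into a 64-bit integer, so large values survive intact;
    // getUInt() parses into a 32-bit unsigned and would fail or truncate here.
    std::cout << config->getUInt64("http-timeout") << '\n';
    return 0;
}
```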
@@ -25,6 +25,10 @@ uint64_t getThreadId()
         current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
 #elif defined(OS_FREEBSD)
         current_tid = pthread_getthreadid_np();
+#elif defined(OS_SUNOS)
+        // On Solaris-derived systems, this returns the ID of the LWP, analogous
+        // to a thread.
+        current_tid = static_cast<uint64_t>(pthread_self());
 #else
         if (0 != pthread_threadid_np(nullptr, &current_tid))
             throw std::logic_error("pthread_threadid_np returned error");
@@ -2,7 +2,7 @@

 #include <time.h>

-#if defined (OS_DARWIN)
+#if defined (OS_DARWIN) || defined (OS_SUNOS)
 #    define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
 #elif defined (OS_FREEBSD)
 #    define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
@@ -13,7 +13,12 @@ using char8_t = unsigned char;
 #endif

 /// This is needed for more strict aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
+#if !defined(PVS_STUDIO) /// But PVS-Studio does not treat it correctly.
 using UInt8 = char8_t;
+#else
+using UInt8 = uint8_t;
+#endif

 using UInt16 = uint16_t;
 using UInt32 = uint32_t;
 using UInt64 = uint64_t;
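The aliasing comment above refers to the fact that `uint8_t` is an alias of `unsigned char`, which the standard allows to alias any object, while a distinct byte-like type such as C++20's `char8_t` has no such exemption. A sketch of the effect (assumes a real C++20 `char8_t`, not the pre-C++20 typedef shown above):

```cpp
#include <cstddef>

// Stores through 'dst' cannot alias 'count' when dst points to char8_t,
// so the compiler may keep 'count' in a register for the whole loop.
// With uint8_t (= unsigned char) it must assume aliasing and reload it.
void fill(char8_t * dst, std::size_t n, std::size_t & count)
{
    for (std::size_t i = 0; i < n; ++i)
    {
        dst[i] = char8_t{0};
        ++count;
    }
}
```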
@@ -35,7 +35,7 @@ PEERDIR(
 CFLAGS(-g0)

 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F examples | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -1643,22 +1643,22 @@ typedef setseq_base<pcg128_t, pcg128_t, xsl_rr_rr_mixin>

 template <bitcount_t table_pow2, bitcount_t advance_pow2,
           typename BaseRNG, bool kdd = true>
-using ext_std8 = extended<table_pow2, advance_pow2, BaseRNG,
+using ext_std8 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
                           oneseq_rxs_m_xs_8_8, kdd>;

 template <bitcount_t table_pow2, bitcount_t advance_pow2,
           typename BaseRNG, bool kdd = true>
-using ext_std16 = extended<table_pow2, advance_pow2, BaseRNG,
+using ext_std16 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
                            oneseq_rxs_m_xs_16_16, kdd>;

 template <bitcount_t table_pow2, bitcount_t advance_pow2,
           typename BaseRNG, bool kdd = true>
-using ext_std32 = extended<table_pow2, advance_pow2, BaseRNG,
+using ext_std32 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
                            oneseq_rxs_m_xs_32_32, kdd>;

 template <bitcount_t table_pow2, bitcount_t advance_pow2,
           typename BaseRNG, bool kdd = true>
-using ext_std64 = extended<table_pow2, advance_pow2, BaseRNG,
+using ext_std64 = pcg_detail::extended<table_pow2, advance_pow2, BaseRNG,
                            oneseq_rxs_m_xs_64_64, kdd>;
@@ -40,7 +40,7 @@ if (SANITIZE)
     # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to
     # keep the binary size down.
     # TODO: try compiling with -Og and with ld.gold.
-    set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
+    set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")

     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
     set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
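The new `-fsanitize-memory-use-after-dtor` flag (together with `MSAN_OPTIONS=poison_in_dtor=1`, enabled in the test image below) makes MemorySanitizer poison an object's storage when its destructor runs. A minimal sketch of the class of bug this catches:

```cpp
#include <iostream>
#include <new>

struct Widget
{
    int value = 42;
    ~Widget() { /* storage is poisoned here when poison_in_dtor=1 */ }
};

int main()
{
    alignas(Widget) unsigned char storage[sizeof(Widget)];
    auto * w = new (storage) Widget;
    w->~Widget();
    // Reading a destroyed object: MSan reports use-of-uninitialized-value here.
    std::cout << w->value << '\n';
    return 0;
}
```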
@@ -12,6 +12,9 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
 elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin")
     set (OS_DARWIN 1)
     add_definitions(-D OS_DARWIN)
+elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS")
+    set (OS_SUNOS 1)
+    add_definitions(-D OS_SUNOS)
 endif ()

 if (CMAKE_CROSSCOMPILING)
contrib/boost (vendored submodule)
@@ -1 +1 @@
-Subproject commit 9f0ff347e50429686604002d8ad1fd07515c4f31
+Subproject commit 1ccbb5a522a571ce83b606dbc2e1011c42ecccfb

contrib/llvm (vendored submodule)
@@ -1 +1 @@
-Subproject commit 8f24d507c1cfeec66d27f48fe74518fd278e2d25
+Subproject commit cfaf365cf96918999d09d976ec736b4518cf5d02
@@ -64,6 +64,8 @@ RUN groupadd -r clickhouse --gid=101 \
         clickhouse-client=$version \
         clickhouse-server=$version ; \
     fi \
+    && wget --progress=bar:force:noscroll "https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-$(dpkg --print-architecture)" -O /bin/gosu \
+    && chmod +x /bin/gosu \
    && clickhouse-local -q 'SELECT * FROM system.build_options' \
    && rm -rf \
        /var/lib/apt/lists/* \
@@ -76,8 +78,6 @@ RUN groupadd -r clickhouse --gid=101 \
 # we need to allow "others" access to clickhouse folder, because docker container
 # can be started with arbitrary uid (openshift usecase)

-ADD https://github.com/tianon/gosu/releases/download/$gosu_ver/gosu-amd64 /bin/gosu
-
 RUN locale-gen en_US.UTF-8
 ENV LANG en_US.UTF-8
 ENV LANGUAGE en_US:en
@@ -88,10 +88,7 @@ RUN mkdir /docker-entrypoint-initdb.d

 COPY docker_related_config.xml /etc/clickhouse-server/config.d/
 COPY entrypoint.sh /entrypoint.sh

-RUN chmod +x \
-    /entrypoint.sh \
-    /bin/gosu
+RUN chmod +x /entrypoint.sh

 EXPOSE 9000 8123 9009
 VOLUME /var/lib/clickhouse
@@ -51,13 +51,13 @@ RUN apt-get update \
 # Sanitizer options for services (clickhouse-server)
 RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \
     echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \
-    echo "MSAN_OPTIONS='abort_on_error=1'" >> /etc/environment; \
+    echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment; \
     echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment; \
     ln -s /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-symbolizer /usr/bin/llvm-symbolizer;
 # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
 # (but w/o verbosity for TSAN, otherwise test.reference will not match)
 ENV TSAN_OPTIONS='halt_on_error=1 history_size=7'
 ENV UBSAN_OPTIONS='print_stacktrace=1'
-ENV MSAN_OPTIONS='abort_on_error=1'
+ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

 CMD sleep 1
@@ -102,7 +102,9 @@ For non-Linux operating systems and for AArch64 CPU architecture, ClickHouse builds
 - [FreeBSD](https://builds.clickhouse.tech/master/freebsd/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/freebsd/clickhouse' && chmod a+x ./clickhouse`
 - [AArch64](https://builds.clickhouse.tech/master/aarch64/clickhouse) — `curl -O 'https://builds.clickhouse.tech/master/aarch64/clickhouse' && chmod a+x ./clickhouse`

-After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data. To run `clickhouse server`, you have to additionally download [server](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.xml) and [users](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/users.xml) configuration files from GitHub.
+After downloading, you can use the `clickhouse client` to connect to the server, or `clickhouse local` to process local data.
+
+Run `sudo ./clickhouse install` if you want to install clickhouse system-wide (along with the needed configuration files, user setup, etc.). After that, run the `clickhouse start` command to start clickhouse-server and `clickhouse-client` to connect to it.

 These builds are not recommended for use in production environments because they are less thoroughly tested, but you can use them at your own risk. They also have only a subset of ClickHouse features available.
@@ -43,15 +43,15 @@ Other join types available in ClickHouse:

 Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.

-### ASOF Join Usage {#asof-join-usage}
+### ASOF JOIN Usage {#asof-join-usage}

 `ASOF JOIN` is useful when you need to join records that have no exact match.

-The algorithm requires a special column in the tables. This column:
+The algorithm requires a special column in the tables. The column must:

 - Contain an ordered sequence.
-- Be one of these types: [Int*, UInt*](../../../sql-reference/data-types/int-uint.md), [Floating\*](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [Date-time](../../../sql-reference/data-types/datetime.md), [Decimal\*](../../../sql-reference/data-types/decimal.md).
-- Not be the only column `JOIN`
+- Be one of these types: [Int*, UInt*](../../../sql-reference/data-types/int-uint.md), [Float\*](../../../sql-reference/data-types/float.md), [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md), [Decimal\*](../../../sql-reference/data-types/decimal.md).
+- Not be the only column in the `JOIN` clause.

 Syntax of `ASOF JOIN ... ON`:

@@ -62,9 +62,9 @@ ASOF LEFT JOIN table_2
 ON equi_cond AND closest_match_cond
 ```

-You can use any number of equality conditions and exactly one closest-match condition. For example, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`.
+You can use any number of equality conditions and one, and only one, closest-match condition. For example, `SELECT count() FROM table_1 ASOF LEFT JOIN table_2 ON table_1.a == table_2.b AND table_2.t <= table_1.t`.

-Conditions supported for the closest match: `>`, `>=`, `<`, `<=`.
+Operators supported for the closest match: `>`, `>=`, `<`, `<=`.

 Syntax of `ASOF JOIN ... USING`:

@@ -75,9 +75,9 @@ ASOF JOIN table_2
 USING (equi_column1, ... equi_columnN, asof_column)
 ```

-`ASOF JOIN` uses `equi_columnX` for joining on equality and `asof_column` for joining on the closest match with the `table_1.asof_column >= table_2.asof_column` condition. The `asof_column` column is always the last one in the `USING` clause.
+Under the `table_1.asof_column >= table_2.asof_column` condition, `ASOF JOIN` uses `equi_columnX` for equality matching and `asof_column` for the closest-match JOIN. The `asof_column` column is always the last one in the `USING` clause.

-For example, consider the following tables:
+For example, refer to the following tables:

      table_1                             table_2
    event   | ev_time | user_id         event   | ev_time | user_id
@@ -88,10 +88,10 @@ USING (equi_column1, ... equi_columnN, asof_column)
    event_1_2 | 13:00 | 42              event_2_3 | 13:00 | 42
    ...                                 ...

-`ASOF JOIN` can take the timestamp of a user event from `table_1` and find an event in `table_2` whose timestamp is closest to that of the event from `table_1`, according to the closest-match condition. Equal timestamp values are the closest, if available. Here, the `user_id` column can be used for joining on equality and the `ev_time` column for joining on the closest match. In our example, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` cannot be joined.
+`ASOF JOIN` takes a user-event timestamp from `table_1` and finds, among the user-event timestamps in `table_2`, the closest one that satisfies the closest-match condition. Equal timestamp values are the closest, if available. In this example, the `user_id` column can be used for equality matching and the `ev_time` column for the closest match. Here, `event_1_1` can be joined with `event_2_1` and `event_1_2` can be joined with `event_2_3`, but `event_2_2` cannot be joined.

 !!! note "Note"
-    `ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
+    `ASOF JOIN` is **not** supported in the [JOIN](../../../engines/table-engines/special/join.md) table engine.

 ## Distributed Join {#global-join}
@@ -844,8 +844,8 @@ namespace
             fmt::print("The pidof command returned unusual output.\n");
         }

-        WriteBufferFromFileDescriptor stderr(STDERR_FILENO);
-        copyData(sh->err, stderr);
+        WriteBufferFromFileDescriptor std_err(STDERR_FILENO);
+        copyData(sh->err, std_err);

         sh->tryWait();
     }
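Renaming the local from `stderr` to `std_err` avoids colliding with the C `stderr`, which the standard allows to be (and in glibc is) a macro, so a local variable of that name expands into something other than an identifier. A small illustration of the hazard (hypothetical snippet, not from the commit):

```cpp
#include <cstdio>

int main()
{
    // 'stderr' is typically a macro from <cstdio>; a local declaration like
    //     FILE * stderr = nullptr;
    // would expand the macro and fail to compile. A distinct name is safe:
    FILE * std_err = stderr;
    std::fputs("hello\n", std_err);
    return 0;
}
```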
@@ -8,7 +8,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -8,7 +8,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F GroupBitmap | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | grep -v -F GroupBitmap | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -55,7 +55,7 @@ protected:

     virtual Poco::Logger * getLog() const = 0;

-    virtual const Poco::Timespan & getHTTPTimeout() const = 0;
+    virtual Poco::Timespan getHTTPTimeout() const = 0;

     virtual Poco::URI createBaseURI() const = 0;
@@ -28,7 +28,7 @@ LibraryBridgeHelper::LibraryBridgeHelper(
     , log(&Poco::Logger::get("LibraryBridgeHelper"))
     , sample_block(sample_block_)
     , config(context_->getConfigRef())
-    , http_timeout(context_->getSettingsRef().http_receive_timeout.value.totalSeconds())
+    , http_timeout(context_->getSettingsRef().http_receive_timeout.value)
     , dictionary_id(dictionary_id_)
 {
     bridge_port = config.getUInt("library_bridge.port", DEFAULT_PORT);
@@ -57,7 +57,7 @@ protected:

     Poco::Logger * getLog() const override { return log; }

-    const Poco::Timespan & getHTTPTimeout() const override { return http_timeout; }
+    Poco::Timespan getHTTPTimeout() const override { return http_timeout; }

     Poco::URI createBaseURI() const override;
@@ -62,9 +62,9 @@ public:
     static constexpr inline auto SCHEMA_ALLOWED_HANDLER = "/schema_allowed";

     XDBCBridgeHelper(
-        ContextPtr global_context_,
-        const Poco::Timespan & http_timeout_,
-        const std::string & connection_string_)
+        ContextPtr global_context_,
+        Poco::Timespan http_timeout_,
+        const std::string & connection_string_)
         : IXDBCBridgeHelper(global_context_)
         , log(&Poco::Logger::get(BridgeHelperMixin::getName() + "BridgeHelper"))
         , connection_string(connection_string_)
@@ -90,7 +90,7 @@ protected:

     String configPrefix() const override { return BridgeHelperMixin::configPrefix(); }

-    const Poco::Timespan & getHTTPTimeout() const override { return http_timeout; }
+    Poco::Timespan getHTTPTimeout() const override { return http_timeout; }

     const Poco::Util::AbstractConfiguration & getConfig() const override { return config; }
@@ -118,7 +118,7 @@ private:

     Poco::Logger * log;
     std::string connection_string;
-    const Poco::Timespan & http_timeout;
+    Poco::Timespan http_timeout;
     std::string bridge_host;
     size_t bridge_port;
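Across these helpers (and the I/O code further below), `Poco::Timespan` parameters and members switch from `const &` to by-value. `Poco::Timespan` wraps a single 64-bit microsecond count, so a copy is as cheap as passing a pointer, and a value member also removes the danger visible in the old `const Poco::Timespan & http_timeout` field, which could dangle if a temporary was passed to the constructor. A minimal sketch of the pattern:

```cpp
#include <Poco/Timespan.h>

struct BridgeHelper
{
    // Storing by value: the helper owns its timeout, so it cannot dangle even
    // if the Timespan passed to the constructor was a temporary.
    Poco::Timespan http_timeout;

    explicit BridgeHelper(Poco::Timespan http_timeout_) : http_timeout(http_timeout_) {}

    // Returning by value is equally cheap: Timespan is a single 64-bit count.
    Poco::Timespan getHTTPTimeout() const { return http_timeout; }
};
```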
@@ -8,7 +8,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -680,8 +680,12 @@ void Connection::sendExternalTablesData(ExternalTablesData & data)
         PipelineExecutorPtr executor;
         auto on_cancel = [& executor]() { executor->cancel(); };

+        if (!elem->pipe)
+            elem->pipe = elem->creating_pipe_callback();
+
         QueryPipeline pipeline;
         pipeline.init(std::move(*elem->pipe));
+        elem->pipe.reset();
         pipeline.resize(1);
         auto sink = std::make_shared<ExternalTableDataSink>(pipeline.getHeader(), *this, *elem, std::move(on_cancel));
         pipeline.setSinks([&](const Block &, QueryPipeline::StreamType type) -> ProcessorPtr
@@ -41,6 +41,7 @@ struct ExternalTableData
     /// Pipe of data from table;
     std::unique_ptr<Pipe> pipe;
     std::string table_name;
+    std::function<std::unique_ptr<Pipe>()> creating_pipe_callback;
     /// Flag if need to stop reading.
     std::atomic_bool is_cancelled = false;
 };
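Storing a `creating_pipe_callback` lets the connection rebuild the pipe on demand instead of holding one pre-built pipe that can only be consumed once. A self-contained sketch of the lazy-creation pattern with simplified, hypothetical stand-in types:

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Pipe { std::vector<int> data; }; // stand-in for DB::Pipe

struct ExternalTableData
{
    std::unique_ptr<Pipe> pipe;
    // Rebuilds the pipe from scratch; invoked lazily and again on every resend.
    std::function<std::unique_ptr<Pipe>()> creating_pipe_callback;
};

void sendOnce(ExternalTableData & elem)
{
    if (!elem.pipe)                          // first send, or pipe already consumed
        elem.pipe = elem.creating_pipe_callback();

    Pipe pipeline = std::move(*elem.pipe);   // the pipeline takes ownership
    elem.pipe.reset();                       // a later send re-invokes the callback

    std::cout << "sent " << pipeline.data.size() << " values\n";
}

int main()
{
    ExternalTableData elem;
    elem.creating_pipe_callback = [] { return std::make_unique<Pipe>(Pipe{{1, 2, 3}}); };
    sendOnce(elem);
    sendOnce(elem); // works again: the callback recreates the pipe
    return 0;
}
```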
@@ -116,7 +116,7 @@ ConnectionEstablisherAsync::ConnectionEstablisherAsync(
     epoll.add(receive_timeout.getDescriptor());
 }

-void ConnectionEstablisherAsync::Routine::ReadCallback::operator()(int fd, const Poco::Timespan & timeout, const std::string &)
+void ConnectionEstablisherAsync::Routine::ReadCallback::operator()(int fd, Poco::Timespan timeout, const std::string &)
 {
     /// Check if it's the first time and we need to add socket fd to epoll.
     if (connection_establisher_async.socket_fd == -1)
@@ -92,7 +92,7 @@ private:
         ConnectionEstablisherAsync & connection_establisher_async;
         Fiber & fiber;

-        void operator()(int fd, const Poco::Timespan & timeout, const std::string &);
+        void operator()(int fd, Poco::Timespan timeout, const std::string &);
     };

     Fiber operator()(Fiber && sink);
@@ -98,7 +98,7 @@ private:
         PacketReceiver & receiver;
         Fiber & sink;

-        void operator()(int, const Poco::Timespan & timeout, const std::string &)
+        void operator()(int, Poco::Timespan timeout, const std::string &)
         {
             receiver.receive_timeout.setRelative(timeout);
             receiver.is_read_in_process = true;
@@ -9,7 +9,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -17,7 +17,7 @@ PEERDIR(
 )

 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -234,7 +234,7 @@ void ConfigProcessor::merge(XMLDocumentPtr config, XMLDocumentPtr with)

 static std::string layerFromHost()
 {
-    utsname buf;
+    struct utsname buf;
     if (uname(&buf))
         throw Poco::Exception(std::string("uname failed: ") + errnoToString(errno));
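The added `struct` keyword is presumably needed for the Solaris port: in C++ a struct tag is normally usable as a type name on its own, but if the platform's headers also declare a function or variable with the same name, that declaration hides the tag, and the elaborated-type-specifier is the portable way to disambiguate. A sketch (the reason given here is an assumption; the fix itself is from the commit):

```cpp
#include <cstdio>
#include <sys/utsname.h>

int main()
{
    struct utsname buf; // elaborated-type-specifier: unambiguous on every libc
    if (uname(&buf) != 0)
        return 1;
    std::printf("%s %s\n", buf.sysname, buf.release);
    return 0;
}
```

The same consideration applies to the `struct winsize` change in the TerminalSize hunk further below.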
@@ -129,7 +129,12 @@ String Elf::getBuildID() const
     return {};
 }


+#if defined(OS_SUNOS)
+String Elf::getBuildID(const char * nhdr_pos, size_t size)
+{
+    return {};
+}
+#else
 String Elf::getBuildID(const char * nhdr_pos, size_t size)
 {
     const char * nhdr_end = nhdr_pos + size;
@@ -149,6 +154,7 @@ String Elf::getBuildID(const char * nhdr_pos, size_t size)

     return {};
 }
+#endif // OS_SUNOS


 String Elf::getBinaryHash() const
@@ -9,7 +9,7 @@
 namespace DB
 {

-using AsyncCallback = std::function<void(int, const Poco::Timespan &, const std::string &)>;
+using AsyncCallback = std::function<void(int, Poco::Timespan, const std::string &)>;

 class Epoll
 {
@@ -12,6 +12,7 @@
 #include <Core/Defines.h>
 #include <common/types.h>
 #include <Common/Exception.h>
+#include <Common/MemorySanitizer.h>

 #include <IO/WriteBuffer.h>
 #include <IO/WriteHelpers.h>
@@ -584,8 +585,19 @@ protected:
     void destroyElements()
     {
         if (!std::is_trivially_destructible_v<Cell>)
+        {
             for (iterator it = begin(), it_end = end(); it != it_end; ++it)
+            {
                 it.ptr->~Cell();
+                /// In case of poison_in_dtor=1 it will be poisoned,
+                /// but it may be used later, during iteration.
+                ///
+                /// NOTE: technically this is UB [1], but OK for now.
+                ///
+                /// [1]: https://github.com/google/sanitizers/issues/854#issuecomment-329661378
+                __msan_unpoison(it.ptr, sizeof(*it.ptr));
+            }
+        }
     }
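`__msan_unpoison` marks a byte range as initialized again, so iteration can still step over cells whose destructors poisoned them under `poison_in_dtor=1`. A sketch of the pattern in isolation, assuming a build with `-fsanitize=memory` where the public sanitizer interface header is available (ClickHouse's own `Common/MemorySanitizer.h` presumably provides a no-op fallback for regular builds):

```cpp
#include <sanitizer/msan_interface.h> // available when building with -fsanitize=memory

template <typename Cell>
void destroyAll(Cell * begin, Cell * end)
{
    for (Cell * it = begin; it != end; ++it)
    {
        it->~Cell();
        // With MSAN_OPTIONS=poison_in_dtor=1 the destroyed cell is poisoned;
        // the loop still steps over it, so mark the bytes readable again.
        __msan_unpoison(it, sizeof(*it));
    }
}
```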
@@ -11,7 +11,7 @@ struct OpenTelemetryTraceContext
     // The incoming tracestate header and the trace flags, we just pass them
     // downstream. See https://www.w3.org/TR/trace-context/
     String tracestate;
-    __uint8_t trace_flags = 0;
+    uint8_t trace_flags = 0;

     // Parse/compose OpenTelemetry traceparent header.
     bool parseTraceparentHeader(const std::string & traceparent, std::string & error);
@@ -7,8 +7,12 @@ StopwatchRUsage::Timestamp StopwatchRUsage::Timestamp::current()

     ::rusage rusage {};
 #if !defined(__APPLE__)
+#if defined(OS_SUNOS)
+    ::getrusage(RUSAGE_LWP, &rusage);
+#else
     ::getrusage(RUSAGE_THREAD, &rusage);
-#endif
+#endif // OS_SUNOS
+#endif // __APPLE__
     res.user_ns = rusage.ru_utime.tv_sec * 1000000000UL + rusage.ru_utime.tv_usec * 1000UL;
     res.sys_ns = rusage.ru_stime.tv_sec * 1000000000UL + rusage.ru_stime.tv_usec * 1000UL;
     return res;
@@ -1,5 +1,8 @@
 #include <unistd.h>
 #include <sys/ioctl.h>
+#if defined(OS_SUNOS)
+#    include <sys/termios.h>
+#endif
 #include <Common/Exception.h>
 #include <Common/TerminalSize.h>
 #include <boost/program_options.hpp>
@@ -14,7 +17,7 @@ uint16_t getTerminalWidth()
 {
     if (isatty(STDIN_FILENO))
     {
-        winsize terminal_size {};
+        struct winsize terminal_size {};

         if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size))
             DB::throwFromErrno("Cannot obtain terminal window size (ioctl TIOCGWINSZ)", DB::ErrorCodes::SYSTEM_ERROR);
@@ -105,8 +105,12 @@ struct RUsageCounters
     {
         ::rusage rusage {};
 #if !defined(__APPLE__)
+#if defined(OS_SUNOS)
+        ::getrusage(RUSAGE_LWP, &rusage);
+#else
         ::getrusage(RUSAGE_THREAD, &rusage);
-#endif
+#endif // OS_SUNOS
+#endif // __APPLE__
         return RUsageCounters(rusage, getClockMonotonic());
     }
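The Solaris branches above exist because the per-thread `getrusage` scope is spelled differently across systems: Linux and FreeBSD call it `RUSAGE_THREAD`, Solaris-derived systems call it `RUSAGE_LWP`, and macOS offers neither. A standalone sketch of the same dispatch:

```cpp
#include <cstdio>
#include <sys/resource.h>

int main()
{
    ::rusage usage {};
#if defined(__sun)
    ::getrusage(RUSAGE_LWP, &usage);     // Solaris: LWP ~= thread
#elif defined(__linux__) || defined(__FreeBSD__)
    ::getrusage(RUSAGE_THREAD, &usage);  // per-thread accounting
#else
    ::getrusage(RUSAGE_SELF, &usage);    // whole-process fallback (e.g. macOS)
#endif
    std::printf("user time: %lds\n", static_cast<long>(usage.ru_utime.tv_sec));
    return 0;
}
```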
@@ -74,7 +74,7 @@ void TimerDescriptor::drain() const
     }
 }

-void TimerDescriptor::setRelative(const Poco::Timespan & timespan) const
+void TimerDescriptor::setRelative(Poco::Timespan timespan) const
 {
     itimerspec spec;
     spec.it_interval.tv_nsec = 0;

@@ -24,7 +24,7 @@ public:

     void reset() const;
     void drain() const;
-    void setRelative(const Poco::Timespan & timespan) const;
+    void setRelative(Poco::Timespan timespan) const;
 };

 }
@@ -49,7 +49,7 @@ __attribute__((__weak__)) void checkStackSize()
     stack_address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(thread)) - max_stack_size);
 #else
     pthread_attr_t attr;
-#   if defined(__FreeBSD__)
+#   if defined(__FreeBSD__) || defined(OS_SUNOS)
     pthread_attr_init(&attr);
     if (0 != pthread_attr_get_np(pthread_self(), &attr))
         throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
@@ -12,7 +12,7 @@

 static void setAffinity()
 {
-#if !defined(__APPLE__) && !defined(__FreeBSD__)
+#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__sun)
     cpu_set_t mask;
     CPU_ZERO(&mask);
     CPU_SET(0, &mask);
@@ -85,19 +85,19 @@ inline bool parseIPv6(const char * src, unsigned char * dst)
         return clear_dst();

     unsigned char tmp[IPV6_BINARY_LENGTH]{};
-    auto * tp = tmp;
-    auto * endp = tp + IPV6_BINARY_LENGTH;
-    const auto * curtok = src;
-    auto saw_xdigit = false;
+    unsigned char * tp = tmp;
+    unsigned char * endp = tp + IPV6_BINARY_LENGTH;
+    const char * curtok = src;
+    bool saw_xdigit = false;
     UInt32 val{};
     unsigned char * colonp = nullptr;

     /// Assuming zero-terminated string.
-    while (const auto ch = *src++)
+    while (char ch = *src++)
     {
-        const auto num = unhex(ch);
+        UInt8 num = unhex(ch);

-        if (num != u8'\xff')
+        if (num != 0xFF)
         {
             val <<= 4;
             val |= num;
@@ -1,6 +1,6 @@
 #include <pthread.h>

-#if defined(__APPLE__)
+#if defined(__APPLE__) || defined(OS_SUNOS)
 #elif defined(__FreeBSD__)
 #include <pthread_np.h>
 #else
@@ -34,6 +34,8 @@ void setThreadName(const char * name)
     if ((false))
 #elif defined(OS_DARWIN)
     if (0 != pthread_setname_np(name))
+#elif defined(OS_SUNOS)
+    if (0 != pthread_setname_np(pthread_self(), name))
 #else
     if (0 != prctl(PR_SET_NAME, name, 0, 0, 0))
 #endif
@@ -44,7 +46,7 @@ std::string getThreadName()
 {
     std::string name(16, '\0');

-#if defined(__APPLE__)
+#if defined(__APPLE__) || defined(OS_SUNOS)
     if (pthread_getname_np(pthread_self(), name.data(), name.size()))
         throw DB::Exception("Cannot get thread name with pthread_getname_np()", DB::ErrorCodes::PTHREAD_ERROR);
 #elif defined(__FreeBSD__)
@@ -24,7 +24,7 @@ INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -15,7 +15,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -101,7 +101,7 @@ struct SettingFieldTimespan
     Poco::Timespan value;
     bool changed = false;

-    explicit SettingFieldTimespan(const Poco::Timespan & x = {}) : value(x) {}
+    explicit SettingFieldTimespan(Poco::Timespan x = {}) : value(x) {}

     template <class Rep, class Period = std::ratio<1>>
     explicit SettingFieldTimespan(const std::chrono::duration<Rep, Period> & x)
@@ -110,7 +110,7 @@ struct SettingFieldTimespan
     explicit SettingFieldTimespan(UInt64 x) : SettingFieldTimespan(Poco::Timespan{static_cast<Poco::Timespan::TimeDiff>(x * microseconds_per_unit)}) {}
     explicit SettingFieldTimespan(const Field & f);

-    SettingFieldTimespan & operator =(const Poco::Timespan & x) { value = x; changed = true; return *this; }
+    SettingFieldTimespan & operator =(Poco::Timespan x) { value = x; changed = true; return *this; }

     template <class Rep, class Period = std::ratio<1>>
     SettingFieldTimespan & operator =(const std::chrono::duration<Rep, Period> & x) { *this = Poco::Timespan{static_cast<Poco::Timespan::TimeDiff>(std::chrono::duration_cast<std::chrono::microseconds>(x).count())}; return *this; }
@@ -10,7 +10,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -458,8 +458,6 @@ void RemoteQueryExecutor::sendScalars()

 void RemoteQueryExecutor::sendExternalTables()
 {
-    SelectQueryInfo query_info;
-
     size_t count = connections->size();

     {
@@ -474,24 +472,29 @@ void RemoteQueryExecutor::sendExternalTables()
         for (const auto & table : external_tables)
         {
             StoragePtr cur = table.second;
-            auto metadata_snapshot = cur->getInMemoryMetadataPtr();
-            QueryProcessingStage::Enum read_from_table_stage = cur->getQueryProcessingStage(
-                context, QueryProcessingStage::Complete, query_info);
-
-            Pipe pipe = cur->read(
-                metadata_snapshot->getColumns().getNamesOfPhysical(),
-                metadata_snapshot, query_info, context,
-                read_from_table_stage, DEFAULT_BLOCK_SIZE, 1);

             auto data = std::make_unique<ExternalTableData>();
             data->table_name = table.first;
+            data->creating_pipe_callback = [cur, context = this->context]()
+            {
+                SelectQueryInfo query_info;
+                auto metadata_snapshot = cur->getInMemoryMetadataPtr();
+                QueryProcessingStage::Enum read_from_table_stage = cur->getQueryProcessingStage(
+                    context, QueryProcessingStage::Complete, query_info);

-            if (pipe.empty())
-                data->pipe = std::make_unique<Pipe>(
-                    std::make_shared<SourceFromSingleChunk>(metadata_snapshot->getSampleBlock(), Chunk()));
-            else
-                data->pipe = std::make_unique<Pipe>(std::move(pipe));
+                Pipe pipe = cur->read(
+                    metadata_snapshot->getColumns().getNamesOfPhysical(),
+                    metadata_snapshot, query_info, context,
+                    read_from_table_stage, DEFAULT_BLOCK_SIZE, 1);
+
+                if (pipe.empty())
+                    return std::make_unique<Pipe>(
+                        std::make_shared<SourceFromSingleChunk>(metadata_snapshot->getSampleBlock(), Chunk()));
+
+                return std::make_unique<Pipe>(std::move(pipe));
+            };
+
+            data->pipe = data->creating_pipe_callback();
             res.emplace_back(std::move(data));
         }
         external_tables_data.push_back(std::move(res));
@@ -19,7 +19,7 @@ struct RemoteQueryExecutorRoutine
     RemoteQueryExecutorReadContext & read_context;
     Fiber & fiber;

-    void operator()(int fd, const Poco::Timespan & timeout = 0, const std::string fd_description = "")
+    void operator()(int fd, Poco::Timespan timeout = 0, const std::string fd_description = "")
     {
         try
         {
@@ -89,7 +89,7 @@ RemoteQueryExecutorReadContext::RemoteQueryExecutorReadContext(IConnections & co
     fiber = boost::context::fiber(std::allocator_arg_t(), stack, std::move(routine));
 }

-void RemoteQueryExecutorReadContext::setConnectionFD(int fd, const Poco::Timespan & timeout, const std::string & fd_description)
+void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan timeout, const std::string & fd_description)
 {
     if (fd == connection_fd)
         return;
@@ -58,7 +58,7 @@ public:
     bool checkTimeout(bool blocking = false);
     bool checkTimeoutImpl(bool blocking);

-    void setConnectionFD(int fd, const Poco::Timespan & timeout = 0, const std::string & fd_description = "");
+    void setConnectionFD(int fd, Poco::Timespan timeout = 0, const std::string & fd_description = "");
     void setTimer() const;

     bool resumeRoutine();
@@ -12,7 +12,7 @@ NO_COMPILER_WARNINGS()


 SRCS(
-    <? find . -name '*.cpp' | grep -v -P 'tests|PostgreSQL' | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -P 'tests|PostgreSQL' | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -9,7 +9,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -8,7 +8,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F 'PostgreSQL' | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F examples | grep -v -F 'PostgreSQL' | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -30,14 +30,14 @@ FlatDictionary::FlatDictionary(
     DictionarySourcePtr source_ptr_,
     const DictionaryLifetime dict_lifetime_,
     Configuration configuration_,
-    BlockPtr previously_loaded_block_)
+    BlockPtr update_field_loaded_block_)
     : IDictionary(dict_id_)
     , dict_struct(dict_struct_)
     , source_ptr{std::move(source_ptr_)}
     , dict_lifetime(dict_lifetime_)
     , configuration(configuration_)
     , loaded_keys(configuration.initial_array_size, false)
-    , previously_loaded_block(std::move(previously_loaded_block_))
+    , update_field_loaded_block(std::move(update_field_loaded_block_))
 {
     createAttributes();
     loadData();
@@ -273,7 +273,7 @@ void FlatDictionary::blockToAttributes(const Block & block)

 void FlatDictionary::updateData()
 {
-    if (!previously_loaded_block || previously_loaded_block->rows() == 0)
+    if (!update_field_loaded_block || update_field_loaded_block->rows() == 0)
     {
         auto stream = source_ptr->loadUpdatedAll();
         stream->readPrefix();
@@ -281,13 +281,13 @@ void FlatDictionary::updateData()
         while (const auto block = stream->read())
         {
             /// We are using this to keep saved data if input stream consists of multiple blocks
-            if (!previously_loaded_block)
-                previously_loaded_block = std::make_shared<DB::Block>(block.cloneEmpty());
+            if (!update_field_loaded_block)
+                update_field_loaded_block = std::make_shared<DB::Block>(block.cloneEmpty());

             for (size_t column_index = 0; column_index < block.columns(); ++column_index)
             {
                 const IColumn & update_column = *block.getByPosition(column_index).column.get();
-                MutableColumnPtr saved_column = previously_loaded_block->getByPosition(column_index).column->assumeMutable();
+                MutableColumnPtr saved_column = update_field_loaded_block->getByPosition(column_index).column->assumeMutable();
                 saved_column->insertRangeFrom(update_column, 0, update_column.size());
             }
         }
@@ -298,12 +298,12 @@ void FlatDictionary::updateData()
         auto stream = source_ptr->loadUpdatedAll();
         mergeBlockWithStream<DictionaryKeyType::simple>(
             dict_struct.getKeysSize(),
-            *previously_loaded_block,
+            *update_field_loaded_block,
             stream);
     }

-    if (previously_loaded_block)
-        blockToAttributes(*previously_loaded_block.get());
+    if (update_field_loaded_block)
+        blockToAttributes(*update_field_loaded_block.get());
 }

 void FlatDictionary::loadData()
@@ -347,6 +347,9 @@ void FlatDictionary::calculateBytesAllocated()

         callOnDictionaryAttributeType(attribute.type, type_call);
     }

+    if (update_field_loaded_block)
+        bytes_allocated += update_field_loaded_block->allocatedBytes();
 }

 FlatDictionary::Attribute FlatDictionary::createAttribute(const DictionaryAttribute & dictionary_attribute, const Field & null_value)

@@ -39,7 +39,7 @@ public:
     DictionarySourcePtr source_ptr_,
     const DictionaryLifetime dict_lifetime_,
     Configuration configuration_,
-    BlockPtr previously_loaded_block_ = nullptr);
+    BlockPtr update_field_loaded_block_ = nullptr);

     std::string getTypeName() const override { return "Flat"; }

@@ -55,7 +55,7 @@ public:

     std::shared_ptr<const IExternalLoadable> clone() const override
     {
-        return std::make_shared<FlatDictionary>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, configuration, previously_loaded_block);
+        return std::make_shared<FlatDictionary>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, configuration, update_field_loaded_block);
     }

     const IDictionarySource * getSource() const override { return source_ptr.get(); }
@@ -184,7 +184,7 @@ private:
     size_t bucket_count = 0;
     mutable std::atomic<size_t> query_count{0};

-    BlockPtr previously_loaded_block;
+    BlockPtr update_field_loaded_block;
 };

 }
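The rename `previously_loaded_block` → `update_field_loaded_block` makes the field's purpose explicit: it accumulates the rows fetched through the dictionary's `update_field` mechanism so that incremental refreshes can be merged with the previously saved state. A simplified sketch of the accumulate-then-merge pattern, with hypothetical stand-in types:

```cpp
#include <memory>
#include <vector>

using Block = std::vector<int>;          // stand-in for DB::Block (a batch of rows)
using BlockPtr = std::shared_ptr<Block>;

struct UpdatesStream                      // stand-in for the source's update stream
{
    std::vector<Block> batches;
    size_t next = 0;
    const Block * read() { return next < batches.size() ? &batches[next++] : nullptr; }
};

struct Dictionary
{
    BlockPtr update_field_loaded_block;

    void updateData(UpdatesStream & stream)
    {
        if (!update_field_loaded_block || update_field_loaded_block->empty())
        {
            // Initial load: concatenate every block the source returns, so the
            // saved state survives sources that stream multiple blocks.
            while (const Block * block = stream.read())
            {
                if (!update_field_loaded_block)
                    update_field_loaded_block = std::make_shared<Block>();
                update_field_loaded_block->insert(
                    update_field_loaded_block->end(), block->begin(), block->end());
            }
        }
        // On later refreshes the real code instead merges the newly updated rows
        // into *update_field_loaded_block (mergeBlockWithStream in the diff).
    }
};
```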
@@ -42,13 +42,13 @@ HashedDictionary<dictionary_key_type, sparse>::HashedDictionary(
     DictionarySourcePtr source_ptr_,
     const DictionaryLifetime dict_lifetime_,
     bool require_nonempty_,
-    BlockPtr previously_loaded_block_)
+    BlockPtr update_field_loaded_block_)
     : IDictionary(dict_id_)
     , dict_struct(dict_struct_)
     , source_ptr(std::move(source_ptr_))
     , dict_lifetime(dict_lifetime_)
     , require_nonempty(require_nonempty_)
-    , previously_loaded_block(std::move(previously_loaded_block_))
+    , update_field_loaded_block(std::move(update_field_loaded_block_))
 {
     createAttributes();
     loadData();
@@ -343,7 +343,7 @@ void HashedDictionary<dictionary_key_type, sparse>::createAttributes()
 template <DictionaryKeyType dictionary_key_type, bool sparse>
 void HashedDictionary<dictionary_key_type, sparse>::updateData()
 {
-    if (!previously_loaded_block || previously_loaded_block->rows() == 0)
+    if (!update_field_loaded_block || update_field_loaded_block->rows() == 0)
     {
         auto stream = source_ptr->loadUpdatedAll();
         stream->readPrefix();
@@ -351,13 +351,13 @@ void HashedDictionary<dictionary_key_type, sparse>::updateData()
         while (const auto block = stream->read())
         {
             /// We are using this to keep saved data if input stream consists of multiple blocks
-            if (!previously_loaded_block)
-                previously_loaded_block = std::make_shared<DB::Block>(block.cloneEmpty());
+            if (!update_field_loaded_block)
+                update_field_loaded_block = std::make_shared<DB::Block>(block.cloneEmpty());

             for (size_t attribute_index = 0; attribute_index < block.columns(); ++attribute_index)
             {
-                const IColumn & update_column = *block.getByPosition(attribute_index).column.get();
-                MutableColumnPtr saved_column = previously_loaded_block->getByPosition(attribute_index).column->assumeMutable();
+                const IColumn & update_column = *block.getByPosition(attribute_index).column.get();
+                MutableColumnPtr saved_column = update_field_loaded_block->getByPosition(attribute_index).column->assumeMutable();
                 saved_column->insertRangeFrom(update_column, 0, update_column.size());
             }
         }
@@ -368,14 +368,14 @@ void HashedDictionary<dictionary_key_type, sparse>::updateData()
         auto stream = source_ptr->loadUpdatedAll();
         mergeBlockWithStream<dictionary_key_type>(
             dict_struct.getKeysSize(),
-            *previously_loaded_block,
+            *update_field_loaded_block,
             stream);
     }

-    if (previously_loaded_block)
+    if (update_field_loaded_block)
     {
-        resize(previously_loaded_block->rows());
-        blockToAttributes(*previously_loaded_block.get());
+        resize(update_field_loaded_block->rows());
+        blockToAttributes(*update_field_loaded_block.get());
     }
 }

@@ -586,6 +586,9 @@ void HashedDictionary<dictionary_key_type, sparse>::calculateBytesAllocated()
     }

     bytes_allocated += complex_key_arena.size();

+    if (update_field_loaded_block)
+        bytes_allocated += update_field_loaded_block->allocatedBytes();
 }

 template <DictionaryKeyType dictionary_key_type, bool sparse>

@@ -41,7 +41,7 @@ public:
     DictionarySourcePtr source_ptr_,
     const DictionaryLifetime dict_lifetime_,
     bool require_nonempty_,
-    BlockPtr previously_loaded_block_ = nullptr);
+    BlockPtr update_field_loaded_block_ = nullptr);

     std::string getTypeName() const override
     {
@@ -67,7 +67,7 @@ public:

     std::shared_ptr<const IExternalLoadable> clone() const override
     {
-        return std::make_shared<HashedDictionary<dictionary_key_type, sparse>>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, require_nonempty, previously_loaded_block);
+        return std::make_shared<HashedDictionary<dictionary_key_type, sparse>>(getDictionaryID(), dict_struct, source_ptr->clone(), dict_lifetime, require_nonempty, update_field_loaded_block);
     }

     const IDictionarySource * getSource() const override { return source_ptr.get(); }
@@ -220,7 +220,7 @@ private:
     size_t bucket_count = 0;
     mutable std::atomic<size_t> query_count{0};

-    BlockPtr previously_loaded_block;
+    BlockPtr update_field_loaded_block;
     Arena complex_key_arena;
 };
@@ -19,7 +19,7 @@ NO_COMPILER_WARNINGS()


 SRCS(
-    <? find . -name '*.cpp' | grep -v -P 'tests|PostgreSQL' | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -P 'tests|PostgreSQL' | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -8,7 +8,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F S3 | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | grep -v -F S3 | sed 's/^\.\// /' | sort ?>
 )

 END()

@@ -10,7 +10,7 @@ PEERDIR(


 SRCS(
-    <? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -149,8 +149,7 @@ ColumnPtr wrapInNullable(const ColumnPtr & src, const ColumnsWithTypeAndName & a
             const NullMap & src_null_map = assert_cast<const ColumnUInt8 &>(*null_map_column).getData();

             for (size_t i = 0, size = result_null_map.size(); i < size; ++i)
-                if (src_null_map[i])
-                    result_null_map[i] = 1;
+                result_null_map[i] |= src_null_map[i];

             result_null_map_column = std::move(mutable_result_null_map_column);
         }
@@ -179,10 +178,8 @@ NullPresence getNullPresense(const ColumnsWithTypeAndName & args)

     for (const auto & elem : args)
     {
-        if (!res.has_nullable)
-            res.has_nullable = elem.type->isNullable();
-        if (!res.has_null_constant)
-            res.has_null_constant = elem.type->onlyNull();
+        res.has_nullable |= elem.type->isNullable();
+        res.has_null_constant |= elem.type->onlyNull();
     }

     return res;
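Replacing the conditional stores with `|=` makes the null-map merge branchless: there is no data-dependent branch for the CPU to mispredict, and the loop becomes trivially auto-vectorizable. A standalone sketch of the same transformation:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Branchless merge of two null maps: result[i] |= src[i] has no data-dependent
// branch, so compilers can vectorize it; the if/store form often cannot be.
void mergeNullMaps(std::vector<uint8_t> & result, const std::vector<uint8_t> & src)
{
    for (std::size_t i = 0, size = result.size(); i < size; ++i)
        result[i] |= src[i];
}
```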
@@ -15,6 +15,8 @@ namespace ErrorCodes
 {
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int ILLEGAL_COLUMN;
+    extern const int DECIMAL_OVERFLOW;
+    extern const int ARGUMENT_OUT_OF_BOUND;
 }

 enum class AggregateOperation
@@ -22,7 +24,8 @@ enum class AggregateOperation
     min,
     max,
     sum,
-    average
+    average,
+    product
 };

 /**
@@ -54,6 +57,12 @@ struct ArrayAggregateResultImpl<ArrayElement, AggregateOperation::average>
     using Result = Float64;
 };

+template <typename ArrayElement>
+struct ArrayAggregateResultImpl<ArrayElement, AggregateOperation::product>
+{
+    using Result = Float64;
+};
+
 template <typename ArrayElement>
 struct ArrayAggregateResultImpl<ArrayElement, AggregateOperation::sum>
 {
@@ -86,7 +95,7 @@ struct ArrayAggregateImpl
         using Types = std::decay_t<decltype(types)>;
         using DataType = typename Types::LeftType;

-        if constexpr (aggregate_operation == AggregateOperation::average)
+        if constexpr (aggregate_operation == AggregateOperation::average || aggregate_operation == AggregateOperation::product)
         {
             result = std::make_shared<DataTypeFloat64>();

@@ -124,17 +133,17 @@ struct ArrayAggregateImpl
     template <typename Element>
     static NO_SANITIZE_UNDEFINED bool executeType(const ColumnPtr & mapped, const ColumnArray::Offsets & offsets, ColumnPtr & res_ptr)
     {
-        using Result = ArrayAggregateResult<Element, aggregate_operation>;
+        using ResultType = ArrayAggregateResult<Element, aggregate_operation>;
         using ColVecType = std::conditional_t<IsDecimalNumber<Element>, ColumnDecimal<Element>, ColumnVector<Element>>;
-        using ColVecResult = std::conditional_t<IsDecimalNumber<Result>, ColumnDecimal<Result>, ColumnVector<Result>>;
+        using ColVecResultType = std::conditional_t<IsDecimalNumber<ResultType>, ColumnDecimal<ResultType>, ColumnVector<ResultType>>;

-        /// For average of array we return Float64 as result, but we want to keep precision
-        /// so we convert to Float64 as last step, but intermediate sum is represented as result of sum operation
-        static constexpr bool is_average_operation = aggregate_operation == AggregateOperation::average;
+        /// For average and product of array we return Float64 as result, but we want to keep precision
+        /// so we convert to Float64 as last step, but intermediate value is represented as result of sum operation
+        static constexpr bool is_average_or_product_operation = aggregate_operation == AggregateOperation::average ||
+            aggregate_operation == AggregateOperation::product;
         using SummAggregationType = ArrayAggregateResult<Element, AggregateOperation::sum>;

-        using AggregationType = std::conditional_t<is_average_operation, SummAggregationType, Result>;
+        using AggregationType = std::conditional_t<is_average_or_product_operation, SummAggregationType, ResultType>;

         const ColVecType * column = checkAndGetColumn<ColVecType>(&*mapped);

@@ -147,18 +156,15 @@ struct ArrayAggregateImpl
                 return false;

             const AggregationType x = column_const->template getValue<Element>(); // NOLINT
-            const typename ColVecType::Container & data
-                = checkAndGetColumn<ColVecType>(&column_const->getDataColumn())->getData();
+            const auto & data = checkAndGetColumn<ColVecType>(&column_const->getDataColumn())->getData();

-            typename ColVecResult::MutablePtr res_column;
+            typename ColVecResultType::MutablePtr res_column;
             if constexpr (IsDecimalNumber<Element>)
-            {
-                res_column = ColVecResult::create(offsets.size(), data.getScale());
-            }
+                res_column = ColVecResultType::create(offsets.size(), data.getScale());
             else
-                res_column = ColVecResult::create(offsets.size());
+                res_column = ColVecResultType::create(offsets.size());

-            typename ColVecResult::Container & res = res_column->getData();
+            auto & res = res_column->getData();

             size_t pos = 0;
             for (size_t i = 0; i < offsets.size(); ++i)
@@ -178,13 +184,45 @@ struct ArrayAggregateImpl
                 {
                     if constexpr (IsDecimalNumber<Element>)
                     {
-                        res[i] = DecimalUtils::convertTo<Result>(x, data.getScale());
+                        res[i] = DecimalUtils::convertTo<ResultType>(x, data.getScale());
                     }
                     else
                     {
                         res[i] = x;
                     }
                 }
+                else if constexpr (aggregate_operation == AggregateOperation::product)
+                {
+                    size_t array_size = offsets[i] - pos;
+                    AggregationType product = x;
+
+                    if constexpr (IsDecimalNumber<Element>)
+                    {
+                        using T = decltype(x.value);
+                        T x_val = x.value;
+
+                        for (size_t array_index = 1; array_index < array_size; ++array_index)
+                        {
+                            T product_val = product.value;
+
+                            if (common::mulOverflow(x_val, product_val, product.value))
+                                throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "Decimal math overflow");
+                        }
+
+                        auto result_scale = data.getScale() * array_size;
+                        if (unlikely(result_scale > DecimalUtils::max_precision<AggregationType>))
+                            throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale);
+
+                        res[i] = DecimalUtils::convertTo<ResultType>(product, data.getScale() * array_size);
+                    }
+                    else
+                    {
+                        for (size_t array_index = 1; array_index < array_size; ++array_index)
+                            product = product * x;
+
+                        res[i] = product;
+                    }
+                }

                 pos = offsets[i];
             }
@@ -193,30 +231,30 @@ struct ArrayAggregateImpl
             return true;
         }

-        const typename ColVecType::Container & data = column->getData();
+        const auto & data = column->getData();

-        typename ColVecResult::MutablePtr res_column;
+        typename ColVecResultType::MutablePtr res_column;
         if constexpr (IsDecimalNumber<Element>)
-            res_column = ColVecResult::create(offsets.size(), data.getScale());
+            res_column = ColVecResultType::create(offsets.size(), data.getScale());
         else
-            res_column = ColVecResult::create(offsets.size());
+            res_column = ColVecResultType::create(offsets.size());

-        typename ColVecResult::Container & res = res_column->getData();
+        typename ColVecResultType::Container & res = res_column->getData();

         size_t pos = 0;
         for (size_t i = 0; i < offsets.size(); ++i)
         {
-            AggregationType s = 0;
+            AggregationType aggregate_value = 0;

             /// Array is empty
             if (offsets[i] == pos)
             {
-                res[i] = s;
+                res[i] = aggregate_value;
                 continue;
             }

             size_t count = 1;
-            s = data[pos]; // NOLINT
+            aggregate_value = data[pos]; // NOLINT
             ++pos;

             for (; pos < offsets[i]; ++pos)
@@ -226,20 +264,36 @@ struct ArrayAggregateImpl
                 if constexpr (aggregate_operation == AggregateOperation::sum ||
                               aggregate_operation == AggregateOperation::average)
                 {
-                    s += element;
+                    aggregate_value += element;
                 }
                 else if constexpr (aggregate_operation == AggregateOperation::min)
                 {
-                    if (element < s)
+                    if (element < aggregate_value)
                     {
-                        s = element;
+                        aggregate_value = element;
                     }
                 }
                 else if constexpr (aggregate_operation == AggregateOperation::max)
                 {
-                    if (element > s)
+                    if (element > aggregate_value)
                     {
-                        s = element;
+                        aggregate_value = element;
                     }
                 }
+                else if constexpr (aggregate_operation == AggregateOperation::product)
+                {
+                    if constexpr (IsDecimalNumber<Element>)
+                    {
+                        using AggregateValueDecimalUnderlyingValue = decltype(aggregate_value.value);
+                        AggregateValueDecimalUnderlyingValue current_aggregate_value = aggregate_value.value;
+                        AggregateValueDecimalUnderlyingValue element_value = static_cast<AggregateValueDecimalUnderlyingValue>(element.value);
+
+                        if (common::mulOverflow(current_aggregate_value, element_value, aggregate_value.value))
+                            throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "Decimal math overflow");
+                    }
+                    else
+                    {
+                        aggregate_value *= element;
+                    }
+                }

@@ -250,17 +304,26 @@ struct ArrayAggregateImpl
             {
                 if constexpr (IsDecimalNumber<Element>)
                 {
-                    s = s / count;
-                    res[i] = DecimalUtils::convertTo<Result>(s, data.getScale());
+                    aggregate_value = aggregate_value / count;
+                    res[i] = DecimalUtils::convertTo<ResultType>(aggregate_value, data.getScale());
                 }
                 else
                 {
-                    res[i] = static_cast<Result>(s) / count;
+                    res[i] = static_cast<ResultType>(aggregate_value) / count;
                 }
             }
+            else if constexpr (aggregate_operation == AggregateOperation::product && IsDecimalNumber<Element>)
+            {
+                auto result_scale = data.getScale() * count;
+
+                if (unlikely(result_scale > DecimalUtils::max_precision<AggregationType>))
+                    throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Scale {} is out of bounds", result_scale);
+
+                res[i] = DecimalUtils::convertTo<ResultType>(aggregate_value, result_scale);
+            }
             else
             {
-                res[i] = s;
+                res[i] = aggregate_value;
             }
         }

@@ -291,7 +354,7 @@ struct ArrayAggregateImpl
                  executeType<Decimal128>(mapped, offsets, res))
             return res;
         else
-            throw Exception("Unexpected column for arraySum: " + mapped->getName(), ErrorCodes::ILLEGAL_COLUMN);
+            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected column for arraySum: {}", mapped->getName());
     }
 };

@@ -307,12 +370,16 @@ using FunctionArraySum = FunctionArrayMapped<ArrayAggregateImpl<AggregateOperati
 struct NameArrayAverage { static constexpr auto name = "arrayAvg"; };
 using FunctionArrayAverage = FunctionArrayMapped<ArrayAggregateImpl<AggregateOperation::average>, NameArrayAverage>;

+struct NameArrayProduct { static constexpr auto name = "arrayProduct"; };
+using FunctionArrayProduct = FunctionArrayMapped<ArrayAggregateImpl<AggregateOperation::product>, NameArrayProduct>;
+
 void registerFunctionArrayAggregation(FunctionFactory & factory)
 {
     factory.registerFunction<FunctionArrayMin>();
     factory.registerFunction<FunctionArrayMax>();
     factory.registerFunction<FunctionArraySum>();
     factory.registerFunction<FunctionArrayAverage>();
+    factory.registerFunction<FunctionArrayProduct>();
 }

 }
@@ -35,7 +35,7 @@ PEERDIR(
 # "Arcadia" build is slightly deficient. It lacks many libraries that we need.

 SRCS(
-    <? find . -name '*.cpp' | grep -i -v -P 'tests|Bitmap|abtesting' | sed 's/^\.\// /' | sort ?>
+    <? find . -name '*.cpp' | grep -i -v -P 'tests|Bitmap|abtesting' | grep -v -F examples | sed 's/^\.\// /' | sort ?>
 )

 END()
@@ -9,6 +9,8 @@

 #if defined(__OpenBSD__) || defined(__FreeBSD__) || defined (__ANDROID__)
 #    include <sys/endian.h>
+#elif defined(__sun)
+#    include <endian.h>
 #elif defined(__APPLE__)
 #    include <libkern/OSByteOrder.h>
@ -24,9 +24,9 @@ struct ConnectionTimeouts

ConnectionTimeouts() = default;

ConnectionTimeouts(const Poco::Timespan & connection_timeout_,
const Poco::Timespan & send_timeout_,
const Poco::Timespan & receive_timeout_)
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),

@ -38,10 +38,10 @@ struct ConnectionTimeouts
{
}

ConnectionTimeouts(const Poco::Timespan & connection_timeout_,
const Poco::Timespan & send_timeout_,
const Poco::Timespan & receive_timeout_,
const Poco::Timespan & tcp_keep_alive_timeout_)
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),

@ -52,11 +52,11 @@ struct ConnectionTimeouts
receive_data_timeout(receive_timeout_)
{
}
ConnectionTimeouts(const Poco::Timespan & connection_timeout_,
const Poco::Timespan & send_timeout_,
const Poco::Timespan & receive_timeout_,
const Poco::Timespan & tcp_keep_alive_timeout_,
const Poco::Timespan & http_keep_alive_timeout_)
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_,
Poco::Timespan http_keep_alive_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),

@ -68,14 +68,14 @@ struct ConnectionTimeouts
{
}

ConnectionTimeouts(const Poco::Timespan & connection_timeout_,
const Poco::Timespan & send_timeout_,
const Poco::Timespan & receive_timeout_,
const Poco::Timespan & tcp_keep_alive_timeout_,
const Poco::Timespan & http_keep_alive_timeout_,
const Poco::Timespan & secure_connection_timeout_,
const Poco::Timespan & receive_hello_timeout_,
const Poco::Timespan & receive_data_timeout_)
ConnectionTimeouts(Poco::Timespan connection_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
Poco::Timespan tcp_keep_alive_timeout_,
Poco::Timespan http_keep_alive_timeout_,
Poco::Timespan secure_connection_timeout_,
Poco::Timespan receive_hello_timeout_,
Poco::Timespan receive_data_timeout_)
: connection_timeout(connection_timeout_),
send_timeout(send_timeout_),
receive_timeout(receive_timeout_),

@ -87,7 +87,7 @@ struct ConnectionTimeouts
{
}

static Poco::Timespan saturate(const Poco::Timespan & timespan, const Poco::Timespan & limit)
static Poco::Timespan saturate(Poco::Timespan timespan, Poco::Timespan limit)
{
if (limit.totalMicroseconds() == 0)
return timespan;

@ -95,7 +95,7 @@ struct ConnectionTimeouts
return (timespan > limit) ? limit : timespan;
}

ConnectionTimeouts getSaturated(const Poco::Timespan & limit) const
ConnectionTimeouts getSaturated(Poco::Timespan limit) const
{
return ConnectionTimeouts(saturate(connection_timeout, limit),
saturate(send_timeout, limit),
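
The recurring change in this file, taking Poco::Timespan by value instead of by const reference, follows the usual guidance for small, trivially copyable types: the copy is as cheap as (or cheaper than) an extra indirection. A minimal sketch of the idea, with a hypothetical function name:

    // Poco::Timespan is a thin wrapper around a 64-bit microsecond count,
    // so passing it by value costs no more than passing a pointer.
    #include <Poco/Timespan.h>

    Poco::Timespan clampTimeout(Poco::Timespan value, Poco::Timespan limit)  // by value
    {
        if (limit.totalMicroseconds() == 0)
            return value;
        return value > limit ? limit : value;
    }
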
@ -8,7 +8,7 @@

namespace DB
{

using AsyncCallback = std::function<void(int, const Poco::Timespan &, const std::string &)>;
using AsyncCallback = std::function<void(int, Poco::Timespan, const std::string &)>;

/// Works with the ready Poco::Net::Socket. Blocking operations.
class ReadBufferFromPocoSocket : public BufferWithOwnMemory<ReadBuffer>

@ -7,8 +7,8 @@ namespace DB
{

TimeoutSetter::TimeoutSetter(Poco::Net::StreamSocket & socket_,
const Poco::Timespan & send_timeout_,
const Poco::Timespan & receive_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
bool limit_max_timeout)
: socket(socket_), send_timeout(send_timeout_), receive_timeout(receive_timeout_)
{

@ -22,7 +22,7 @@ TimeoutSetter::TimeoutSetter(Poco::Net::StreamSocket & socket_,
socket.setReceiveTimeout(receive_timeout);
}

TimeoutSetter::TimeoutSetter(Poco::Net::StreamSocket & socket_, const Poco::Timespan & timeout_, bool limit_max_timeout)
TimeoutSetter::TimeoutSetter(Poco::Net::StreamSocket & socket_, Poco::Timespan timeout_, bool limit_max_timeout)
: TimeoutSetter(socket_, timeout_, timeout_, limit_max_timeout)
{
}

@ -11,11 +11,11 @@ namespace DB
struct TimeoutSetter
{
TimeoutSetter(Poco::Net::StreamSocket & socket_,
const Poco::Timespan & send_timeout_,
const Poco::Timespan & receive_timeout_,
Poco::Timespan send_timeout_,
Poco::Timespan receive_timeout_,
bool limit_max_timeout = false);

TimeoutSetter(Poco::Net::StreamSocket & socket_, const Poco::Timespan & timeout_, bool limit_max_timeout = false);
TimeoutSetter(Poco::Net::StreamSocket & socket_, Poco::Timespan timeout_, bool limit_max_timeout = false);

~TimeoutSetter();

@ -18,7 +18,7 @@ PEERDIR(

SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'S3|HDFS' | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | grep -v -P 'S3|HDFS' | sed 's/^\.\// /' | sort ?>
)

END()

@ -541,7 +541,7 @@ Cluster::Cluster(const Settings & settings, const std::vector<std::vector<String
}

Poco::Timespan Cluster::saturate(const Poco::Timespan & v, const Poco::Timespan & limit)
Poco::Timespan Cluster::saturate(Poco::Timespan v, Poco::Timespan limit)
{
if (limit.totalMicroseconds() == 0)
return v;

@ -52,7 +52,7 @@ public:
Cluster & operator=(const Cluster &) = delete;

/// is used to set a limit on the size of the timeout
static Poco::Timespan saturate(const Poco::Timespan & v, const Poco::Timespan & limit);
static Poco::Timespan saturate(Poco::Timespan v, Poco::Timespan limit);

public:
using SlotToShard = std::vector<UInt64>;

@ -42,7 +42,7 @@ public:
/// Load configuration from some concrete source to AbstractConfiguration
virtual LoadablesConfigurationPtr load(const std::string & path) = 0;

virtual ~IExternalLoaderConfigRepository() {}
virtual ~IExternalLoaderConfigRepository() = default;
};

}
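
Swapping the empty destructor body for "= default" is a pure idiom fix: it states intent and lets the compiler generate the destructor with the usual noexcept and triviality deductions. In isolation:

    struct IRepository
    {
        virtual ~IRepository() = default;  // preferred over: virtual ~IRepository() {}
    };
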
@ -17,7 +17,7 @@ NO_COMPILER_WARNINGS()

SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F JIT | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F JIT | grep -v -F examples | sed 's/^\.\// /' | sort ?>
)

END()

@ -8,7 +8,7 @@ PEERDIR(

SRCS(
<? find . -name '*.cpp' | grep -v -F New | grep -v -F tests | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F New | grep -v -F tests | grep -v -F examples | sed 's/^\.\// /' | sort ?>
)

END()

@ -1,4 +1,7 @@
#include <sys/ioctl.h>
#if defined(OS_SUNOS)
# include <sys/termios.h>
#endif
#include <unistd.h>
#include <Processors/Formats/Impl/PrettyBlockOutputFormat.h>
#include <Formats/FormatFactory.h>

@ -10,7 +10,7 @@ PEERDIR(

SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'Arrow|Avro|ORC|Parquet|CapnProto' | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | grep -v -P 'Arrow|Avro|ORC|Parquet|CapnProto' | sed 's/^\.\// /' | sort ?>
)

END()

@ -475,7 +475,7 @@ void TCPHandler::runImpl()
}

bool TCPHandler::readDataNext(const size_t & poll_interval, const int & receive_timeout)
bool TCPHandler::readDataNext(size_t poll_interval, time_t receive_timeout)
{
Stopwatch watch(CLOCK_MONOTONIC_COARSE);

@ -493,8 +493,8 @@ bool TCPHandler::readDataNext(const size_t & poll_interval, const int & receive_
* If we periodically poll, the receive_timeout of the socket itself does not work.
* Therefore, an additional check is added.
*/
double elapsed = watch.elapsedSeconds();
if (elapsed > receive_timeout)
Float64 elapsed = watch.elapsedSeconds();
if (elapsed > static_cast<Float64>(receive_timeout))
{
throw Exception(ErrorCodes::SOCKET_TIMEOUT,
"Timeout exceeded while receiving data from client. Waited for {} seconds, timeout is {} seconds.",

@ -535,10 +535,7 @@ std::tuple<size_t, int> TCPHandler::getReadTimeouts(const Settings & connection_

void TCPHandler::readData(const Settings & connection_settings)
{
size_t poll_interval;
int receive_timeout;

std::tie(poll_interval, receive_timeout) = getReadTimeouts(connection_settings);
auto [poll_interval, receive_timeout] = getReadTimeouts(connection_settings);
sendLogs();

while (readDataNext(poll_interval, receive_timeout))
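
The readData change replaces a declare-then-std::tie dance with a C++17 structured binding, which also lets the compiler deduce the types returned by getReadTimeouts. A standalone sketch of the pattern (getTimeouts and its return values are hypothetical stand-ins):

    #include <cstddef>
    #include <ctime>
    #include <tuple>

    // Hypothetical stand-in for getReadTimeouts().
    std::tuple<std::size_t, std::time_t> getTimeouts() { return {10, 300}; }

    void readLoopSketch()
    {
        // Before: size_t poll_interval; time_t receive_timeout;
        //         std::tie(poll_interval, receive_timeout) = getTimeouts();
        auto [poll_interval, receive_timeout] = getTimeouts();  // C++17
        (void) poll_interval;
        (void) receive_timeout;
    }
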
@ -174,7 +174,7 @@ private:
void receiveIgnoredPartUUIDs();
String receiveReadTaskResponseAssumeLocked();
bool receiveData(bool scalar);
bool readDataNext(const size_t & poll_interval, const int & receive_timeout);
bool readDataNext(size_t poll_interval, time_t receive_timeout);
void readData(const Settings & connection_settings);
void receiveClusterNameAndSalt();
std::tuple<size_t, int> getReadTimeouts(const Settings & connection_settings);

@ -9,7 +9,7 @@ PEERDIR(

SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F examples | sed 's/^\.\// /' | sort ?>
)

END()

@ -122,11 +122,10 @@ public:
current_path = uri + path;

auto compression = chooseCompressionMethod(path, compression_method);
auto read_buf = wrapReadBufferWithCompressionMethod(std::make_unique<ReadBufferFromHDFS>(uri, path, getContext()->getGlobalContext()->getConfigRef()), compression);
read_buf = wrapReadBufferWithCompressionMethod(std::make_unique<ReadBufferFromHDFS>(uri, path, getContext()->getGlobalContext()->getConfigRef()), compression);
auto input_format = FormatFactory::instance().getInput(format, *read_buf, sample_block, getContext(), max_block_size);
auto input_stream = std::make_shared<InputStreamFromInputFormat>(input_format);

reader = std::make_shared<OwningBlockInputStream<ReadBuffer>>(input_stream, std::move(read_buf));
reader = std::make_shared<InputStreamFromInputFormat>(input_format);
reader->readPrefix();
}

@ -156,10 +155,12 @@ public:

reader->readSuffix();
reader.reset();
read_buf.reset();
}
}

private:
std::unique_ptr<ReadBuffer> read_buf;
BlockInputStreamPtr reader;
SourcesInfoPtr source_info;
String uri;
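
The fix above is about lifetime: the compressed read buffer used to be a local wrapped into an OwningBlockInputStream; making it the read_buf member guarantees it outlives the input format that reads from it, and lets the teardown release the borrower before the buffer. A reduced sketch of the ownership shape, with hypothetical stand-in types:

    #include <memory>

    struct ReadBuffer { /* ... */ };          // hypothetical stand-ins
    struct Reader { ReadBuffer & buf; };      // borrows, does not own

    class Source
    {
    public:
        void open()
        {
            read_buf = std::make_unique<ReadBuffer>();
            reader = std::make_shared<Reader>(Reader{*read_buf});  // reader borrows read_buf
        }
        void close()
        {
            reader.reset();     // destroy the borrower first...
            read_buf.reset();   // ...then the buffer it borrowed
        }
    private:
        std::unique_ptr<ReadBuffer> read_buf;  // member: outlives `reader`
        std::shared_ptr<Reader> reader;
    };
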
@ -1653,13 +1653,16 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN);
}

const auto & deps_mv = name_deps[command.column_name];
if (!deps_mv.empty())
if (!command.clear)
{
throw Exception(
"Trying to ALTER DROP column " + backQuoteIfNeed(command.column_name) + " which is referenced by materialized view "
+ toString(deps_mv),
ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN);
const auto & deps_mv = name_deps[command.column_name];
if (!deps_mv.empty())
{
throw Exception(
"Trying to ALTER DROP column " + backQuoteIfNeed(command.column_name) + " which is referenced by materialized view "
+ toString(deps_mv),
ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN);
}
}

dropped_columns.emplace(command.column_name);
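
The reshuffled condition makes the materialized-view check apply only to a genuine DROP COLUMN: CLEAR COLUMN arrives with command.clear set, keeps the column in place, and merely empties it, so a view selecting from it stays valid. A reduced model of the guard (the types and helper below are hypothetical):

    #include <stdexcept>
    #include <string>

    enum class AlterType { DROP_COLUMN /* ... */ };

    struct AlterCommand { AlterType type; bool clear; std::string column_name; };  // reduced model

    void checkDrop(const AlterCommand & command, bool referenced_by_view)
    {
        // CLEAR COLUMN is modelled as DROP_COLUMN with clear == true; it keeps
        // the column's structure intact, so dependent views remain valid.
        if (command.type == AlterType::DROP_COLUMN && !command.clear && referenced_by_view)
            throw std::runtime_error("column " + command.column_name + " is referenced by a materialized view");
    }
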
@ -941,7 +941,10 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r

auto columns = block.getColumns();
filterColumns(columns, row_level_filter);
block.setColumns(columns);
if (columns.empty())
block = block.cloneEmpty();
else
block.setColumns(columns);
}

prewhere_info->prewhere_actions->execute(block);

@ -1,5 +1,6 @@
#pragma once

#include <mutex>
#include <Core/Types.h>
#include <Poco/Util/AbstractConfiguration.h>
#include "PostgreSQLConnectionPool.h"

@ -998,7 +998,7 @@ void StorageBuffer::checkAlterIsPossible(const AlterCommands & commands, Context
throw Exception(
"Alter of type '" + alterTypeToString(command.type) + "' is not supported by storage " + getName(),
ErrorCodes::NOT_IMPLEMENTED);
if (command.type == AlterCommand::Type::DROP_COLUMN)
if (command.type == AlterCommand::Type::DROP_COLUMN && !command.clear)
{
const auto & deps_mv = name_deps[command.column_name];
if (!deps_mv.empty())

@ -723,7 +723,7 @@ void StorageDistributed::checkAlterIsPossible(const AlterCommands & commands, Co

throw Exception("Alter of type '" + alterTypeToString(command.type) + "' is not supported by storage " + getName(),
ErrorCodes::NOT_IMPLEMENTED);
if (command.type == AlterCommand::DROP_COLUMN)
if (command.type == AlterCommand::DROP_COLUMN && !command.clear)
{
const auto & deps_mv = name_deps[command.column_name];
if (!deps_mv.empty())

@ -510,7 +510,7 @@ void StorageMerge::checkAlterIsPossible(const AlterCommands & commands, ContextP
throw Exception(
"Alter of type '" + alterTypeToString(command.type) + "' is not supported by storage " + getName(),
ErrorCodes::NOT_IMPLEMENTED);
if (command.type == AlterCommand::Type::DROP_COLUMN)
if (command.type == AlterCommand::Type::DROP_COLUMN && !command.clear)
{
const auto & deps_mv = name_deps[command.column_name];
if (!deps_mv.empty())

@ -46,7 +46,7 @@ void StorageNull::checkAlterIsPossible(const AlterCommands & commands, ContextPt
throw Exception(
"Alter of type '" + alterTypeToString(command.type) + "' is not supported by storage " + getName(),
ErrorCodes::NOT_IMPLEMENTED);
if (command.type == AlterCommand::DROP_COLUMN)
if (command.type == AlterCommand::DROP_COLUMN && !command.clear)
{
const auto & deps_mv = name_deps[command.column_name];
if (!deps_mv.empty())
@ -10,7 +10,7 @@ PEERDIR(

SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'Kafka|RabbitMQ|S3|HDFS|Licenses|TimeZones|RocksDB|PostgreSQL' | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F examples | grep -v -P 'Kafka|RabbitMQ|S3|HDFS|Licenses|TimeZones|RocksDB|PostgreSQL' | sed 's/^\.\// /' | sort ?>
)

END()

@ -22,7 +22,7 @@ private:

/* A factory method to create bridge helper, that will assist in remote interaction */
virtual BridgeHelperPtr createBridgeHelper(ContextPtr context,
const Poco::Timespan & http_timeout_,
Poco::Timespan http_timeout_,
const std::string & connection_string_) const = 0;

ColumnsDescription getActualTableStructure(ContextPtr context) const override;

@ -48,7 +48,7 @@ public:

private:
BridgeHelperPtr createBridgeHelper(ContextPtr context,
const Poco::Timespan & http_timeout_,
Poco::Timespan http_timeout_,
const std::string & connection_string_) const override
{
return std::make_shared<XDBCBridgeHelper<JDBCBridgeMixin>>(context, http_timeout_, connection_string_);

@ -68,7 +68,7 @@ public:

private:
BridgeHelperPtr createBridgeHelper(ContextPtr context,
const Poco::Timespan & http_timeout_,
Poco::Timespan http_timeout_,
const std::string & connection_string_) const override
{
return std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(context, http_timeout_, connection_string_);

@ -8,7 +8,7 @@ PEERDIR(

SRCS(
<? find . -name '*.cpp' | grep -v -P 'S3|HDFS|PostgreSQL' | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -P 'S3|HDFS|PostgreSQL' | grep -v -F examples | sed 's/^\.\// /' | sort ?>
)

END()

@ -492,7 +492,7 @@ def run_tests_array(all_tests_with_params):
failures_chain += 1
status += MSG_FAIL
status += print_test_time(total_time)
status += " - Test runs too long (> 30s). Make it faster."
status += " - Test runs too long (> 30s). Make it faster.\n"
status += 'Database: ' + testcase_args.testcase_database
else:
passed_total += 1
@ -6,10 +6,28 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

for _ in {1..200}; do echo "drop table if exists view_00840" | $CLICKHOUSE_CLIENT; echo "create view view_00840 as select count(*),database,table from system.columns group by database,table" | $CLICKHOUSE_CLIENT; done &
for _ in {1..500}; do echo "select * from view_00840 order by table" | $CLICKHOUSE_CLIENT >/dev/null 2>&1 || true; done &
function cleanup()
{
echo Failed
wait
}

trap cleanup EXIT

$CLICKHOUSE_CLIENT -q "create view view_00840 as select count(*),database,table from system.columns group by database,table"

for _ in {1..200}; do
$CLICKHOUSE_CLIENT -nm -q "
drop table if exists view_00840;
create view view_00840 as select count(*),database,table from system.columns group by database,table;
"
done &
for _ in {1..500}; do
$CLICKHOUSE_CLIENT -q "select * from view_00840 order by table" >/dev/null 2>&1 || true
done &

wait
trap '' EXIT

echo "drop table view_00840" | $CLICKHOUSE_CLIENT
18 tests/queries/0_stateless/01768_array_product.reference Normal file
@ -0,0 +1,18 @@
Array product with constant column
720 Float64
24 Float64
3.5 Float64
6 Float64
Array product with non constant column
24
0
6
24
0
6
Types of aggregation result array product
Float64 Float64 Float64 Float64
Float64 Float64 Float64 Float64
Float64 Float64 Float64
Float64 Float64
Float64 Float64 Float64
26 tests/queries/0_stateless/01768_array_product.sql Normal file
@ -0,0 +1,26 @@
SELECT 'Array product with constant column';

SELECT arrayProduct([1,2,3,4,5,6]) as a, toTypeName(a);
SELECT arrayProduct(array(1.0,2.0,3.0,4.0)) as a, toTypeName(a);
SELECT arrayProduct(array(1,3.5)) as a, toTypeName(a);
SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as a, toTypeName(a);

SELECT 'Array product with non constant column';

DROP TABLE IF EXISTS test_aggregation;
CREATE TABLE test_aggregation (x Array(Int)) ENGINE=TinyLog;
INSERT INTO test_aggregation VALUES ([1,2,3,4]), ([]), ([1,2,3]);
SELECT arrayProduct(x) FROM test_aggregation;
DROP TABLE test_aggregation;

CREATE TABLE test_aggregation (x Array(Decimal64(8))) ENGINE=TinyLog;
INSERT INTO test_aggregation VALUES ([1,2,3,4]), ([]), ([1,2,3]);
SELECT arrayProduct(x) FROM test_aggregation;
DROP TABLE test_aggregation;

SELECT 'Types of aggregation result array product';
SELECT toTypeName(arrayProduct([toInt8(0)])), toTypeName(arrayProduct([toInt16(0)])), toTypeName(arrayProduct([toInt32(0)])), toTypeName(arrayProduct([toInt64(0)]));
SELECT toTypeName(arrayProduct([toUInt8(0)])), toTypeName(arrayProduct([toUInt16(0)])), toTypeName(arrayProduct([toUInt32(0)])), toTypeName(arrayProduct([toUInt64(0)]));
SELECT toTypeName(arrayProduct([toInt128(0)])), toTypeName(arrayProduct([toInt256(0)])), toTypeName(arrayProduct([toUInt256(0)]));
SELECT toTypeName(arrayProduct([toFloat32(0)])), toTypeName(arrayProduct([toFloat64(0)]));
SELECT toTypeName(arrayProduct([toDecimal32(0, 8)])), toTypeName(arrayProduct([toDecimal64(0, 8)])), toTypeName(arrayProduct([toDecimal128(0, 8)]));
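
Per the reference output above, arrayProduct returns Float64 for every element type, and the empty-array row produces 0 in these tests. A reduced C++ model of the semantics exercised here (the function name is hypothetical, and the empty-array behavior is taken from the reference file rather than the mathematical empty product of 1):

    #include <iostream>
    #include <vector>

    // Accumulate in double (Float64), matching the observed result type.
    double arrayProductModel(const std::vector<double> & values)
    {
        if (values.empty())
            return 0.0;  // convention observed in the test output above
        double result = 1.0;
        for (double v : values)
            result *= v;
        return result;
    }

    int main()
    {
        std::cout << arrayProductModel({1, 2, 3, 4, 5, 6}) << '\n';  // 720
        std::cout << arrayProductModel({}) << '\n';                  // 0
    }
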
@ -0,0 +1,12 @@
INSERT INTO main_table_01818
INSERT INTO tmp_table_01818
INSERT INTO tmp_table_01818
ALL tmp_table_01818 200
ALL main_table_01818 100
tmp_table_01818 100
main_table_01818 100
Executing ALTER TABLE MOVE PARTITION...
ALL tmp_table_01818 100
ALL main_table_01818 200
tmp_table_01818 0
main_table_01818 200
122 tests/queries/0_stateless/01818_move_partition_simple.sql Normal file
@ -0,0 +1,122 @@
DROP TABLE IF EXISTS main_table_01818;
DROP TABLE IF EXISTS tmp_table_01818;

CREATE TABLE main_table_01818
(
`id` UInt32,
`advertiser_id` String,
`campaign_id` String,
`name` String,
`budget` Float64,
`budget_mode` String,
`landing_type` String,
`status` String,
`modify_time` String,
`campaign_type` String,
`campaign_create_time` DateTime,
`campaign_modify_time` DateTime,
`create_time` DateTime,
`update_time` DateTime
)
ENGINE = MergeTree
PARTITION BY advertiser_id
ORDER BY campaign_id
SETTINGS index_granularity = 8192;

CREATE TABLE tmp_table_01818
(
`id` UInt32,
`advertiser_id` String,
`campaign_id` String,
`name` String,
`budget` Float64,
`budget_mode` String,
`landing_type` String,
`status` String,
`modify_time` String,
`campaign_type` String,
`campaign_create_time` DateTime,
`campaign_modify_time` DateTime,
`create_time` DateTime,
`update_time` DateTime
)
ENGINE = MergeTree
PARTITION BY advertiser_id
ORDER BY campaign_id
SETTINGS index_granularity = 8192;

SELECT 'INSERT INTO main_table_01818';
INSERT INTO main_table_01818 SELECT 1 as `id`, 'ClickHouse' as `advertiser_id`, * EXCEPT (`id`, `advertiser_id`)
FROM generateRandom(
'`id` UInt32,
`advertiser_id` String,
`campaign_id` String,
`name` String,
`budget` Float64,
`budget_mode` String,
`landing_type` String,
`status` String,
`modify_time` String,
`campaign_type` String,
`campaign_create_time` DateTime,
`campaign_modify_time` DateTime,
`create_time` DateTime,
`update_time` DateTime', 10, 10, 10)
LIMIT 100;

SELECT 'INSERT INTO tmp_table_01818';
INSERT INTO tmp_table_01818 SELECT 2 as `id`, 'Database' as `advertiser_id`, * EXCEPT (`id`, `advertiser_id`)
FROM generateRandom(
'`id` UInt32,
`advertiser_id` String,
`campaign_id` String,
`name` String,
`budget` Float64,
`budget_mode` String,
`landing_type` String,
`status` String,
`modify_time` String,
`campaign_type` String,
`campaign_create_time` DateTime,
`campaign_modify_time` DateTime,
`create_time` DateTime,
`update_time` DateTime', 10, 10, 10)
LIMIT 100;

SELECT 'INSERT INTO tmp_table_01818';
INSERT INTO tmp_table_01818 SELECT 3 as `id`, 'ClickHouse' as `advertiser_id`, * EXCEPT (`id`, `advertiser_id`)
FROM generateRandom(
'`id` UInt32,
`advertiser_id` String,
`campaign_id` String,
`name` String,
`budget` Float64,
`budget_mode` String,
`landing_type` String,
`status` String,
`modify_time` String,
`campaign_type` String,
`campaign_create_time` DateTime,
`campaign_modify_time` DateTime,
`create_time` DateTime,
`update_time` DateTime', 10, 10, 10)
LIMIT 100;

SELECT 'ALL tmp_table_01818', count() FROM tmp_table_01818;
SELECT 'ALL main_table_01818', count() FROM main_table_01818;
SELECT 'tmp_table_01818', count() FROM tmp_table_01818 WHERE `advertiser_id` = 'ClickHouse';
SELECT 'main_table_01818', count() FROM main_table_01818 WHERE `advertiser_id` = 'ClickHouse';

SELECT 'Executing ALTER TABLE MOVE PARTITION...';
ALTER TABLE tmp_table_01818 MOVE PARTITION 'ClickHouse' TO TABLE main_table_01818;

SELECT 'ALL tmp_table_01818', count() FROM tmp_table_01818;
SELECT 'ALL main_table_01818', count() FROM main_table_01818;
SELECT 'tmp_table_01818', count() FROM tmp_table_01818 WHERE `advertiser_id` = 'ClickHouse';
SELECT 'main_table_01818', count() FROM main_table_01818 WHERE `advertiser_id` = 'ClickHouse';

DROP TABLE IF EXISTS main_table_01818;
DROP TABLE IF EXISTS tmp_table_01818;
@ -0,0 +1,35 @@
DROP TABLE IF EXISTS `01851_merge_tree`;
CREATE TABLE `01851_merge_tree`
(
`n1` Int8,
`n2` Int8,
`n3` Int8,
`n4` Int8
)
ENGINE = MergeTree
ORDER BY n1;

DROP TABLE IF EXISTS `01851_merge_tree_mv`;
CREATE MATERIALIZED VIEW `01851_merge_tree_mv`
ENGINE = Memory AS
SELECT
n2,
n3
FROM `01851_merge_tree`;

ALTER TABLE `01851_merge_tree`
DROP COLUMN n3; -- { serverError 524 }

ALTER TABLE `01851_merge_tree`
DROP COLUMN n2; -- { serverError 524 }

-- ok
ALTER TABLE `01851_merge_tree`
DROP COLUMN n4;

-- CLEAR COLUMN is OK
ALTER TABLE `01851_merge_tree`
CLEAR COLUMN n2;

DROP TABLE `01851_merge_tree`;
DROP TABLE `01851_merge_tree_mv`;
@ -0,0 +1,12 @@
drop table if exists tbl;
create table tbl (s String, i int) engine MergeTree order by i;

insert into tbl values ('123', 123);

drop row policy if exists filter on tbl;
create row policy filter on tbl using (s = 'non_existing_domain') to all;

select * from tbl prewhere s = '123' where i = 123;

drop row policy filter on tbl;
drop table tbl;
Some files were not shown because too many files have changed in this diff.