Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-06 07:32:27 +00:00)
Merge branch 'master' of github.com:ClickHouse/ClickHouse into feature-quantileBfloat16
Commit: 9417460d0b
@@ -290,6 +290,12 @@ if (COMPILER_GCC OR COMPILER_CLANG)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
 endif ()
 
+# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
+# benchmarks.
+if (COMPILER_GCC OR COMPILER_CLANG)
+    set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+endif ()
+
 # Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
 option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
@@ -12,7 +12,8 @@
 ///
 /// NOTE: it should be used with caution.
 #define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \
-    MemoryTracker::LockExceptionInThread lock_memory_tracker; \
+    MemoryTracker::LockExceptionInThread \
+        lock_memory_tracker(VariableContext::Global); \
     __VA_ARGS__; \
 )
@@ -56,7 +57,8 @@
 #define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \
     try \
     { \
-        MemoryTracker::LockExceptionInThread lock_memory_tracker; \
+        MemoryTracker::LockExceptionInThread \
+            lock_memory_tracker(VariableContext::Global); \
         __VA_ARGS__; \
     } \
     catch (...) \
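A note for readers on the pattern these macros wrap, since the diff only shows the changed lock line: a scope guard runs its body at scope exit, and the RAII lock object suppresses memory-tracker exceptions while that body runs, so cleanup cannot be interrupted by an allocation-limit throw. The sketch below is a minimal standalone illustration; ScopeGuard, ExceptionLock, and Scope are invented stand-ins, not ClickHouse's scope_guard or MemoryTracker types, and the change in this commit merely widens the lock from thread scope to VariableContext::Global.

    #include <functional>
    #include <iostream>
    #include <utility>

    // Runs the stored callback when the guard is destroyed (scope exit).
    struct ScopeGuard
    {
        std::function<void()> fn;
        explicit ScopeGuard(std::function<void()> f) : fn(std::move(f)) {}
        ~ScopeGuard() { fn(); }
    };

    // Stand-in for MemoryTracker::LockExceptionInThread: while alive,
    // a thread-local flag tells the tracker not to throw on allocation limits.
    enum class Scope { Thread, Global };
    struct ExceptionLock
    {
        explicit ExceptionLock(Scope scope) { (void)scope; locked = true; }
        ~ExceptionLock() { locked = false; }
        static thread_local bool locked;
    };
    thread_local bool ExceptionLock::locked = false;

    int main()
    {
        {
            // Same shape as SCOPE_EXIT_MEMORY(...): the lock is taken first
            // inside the guard body, so the cleanup code runs with
            // memory-tracking exceptions suppressed.
            ScopeGuard guard([]
            {
                ExceptionLock lock(Scope::Global);
                std::cout << "cleanup, exceptions suppressed: " << ExceptionLock::locked << '\n';
            });
            std::cout << "doing work\n";
        } // guard fires here
    }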
contrib/CMakeLists.txt | 6 (vendored)
@@ -96,14 +96,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
     add_subdirectory (${INTERNAL_ZLIB_NAME})
     # We should use same defines when including zlib.h as used when zlib compiled
     target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
     if (TARGET zlibstatic)
         target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
     endif ()
-    if (ARCH_AMD64 OR ARCH_AARCH64)
-        target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
-        if (TARGET zlibstatic)
-            target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
-        endif ()
-    endif ()
 endif ()
contrib/zlib-ng | 2 (vendored)
@@ -1 +1 @@
-Subproject commit 6fd1846c8b8f59436fe2dd752d0f316ddbb64df6
+Subproject commit b82d3497a5afc46dec3c5d07e4b163b169f251d7
debian/clickhouse-common-static.install | 1 (vendored)
@@ -3,4 +3,3 @@ usr/bin/clickhouse-odbc-bridge
 usr/bin/clickhouse-library-bridge
 usr/bin/clickhouse-extract-from-config
 usr/share/bash-completion/completions
-etc/security/limits.d/clickhouse.conf
debian/clickhouse-server.config | 16 (vendored)
@@ -1,16 +0,0 @@
-#!/bin/sh -e
-
-test -f /usr/share/debconf/confmodule && . /usr/share/debconf/confmodule
-
-db_fget clickhouse-server/default-password seen || true
-password_seen="$RET"
-
-if [ "$1" = "reconfigure" ]; then
-    password_seen=false
-fi
-
-if [ "$password_seen" != "true" ]; then
-    db_input high clickhouse-server/default-password || true
-    db_go || true
-fi
-db_go || true
debian/clickhouse-server.postinst | 8 (vendored)
@@ -23,11 +23,13 @@ if [ ! -f "/etc/debian_version" ]; then
 fi
 
 if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
+
+    ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
 
     if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
         # if old rc.d service present - remove it
         if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then
             /usr/sbin/update-rc.d clickhouse-server remove
             echo "ClickHouse init script has migrated to systemd. Please manually stop old server and restart the service: sudo killall clickhouse-server && sleep 5 && sudo service clickhouse-server restart"
         fi
 
         /bin/systemctl daemon-reload
@@ -38,10 +40,8 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
             if [ -x "/usr/sbin/update-rc.d" ]; then
                 /usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $?
             else
                 echo # TODO [ "$OS" = "rhel" ] || [ "$OS" = "centos" ] || [ "$OS" = "fedora" ]
                 echo # Other OS
             fi
         fi
     fi
-
-    ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
 fi
debian/clickhouse-server.preinst | 8 (vendored)
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-if [ "$1" = "upgrade" ]; then
-    # Return etc/cron.d/clickhouse-server to original state
-    service clickhouse-server disable_cron ||:
-fi
-
-#DEBHELPER#
debian/clickhouse-server.prerm | 6 (vendored)
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-if [ "$1" = "upgrade" ] || [ "$1" = "remove" ]; then
-    # Return etc/cron.d/clickhouse-server to original state
-    service clickhouse-server disable_cron ||:
-fi
debian/clickhouse-server.templates | 3 (vendored)
@@ -1,3 +0,0 @@
-Template: clickhouse-server/default-password
-Type: password
-Description: Enter password for default user:
debian/clickhouse.limits | 2 (vendored)
@@ -1,2 +0,0 @@
-clickhouse soft nofile 262144
-clickhouse hard nofile 262144
debian/rules | 3 (vendored)
@@ -113,9 +113,6 @@ override_dh_install:
	ln -sf clickhouse-server.docs debian/clickhouse-client.docs
	ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs
 
-	mkdir -p $(DESTDIR)/etc/security/limits.d
-	cp debian/clickhouse.limits $(DESTDIR)/etc/security/limits.d/clickhouse.conf
-
	# systemd compatibility
	mkdir -p $(DESTDIR)/etc/systemd/system/
	cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/
debian/watch | 2 (vendored)
@@ -1,6 +1,6 @@
 version=4
 
 opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
-    https://github.com/yandex/clickhouse/tags \
+    https://github.com/ClickHouse/ClickHouse/tags \
     (?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate
@@ -112,10 +112,13 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
 fi
 tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
 tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
 
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
     pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
     mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
     mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+    tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
+    tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
 fi
@@ -562,20 +562,32 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
 
         bool stdin_is_a_tty = isatty(STDIN_FILENO);
         bool stdout_is_a_tty = isatty(STDOUT_FILENO);
-        bool is_interactive = stdin_is_a_tty && stdout_is_a_tty;
+
+        /// dpkg or apt installers can ask for non-interactive work explicitly.
+
+        const char * debian_frontend_var = getenv("DEBIAN_FRONTEND");
+        bool noninteractive = debian_frontend_var && debian_frontend_var == std::string_view("noninteractive");
+
+        bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty;
+
+        /// We can ask password even if stdin is closed/redirected but /dev/tty is available.
+        bool can_ask_password = !noninteractive && stdout_is_a_tty;
 
         if (has_password_for_default_user)
        {
-            fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE,
+            fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE "\n",
                 users_config_file.string(), users_d.string());
         }
-        else if (!is_interactive)
+        else if (!can_ask_password)
         {
-            fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE,
+            fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
                 users_config_file.string(), users_d.string());
         }
         else
         {
             /// NOTE: When installing debian package with dpkg -i, stdin is not a terminal but we are still being able to enter password.
             /// More sophisticated method with /dev/tty is used inside the `readpassphrase` function.
 
             char buf[1000] = {};
             std::string password;
             if (auto * result = readpassphrase("Enter password for default user: ", buf, sizeof(buf), 0))
@@ -603,7 +615,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                     "</yandex>\n";
                 out.sync();
                 out.finalize();
-                fmt::print("Password for default user is saved in file {}.\n", password_file);
+                fmt::print(HILITE "Password for default user is saved in file {}." END_HILITE "\n", password_file);
 #else
                 out << "<yandex>\n"
                     "    <users>\n"
@@ -614,12 +626,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                     "</yandex>\n";
                 out.sync();
                 out.finalize();
-                fmt::print("Password for default user is saved in plaintext in file {}.\n", password_file);
+                fmt::print(HILITE "Password for default user is saved in plaintext in file {}." END_HILITE "\n", password_file);
 #endif
                 has_password_for_default_user = true;
             }
             else
-                fmt::print("Password for default user is empty string. See {} and {} to change it.\n",
+                fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
                     users_config_file.string(), users_d.string());
         }
 
@@ -644,7 +656,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
             " This is optional. Taskstats accounting will be disabled."
             " To enable taskstats accounting you may add the required capability later manually.\"",
             "/tmp/test_setcap.sh", fs::canonical(main_bin_path).string());
-        fmt::print(" {}\n", command);
         executeScript(command);
 #endif
 
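As a usage note on the interactivity logic above: the same checks can be reproduced in a few lines. A minimal standalone sketch (not the actual Install.cpp code), under the assumption that dpkg/apt signal non-interactive runs via DEBIAN_FRONTEND=noninteractive:

    #include <unistd.h>     // isatty
    #include <cstdlib>      // getenv
    #include <cstdio>
    #include <string_view>

    int main()
    {
        bool stdin_is_a_tty = isatty(STDIN_FILENO);
        bool stdout_is_a_tty = isatty(STDOUT_FILENO);

        // dpkg/apt export DEBIAN_FRONTEND=noninteractive to forbid prompts.
        const char * frontend = std::getenv("DEBIAN_FRONTEND");
        bool noninteractive = frontend && frontend == std::string_view("noninteractive");

        bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty;
        // A password can still be read via /dev/tty when stdin is redirected,
        // hence only stdout needs to be a terminal for prompting.
        bool can_ask_password = !noninteractive && stdout_is_a_tty;

        std::printf("interactive=%d can_ask_password=%d\n", is_interactive, can_ask_password);
    }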
@@ -7,7 +7,20 @@
 -->
 <yandex>
     <logger>
-        <!-- Possible levels: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105 -->
+        <!-- Possible levels [1]:
+
+          - none (turns off logging)
+          - fatal
+          - critical
+          - error
+          - warning
+          - notice
+          - information
+          - debug
+          - trace
+
+            [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
+        -->
         <level>trace</level>
         <log>/var/log/clickhouse-server/clickhouse-server.log</log>
         <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
@@ -150,7 +150,7 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_
     ///
     /// And in this case the exception will not be logged, so let's block the
     /// MemoryTracker until the exception will be logged.
-    MemoryTracker::LockExceptionInThread lock_memory_tracker;
+    MemoryTracker::LockExceptionInThread lock_memory_tracker(VariableContext::Global);
 
     try
     {
@@ -24,8 +24,8 @@ namespace
 ///
 /// - when it is explicitly blocked with LockExceptionInThread
 ///
-/// - to avoid std::terminate(), when stack unwinding is currently in progress
-///   in this thread.
+/// - when there are uncaught exceptions objects in the current thread
+///   (to avoid std::terminate())
 ///
 /// NOTE: that since C++11 destructor marked with noexcept by default, and
 /// this means that any throw from destructor (that is not marked with
@@ -311,7 +311,7 @@ void PushingToViewsBlockOutputStream::writeSuffix()
     UInt64 milliseconds = main_watch.elapsedMilliseconds();
     if (views.size() > 1)
     {
-        LOG_TRACE(log, "Pushing from {} to {} views took {} ms.",
+        LOG_DEBUG(log, "Pushing from {} to {} views took {} ms.",
             storage->getStorageID().getNameForLogs(), views.size(),
             milliseconds);
     }
@@ -49,6 +49,16 @@ void copyDataImpl(IBlockInputStream & from, IBlockOutputStream & to, TCancelCall
     to.writeSuffix();
 }
 
+void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<void(const Block & block)> & progress,
+              std::atomic<bool> * is_cancelled)
+{
+    auto is_cancelled_pred = [is_cancelled] ()
+    {
+        return isAtomicSet(is_cancelled);
+    };
+
+    copyDataImpl(from, to, is_cancelled_pred, progress);
+}
+
 inline void doNothing(const Block &) {}
@@ -16,6 +16,9 @@ class Block;
  */
 void copyData(IBlockInputStream & from, IBlockOutputStream & to, std::atomic<bool> * is_cancelled = nullptr);
 
+void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<void(const Block & block)> & progress,
+              std::atomic<bool> * is_cancelled = nullptr);
+
 void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<bool()> & is_cancelled);
 
 void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function<bool()> & is_cancelled,
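A usage sketch for the new progress-callback overload added above. This standalone version uses toy source/sink types rather than the real IBlockInputStream/IBlockOutputStream, but it shows the same shape: a copy loop that reports every block to a callback and honors an atomic cancellation flag, which is exactly how StorageKafka::streamToViews (further down in this diff) counts rows:

    #include <atomic>
    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <vector>

    struct Block { size_t rows = 0; };

    // Generic copy loop: pull blocks from a source, push to a sink,
    // report each block to `progress`, stop early if `is_cancelled` is set.
    void copyBlocks(std::function<std::optional<Block>()> read,
                    std::function<void(const Block &)> write,
                    const std::function<void(const Block &)> & progress,
                    std::atomic<bool> * is_cancelled)
    {
        while (auto block = read())
        {
            if (is_cancelled && is_cancelled->load())
                break;
            write(*block);
            progress(*block);
        }
    }

    int main()
    {
        std::vector<Block> source{{100}, {250}, {42}};
        size_t pos = 0;
        std::atomic<bool> cancelled{false};

        size_t rows = 0;  // same row-counting trick as StorageKafka::streamToViews
        copyBlocks(
            [&]() -> std::optional<Block>
            { return pos < source.size() ? std::optional<Block>(source[pos++]) : std::nullopt; },
            [](const Block &) { /* sink */ },
            [&rows](const Block & b) { rows += b.rows; },
            &cancelled);

        std::printf("copied %zu rows\n", rows);
    }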
@@ -122,7 +122,7 @@ void registerDiskS3(DiskFactory & factory)
         throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
 
     client_configuration.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 10000);
-    client_configuration.httpRequestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
+    client_configuration.requestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
     client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100);
     client_configuration.endpointOverride = uri.endpoint;
@@ -50,7 +50,7 @@ BrotliWriteBuffer::BrotliWriteBuffer(std::unique_ptr<WriteBuffer> out_, int comp
 BrotliWriteBuffer::~BrotliWriteBuffer()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     finish();
 }
@@ -50,7 +50,7 @@ LZMADeflatingWriteBuffer::LZMADeflatingWriteBuffer(
 LZMADeflatingWriteBuffer::~LZMADeflatingWriteBuffer()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
 
     finish();
     lzma_end(&lstr);
@@ -81,8 +81,8 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & clientConfigu
     : per_request_configuration(clientConfiguration.perRequestConfiguration)
     , timeouts(ConnectionTimeouts(
           Poco::Timespan(clientConfiguration.connectTimeoutMs * 1000), /// connection timeout.
-          Poco::Timespan(clientConfiguration.httpRequestTimeoutMs * 1000), /// send timeout.
-          Poco::Timespan(clientConfiguration.httpRequestTimeoutMs * 1000) /// receive timeout.
+          Poco::Timespan(clientConfiguration.requestTimeoutMs * 1000), /// send timeout.
+          Poco::Timespan(clientConfiguration.requestTimeoutMs * 1000) /// receive timeout.
           ))
     , remote_host_filter(clientConfiguration.remote_host_filter)
     , s3_max_redirects(clientConfiguration.s3_max_redirects)
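Both this hunk and the registerDiskS3 one above fix the same slip: the send and receive timeouts were populated from httpRequestTimeoutMs instead of the requestTimeoutMs field that the disk registration actually sets. A minimal sketch of the corrected mapping, using plain structs as stand-ins (only the microsecond conversion mirrors Poco::Timespan; field defaults here are illustrative):

    #include <cstdint>
    #include <cstdio>

    struct ClientConfig
    {
        uint32_t connectTimeoutMs = 10000;
        uint32_t requestTimeoutMs = 5000;      // set from ".request_timeout_ms"
        uint32_t httpRequestTimeoutMs = 30000; // unrelated field; reading it here was the bug
    };

    struct Timeouts
    {
        int64_t connect_us, send_us, receive_us;  // Poco::Timespan stores microseconds
    };

    Timeouts makeTimeouts(const ClientConfig & cfg)
    {
        return Timeouts{
            int64_t{cfg.connectTimeoutMs} * 1000,  // connection timeout
            int64_t{cfg.requestTimeoutMs} * 1000,  // send timeout (was httpRequestTimeoutMs)
            int64_t{cfg.requestTimeoutMs} * 1000,  // receive timeout (was httpRequestTimeoutMs)
        };
    }

    int main()
    {
        Timeouts t = makeTimeouts(ClientConfig{});
        std::printf("connect=%lld us, send=%lld us\n", (long long)t.connect_us, (long long)t.send_us);
    }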
@@ -79,7 +79,7 @@ WriteBufferFromFile::~WriteBufferFromFile()
         return;
 
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
 
     next();
@@ -98,7 +98,7 @@ WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor()
     }
 
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     next();
 }
@@ -43,7 +43,7 @@ WriteBufferFromOStream::WriteBufferFromOStream(
 WriteBufferFromOStream::~WriteBufferFromOStream()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     next();
 }
@@ -73,7 +73,7 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_
 WriteBufferFromPocoSocket::~WriteBufferFromPocoSocket()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     next();
 }
@@ -87,7 +87,7 @@ void WriteBufferFromS3::allocateBuffer()
 void WriteBufferFromS3::finalize()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     finalizeImpl();
 }
@@ -95,7 +95,7 @@ public:
     ~WriteBufferFromVector() override
     {
         /// FIXME move final flush into the caller
-        MemoryTracker::LockExceptionInThread lock;
+        MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
         finalize();
     }
 };
@@ -138,7 +138,7 @@ void WriteBufferValidUTF8::finish()
 WriteBufferValidUTF8::~WriteBufferValidUTF8()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     finish();
 }
@@ -49,7 +49,7 @@ ZlibDeflatingWriteBuffer::ZlibDeflatingWriteBuffer(
 ZlibDeflatingWriteBuffer::~ZlibDeflatingWriteBuffer()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
 
     finish();
@@ -31,7 +31,7 @@ ZstdDeflatingWriteBuffer::ZstdDeflatingWriteBuffer(
 ZstdDeflatingWriteBuffer::~ZstdDeflatingWriteBuffer()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
 
     finish();
@@ -13,9 +13,22 @@ namespace DB
 void ApplyWithSubqueryVisitor::visit(ASTPtr & ast, const Data & data)
 {
     if (auto * node_select = ast->as<ASTSelectQuery>())
+        visit(*node_select, data);
+    else
+    {
+        for (auto & child : ast->children)
+            visit(child, data);
+        if (auto * node_func = ast->as<ASTFunction>())
+            visit(*node_func, data);
+        else if (auto * node_table = ast->as<ASTTableExpression>())
+            visit(*node_table, data);
+    }
+}
+
+void ApplyWithSubqueryVisitor::visit(ASTSelectQuery & ast, const Data & data)
+{
     std::optional<Data> new_data;
-    if (auto with = node_select->with())
+    if (auto with = ast.with())
     {
         for (auto & child : with->children)
         {
@@ -29,21 +42,17 @@ void ApplyWithSubqueryVisitor::visit(ASTPtr & ast, const Data & data)
         }
     }
 
-    for (auto & child : node_select->children)
+    for (auto & child : ast.children)
     {
-        if (child != node_select->with())
+        if (child != ast.with())
             visit(child, new_data ? *new_data : data);
     }
 }
-else
-{
-    for (auto & child : ast->children)
+
+void ApplyWithSubqueryVisitor::visit(ASTSelectWithUnionQuery & ast, const Data & data)
+{
+    for (auto & child : ast.children)
         visit(child, data);
-    if (auto * node_func = ast->as<ASTFunction>())
-        visit(*node_func, data);
-    else if (auto * node_table = ast->as<ASTTableExpression>())
-        visit(*node_table, data);
-}
 }
 
 void ApplyWithSubqueryVisitor::visit(ASTTableExpression & table, const Data & data)
@@ -7,6 +7,8 @@
 namespace DB
 {
 class ASTFunction;
+class ASTSelectQuery;
+class ASTSelectWithUnionQuery;
 struct ASTTableExpression;
 
 class ApplyWithSubqueryVisitor
@@ -18,9 +20,13 @@ public:
     };
 
     static void visit(ASTPtr & ast) { visit(ast, {}); }
+    static void visit(ASTSelectQuery & select) { visit(select, {}); }
+    static void visit(ASTSelectWithUnionQuery & select) { visit(select, {}); }
 
 private:
     static void visit(ASTPtr & ast, const Data & data);
+    static void visit(ASTSelectQuery & ast, const Data & data);
+    static void visit(ASTSelectWithUnionQuery & ast, const Data & data);
     static void visit(ASTTableExpression & table, const Data & data);
     static void visit(ASTFunction & func, const Data & data);
 };
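The refactor above replaces one monolithic visit(ASTPtr &) with typed overloads plus a dispatch-by-node-type recursion. A standalone sketch of that visitor shape, using a toy Node/SelectNode/FunctionNode hierarchy rather than the real AST classes:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct Node
    {
        std::string kind;
        std::vector<std::shared_ptr<Node>> children;
        virtual ~Node() = default;
    };
    struct SelectNode : Node { SelectNode() { kind = "select"; } };
    struct FunctionNode : Node { FunctionNode() { kind = "function"; } };

    struct Visitor
    {
        // Dispatch on the concrete node type; otherwise recurse into children.
        static void visit(const std::shared_ptr<Node> & node)
        {
            if (auto * sel = dynamic_cast<SelectNode *>(node.get()))
                visitSelect(*sel);
            else
            {
                for (auto & child : node->children)
                    visit(child);
                if (auto * func = dynamic_cast<FunctionNode *>(node.get()))
                    visitFunction(*func);
            }
        }
        static void visitSelect(SelectNode & sel)
        {
            std::cout << "select with " << sel.children.size() << " children\n";
            for (auto & child : sel.children)
                visit(child);
        }
        static void visitFunction(FunctionNode &) { std::cout << "function\n"; }
    };

    int main()
    {
        auto root = std::make_shared<Node>();
        root->children.push_back(std::make_shared<SelectNode>());
        root->children.back()->children.push_back(std::make_shared<FunctionNode>());
        Visitor::visit(root);
    }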
@@ -16,26 +16,6 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }
 
-namespace
-{
-
-void addAndTerm(ASTPtr & ast, const ASTPtr & term)
-{
-    if (!ast)
-        ast = term;
-    else
-        ast = makeASTFunction("and", ast, term);
-}
-
-/// If this is an inner join and the expression related to less than 2 tables, then move it to WHERE
-bool canMoveToWhere(std::pair<size_t, size_t> table_numbers, ASTTableJoin::Kind kind)
-{
-    return kind == ASTTableJoin::Kind::Inner &&
-        (table_numbers.first == table_numbers.second || table_numbers.first == 0 || table_numbers.second == 0);
-}
-
-}
-
 void CollectJoinOnKeysMatcher::Data::addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast,
                                                  const std::pair<size_t, size_t> & table_no)
 {
@@ -49,7 +29,6 @@ void CollectJoinOnKeysMatcher::Data::addJoinKeys(const ASTPtr & left_ast, const
     else
         throw Exception("Cannot detect left and right JOIN keys. JOIN ON section is ambiguous.",
                         ErrorCodes::AMBIGUOUS_COLUMN_NAME);
-    if (table_no.first != table_no.second && table_no.first > 0 && table_no.second > 0)
-        has_some = true;
+    has_some = true;
 }
 
@@ -99,45 +78,22 @@ void CollectJoinOnKeysMatcher::visit(const ASTFunction & func, const ASTPtr & as
     {
         ASTPtr left = func.arguments->children.at(0);
         ASTPtr right = func.arguments->children.at(1);
-        auto table_numbers = getTableNumbers(left, right, data);
-
-        if (canMoveToWhere(table_numbers, data.kind))
-        {
-            addAndTerm(data.new_where_conditions, ast);
-        }
-        else
-        {
-            if (data.kind == ASTTableJoin::Kind::Inner)
-            {
-                addAndTerm(data.new_on_expression, ast);
-            }
-            data.addJoinKeys(left, right, table_numbers);
-        }
+        auto table_numbers = getTableNumbers(ast, left, right, data);
+        data.addJoinKeys(left, right, table_numbers);
     }
-    else if (inequality != ASOF::Inequality::None && !data.is_asof)
-    {
-        ASTPtr left = func.arguments->children.at(0);
-        ASTPtr right = func.arguments->children.at(1);
-        auto table_numbers = getTableNumbers(left, right, data);
-        if (canMoveToWhere(table_numbers, data.kind))
-        {
-            addAndTerm(data.new_where_conditions, ast);
-        }
-        else
-        {
+    else if (inequality != ASOF::Inequality::None)
+    {
+        if (!data.is_asof)
             throw Exception("JOIN ON inequalities are not supported. Unexpected '" + queryToString(ast) + "'",
                             ErrorCodes::NOT_IMPLEMENTED);
-        }
-    }
-    else if (inequality != ASOF::Inequality::None && data.is_asof)
-    {
 
         if (data.asof_left_key || data.asof_right_key)
             throw Exception("ASOF JOIN expects exactly one inequality in ON section. Unexpected '" + queryToString(ast) + "'",
                             ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
 
         ASTPtr left = func.arguments->children.at(0);
         ASTPtr right = func.arguments->children.at(1);
-        auto table_numbers = getTableNumbers(left, right, data);
+        auto table_numbers = getTableNumbers(ast, left, right, data);
 
         data.addAsofJoinKeys(left, right, table_numbers, inequality);
     }
@@ -162,8 +118,7 @@ void CollectJoinOnKeysMatcher::getIdentifiers(const ASTPtr & ast, std::vector<co
         getIdentifiers(child, out);
 }
 
-
-std::pair<size_t, size_t> CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr & left_ast, const ASTPtr & right_ast,
+std::pair<size_t, size_t> CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr & expr, const ASTPtr & left_ast, const ASTPtr & right_ast,
                                                                     Data & data)
 {
     std::vector<const ASTIdentifier *> left_identifiers;
@@ -172,13 +127,23 @@ std::pair<size_t, size_t> CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr
     getIdentifiers(left_ast, left_identifiers);
     getIdentifiers(right_ast, right_identifiers);
 
-    size_t left_idents_table = 0;
-    size_t right_idents_table = 0;
+    if (left_identifiers.empty() || right_identifiers.empty())
+    {
+        throw Exception("Not equi-join ON expression: " + queryToString(expr) + ". No columns in one of equality side.",
+                        ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
+    }
 
-    if (!left_identifiers.empty())
-        left_idents_table = getTableForIdentifiers(left_identifiers, data);
-    if (!right_identifiers.empty())
-        right_idents_table = getTableForIdentifiers(right_identifiers, data);
+    size_t left_idents_table = getTableForIdentifiers(left_identifiers, data);
+    size_t right_idents_table = getTableForIdentifiers(right_identifiers, data);
+
+    if (left_idents_table && left_idents_table == right_idents_table)
+    {
+        auto left_name = queryToString(*left_identifiers[0]);
+        auto right_name = queryToString(*right_identifiers[0]);
+
+        throw Exception("In expression " + queryToString(expr) + " columns " + left_name + " and " + right_name
+            + " are from the same table but from different arguments of equal function", ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
+    }
 
     return std::make_pair(left_idents_table, right_idents_table);
 }
@@ -5,7 +5,6 @@
 #include <Interpreters/InDepthNodeVisitor.h>
 #include <Interpreters/DatabaseAndTableWithAlias.h>
 #include <Interpreters/Aliases.h>
-#include <Parsers/ASTTablesInSelectQuery.h>
 
 
 namespace DB
@@ -31,11 +30,8 @@ public:
     const TableWithColumnNamesAndTypes & right_table;
     const Aliases & aliases;
     const bool is_asof{false};
-    ASTTableJoin::Kind kind;
     ASTPtr asof_left_key{};
     ASTPtr asof_right_key{};
-    ASTPtr new_on_expression{};
-    ASTPtr new_where_conditions{};
     bool has_some{false};
 
     void addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, const std::pair<size_t, size_t> & table_no);
@@ -61,7 +57,7 @@ private:
     static void visit(const ASTFunction & func, const ASTPtr & ast, Data & data);
 
     static void getIdentifiers(const ASTPtr & ast, std::vector<const ASTIdentifier *> & out);
-    static std::pair<size_t, size_t> getTableNumbers(const ASTPtr & left_ast, const ASTPtr & right_ast, Data & data);
+    static std::pair<size_t, size_t> getTableNumbers(const ASTPtr & expr, const ASTPtr & left_ast, const ASTPtr & right_ast, Data & data);
     static const ASTIdentifier * unrollAliases(const ASTIdentifier * identifier, const Aliases & aliases);
     static size_t getTableForIdentifiers(std::vector<const ASTIdentifier *> & identifiers, const Data & data);
 };
@@ -59,6 +59,7 @@
 #include <Interpreters/QueryLog.h>
 #include <Interpreters/addTypeConversionToAST.h>
 #include <Interpreters/FunctionNameNormalizer.h>
+#include <Interpreters/ApplyWithSubqueryVisitor.h>
 
 #include <TableFunctions/TableFunctionFactory.h>
 #include <common/logger_useful.h>
@@ -723,7 +724,7 @@ static void generateUUIDForTable(ASTCreateQuery & create)
     /// If destination table (to_table_id) is not specified for materialized view,
     /// then MV will create inner table. We should generate UUID of inner table here,
     /// so it will be the same on all hosts if query in ON CLUSTER or database engine is Replicated.
-    bool need_uuid_for_inner_table = create.is_materialized_view && !create.to_table_id;
+    bool need_uuid_for_inner_table = !create.attach && create.is_materialized_view && !create.to_table_id;
     if (need_uuid_for_inner_table && create.to_inner_uuid == UUIDHelpers::Nil)
         create.to_inner_uuid = UUIDHelpers::generateV4();
 }
@@ -890,6 +891,8 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
 
     if (create.select && create.isView())
     {
+        // Expand CTE before filling default database
+        ApplyWithSubqueryVisitor().visit(*create.select);
         AddDefaultDatabaseVisitor visitor(current_database);
         visitor.visit(*create.select);
     }
@@ -323,7 +323,7 @@ void ThreadStatus::finalizeQueryProfiler()
 
 void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
 {
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
 
     if (exit_if_already_detached && thread_state == ThreadState::DetachedFromQuery)
     {
@@ -403,13 +403,13 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul
 
 /// Find the columns that are obtained by JOIN.
 void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & select_query,
-                          const TablesWithColumns & tables, const Aliases & aliases, ASTPtr & new_where_conditions)
+                          const TablesWithColumns & tables, const Aliases & aliases)
 {
     const ASTTablesInSelectQueryElement * node = select_query.join();
     if (!node || tables.size() < 2)
         return;
 
-    auto & table_join = node->table_join->as<ASTTableJoin &>();
+    const auto & table_join = node->table_join->as<ASTTableJoin &>();
 
     if (table_join.using_expression_list)
     {
@@ -428,31 +428,14 @@ void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & sele
     {
         bool is_asof = (table_join.strictness == ASTTableJoin::Strictness::Asof);
 
-        CollectJoinOnKeysVisitor::Data data{analyzed_join, tables[0], tables[1], aliases, is_asof, table_join.kind};
+        CollectJoinOnKeysVisitor::Data data{analyzed_join, tables[0], tables[1], aliases, is_asof};
         CollectJoinOnKeysVisitor(data).visit(table_join.on_expression);
         if (!data.has_some)
             throw Exception("Cannot get JOIN keys from JOIN ON section: " + queryToString(table_join.on_expression),
                             ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
         if (is_asof)
-        {
             data.asofToJoinKeys();
-        }
-        else if (data.new_on_expression)
-        {
-            table_join.on_expression = data.new_on_expression;
-            new_where_conditions = data.new_where_conditions;
-        }
     }
 }
 
-/// Move joined key related to only one table to WHERE clause
-void moveJoinedKeyToWhere(ASTSelectQuery * select_query, ASTPtr & new_where_conditions)
-{
-    if (select_query->where())
-        select_query->setExpression(ASTSelectQuery::Expression::WHERE,
-                                    makeASTFunction("and", new_where_conditions, select_query->where()));
-    else
-        select_query->setExpression(ASTSelectQuery::Expression::WHERE, new_where_conditions->clone());
-}
-
@@ -840,11 +823,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
 
     setJoinStrictness(*select_query, settings.join_default_strictness, settings.any_join_distinct_right_table_keys,
                       result.analyzed_join->table_join);
-
-    ASTPtr new_where_condition = nullptr;
-    collectJoinedColumns(*result.analyzed_join, *select_query, tables_with_columns, result.aliases, new_where_condition);
-    if (new_where_condition)
-        moveJoinedKeyToWhere(select_query, new_where_condition);
+    collectJoinedColumns(*result.analyzed_join, *select_query, tables_with_columns, result.aliases);
 
     /// rewrite filters for select query, must go after getArrayJoinedColumns
     if (settings.optimize_respect_aliases && result.metadata_snapshot)
@@ -196,7 +196,7 @@ void WriteBufferFromHTTPServerResponse::finalize()
 WriteBufferFromHTTPServerResponse::~WriteBufferFromHTTPServerResponse()
 {
     /// FIXME move final flush into the caller
-    MemoryTracker::LockExceptionInThread lock;
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
     finalize();
 }
@@ -28,6 +28,7 @@
 #include <Poco/Util/AbstractConfiguration.h>
 #include <Common/Exception.h>
 #include <Common/Macros.h>
+#include <Common/formatReadable.h>
 #include <Common/config_version.h>
 #include <Common/setThreadName.h>
 #include <Common/typeid_cast.h>
@@ -585,6 +586,8 @@ void StorageKafka::threadFunc(size_t idx)
 
 bool StorageKafka::streamToViews()
 {
+    Stopwatch watch;
+
     auto table_id = getStorageID();
     auto table = DatabaseCatalog::instance().getTable(table_id, getContext());
     if (!table)
@@ -637,7 +640,11 @@ bool StorageKafka::streamToViews()
     // We can't cancel during copyData, as it's not aware of commits and other kafka-related stuff.
     // It will be cancelled on underlying layer (kafka buffer)
     std::atomic<bool> stub = {false};
-    copyData(*in, *block_io.out, &stub);
+    size_t rows = 0;
+    copyData(*in, *block_io.out, [&rows](const Block & block)
+    {
+        rows += block.rows();
+    }, &stub);
 
     bool some_stream_is_stalled = false;
     for (auto & stream : streams)
@@ -646,6 +653,10 @@ bool StorageKafka::streamToViews()
         stream->as<KafkaBlockInputStream>()->commit();
     }
 
+    UInt64 milliseconds = watch.elapsedMilliseconds();
+    LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.",
+        formatReadableQuantity(rows), table_id.getNameForLogs(), milliseconds);
+
     return some_stream_is_stalled;
 }
@@ -469,7 +469,7 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(
 
     /// Always calculate optimized cluster here, to avoid conditions during read()
     /// (Anyway it will be calculated in the read())
-    if (settings.optimize_skip_unused_shards)
+    if (getClusterQueriedNodes(settings, cluster) > 1 && settings.optimize_skip_unused_shards)
     {
         ClusterPtr optimized_cluster = getOptimizedCluster(local_context, metadata_snapshot, query_info.query);
         if (optimized_cluster)
@@ -76,10 +76,15 @@ StorageMaterializedView::StorageMaterializedView(
     storage_metadata.setSelectQuery(select);
     setInMemoryMetadata(storage_metadata);
 
+    bool point_to_itself_by_uuid = has_inner_table && query.to_inner_uuid != UUIDHelpers::Nil
+                                                   && query.to_inner_uuid == table_id_.uuid;
+    bool point_to_itself_by_name = !has_inner_table && query.to_table_id.database_name == table_id_.database_name
+                                                    && query.to_table_id.table_name == table_id_.table_name;
+    if (point_to_itself_by_uuid || point_to_itself_by_name)
+        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Materialized view {} cannot point to itself", table_id_.getFullTableName());
+
     if (!has_inner_table)
     {
-        if (query.to_table_id.database_name == table_id_.database_name && query.to_table_id.table_name == table_id_.table_name)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Materialized view {} cannot point to itself", table_id_.getFullTableName());
         target_table_id = query.to_table_id;
     }
     else if (attach_)
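The new constructor check above unifies two self-reference cases. A standalone sketch of the same predicate, with plain structs standing in for StorageID and the create query, and an empty string standing in for a nil UUID:

    #include <iostream>
    #include <string>

    struct TableId
    {
        std::string database;
        std::string table;
        std::string uuid;  // empty string stands in for UUIDHelpers::Nil
    };

    // Mirrors point_to_itself_by_uuid / point_to_itself_by_name: a materialized
    // view must not target itself, whether the target is resolved through the
    // inner-table UUID or through the database.table name.
    bool pointsToItself(const TableId & view, const TableId & target, bool has_inner_table)
    {
        bool by_uuid = has_inner_table && !target.uuid.empty() && target.uuid == view.uuid;
        bool by_name = !has_inner_table && target.database == view.database
                                        && target.table == view.table;
        return by_uuid || by_name;
    }

    int main()
    {
        TableId view{"default", "mv", "e15f3ab5"};
        std::cout << pointsToItself(view, view, /*has_inner_table=*/false) << '\n';  // 1: rejected
        TableId other{"default", "dst", ""};
        std::cout << pointsToItself(view, other, false) << '\n';                     // 0: allowed
    }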
@@ -26,6 +26,8 @@
         <election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
         <raft_logs_level>trace</raft_logs_level>
         <force_sync>false</force_sync>
+        <!-- we want all logs for complex problems investigation -->
+        <reserved_log_items>1000000000000000</reserved_log_items>
     </coordination_settings>
 
     <raft_configuration>
@@ -8,6 +8,8 @@
         <session_timeout_ms>30000</session_timeout_ms>
         <force_sync>false</force_sync>
         <startup_timeout>60000</startup_timeout>
+        <!-- we want all logs for complex problems investigation -->
+        <reserved_log_items>1000000000000000</reserved_log_items>
     </coordination_settings>
 
     <raft_configuration>
@@ -435,7 +435,7 @@ class ClickhouseIntegrationTestsRunner:
             time.sleep(5)
 
         logging.info("Finally all tests done, going to compress test dir")
-        test_logs = os.path.join(str(self.path()), "./test_dir.tar")
+        test_logs = os.path.join(str(self.path()), "./test_dir.tar.gz")
         self._compress_logs("{}/tests/integration".format(repo_path), test_logs)
         logging.info("Compression finished")
 
@@ -500,7 +500,7 @@ class ClickhouseIntegrationTestsRunner:
             break
 
         logging.info("Finally all tests done, going to compress test dir")
-        test_logs = os.path.join(str(self.path()), "./test_dir.tar")
+        test_logs = os.path.join(str(self.path()), "./test_dir.tar.gz")
         self._compress_logs("{}/tests/integration".format(repo_path), test_logs)
         logging.info("Compression finished")
@@ -68,6 +68,16 @@ def create_table(cluster, table_name, additional_settings=None):
     node.query(create_table_statement)
 
 
+def wait_for_delete_s3_objects(cluster, expected, timeout=30):
+    minio = cluster.minio_client
+    while timeout > 0:
+        if len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected:
+            return
+        timeout -= 1
+        time.sleep(1)
+    assert(len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected)
+
+
 @pytest.fixture(autouse=True)
 def drop_table(cluster):
     yield
@@ -75,8 +85,9 @@ def drop_table(cluster):
     minio = cluster.minio_client
 
     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+
     try:
-        assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 0
+        wait_for_delete_s3_objects(cluster, 0)
     finally:
         # Remove extra objects to prevent tests cascade failing
         for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
@@ -151,7 +162,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical):
 
     assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
     assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
-    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD
+    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD)
 
 
 def test_alter_table_columns(cluster):
@@ -167,32 +178,20 @@ def test_alter_table_columns(cluster):
     # To ensure parts have merged
     node.query("OPTIMIZE TABLE s3_test")
 
-    # Wait for merges, mutations and old parts deletion
-    time.sleep(3)
-
     assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)"
     assert node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)"
-    assert len(list(minio.list_objects(cluster.minio_bucket,
-                                       'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN
+    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN)
 
     node.query("ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2})
 
-    # Wait for old parts deletion
-    time.sleep(3)
-
     assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')"
     # and file with mutation
-    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == (
-        FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
+    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
 
     node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2})
 
-    # Wait for old parts deletion
-    time.sleep(3)
-
     # and 2 files with mutations
-    assert len(
-        list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2
+    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2)
 
 
 def test_attach_detach_partition(cluster):
@@ -320,9 +319,7 @@ def test_move_replace_partition_to_another_table(cluster):
     assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
 
     # Wait for outdated partitions deletion.
-    time.sleep(3)
-    assert len(list(
-        minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
+    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4)
 
     node.query("DROP TABLE s3_clone NO DELAY")
     assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
@@ -338,7 +335,8 @@ def test_move_replace_partition_to_another_table(cluster):
 
     node.query("DROP TABLE s3_test NO DELAY")
     # Backup data should remain in S3.
-    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 4
+
+    wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4)
 
     for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
         minio.remove_object(cluster.minio_bucket, obj.object_name)
@@ -7,7 +7,6 @@ fun:tolower
 
 # Suppress some failures in contrib so that we can enable MSan in CI.
 # Ideally, we should report these upstream.
-src:*/contrib/zlib-ng/*
 
 # Hyperscan
 fun:roseRunProgram
@@ -109,7 +109,7 @@ SELECT
     t2_00826.a,
     t2_00826.b
 FROM t1_00826
-ALL INNER JOIN t2_00826 ON (((a = t2_00826.a) AND (a = t2_00826.a)) AND (a = t2_00826.a)) AND (b = t2_00826.b)
+ALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (a = t2_00826.a) AND (a = t2_00826.a) AND (b = t2_00826.b)
 WHERE (a = t2_00826.a) AND ((a = t2_00826.a) AND ((a = t2_00826.a) AND (b = t2_00826.b)))
 --- cross split conjunction ---
 SELECT
@@ -127,7 +127,7 @@ FROM
     ) AS `--.s`
     CROSS JOIN t3
 ) AS `--.s`
-ALL INNER JOIN t4 ON ((a = `--t1.a`) AND (a = `--t2.a`)) AND (a = `--t3.a`)
+ALL INNER JOIN t4 ON (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`)
 WHERE (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`)
 SELECT `--t1.a` AS `t1.a`
 FROM
@@ -4,3 +4,8 @@
 4 0
 5 0
 6 0
+-
+1 0
+2 2 a2
+1 0
+2 2 a2
@@ -1,10 +1,35 @@
 SET joined_subquery_requires_alias = 0;
 
-select ax, c from (select [1,2] ax, 0 c) array join ax join (select 0 c) using(c);
-select ax, c from (select [3,4] ax, 0 c) join (select 0 c) using(c) array join ax;
-select ax, c from (select [5,6] ax, 0 c) s1 join system.one s2 ON s1.c = s2.dummy array join ax;
+SELECT ax, c FROM (SELECT [1,2] ax, 0 c) ARRAY JOIN ax JOIN (SELECT 0 c) USING (c);
+SELECT ax, c FROM (SELECT [3,4] ax, 0 c) JOIN (SELECT 0 c) USING (c) ARRAY JOIN ax;
+SELECT ax, c FROM (SELECT [5,6] ax, 0 c) s1 JOIN system.one s2 ON s1.c = s2.dummy ARRAY JOIN ax;
+
+
+SELECT ax, c FROM (SELECT [101,102] ax, 0 c) s1
+JOIN system.one s2 ON s1.c = s2.dummy
+JOIN system.one s3 ON s1.c = s3.dummy
+ARRAY JOIN ax; -- { serverError 48 }
+
+SELECT '-';
+
 SET joined_subquery_requires_alias = 1;
 
+DROP TABLE IF EXISTS f;
+DROP TABLE IF EXISTS d;
+
+CREATE TABLE f (`d_ids` Array(Int64) ) ENGINE = TinyLog;
+INSERT INTO f VALUES ([1, 2]);
+
+CREATE TABLE d (`id` Int64, `name` String ) ENGINE = TinyLog;
+
+INSERT INTO d VALUES (2, 'a2'), (3, 'a3');
+
+SELECT d_ids, id, name FROM f LEFT ARRAY JOIN d_ids LEFT JOIN d ON d.id = d_ids ORDER BY id;
+SELECT did, id, name FROM f LEFT ARRAY JOIN d_ids as did LEFT JOIN d ON d.id = did ORDER BY id;
+
+-- name clash, doesn't work yet
+SELECT id, name FROM f LEFT ARRAY JOIN d_ids as id LEFT JOIN d ON d.id = id ORDER BY id; -- { serverError 403 }
+
+DROP TABLE IF EXISTS f;
+DROP TABLE IF EXISTS d;
+
-select ax, c from (select [7,8] ax, 0 c) s1
-join system.one s2 ON s1.c = s2.dummy
-join system.one s3 ON s1.c = s3.dummy
-array join ax; -- { serverError 48 }
@@ -23,8 +23,6 @@ join_use_nulls = 1
 -
 \N \N
 -
-1 1 \N \N
-2 2 \N \N
 -
 1 1 1 1
 2 2 \N \N
@@ -51,8 +49,6 @@ join_use_nulls = 0
 -
 -
 -
-1 1 0 0
-2 2 0 0
 -
 1 1 1 1
 2 2 0 0
@@ -30,11 +30,11 @@ select * from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null;
 select '-';
 select s.* from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null;
 select '-';
-select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a;
+select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a; -- {serverError 403 }
 select '-';
 select t.*, s.* from t left join s on (s.a=t.a) order by t.a;
 select '-';
-select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2;
+select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2; -- {serverError 403 }
 
 select 'join_use_nulls = 0';
 set join_use_nulls = 0;
@@ -58,11 +58,11 @@ select '-';
 select '-';
 -- select s.* from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null; -- TODO
 select '-';
-select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a;
+select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a; -- {serverError 403 }
 select '-';
 select t.*, s.* from t left join s on (s.a=t.a) order by t.a;
 select '-';
-select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2;
+select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2; -- {serverError 403 }
 
 drop table t;
 drop table s;
tests/queries/0_stateless/01153_attach_mv_uuid.reference | 22 (new file)
@@ -0,0 +1,22 @@
+1 1
+2 4
+1 1
+2 4
+3 9
+4 16
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n    `n` Int32,\n    `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n    n,\n    n * n AS n2\nFROM default.src
+1 1
+2 4
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n    `n` Int32,\n    `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n    n,\n    n * n AS n2\nFROM default.src
+1 1
+2 4
+3 9
+4 16
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n    `n` Int32,\n    `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n    n,\n    n * n AS n2\nFROM default.src
+1 1
+2 4
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n    `n` Int32,\n    `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n    n,\n    n * n AS n2\nFROM default.src
+1 1
+2 4
+3 9
+4 16
tests/queries/0_stateless/01153_attach_mv_uuid.sql | 42 (new file)
@@ -0,0 +1,42 @@
+DROP TABLE IF EXISTS src;
+DROP TABLE IF EXISTS mv;
+DROP TABLE IF EXISTS ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2";
+
+CREATE TABLE src (n UInt64) ENGINE=MergeTree ORDER BY n;
+CREATE MATERIALIZED VIEW mv (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
+INSERT INTO src VALUES (1), (2);
+SELECT * FROM mv ORDER BY n;
+DETACH TABLE mv;
+ATTACH TABLE mv;
+INSERT INTO src VALUES (3), (4);
+SELECT * FROM mv ORDER BY n;
+DROP TABLE mv SYNC;
+
+SET show_table_uuid_in_table_create_query_if_not_nil=1;
+CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n;
+ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (1), (2);
+SELECT * FROM mv ORDER BY n;
+DETACH TABLE mv;
+ATTACH TABLE mv;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (3), (4);
+SELECT * FROM mv ORDER BY n;
+DROP TABLE mv SYNC;
+
+CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n;
+ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' TO INNER UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (1), (2);
+SELECT * FROM mv ORDER BY n;
+DETACH TABLE mv;
+ATTACH TABLE mv;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (3), (4);
+SELECT * FROM mv ORDER BY n;
+DROP TABLE mv SYNC;
+
+ATTACH MATERIALIZED VIEW mv UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' TO INNER UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; -- { serverError 36 }
+
+DROP TABLE src;
@ -1,140 +0,0 @@
|
||||
---------Q1----------
|
||||
2 2 2 20
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL INNER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON a = table2.a
|
||||
WHERE table2.b = toUInt32(20)
|
||||
---------Q2----------
|
||||
2 2 2 20
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL INNER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON a = table2.a
|
||||
WHERE (table2.a < table2.b) AND (table2.b = toUInt32(20))
|
||||
---------Q3----------
|
||||
---------Q4----------
|
||||
6 40
|
||||
SELECT
|
||||
a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL INNER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON a = toUInt32(10 - table2.a)
|
||||
WHERE (b = 6) AND (table2.b > 20)
|
||||
---------Q5----------
|
||||
SELECT
|
||||
a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL INNER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
WHERE 0
|
||||
) AS table2 ON a = table2.a
|
||||
WHERE 0
|
||||
---------Q6----------
|
||||
---------Q7----------
|
||||
0 0 0 0
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL INNER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON a = table2.a
|
||||
WHERE (table2.b < toUInt32(40)) AND (b < 1)
|
||||
---------Q8----------
|
||||
---------Q9---will not be optimized----------
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL LEFT JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON (a = table2.a) AND (b = toUInt32(10))
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL RIGHT JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON (a = table2.a) AND (b = toUInt32(10))
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL FULL OUTER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON (a = table2.a) AND (b = toUInt32(10))
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
ALL FULL OUTER JOIN
|
||||
(
|
||||
SELECT
|
||||
a,
|
||||
b
|
||||
FROM table2
|
||||
) AS table2 ON (a = table2.a) AND (table2.b = toUInt32(10))
|
||||
WHERE a < toUInt32(20)
|
||||
SELECT
|
||||
a,
|
||||
b,
|
||||
table2.a,
|
||||
table2.b
|
||||
FROM table1
|
||||
CROSS JOIN table2
|
@@ -1,48 +0,0 @@
-DROP TABLE IF EXISTS table1;
-DROP TABLE IF EXISTS table2;
-
-CREATE TABLE table1 (a UInt32, b UInt32) ENGINE = Memory;
-CREATE TABLE table2 (a UInt32, b UInt32) ENGINE = Memory;
-
-INSERT INTO table1 SELECT number, number FROM numbers(10);
-INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(6);
-
-SELECT '---------Q1----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b = toUInt32(20));
-EXPLAIN SYNTAX SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b = toUInt32(20));
-
-SELECT '---------Q2----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.a < table2.b) AND (table2.b = toUInt32(20));
-EXPLAIN SYNTAX SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.a < table2.b) AND (table2.b = toUInt32(20));
-
-SELECT '---------Q3----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = toUInt32(table2.a + 5)) AND (table2.a < table1.b) AND (table2.b > toUInt32(20)); -- { serverError 48 }
-
-SELECT '---------Q4----------';
-SELECT table1.a, table2.b FROM table1 INNER JOIN table2 ON (table1.a = toUInt32(10 - table2.a)) AND (table1.b = 6) AND (table2.b > 20);
-EXPLAIN SYNTAX SELECT table1.a, table2.b FROM table1 INNER JOIN table2 ON (table1.a = toUInt32(10 - table2.a)) AND (table1.b = 6) AND (table2.b > 20);
-
-SELECT '---------Q5----------';
-SELECT table1.a, table2.b FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table1.b = 6) AND (table2.b > 20) AND (10 < 6);
-EXPLAIN SYNTAX SELECT table1.a, table2.b FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table1.b = 6) AND (table2.b > 20) AND (10 < 6);
-
-SELECT '---------Q6----------';
-SELECT table1.a, table2.b FROM table1 JOIN table2 ON (table1.b = 6) AND (table2.b > 20); -- { serverError 403 }
-
-SELECT '---------Q7----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(40)) where table1.b < 1;
-EXPLAIN SYNTAX SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(40)) where table1.b < 1;
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(40)) where table1.b > 10;
-
-SELECT '---------Q8----------';
-SELECT * FROM table1 INNER JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(table1, 10)); -- { serverError 47 }
-
-SELECT '---------Q9---will not be optimized----------';
-EXPLAIN SYNTAX SELECT * FROM table1 LEFT JOIN table2 ON (table1.a = table2.a) AND (table1.b = toUInt32(10));
-EXPLAIN SYNTAX SELECT * FROM table1 RIGHT JOIN table2 ON (table1.a = table2.a) AND (table1.b = toUInt32(10));
-EXPLAIN SYNTAX SELECT * FROM table1 FULL JOIN table2 ON (table1.a = table2.a) AND (table1.b = toUInt32(10));
-EXPLAIN SYNTAX SELECT * FROM table1 FULL JOIN table2 ON (table1.a = table2.a) AND (table2.b = toUInt32(10)) WHERE table1.a < toUInt32(20);
-EXPLAIN SYNTAX SELECT * FROM table1 , table2;
-
-DROP TABLE table1;
-DROP TABLE table2;
@@ -1,3 +1,7 @@
 drop table if exists t;
 create table t engine = Memory as with cte as (select * from numbers(10)) select * from cte;
 drop table t;
+
+drop table if exists view1;
+create view view1 as with t as (select number n from numbers(3)) select n from t;
+drop table view1;
@@ -0,0 +1,3 @@
+-- remote() does not have sharding key, while force_optimize_skip_unused_shards=2 requires from table to have it.
+-- But due to only one node, everything works.
+select * from remote('127.1', system.one) settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=2 format Null;
@@ -91,6 +91,7 @@
 01125_dict_ddl_cannot_add_column
 01129_dict_get_join_lose_constness
 01138_join_on_distributed_and_tmp
+01153_attach_mv_uuid
 01191_rename_dictionary
 01200_mutations_memory_consumption
 01211_optimize_skip_unused_shards_type_mismatch
@@ -105,7 +105,8 @@
         "00604_show_create_database",
         "00609_mv_index_in_in",
         "00510_materizlized_view_and_deduplication_zookeeper",
-        "00738_lock_for_inner_table"
+        "00738_lock_for_inner_table",
+        "01153_attach_mv_uuid"
     ],
     "database-replicated": [
         "memory_tracking",
@@ -557,6 +558,7 @@
         "01135_default_and_alter_zookeeper",
         "01148_zookeeper_path_macros_unfolding",
         "01150_ddl_guard_rwr",
+        "01153_attach_mv_uuid",
         "01152_cross_replication",
         "01185_create_or_replace_table",
         "01190_full_attach_syntax",