diff --git a/CMakeLists.txt b/CMakeLists.txt
index 736a6577660..d5f389cfa99 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -290,6 +290,12 @@ if (COMPILER_GCC OR COMPILER_CLANG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
endif ()
+# -falign-functions=32 prevents random performance regressions caused by code changes, thus providing more stable
+# benchmarks.
+if (COMPILER_GCC OR COMPILER_CLANG)
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+endif ()
+
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
diff --git a/base/ext/scope_guard_safe.h b/base/ext/scope_guard_safe.h
index 7cfb3959a81..55140213572 100644
--- a/base/ext/scope_guard_safe.h
+++ b/base/ext/scope_guard_safe.h
@@ -12,7 +12,8 @@
///
/// NOTE: it should be used with caution.
#define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \
- MemoryTracker::LockExceptionInThread lock_memory_tracker; \
+ MemoryTracker::LockExceptionInThread \
+ lock_memory_tracker(VariableContext::Global); \
__VA_ARGS__; \
)
@@ -56,7 +57,8 @@
#define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \
try \
{ \
- MemoryTracker::LockExceptionInThread lock_memory_tracker; \
+ MemoryTracker::LockExceptionInThread \
+ lock_memory_tracker(VariableContext::Global); \
__VA_ARGS__; \
} \
catch (...) \
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index d05177739fe..42792784a14 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -96,14 +96,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
add_subdirectory (${INTERNAL_ZLIB_NAME})
# We should use same defines when including zlib.h as used when zlib compiled
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
- if (TARGET zlibstatic)
- target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
- endif ()
if (ARCH_AMD64 OR ARCH_AARCH64)
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
- if (TARGET zlibstatic)
- target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
- endif ()
endif ()
endif ()
diff --git a/contrib/zlib-ng b/contrib/zlib-ng
index 6fd1846c8b8..b82d3497a5a 160000
--- a/contrib/zlib-ng
+++ b/contrib/zlib-ng
@@ -1 +1 @@
-Subproject commit 6fd1846c8b8f59436fe2dd752d0f316ddbb64df6
+Subproject commit b82d3497a5afc46dec3c5d07e4b163b169f251d7
diff --git a/debian/clickhouse-common-static.install b/debian/clickhouse-common-static.install
index bd65f17ad42..087a6dbba8f 100644
--- a/debian/clickhouse-common-static.install
+++ b/debian/clickhouse-common-static.install
@@ -3,4 +3,3 @@ usr/bin/clickhouse-odbc-bridge
usr/bin/clickhouse-library-bridge
usr/bin/clickhouse-extract-from-config
usr/share/bash-completion/completions
-etc/security/limits.d/clickhouse.conf
diff --git a/debian/clickhouse-server.config b/debian/clickhouse-server.config
deleted file mode 100644
index 636ff7f4da7..00000000000
--- a/debian/clickhouse-server.config
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh -e
-
-test -f /usr/share/debconf/confmodule && . /usr/share/debconf/confmodule
-
-db_fget clickhouse-server/default-password seen || true
-password_seen="$RET"
-
-if [ "$1" = "reconfigure" ]; then
- password_seen=false
-fi
-
-if [ "$password_seen" != "true" ]; then
- db_input high clickhouse-server/default-password || true
- db_go || true
-fi
-db_go || true
diff --git a/debian/clickhouse-server.postinst b/debian/clickhouse-server.postinst
index dc876f45954..419c13e3daf 100644
--- a/debian/clickhouse-server.postinst
+++ b/debian/clickhouse-server.postinst
@@ -23,11 +23,13 @@ if [ ! -f "/etc/debian_version" ]; then
fi
if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
+
+ ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
+
if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then
# if old rc.d service present - remove it
if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then
/usr/sbin/update-rc.d clickhouse-server remove
- echo "ClickHouse init script has migrated to systemd. Please manually stop old server and restart the service: sudo killall clickhouse-server && sleep 5 && sudo service clickhouse-server restart"
fi
/bin/systemctl daemon-reload
@@ -38,10 +40,8 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
if [ -x "/usr/sbin/update-rc.d" ]; then
/usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $?
else
- echo # TODO [ "$OS" = "rhel" ] || [ "$OS" = "centos" ] || [ "$OS" = "fedora" ]
+ echo # Other OS
fi
fi
fi
-
- ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}"
fi
diff --git a/debian/clickhouse-server.preinst b/debian/clickhouse-server.preinst
deleted file mode 100644
index 3529aefa7da..00000000000
--- a/debian/clickhouse-server.preinst
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-if [ "$1" = "upgrade" ]; then
- # Return etc/cron.d/clickhouse-server to original state
- service clickhouse-server disable_cron ||:
-fi
-
-#DEBHELPER#
diff --git a/debian/clickhouse-server.prerm b/debian/clickhouse-server.prerm
deleted file mode 100644
index 02e855a7125..00000000000
--- a/debian/clickhouse-server.prerm
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-if [ "$1" = "upgrade" ] || [ "$1" = "remove" ]; then
- # Return etc/cron.d/clickhouse-server to original state
- service clickhouse-server disable_cron ||:
-fi
diff --git a/debian/clickhouse-server.templates b/debian/clickhouse-server.templates
deleted file mode 100644
index dd55824e15c..00000000000
--- a/debian/clickhouse-server.templates
+++ /dev/null
@@ -1,3 +0,0 @@
-Template: clickhouse-server/default-password
-Type: password
-Description: Enter password for default user:
diff --git a/debian/clickhouse.limits b/debian/clickhouse.limits
deleted file mode 100644
index aca44082c4e..00000000000
--- a/debian/clickhouse.limits
+++ /dev/null
@@ -1,2 +0,0 @@
-clickhouse soft nofile 262144
-clickhouse hard nofile 262144
diff --git a/debian/rules b/debian/rules
index 8eb47e95389..73d1f3d3b34 100755
--- a/debian/rules
+++ b/debian/rules
@@ -113,9 +113,6 @@ override_dh_install:
ln -sf clickhouse-server.docs debian/clickhouse-client.docs
ln -sf clickhouse-server.docs debian/clickhouse-common-static.docs
- mkdir -p $(DESTDIR)/etc/security/limits.d
- cp debian/clickhouse.limits $(DESTDIR)/etc/security/limits.d/clickhouse.conf
-
# systemd compatibility
mkdir -p $(DESTDIR)/etc/systemd/system/
cp debian/clickhouse-server.service $(DESTDIR)/etc/systemd/system/
diff --git a/debian/watch b/debian/watch
index 7ad4cedf713..ed3cab97ade 100644
--- a/debian/watch
+++ b/debian/watch
@@ -1,6 +1,6 @@
version=4
opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*)-stable\.tar\.gz%clickhouse-$1.tar.gz%" \
- https://github.com/yandex/clickhouse/tags \
+ https://github.com/ClickHouse/ClickHouse/tags \
(?:.*?/)?v?(\d[\d.]*)-stable\.tar\.gz debian uupdate
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index 20132eafb75..b475b8490f4 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -112,10 +112,13 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
fi
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
+tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
+ tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
+ tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
fi
diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp
index 3cab7a0ce96..2b0f390f709 100644
--- a/programs/install/Install.cpp
+++ b/programs/install/Install.cpp
@@ -562,20 +562,32 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
bool stdin_is_a_tty = isatty(STDIN_FILENO);
bool stdout_is_a_tty = isatty(STDOUT_FILENO);
- bool is_interactive = stdin_is_a_tty && stdout_is_a_tty;
+
+ /// dpkg or apt installers can ask for non-interactive work explicitly.
+
+ const char * debian_frontend_var = getenv("DEBIAN_FRONTEND");
+ bool noninteractive = debian_frontend_var && debian_frontend_var == std::string_view("noninteractive");
+
+ bool is_interactive = !noninteractive && stdin_is_a_tty && stdout_is_a_tty;
+
+ /// We can ask for a password even if stdin is closed/redirected, as long as /dev/tty is available.
+ bool can_ask_password = !noninteractive && stdout_is_a_tty;
if (has_password_for_default_user)
{
- fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE,
+ fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE "\n",
users_config_file.string(), users_d.string());
}
- else if (!is_interactive)
+ else if (!can_ask_password)
{
- fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE,
+ fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
users_config_file.string(), users_d.string());
}
else
{
+ /// NOTE: When installing a debian package with dpkg -i, stdin is not a terminal, but we are still able to enter a password.
+ /// More sophisticated method with /dev/tty is used inside the `readpassphrase` function.
+
char buf[1000] = {};
std::string password;
if (auto * result = readpassphrase("Enter password for default user: ", buf, sizeof(buf), 0))
@@ -603,7 +615,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
"\n";
out.sync();
out.finalize();
- fmt::print("Password for default user is saved in file {}.\n", password_file);
+ fmt::print(HILITE "Password for default user is saved in file {}." END_HILITE "\n", password_file);
#else
out << "\n"
" \n"
@@ -614,12 +626,12 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
"\n";
out.sync();
out.finalize();
- fmt::print("Password for default user is saved in plaintext in file {}.\n", password_file);
+ fmt::print(HILITE "Password for default user is saved in plaintext in file {}." END_HILITE "\n", password_file);
#endif
has_password_for_default_user = true;
}
else
- fmt::print("Password for default user is empty string. See {} and {} to change it.\n",
+ fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
users_config_file.string(), users_d.string());
}
@@ -644,7 +656,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
" This is optional. Taskstats accounting will be disabled."
" To enable taskstats accounting you may add the required capability later manually.\"",
"/tmp/test_setcap.sh", fs::canonical(main_bin_path).string());
- fmt::print(" {}\n", command);
executeScript(command);
#endif
diff --git a/programs/server/config.xml b/programs/server/config.xml
index 9c01b328290..d75163ca907 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -7,7 +7,20 @@
-->
-
+
trace
/var/log/clickhouse-server/clickhouse-server.log
/var/log/clickhouse-server/clickhouse-server.err.log
diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp
index e8a98021588..dca19eea7f2 100644
--- a/src/Common/Exception.cpp
+++ b/src/Common/Exception.cpp
@@ -150,7 +150,7 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_
///
/// And in this case the exception will not be logged, so let's block the
/// MemoryTracker until the exception will be logged.
- MemoryTracker::LockExceptionInThread lock_memory_tracker;
+ MemoryTracker::LockExceptionInThread lock_memory_tracker(VariableContext::Global);
try
{
diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp
index a584885cf0f..60fb4d06b14 100644
--- a/src/Common/MemoryTracker.cpp
+++ b/src/Common/MemoryTracker.cpp
@@ -24,8 +24,8 @@ namespace
///
/// - when it is explicitly blocked with LockExceptionInThread
///
-/// - to avoid std::terminate(), when stack unwinding is currently in progress
-/// in this thread.
+/// - when there are uncaught exception objects in the current thread
+/// (to avoid std::terminate())
///
/// NOTE: that since C++11 destructor marked with noexcept by default, and
/// this means that any throw from destructor (that is not marked with
diff --git a/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/src/DataStreams/PushingToViewsBlockOutputStream.cpp
index 56100205b0d..16baf4377e0 100644
--- a/src/DataStreams/PushingToViewsBlockOutputStream.cpp
+++ b/src/DataStreams/PushingToViewsBlockOutputStream.cpp
@@ -311,7 +311,7 @@ void PushingToViewsBlockOutputStream::writeSuffix()
UInt64 milliseconds = main_watch.elapsedMilliseconds();
if (views.size() > 1)
{
- LOG_TRACE(log, "Pushing from {} to {} views took {} ms.",
+ LOG_DEBUG(log, "Pushing from {} to {} views took {} ms.",
storage->getStorageID().getNameForLogs(), views.size(),
milliseconds);
}
diff --git a/src/DataStreams/copyData.cpp b/src/DataStreams/copyData.cpp
index a0651999034..a26052778a8 100644
--- a/src/DataStreams/copyData.cpp
+++ b/src/DataStreams/copyData.cpp
@@ -49,6 +49,16 @@ void copyDataImpl(IBlockInputStream & from, IBlockOutputStream & to, TCancelCall
to.writeSuffix();
}
+void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function & progress,
+ std::atomic * is_cancelled)
+{
+ auto is_cancelled_pred = [is_cancelled] ()
+ {
+ return isAtomicSet(is_cancelled);
+ };
+
+ copyDataImpl(from, to, is_cancelled_pred, progress);
+}
inline void doNothing(const Block &) {}
diff --git a/src/DataStreams/copyData.h b/src/DataStreams/copyData.h
index f2bce8f411b..3dc90aed37d 100644
--- a/src/DataStreams/copyData.h
+++ b/src/DataStreams/copyData.h
@@ -16,6 +16,9 @@ class Block;
*/
void copyData(IBlockInputStream & from, IBlockOutputStream & to, std::atomic * is_cancelled = nullptr);
+void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function & progress,
+ std::atomic * is_cancelled = nullptr);
+
void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function & is_cancelled);
void copyData(IBlockInputStream & from, IBlockOutputStream & to, const std::function & is_cancelled,
diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp
index a15b6bcf822..7e3fbf27c6d 100644
--- a/src/Disks/S3/registerDiskS3.cpp
+++ b/src/Disks/S3/registerDiskS3.cpp
@@ -122,7 +122,7 @@ void registerDiskS3(DiskFactory & factory)
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
client_configuration.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 10000);
- client_configuration.httpRequestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
+ client_configuration.requestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100);
client_configuration.endpointOverride = uri.endpoint;
diff --git a/src/IO/BrotliWriteBuffer.cpp b/src/IO/BrotliWriteBuffer.cpp
index 512ed5fc93f..3d0afa86360 100644
--- a/src/IO/BrotliWriteBuffer.cpp
+++ b/src/IO/BrotliWriteBuffer.cpp
@@ -50,7 +50,7 @@ BrotliWriteBuffer::BrotliWriteBuffer(std::unique_ptr out_, int comp
BrotliWriteBuffer::~BrotliWriteBuffer()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finish();
}
diff --git a/src/IO/LZMADeflatingWriteBuffer.cpp b/src/IO/LZMADeflatingWriteBuffer.cpp
index 7ea4f7945dc..29cde872241 100644
--- a/src/IO/LZMADeflatingWriteBuffer.cpp
+++ b/src/IO/LZMADeflatingWriteBuffer.cpp
@@ -50,7 +50,7 @@ LZMADeflatingWriteBuffer::LZMADeflatingWriteBuffer(
LZMADeflatingWriteBuffer::~LZMADeflatingWriteBuffer()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finish();
lzma_end(&lstr);
diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp
index 6bd6d0d36ba..6df8d5ec5ad 100644
--- a/src/IO/S3/PocoHTTPClient.cpp
+++ b/src/IO/S3/PocoHTTPClient.cpp
@@ -81,8 +81,8 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & clientConfigu
: per_request_configuration(clientConfiguration.perRequestConfiguration)
, timeouts(ConnectionTimeouts(
Poco::Timespan(clientConfiguration.connectTimeoutMs * 1000), /// connection timeout.
- Poco::Timespan(clientConfiguration.httpRequestTimeoutMs * 1000), /// send timeout.
- Poco::Timespan(clientConfiguration.httpRequestTimeoutMs * 1000) /// receive timeout.
+ Poco::Timespan(clientConfiguration.requestTimeoutMs * 1000), /// send timeout.
+ Poco::Timespan(clientConfiguration.requestTimeoutMs * 1000) /// receive timeout.
))
, remote_host_filter(clientConfiguration.remote_host_filter)
, s3_max_redirects(clientConfiguration.s3_max_redirects)
diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp
index b3a63842326..67cd7ba27d6 100644
--- a/src/IO/WriteBufferFromFile.cpp
+++ b/src/IO/WriteBufferFromFile.cpp
@@ -79,7 +79,7 @@ WriteBufferFromFile::~WriteBufferFromFile()
return;
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
next();
diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp
index bfd874ee396..cd265653bb9 100644
--- a/src/IO/WriteBufferFromFileDescriptor.cpp
+++ b/src/IO/WriteBufferFromFileDescriptor.cpp
@@ -98,7 +98,7 @@ WriteBufferFromFileDescriptor::~WriteBufferFromFileDescriptor()
}
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
next();
}
diff --git a/src/IO/WriteBufferFromOStream.cpp b/src/IO/WriteBufferFromOStream.cpp
index cf731934c93..36fbf8301c1 100644
--- a/src/IO/WriteBufferFromOStream.cpp
+++ b/src/IO/WriteBufferFromOStream.cpp
@@ -43,7 +43,7 @@ WriteBufferFromOStream::WriteBufferFromOStream(
WriteBufferFromOStream::~WriteBufferFromOStream()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
next();
}
diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp
index 45f6e96218a..7338bcabdb5 100644
--- a/src/IO/WriteBufferFromPocoSocket.cpp
+++ b/src/IO/WriteBufferFromPocoSocket.cpp
@@ -73,7 +73,7 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_
WriteBufferFromPocoSocket::~WriteBufferFromPocoSocket()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
next();
}
diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp
index 93aaf9456b5..a63e00efce4 100644
--- a/src/IO/WriteBufferFromS3.cpp
+++ b/src/IO/WriteBufferFromS3.cpp
@@ -87,7 +87,7 @@ void WriteBufferFromS3::allocateBuffer()
void WriteBufferFromS3::finalize()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finalizeImpl();
}
diff --git a/src/IO/WriteBufferFromVector.h b/src/IO/WriteBufferFromVector.h
index 6341a9b698b..0e0cd3c522b 100644
--- a/src/IO/WriteBufferFromVector.h
+++ b/src/IO/WriteBufferFromVector.h
@@ -95,7 +95,7 @@ public:
~WriteBufferFromVector() override
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finalize();
}
};
diff --git a/src/IO/WriteBufferValidUTF8.cpp b/src/IO/WriteBufferValidUTF8.cpp
index 1071ac1078d..ecdf38eae34 100644
--- a/src/IO/WriteBufferValidUTF8.cpp
+++ b/src/IO/WriteBufferValidUTF8.cpp
@@ -138,7 +138,7 @@ void WriteBufferValidUTF8::finish()
WriteBufferValidUTF8::~WriteBufferValidUTF8()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finish();
}
diff --git a/src/IO/ZlibDeflatingWriteBuffer.cpp b/src/IO/ZlibDeflatingWriteBuffer.cpp
index 7e91820f298..3cf00f627b3 100644
--- a/src/IO/ZlibDeflatingWriteBuffer.cpp
+++ b/src/IO/ZlibDeflatingWriteBuffer.cpp
@@ -49,7 +49,7 @@ ZlibDeflatingWriteBuffer::ZlibDeflatingWriteBuffer(
ZlibDeflatingWriteBuffer::~ZlibDeflatingWriteBuffer()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finish();
diff --git a/src/IO/ZstdDeflatingWriteBuffer.cpp b/src/IO/ZstdDeflatingWriteBuffer.cpp
index 5b97588b33e..08b289a2e04 100644
--- a/src/IO/ZstdDeflatingWriteBuffer.cpp
+++ b/src/IO/ZstdDeflatingWriteBuffer.cpp
@@ -31,7 +31,7 @@ ZstdDeflatingWriteBuffer::ZstdDeflatingWriteBuffer(
ZstdDeflatingWriteBuffer::~ZstdDeflatingWriteBuffer()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finish();
diff --git a/src/Interpreters/ApplyWithSubqueryVisitor.cpp b/src/Interpreters/ApplyWithSubqueryVisitor.cpp
index f3d0c82e5f1..27e0e438a71 100644
--- a/src/Interpreters/ApplyWithSubqueryVisitor.cpp
+++ b/src/Interpreters/ApplyWithSubqueryVisitor.cpp
@@ -13,28 +13,7 @@ namespace DB
void ApplyWithSubqueryVisitor::visit(ASTPtr & ast, const Data & data)
{
if (auto * node_select = ast->as())
- {
- std::optional new_data;
- if (auto with = node_select->with())
- {
- for (auto & child : with->children)
- {
- visit(child, new_data ? *new_data : data);
- if (auto * ast_with_elem = child->as())
- {
- if (!new_data)
- new_data = data;
- new_data->subqueries[ast_with_elem->name] = ast_with_elem->subquery;
- }
- }
- }
-
- for (auto & child : node_select->children)
- {
- if (child != node_select->with())
- visit(child, new_data ? *new_data : data);
- }
- }
+ visit(*node_select, data);
else
{
for (auto & child : ast->children)
@@ -46,6 +25,36 @@ void ApplyWithSubqueryVisitor::visit(ASTPtr & ast, const Data & data)
}
}
+void ApplyWithSubqueryVisitor::visit(ASTSelectQuery & ast, const Data & data)
+{
+ std::optional new_data;
+ if (auto with = ast.with())
+ {
+ for (auto & child : with->children)
+ {
+ visit(child, new_data ? *new_data : data);
+ if (auto * ast_with_elem = child->as())
+ {
+ if (!new_data)
+ new_data = data;
+ new_data->subqueries[ast_with_elem->name] = ast_with_elem->subquery;
+ }
+ }
+ }
+
+ for (auto & child : ast.children)
+ {
+ if (child != ast.with())
+ visit(child, new_data ? *new_data : data);
+ }
+}
+
+void ApplyWithSubqueryVisitor::visit(ASTSelectWithUnionQuery & ast, const Data & data)
+{
+ for (auto & child : ast.children)
+ visit(child, data);
+}
+
void ApplyWithSubqueryVisitor::visit(ASTTableExpression & table, const Data & data)
{
if (table.database_and_table_name)
diff --git a/src/Interpreters/ApplyWithSubqueryVisitor.h b/src/Interpreters/ApplyWithSubqueryVisitor.h
index ed3f9fbcd3e..3b17de1d691 100644
--- a/src/Interpreters/ApplyWithSubqueryVisitor.h
+++ b/src/Interpreters/ApplyWithSubqueryVisitor.h
@@ -7,6 +7,8 @@
namespace DB
{
class ASTFunction;
+class ASTSelectQuery;
+class ASTSelectWithUnionQuery;
struct ASTTableExpression;
class ApplyWithSubqueryVisitor
@@ -18,9 +20,13 @@ public:
};
static void visit(ASTPtr & ast) { visit(ast, {}); }
+ static void visit(ASTSelectQuery & select) { visit(select, {}); }
+ static void visit(ASTSelectWithUnionQuery & select) { visit(select, {}); }
private:
static void visit(ASTPtr & ast, const Data & data);
+ static void visit(ASTSelectQuery & ast, const Data & data);
+ static void visit(ASTSelectWithUnionQuery & ast, const Data & data);
static void visit(ASTTableExpression & table, const Data & data);
static void visit(ASTFunction & func, const Data & data);
};
diff --git a/src/Interpreters/CollectJoinOnKeysVisitor.cpp b/src/Interpreters/CollectJoinOnKeysVisitor.cpp
index a0ea27e9905..3b3fdaa65cb 100644
--- a/src/Interpreters/CollectJoinOnKeysVisitor.cpp
+++ b/src/Interpreters/CollectJoinOnKeysVisitor.cpp
@@ -16,26 +16,6 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
-namespace
-{
-
-void addAndTerm(ASTPtr & ast, const ASTPtr & term)
-{
- if (!ast)
- ast = term;
- else
- ast = makeASTFunction("and", ast, term);
-}
-
-/// If this is an inner join and the expression related to less than 2 tables, then move it to WHERE
-bool canMoveToWhere(std::pair table_numbers, ASTTableJoin::Kind kind)
-{
- return kind == ASTTableJoin::Kind::Inner &&
- (table_numbers.first == table_numbers.second || table_numbers.first == 0 || table_numbers.second == 0);
-}
-
-}
-
void CollectJoinOnKeysMatcher::Data::addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast,
const std::pair & table_no)
{
@@ -49,8 +29,7 @@ void CollectJoinOnKeysMatcher::Data::addJoinKeys(const ASTPtr & left_ast, const
else
throw Exception("Cannot detect left and right JOIN keys. JOIN ON section is ambiguous.",
ErrorCodes::AMBIGUOUS_COLUMN_NAME);
- if (table_no.first != table_no.second && table_no.first > 0 && table_no.second > 0)
- has_some = true;
+ has_some = true;
}
void CollectJoinOnKeysMatcher::Data::addAsofJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast,
@@ -99,45 +78,22 @@ void CollectJoinOnKeysMatcher::visit(const ASTFunction & func, const ASTPtr & as
{
ASTPtr left = func.arguments->children.at(0);
ASTPtr right = func.arguments->children.at(1);
- auto table_numbers = getTableNumbers(left, right, data);
-
- if (canMoveToWhere(table_numbers, data.kind))
- {
- addAndTerm(data.new_where_conditions, ast);
- }
- else
- {
- if (data.kind == ASTTableJoin::Kind::Inner)
- {
- addAndTerm(data.new_on_expression, ast);
- }
- data.addJoinKeys(left, right, table_numbers);
- }
+ auto table_numbers = getTableNumbers(ast, left, right, data);
+ data.addJoinKeys(left, right, table_numbers);
}
- else if (inequality != ASOF::Inequality::None && !data.is_asof)
+ else if (inequality != ASOF::Inequality::None)
{
- ASTPtr left = func.arguments->children.at(0);
- ASTPtr right = func.arguments->children.at(1);
- auto table_numbers = getTableNumbers(left, right, data);
- if (canMoveToWhere(table_numbers, data.kind))
- {
- addAndTerm(data.new_where_conditions, ast);
- }
- else
- {
+ if (!data.is_asof)
throw Exception("JOIN ON inequalities are not supported. Unexpected '" + queryToString(ast) + "'",
- ErrorCodes::NOT_IMPLEMENTED);
- }
- }
- else if (inequality != ASOF::Inequality::None && data.is_asof)
- {
+ ErrorCodes::NOT_IMPLEMENTED);
+
if (data.asof_left_key || data.asof_right_key)
throw Exception("ASOF JOIN expects exactly one inequality in ON section. Unexpected '" + queryToString(ast) + "'",
- ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
+ ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
ASTPtr left = func.arguments->children.at(0);
ASTPtr right = func.arguments->children.at(1);
- auto table_numbers = getTableNumbers(left, right, data);
+ auto table_numbers = getTableNumbers(ast, left, right, data);
data.addAsofJoinKeys(left, right, table_numbers, inequality);
}
@@ -162,8 +118,7 @@ void CollectJoinOnKeysMatcher::getIdentifiers(const ASTPtr & ast, std::vector CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr & left_ast, const ASTPtr & right_ast,
+std::pair CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr & expr, const ASTPtr & left_ast, const ASTPtr & right_ast,
Data & data)
{
std::vector left_identifiers;
@@ -172,13 +127,23 @@ std::pair CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr
getIdentifiers(left_ast, left_identifiers);
getIdentifiers(right_ast, right_identifiers);
- size_t left_idents_table = 0;
- size_t right_idents_table = 0;
+ if (left_identifiers.empty() || right_identifiers.empty())
+ {
+ throw Exception("Not equi-join ON expression: " + queryToString(expr) + ". No columns in one of equality side.",
+ ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
+ }
- if (!left_identifiers.empty())
- left_idents_table = getTableForIdentifiers(left_identifiers, data);
- if (!right_identifiers.empty())
- right_idents_table = getTableForIdentifiers(right_identifiers, data);
+ size_t left_idents_table = getTableForIdentifiers(left_identifiers, data);
+ size_t right_idents_table = getTableForIdentifiers(right_identifiers, data);
+
+ if (left_idents_table && left_idents_table == right_idents_table)
+ {
+ auto left_name = queryToString(*left_identifiers[0]);
+ auto right_name = queryToString(*right_identifiers[0]);
+
+ throw Exception("In expression " + queryToString(expr) + " columns " + left_name + " and " + right_name
+ + " are from the same table but from different arguments of equal function", ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
+ }
return std::make_pair(left_idents_table, right_idents_table);
}
diff --git a/src/Interpreters/CollectJoinOnKeysVisitor.h b/src/Interpreters/CollectJoinOnKeysVisitor.h
index aa2fd80d07c..54e008a114e 100644
--- a/src/Interpreters/CollectJoinOnKeysVisitor.h
+++ b/src/Interpreters/CollectJoinOnKeysVisitor.h
@@ -5,7 +5,6 @@
#include
#include
#include
-#include
namespace DB
@@ -31,11 +30,8 @@ public:
const TableWithColumnNamesAndTypes & right_table;
const Aliases & aliases;
const bool is_asof{false};
- ASTTableJoin::Kind kind;
ASTPtr asof_left_key{};
ASTPtr asof_right_key{};
- ASTPtr new_on_expression{};
- ASTPtr new_where_conditions{};
bool has_some{false};
void addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, const std::pair & table_no);
@@ -61,7 +57,7 @@ private:
static void visit(const ASTFunction & func, const ASTPtr & ast, Data & data);
static void getIdentifiers(const ASTPtr & ast, std::vector & out);
- static std::pair getTableNumbers(const ASTPtr & left_ast, const ASTPtr & right_ast, Data & data);
+ static std::pair getTableNumbers(const ASTPtr & expr, const ASTPtr & left_ast, const ASTPtr & right_ast, Data & data);
static const ASTIdentifier * unrollAliases(const ASTIdentifier * identifier, const Aliases & aliases);
static size_t getTableForIdentifiers(std::vector & identifiers, const Data & data);
};
diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
index 5d58d19ffaa..6d234f5f846 100644
--- a/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/src/Interpreters/InterpreterCreateQuery.cpp
@@ -59,6 +59,7 @@
#include
#include
#include
+#include
#include
#include
@@ -723,7 +724,7 @@ static void generateUUIDForTable(ASTCreateQuery & create)
/// If destination table (to_table_id) is not specified for materialized view,
/// then MV will create inner table. We should generate UUID of inner table here,
/// so it will be the same on all hosts if query in ON CLUSTER or database engine is Replicated.
- bool need_uuid_for_inner_table = create.is_materialized_view && !create.to_table_id;
+ bool need_uuid_for_inner_table = !create.attach && create.is_materialized_view && !create.to_table_id;
if (need_uuid_for_inner_table && create.to_inner_uuid == UUIDHelpers::Nil)
create.to_inner_uuid = UUIDHelpers::generateV4();
}
@@ -890,6 +891,8 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
if (create.select && create.isView())
{
+ // Expand CTE before filling default database
+ ApplyWithSubqueryVisitor().visit(*create.select);
AddDefaultDatabaseVisitor visitor(current_database);
visitor.visit(*create.select);
}
diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp
index ec6bdb8d526..c04534e11a1 100644
--- a/src/Interpreters/ThreadStatusExt.cpp
+++ b/src/Interpreters/ThreadStatusExt.cpp
@@ -323,7 +323,7 @@ void ThreadStatus::finalizeQueryProfiler()
void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
{
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
if (exit_if_already_detached && thread_state == ThreadState::DetachedFromQuery)
{
diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp
index 354f9d10099..eb9602b4759 100644
--- a/src/Interpreters/TreeRewriter.cpp
+++ b/src/Interpreters/TreeRewriter.cpp
@@ -403,13 +403,13 @@ void setJoinStrictness(ASTSelectQuery & select_query, JoinStrictness join_defaul
/// Find the columns that are obtained by JOIN.
void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & select_query,
- const TablesWithColumns & tables, const Aliases & aliases, ASTPtr & new_where_conditions)
+ const TablesWithColumns & tables, const Aliases & aliases)
{
const ASTTablesInSelectQueryElement * node = select_query.join();
if (!node || tables.size() < 2)
return;
- auto & table_join = node->table_join->as();
+ const auto & table_join = node->table_join->as();
if (table_join.using_expression_list)
{
@@ -428,33 +428,16 @@ void collectJoinedColumns(TableJoin & analyzed_join, const ASTSelectQuery & sele
{
bool is_asof = (table_join.strictness == ASTTableJoin::Strictness::Asof);
- CollectJoinOnKeysVisitor::Data data{analyzed_join, tables[0], tables[1], aliases, is_asof, table_join.kind};
+ CollectJoinOnKeysVisitor::Data data{analyzed_join, tables[0], tables[1], aliases, is_asof};
CollectJoinOnKeysVisitor(data).visit(table_join.on_expression);
if (!data.has_some)
throw Exception("Cannot get JOIN keys from JOIN ON section: " + queryToString(table_join.on_expression),
ErrorCodes::INVALID_JOIN_ON_EXPRESSION);
if (is_asof)
- {
data.asofToJoinKeys();
- }
- else if (data.new_on_expression)
- {
- table_join.on_expression = data.new_on_expression;
- new_where_conditions = data.new_where_conditions;
- }
}
}
-/// Move joined key related to only one table to WHERE clause
-void moveJoinedKeyToWhere(ASTSelectQuery * select_query, ASTPtr & new_where_conditions)
-{
- if (select_query->where())
- select_query->setExpression(ASTSelectQuery::Expression::WHERE,
- makeASTFunction("and", new_where_conditions, select_query->where()));
- else
- select_query->setExpression(ASTSelectQuery::Expression::WHERE, new_where_conditions->clone());
-}
-
std::vector getAggregates(ASTPtr & query, const ASTSelectQuery & select_query)
{
@@ -840,11 +823,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
setJoinStrictness(*select_query, settings.join_default_strictness, settings.any_join_distinct_right_table_keys,
result.analyzed_join->table_join);
-
- ASTPtr new_where_condition = nullptr;
- collectJoinedColumns(*result.analyzed_join, *select_query, tables_with_columns, result.aliases, new_where_condition);
- if (new_where_condition)
- moveJoinedKeyToWhere(select_query, new_where_condition);
+ collectJoinedColumns(*result.analyzed_join, *select_query, tables_with_columns, result.aliases);
/// rewrite filters for select query, must go after getArrayJoinedColumns
if (settings.optimize_respect_aliases && result.metadata_snapshot)
diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp
index 355af038da9..a4fe3649e6f 100644
--- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp
+++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp
@@ -196,7 +196,7 @@ void WriteBufferFromHTTPServerResponse::finalize()
WriteBufferFromHTTPServerResponse::~WriteBufferFromHTTPServerResponse()
{
/// FIXME move final flush into the caller
- MemoryTracker::LockExceptionInThread lock;
+ MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
finalize();
}
diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp
index e2b5ce7c325..15dd5b553b0 100644
--- a/src/Storages/Kafka/StorageKafka.cpp
+++ b/src/Storages/Kafka/StorageKafka.cpp
@@ -28,6 +28,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -585,6 +586,8 @@ void StorageKafka::threadFunc(size_t idx)
bool StorageKafka::streamToViews()
{
+ Stopwatch watch;
+
auto table_id = getStorageID();
auto table = DatabaseCatalog::instance().getTable(table_id, getContext());
if (!table)
@@ -637,7 +640,11 @@ bool StorageKafka::streamToViews()
// We can't cancel during copyData, as it's not aware of commits and other kafka-related stuff.
// It will be cancelled on underlying layer (kafka buffer)
std::atomic stub = {false};
- copyData(*in, *block_io.out, &stub);
+ size_t rows = 0;
+ copyData(*in, *block_io.out, [&rows](const Block & block)
+ {
+ rows += block.rows();
+ }, &stub);
bool some_stream_is_stalled = false;
for (auto & stream : streams)
@@ -646,6 +653,10 @@ bool StorageKafka::streamToViews()
stream->as()->commit();
}
+ UInt64 milliseconds = watch.elapsedMilliseconds();
+ LOG_DEBUG(log, "Pushing {} rows to {} took {} ms.",
+ formatReadableQuantity(rows), table_id.getNameForLogs(), milliseconds);
+
return some_stream_is_stalled;
}
diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp
index 5c82e5378e8..3d96796a79b 100644
--- a/src/Storages/StorageDistributed.cpp
+++ b/src/Storages/StorageDistributed.cpp
@@ -469,7 +469,7 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(
/// Always calculate optimized cluster here, to avoid conditions during read()
/// (Anyway it will be calculated in the read())
- if (settings.optimize_skip_unused_shards)
+ if (getClusterQueriedNodes(settings, cluster) > 1 && settings.optimize_skip_unused_shards)
{
ClusterPtr optimized_cluster = getOptimizedCluster(local_context, metadata_snapshot, query_info.query);
if (optimized_cluster)
diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp
index ffd57008e6e..89b8bc72526 100644
--- a/src/Storages/StorageMaterializedView.cpp
+++ b/src/Storages/StorageMaterializedView.cpp
@@ -76,10 +76,15 @@ StorageMaterializedView::StorageMaterializedView(
storage_metadata.setSelectQuery(select);
setInMemoryMetadata(storage_metadata);
+ bool point_to_itself_by_uuid = has_inner_table && query.to_inner_uuid != UUIDHelpers::Nil
+ && query.to_inner_uuid == table_id_.uuid;
+ bool point_to_itself_by_name = !has_inner_table && query.to_table_id.database_name == table_id_.database_name
+ && query.to_table_id.table_name == table_id_.table_name;
+ if (point_to_itself_by_uuid || point_to_itself_by_name)
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Materialized view {} cannot point to itself", table_id_.getFullTableName());
+
if (!has_inner_table)
{
- if (query.to_table_id.database_name == table_id_.database_name && query.to_table_id.table_name == table_id_.table_name)
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Materialized view {} cannot point to itself", table_id_.getFullTableName());
target_table_id = query.to_table_id;
}
else if (attach_)
diff --git a/tests/config/config.d/database_replicated.xml b/tests/config/config.d/database_replicated.xml
index 1e6871a525d..9a3b4d68ea6 100644
--- a/tests/config/config.d/database_replicated.xml
+++ b/tests/config/config.d/database_replicated.xml
@@ -26,6 +26,8 @@
4000
trace
false
+
+ 1000000000000000
diff --git a/tests/config/config.d/keeper_port.xml b/tests/config/config.d/keeper_port.xml
index c41040f1613..b21df47bc85 100644
--- a/tests/config/config.d/keeper_port.xml
+++ b/tests/config/config.d/keeper_port.xml
@@ -8,6 +8,8 @@
30000
false
60000
+
+ 1000000000000000
diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py
index 3bd30982098..a21f3d344ba 100755
--- a/tests/integration/ci-runner.py
+++ b/tests/integration/ci-runner.py
@@ -435,7 +435,7 @@ class ClickhouseIntegrationTestsRunner:
time.sleep(5)
logging.info("Finally all tests done, going to compress test dir")
- test_logs = os.path.join(str(self.path()), "./test_dir.tar")
+ test_logs = os.path.join(str(self.path()), "./test_dir.tar.gz")
self._compress_logs("{}/tests/integration".format(repo_path), test_logs)
logging.info("Compression finished")
@@ -500,7 +500,7 @@ class ClickhouseIntegrationTestsRunner:
break
logging.info("Finally all tests done, going to compress test dir")
- test_logs = os.path.join(str(self.path()), "./test_dir.tar")
+ test_logs = os.path.join(str(self.path()), "./test_dir.tar.gz")
self._compress_logs("{}/tests/integration".format(repo_path), test_logs)
logging.info("Compression finished")
diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index 3ab8d5d006b..4b685542170 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -68,6 +68,16 @@ def create_table(cluster, table_name, additional_settings=None):
node.query(create_table_statement)
+def wait_for_delete_s3_objects(cluster, expected, timeout=30):
+ minio = cluster.minio_client
+ while timeout > 0:
+ if len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected:
+ return
+ timeout -= 1
+ time.sleep(1)
+ assert(len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected)
+
+
@pytest.fixture(autouse=True)
def drop_table(cluster):
yield
@@ -75,8 +85,9 @@ def drop_table(cluster):
minio = cluster.minio_client
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+
try:
- assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 0
+ wait_for_delete_s3_objects(cluster, 0)
finally:
# Remove extra objects to prevent tests cascade failing
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
@@ -151,7 +162,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical):
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
- assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD
+ wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD)
def test_alter_table_columns(cluster):
@@ -167,32 +178,20 @@ def test_alter_table_columns(cluster):
# To ensure parts have merged
node.query("OPTIMIZE TABLE s3_test")
- # Wait for merges, mutations and old parts deletion
- time.sleep(3)
-
assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)"
- assert len(list(minio.list_objects(cluster.minio_bucket,
- 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN
+ wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN)
node.query("ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2})
- # Wait for old parts deletion
- time.sleep(3)
-
assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')"
# and file with mutation
- assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == (
- FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
+ wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2})
- # Wait for old parts deletion
- time.sleep(3)
-
# and 2 files with mutations
- assert len(
- list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2
+ wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2)
def test_attach_detach_partition(cluster):
@@ -320,9 +319,7 @@ def test_move_replace_partition_to_another_table(cluster):
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Wait for outdated partitions deletion.
- time.sleep(3)
- assert len(list(
- minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
+ wait_for_delete_s3_objects(cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4)
node.query("DROP TABLE s3_clone NO DELAY")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
@@ -338,7 +335,8 @@ def test_move_replace_partition_to_another_table(cluster):
node.query("DROP TABLE s3_test NO DELAY")
# Backup data should remain in S3.
- assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 4
+
+ wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4)
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
diff --git a/tests/msan_suppressions.txt b/tests/msan_suppressions.txt
index 4c7aeaf4a4c..cf468b0be96 100644
--- a/tests/msan_suppressions.txt
+++ b/tests/msan_suppressions.txt
@@ -7,7 +7,6 @@ fun:tolower
# Suppress some failures in contrib so that we can enable MSan in CI.
# Ideally, we should report these upstream.
-src:*/contrib/zlib-ng/*
# Hyperscan
fun:roseRunProgram
diff --git a/tests/queries/0_stateless/00826_cross_to_inner_join.reference b/tests/queries/0_stateless/00826_cross_to_inner_join.reference
index 9b630d0d391..973c5b078a3 100644
--- a/tests/queries/0_stateless/00826_cross_to_inner_join.reference
+++ b/tests/queries/0_stateless/00826_cross_to_inner_join.reference
@@ -109,7 +109,7 @@ SELECT
t2_00826.a,
t2_00826.b
FROM t1_00826
-ALL INNER JOIN t2_00826 ON (((a = t2_00826.a) AND (a = t2_00826.a)) AND (a = t2_00826.a)) AND (b = t2_00826.b)
+ALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (a = t2_00826.a) AND (a = t2_00826.a) AND (b = t2_00826.b)
WHERE (a = t2_00826.a) AND ((a = t2_00826.a) AND ((a = t2_00826.a) AND (b = t2_00826.b)))
--- cross split conjunction ---
SELECT
diff --git a/tests/queries/0_stateless/00849_multiple_comma_join_2.reference b/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
index 4db65b0b795..fc39ef13935 100644
--- a/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
+++ b/tests/queries/0_stateless/00849_multiple_comma_join_2.reference
@@ -127,7 +127,7 @@ FROM
) AS `--.s`
CROSS JOIN t3
) AS `--.s`
-ALL INNER JOIN t4 ON ((a = `--t1.a`) AND (a = `--t2.a`)) AND (a = `--t3.a`)
+ALL INNER JOIN t4 ON (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`)
WHERE (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`)
SELECT `--t1.a` AS `t1.a`
FROM
diff --git a/tests/queries/0_stateless/00855_join_with_array_join.reference b/tests/queries/0_stateless/00855_join_with_array_join.reference
index 386bde518ea..88f9253500c 100644
--- a/tests/queries/0_stateless/00855_join_with_array_join.reference
+++ b/tests/queries/0_stateless/00855_join_with_array_join.reference
@@ -4,3 +4,8 @@
4 0
5 0
6 0
+-
+1 0
+2 2 a2
+1 0
+2 2 a2
diff --git a/tests/queries/0_stateless/00855_join_with_array_join.sql b/tests/queries/0_stateless/00855_join_with_array_join.sql
index 10b03fec062..506d9479110 100644
--- a/tests/queries/0_stateless/00855_join_with_array_join.sql
+++ b/tests/queries/0_stateless/00855_join_with_array_join.sql
@@ -1,10 +1,35 @@
SET joined_subquery_requires_alias = 0;
-select ax, c from (select [1,2] ax, 0 c) array join ax join (select 0 c) using(c);
-select ax, c from (select [3,4] ax, 0 c) join (select 0 c) using(c) array join ax;
-select ax, c from (select [5,6] ax, 0 c) s1 join system.one s2 ON s1.c = s2.dummy array join ax;
+SELECT ax, c FROM (SELECT [1,2] ax, 0 c) ARRAY JOIN ax JOIN (SELECT 0 c) USING (c);
+SELECT ax, c FROM (SELECT [3,4] ax, 0 c) JOIN (SELECT 0 c) USING (c) ARRAY JOIN ax;
+SELECT ax, c FROM (SELECT [5,6] ax, 0 c) s1 JOIN system.one s2 ON s1.c = s2.dummy ARRAY JOIN ax;
+
+
+SELECT ax, c FROM (SELECT [101,102] ax, 0 c) s1
+JOIN system.one s2 ON s1.c = s2.dummy
+JOIN system.one s3 ON s1.c = s3.dummy
+ARRAY JOIN ax; -- { serverError 48 }
+
+SELECT '-';
+
+SET joined_subquery_requires_alias = 1;
+
+DROP TABLE IF EXISTS f;
+DROP TABLE IF EXISTS d;
+
+CREATE TABLE f (`d_ids` Array(Int64) ) ENGINE = TinyLog;
+INSERT INTO f VALUES ([1, 2]);
+
+CREATE TABLE d (`id` Int64, `name` String ) ENGINE = TinyLog;
+
+INSERT INTO d VALUES (2, 'a2'), (3, 'a3');
+
+SELECT d_ids, id, name FROM f LEFT ARRAY JOIN d_ids LEFT JOIN d ON d.id = d_ids ORDER BY id;
+SELECT did, id, name FROM f LEFT ARRAY JOIN d_ids as did LEFT JOIN d ON d.id = did ORDER BY id;
+
+-- name clash, doesn't work yet
+SELECT id, name FROM f LEFT ARRAY JOIN d_ids as id LEFT JOIN d ON d.id = id ORDER BY id; -- { serverError 403 }
+
+DROP TABLE IF EXISTS f;
+DROP TABLE IF EXISTS d;
-select ax, c from (select [7,8] ax, 0 c) s1
-join system.one s2 ON s1.c = s2.dummy
-join system.one s3 ON s1.c = s3.dummy
-array join ax; -- { serverError 48 }
diff --git a/tests/queries/0_stateless/00878_join_unexpected_results.reference b/tests/queries/0_stateless/00878_join_unexpected_results.reference
index 65fcbc257ca..a389cb47a96 100644
--- a/tests/queries/0_stateless/00878_join_unexpected_results.reference
+++ b/tests/queries/0_stateless/00878_join_unexpected_results.reference
@@ -23,8 +23,6 @@ join_use_nulls = 1
-
\N \N
-
-1 1 \N \N
-2 2 \N \N
-
1 1 1 1
2 2 \N \N
@@ -51,8 +49,6 @@ join_use_nulls = 0
-
-
-
-1 1 0 0
-2 2 0 0
-
1 1 1 1
2 2 0 0
diff --git a/tests/queries/0_stateless/00878_join_unexpected_results.sql b/tests/queries/0_stateless/00878_join_unexpected_results.sql
index 6f6cd6e6479..0aef5208b26 100644
--- a/tests/queries/0_stateless/00878_join_unexpected_results.sql
+++ b/tests/queries/0_stateless/00878_join_unexpected_results.sql
@@ -30,11 +30,11 @@ select * from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null;
select '-';
select s.* from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null;
select '-';
-select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a;
+select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a; -- {serverError 403 }
select '-';
select t.*, s.* from t left join s on (s.a=t.a) order by t.a;
select '-';
-select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2;
+select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2; -- {serverError 403 }
select 'join_use_nulls = 0';
set join_use_nulls = 0;
@@ -58,11 +58,11 @@ select '-';
select '-';
-- select s.* from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null; -- TODO
select '-';
-select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a;
+select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a; -- {serverError 403 }
select '-';
select t.*, s.* from t left join s on (s.a=t.a) order by t.a;
select '-';
-select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2;
+select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where s.b=2; -- {serverError 403 }
drop table t;
drop table s;
diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.reference b/tests/queries/0_stateless/01153_attach_mv_uuid.reference
new file mode 100644
index 00000000000..e37fe28e303
--- /dev/null
+++ b/tests/queries/0_stateless/01153_attach_mv_uuid.reference
@@ -0,0 +1,22 @@
+1 1
+2 4
+1 1
+2 4
+3 9
+4 16
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src
+1 1
+2 4
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src
+1 1
+2 4
+3 9
+4 16
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src
+1 1
+2 4
+CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src
+1 1
+2 4
+3 9
+4 16
diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.sql b/tests/queries/0_stateless/01153_attach_mv_uuid.sql
new file mode 100644
index 00000000000..86d768d94a7
--- /dev/null
+++ b/tests/queries/0_stateless/01153_attach_mv_uuid.sql
@@ -0,0 +1,42 @@
+DROP TABLE IF EXISTS src;
+DROP TABLE IF EXISTS mv;
+DROP TABLE IF EXISTS ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2";
+
+CREATE TABLE src (n UInt64) ENGINE=MergeTree ORDER BY n;
+CREATE MATERIALIZED VIEW mv (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
+INSERT INTO src VALUES (1), (2);
+SELECT * FROM mv ORDER BY n;
+DETACH TABLE mv;
+ATTACH TABLE mv;
+INSERT INTO src VALUES (3), (4);
+SELECT * FROM mv ORDER BY n;
+DROP TABLE mv SYNC;
+
+SET show_table_uuid_in_table_create_query_if_not_nil=1;
+CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n;
+ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (1), (2);
+SELECT * FROM mv ORDER BY n;
+DETACH TABLE mv;
+ATTACH TABLE mv;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (3), (4);
+SELECT * FROM mv ORDER BY n;
+DROP TABLE mv SYNC;
+
+CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n;
+ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' TO INNER UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (1), (2);
+SELECT * FROM mv ORDER BY n;
+DETACH TABLE mv;
+ATTACH TABLE mv;
+SHOW CREATE TABLE mv;
+INSERT INTO src VALUES (3), (4);
+SELECT * FROM mv ORDER BY n;
+DROP TABLE mv SYNC;
+
+ATTACH MATERIALIZED VIEW mv UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' TO INNER UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; -- { serverError 36 }
+
+DROP TABLE src;
diff --git a/tests/queries/0_stateless/01653_move_conditions_from_join_on_to_where.reference b/tests/queries/0_stateless/01653_move_conditions_from_join_on_to_where.reference
deleted file mode 100644
index 19487c9f942..00000000000
--- a/tests/queries/0_stateless/01653_move_conditions_from_join_on_to_where.reference
+++ /dev/null
@@ -1,140 +0,0 @@
----------Q1----------
-2 2 2 20
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL INNER JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON a = table2.a
-WHERE table2.b = toUInt32(20)
----------Q2----------
-2 2 2 20
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL INNER JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON a = table2.a
-WHERE (table2.a < table2.b) AND (table2.b = toUInt32(20))
----------Q3----------
----------Q4----------
-6 40
-SELECT
- a,
- table2.b
-FROM table1
-ALL INNER JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON a = toUInt32(10 - table2.a)
-WHERE (b = 6) AND (table2.b > 20)
----------Q5----------
-SELECT
- a,
- table2.b
-FROM table1
-ALL INNER JOIN
-(
- SELECT
- a,
- b
- FROM table2
- WHERE 0
-) AS table2 ON a = table2.a
-WHERE 0
----------Q6----------
----------Q7----------
-0 0 0 0
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL INNER JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON a = table2.a
-WHERE (table2.b < toUInt32(40)) AND (b < 1)
----------Q8----------
----------Q9---will not be optimized----------
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL LEFT JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON (a = table2.a) AND (b = toUInt32(10))
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL RIGHT JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON (a = table2.a) AND (b = toUInt32(10))
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL FULL OUTER JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON (a = table2.a) AND (b = toUInt32(10))
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-ALL FULL OUTER JOIN
-(
- SELECT
- a,
- b
- FROM table2
-) AS table2 ON (a = table2.a) AND (table2.b = toUInt32(10))
-WHERE a < toUInt32(20)
-SELECT
- a,
- b,
- table2.a,
- table2.b
-FROM table1
-CROSS JOIN table2
diff --git a/tests/queries/0_stateless/01653_move_conditions_from_join_on_to_where.sql b/tests/queries/0_stateless/01653_move_conditions_from_join_on_to_where.sql
deleted file mode 100644
index 23871a9c47c..00000000000
--- a/tests/queries/0_stateless/01653_move_conditions_from_join_on_to_where.sql
+++ /dev/null
@@ -1,48 +0,0 @@
-DROP TABLE IF EXISTS table1;
-DROP TABLE IF EXISTS table2;
-
-CREATE TABLE table1 (a UInt32, b UInt32) ENGINE = Memory;
-CREATE TABLE table2 (a UInt32, b UInt32) ENGINE = Memory;
-
-INSERT INTO table1 SELECT number, number FROM numbers(10);
-INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(6);
-
-SELECT '---------Q1----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b = toUInt32(20));
-EXPLAIN SYNTAX SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b = toUInt32(20));
-
-SELECT '---------Q2----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.a < table2.b) AND (table2.b = toUInt32(20));
-EXPLAIN SYNTAX SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.a < table2.b) AND (table2.b = toUInt32(20));
-
-SELECT '---------Q3----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = toUInt32(table2.a + 5)) AND (table2.a < table1.b) AND (table2.b > toUInt32(20)); -- { serverError 48 }
-
-SELECT '---------Q4----------';
-SELECT table1.a, table2.b FROM table1 INNER JOIN table2 ON (table1.a = toUInt32(10 - table2.a)) AND (table1.b = 6) AND (table2.b > 20);
-EXPLAIN SYNTAX SELECT table1.a, table2.b FROM table1 INNER JOIN table2 ON (table1.a = toUInt32(10 - table2.a)) AND (table1.b = 6) AND (table2.b > 20);
-
-SELECT '---------Q5----------';
-SELECT table1.a, table2.b FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table1.b = 6) AND (table2.b > 20) AND (10 < 6);
-EXPLAIN SYNTAX SELECT table1.a, table2.b FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table1.b = 6) AND (table2.b > 20) AND (10 < 6);
-
-SELECT '---------Q6----------';
-SELECT table1.a, table2.b FROM table1 JOIN table2 ON (table1.b = 6) AND (table2.b > 20); -- { serverError 403 }
-
-SELECT '---------Q7----------';
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(40)) where table1.b < 1;
-EXPLAIN SYNTAX SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(40)) where table1.b < 1;
-SELECT * FROM table1 JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(40)) where table1.b > 10;
-
-SELECT '---------Q8----------';
-SELECT * FROM table1 INNER JOIN table2 ON (table1.a = table2.a) AND (table2.b < toUInt32(table1, 10)); -- { serverError 47 }
-
-SELECT '---------Q9---will not be optimized----------';
-EXPLAIN SYNTAX SELECT * FROM table1 LEFT JOIN table2 ON (table1.a = table2.a) AND (table1.b = toUInt32(10));
-EXPLAIN SYNTAX SELECT * FROM table1 RIGHT JOIN table2 ON (table1.a = table2.a) AND (table1.b = toUInt32(10));
-EXPLAIN SYNTAX SELECT * FROM table1 FULL JOIN table2 ON (table1.a = table2.a) AND (table1.b = toUInt32(10));
-EXPLAIN SYNTAX SELECT * FROM table1 FULL JOIN table2 ON (table1.a = table2.a) AND (table2.b = toUInt32(10)) WHERE table1.a < toUInt32(20);
-EXPLAIN SYNTAX SELECT * FROM table1 , table2;
-
-DROP TABLE table1;
-DROP TABLE table2;
diff --git a/tests/queries/0_stateless/01711_cte_subquery_fix.sql b/tests/queries/0_stateless/01711_cte_subquery_fix.sql
index ddea548eada..10ad9019209 100644
--- a/tests/queries/0_stateless/01711_cte_subquery_fix.sql
+++ b/tests/queries/0_stateless/01711_cte_subquery_fix.sql
@@ -1,3 +1,7 @@
drop table if exists t;
create table t engine = Memory as with cte as (select * from numbers(10)) select * from cte;
drop table t;
+
+drop table if exists view1;
+create view view1 as with t as (select number n from numbers(3)) select n from t;
+drop table view1;
diff --git a/tests/queries/0_stateless/018001_dateDiff_DateTime64.reference b/tests/queries/0_stateless/01801_dateDiff_DateTime64.reference
similarity index 100%
rename from tests/queries/0_stateless/018001_dateDiff_DateTime64.reference
rename to tests/queries/0_stateless/01801_dateDiff_DateTime64.reference
diff --git a/tests/queries/0_stateless/018001_dateDiff_DateTime64.sql b/tests/queries/0_stateless/01801_dateDiff_DateTime64.sql
similarity index 100%
rename from tests/queries/0_stateless/018001_dateDiff_DateTime64.sql
rename to tests/queries/0_stateless/01801_dateDiff_DateTime64.sql
diff --git a/tests/queries/0_stateless/018002_formatDateTime_DateTime64_century.reference b/tests/queries/0_stateless/01802_formatDateTime_DateTime64_century.reference
similarity index 100%
rename from tests/queries/0_stateless/018002_formatDateTime_DateTime64_century.reference
rename to tests/queries/0_stateless/01802_formatDateTime_DateTime64_century.reference
diff --git a/tests/queries/0_stateless/018002_formatDateTime_DateTime64_century.sql b/tests/queries/0_stateless/01802_formatDateTime_DateTime64_century.sql
similarity index 100%
rename from tests/queries/0_stateless/018002_formatDateTime_DateTime64_century.sql
rename to tests/queries/0_stateless/01802_formatDateTime_DateTime64_century.sql
diff --git a/tests/queries/0_stateless/018002_toDateTime64_large_values.reference b/tests/queries/0_stateless/01802_toDateTime64_large_values.reference
similarity index 100%
rename from tests/queries/0_stateless/018002_toDateTime64_large_values.reference
rename to tests/queries/0_stateless/01802_toDateTime64_large_values.reference
diff --git a/tests/queries/0_stateless/018002_toDateTime64_large_values.sql b/tests/queries/0_stateless/01802_toDateTime64_large_values.sql
similarity index 100%
rename from tests/queries/0_stateless/018002_toDateTime64_large_values.sql
rename to tests/queries/0_stateless/01802_toDateTime64_large_values.sql
diff --git a/tests/queries/0_stateless/01812_optimize_skip_unused_shards_single_node.reference b/tests/queries/0_stateless/01812_optimize_skip_unused_shards_single_node.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01812_optimize_skip_unused_shards_single_node.sql b/tests/queries/0_stateless/01812_optimize_skip_unused_shards_single_node.sql
new file mode 100644
index 00000000000..c39947f2c04
--- /dev/null
+++ b/tests/queries/0_stateless/01812_optimize_skip_unused_shards_single_node.sql
@@ -0,0 +1,3 @@
+-- remote() does not have sharding key, while force_optimize_skip_unused_shards=2 requires from table to have it.
+-- But due to only one node, everything works.
+select * from remote('127.1', system.one) settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=2 format Null;
diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index 0af6f1b72c1..f435c00a989 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -91,6 +91,7 @@
01125_dict_ddl_cannot_add_column
01129_dict_get_join_lose_constness
01138_join_on_distributed_and_tmp
+01153_attach_mv_uuid
01191_rename_dictionary
01200_mutations_memory_consumption
01211_optimize_skip_unused_shards_type_mismatch
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 95d6eaade77..37b099b0b5a 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -105,7 +105,8 @@
"00604_show_create_database",
"00609_mv_index_in_in",
"00510_materizlized_view_and_deduplication_zookeeper",
- "00738_lock_for_inner_table"
+ "00738_lock_for_inner_table",
+ "01153_attach_mv_uuid"
],
"database-replicated": [
"memory_tracking",
@@ -557,6 +558,7 @@
"01135_default_and_alter_zookeeper",
"01148_zookeeper_path_macros_unfolding",
"01150_ddl_guard_rwr",
+ "01153_attach_mv_uuid",
"01152_cross_replication",
"01185_create_or_replace_table",
"01190_full_attach_syntax",