diff --git a/base/base/cgroupsv2.cpp b/base/base/cgroupsv2.cpp
index f20b9daf22e..87f62bf377d 100644
--- a/base/base/cgroupsv2.cpp
+++ b/base/base/cgroupsv2.cpp
@@ -3,8 +3,9 @@
#include <base/defines.h>
#include <fstream>
-#include <string>
+#include <filesystem>
+namespace fs = std::filesystem;
bool cgroupsV2Enabled()
{
@@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
{
/// This file exists iff the host has cgroups v2 enabled.
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
- if (!std::filesystem::exists(controllers_file))
+ if (!fs::exists(controllers_file))
return false;
return true;
}
- catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
+ catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
{
return false; /// not logging the exception as most callers fall back to cgroups v1
}
@@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
- std::string cgroup = cgroupV2OfProcess();
- auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+ fs::path cgroup_dir = cgroupV2PathOfProcess();
+ if (cgroup_dir.empty())
+ return false;
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
if (!controllers_file.is_open())
return false;
@@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
#endif
}
-std::string cgroupV2OfProcess()
+fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
chassert(cgroupsV2Enabled());
@@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
/// A simpler way to get the membership is:
std::ifstream cgroup_name_file("/proc/self/cgroup");
if (!cgroup_name_file.is_open())
- return "";
+ return {};
/// With cgroups v2, there will be a *single* line with prefix "0::/"
/// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
std::string cgroup;
std::getline(cgroup_name_file, cgroup);
static const std::string v2_prefix = "0::/";
if (!cgroup.starts_with(v2_prefix))
- return "";
+ return {};
cgroup = cgroup.substr(v2_prefix.length());
- return cgroup;
+    /// Note: the 'root' cgroup can have an empty cgroup name; this is valid.
+ return default_cgroups_mount / cgroup;
#else
- return "";
+ return {};
#endif
}
diff --git a/base/base/cgroupsv2.h b/base/base/cgroupsv2.h
index 70219d87cd1..cfb916ff358 100644
--- a/base/base/cgroupsv2.h
+++ b/base/base/cgroupsv2.h
@@ -1,7 +1,6 @@
#pragma once
#include <filesystem>
-#include <string>
#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();
-/// Which cgroup does the process belong to?
-/// Returns an empty string if the cgroup cannot be determined.
+/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
+/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
-std::string cgroupV2OfProcess();
+std::filesystem::path cgroupV2PathOfProcess();
diff --git a/base/base/getMemoryAmount.cpp b/base/base/getMemoryAmount.cpp
index f47cba9833d..afdb6ba068a 100644
--- a/base/base/getMemoryAmount.cpp
+++ b/base/base/getMemoryAmount.cpp
@@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
if (!cgroupsV2MemoryControllerEnabled())
return {};
- std::string cgroup = cgroupV2OfProcess();
- auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+ std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
+ if (current_cgroup.empty())
+ return {};
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index c0bfc12bc75..2b535f8dd23 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -12,7 +12,7 @@ MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
-RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
+RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
diff --git a/docs/en/sql-reference/data-types/array.md b/docs/en/sql-reference/data-types/array.md
index e5a8ce5d18b..1737fdd88b2 100644
--- a/docs/en/sql-reference/data-types/array.md
+++ b/docs/en/sql-reference/data-types/array.md
@@ -96,3 +96,22 @@ Result:
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```
+
+## Reading nested subcolumns from Array
+
+If a nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type using the same subcolumn names. The type of such a subcolumn will be `Array` of the original subcolumn's type.
+
+**Example**
+
+```sql
+CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);
+SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) FROM t_arr;
+```
+
```text
+┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
+│ [1,2] │ Array(UInt32) │ ['Hello','World'] │ Array(String) │
+│ [3,4,5] │ Array(UInt32) │ ['This','is','subcolumn'] │ Array(String) │
+└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
+```
diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp
index 63788560959..f68d327fcc3 100644
--- a/src/Analyzer/Resolve/QueryAnalyzer.cpp
+++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp
@@ -3832,6 +3832,10 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(
node->convertToNullable();
break;
}
+
+    /// Check parent scopes until we find the current query scope.
+ if (scope_ptr->scope_node->getNodeType() == QueryTreeNodeType::QUERY)
+ break;
}
}
diff --git a/src/Backups/BackupIO_S3.cpp b/src/Backups/BackupIO_S3.cpp
index aef37021227..687096d0404 100644
--- a/src/Backups/BackupIO_S3.cpp
+++ b/src/Backups/BackupIO_S3.cpp
@@ -55,9 +55,9 @@ namespace
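+    /// Use the local (query context) settings rather than the global server settings here,
+    /// so that the backup/restore-specific retry setting below takes effect per operation.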
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
settings.auth_settings.region,
context->getRemoteHostFilter(),
-    static_cast<unsigned>(global_settings.s3_max_redirects),
-    static_cast<unsigned>(global_settings.s3_retry_attempts),
-    global_settings.enable_s3_requests_logging,
+    static_cast<unsigned>(local_settings.s3_max_redirects),
+    static_cast<unsigned>(local_settings.backup_restore_s3_retry_attempts),
+    local_settings.enable_s3_requests_logging,
/* for_disk_s3 = */ false,
request_settings.get_request_throttler,
request_settings.put_request_throttler,
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index 5d472ba99b9..c6a070219a3 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -80,6 +80,7 @@
#include
#include "config.h"
+
namespace fs = std::filesystem;
using namespace std::literals;
@@ -2565,12 +2566,12 @@ void ClientBase::runInteractive()
word_break_characters,
highlight_callback);
#else
+ (void)word_break_characters;
LineReader lr(
history_file,
getClientConfiguration().has("multiline"),
query_extenders,
- query_delimiters,
- word_break_characters);
+ query_delimiters);
#endif
static const std::initializer_list<std::pair<String, String>> backslash_aliases =
diff --git a/src/Client/LineReader.h b/src/Client/LineReader.h
index 0172bd7ec22..8c101401190 100644
--- a/src/Client/LineReader.h
+++ b/src/Client/LineReader.h
@@ -46,8 +46,7 @@ public:
Patterns delimiters,
std::istream & input_stream_ = std::cin,
std::ostream & output_stream_ = std::cout,
- int in_fd_ = STDIN_FILENO
- );
+ int in_fd_ = STDIN_FILENO);
virtual ~LineReader() = default;
diff --git a/src/Client/ReplxxLineReader.cpp b/src/Client/ReplxxLineReader.cpp
index 46600168695..3b3508d1a58 100644
--- a/src/Client/ReplxxLineReader.cpp
+++ b/src/Client/ReplxxLineReader.cpp
@@ -362,6 +362,9 @@ ReplxxLineReader::ReplxxLineReader(
rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
+    /// We don't want the default "suspend" behavior; it confuses people.
+ rx.bind_key_internal(replxx::Replxx::KEY::control('Z'), "insert_character");
+
auto commit_action = [this](char32_t code)
{
/// If we allow multiline and there is already something in the input, start a newline.
diff --git a/src/Common/CgroupsMemoryUsageObserver.cpp b/src/Common/CgroupsMemoryUsageObserver.cpp
index d36c7fd08aa..02bde0d80b7 100644
--- a/src/Common/CgroupsMemoryUsageObserver.cpp
+++ b/src/Common/CgroupsMemoryUsageObserver.cpp
@@ -25,6 +25,7 @@
#endif
using namespace DB;
+namespace fs = std::filesystem;
namespace DB
{
@@ -69,7 +70,7 @@ uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
struct CgroupsV1Reader : ICgroupsReader
{
- explicit CgroupsV1Reader(const std::filesystem::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
+ explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
@@ -85,7 +86,7 @@ private:
struct CgroupsV2Reader : ICgroupsReader
{
- explicit CgroupsV2Reader(const std::filesystem::path & stat_file_dir)
+ explicit CgroupsV2Reader(const fs::path & stat_file_dir)
: current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
{
}
@@ -129,8 +130,9 @@ std::optional<std::string> getCgroupsV2Path()
if (!cgroupsV2MemoryControllerEnabled())
return {};
- String cgroup = cgroupV2OfProcess();
- auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+ fs::path current_cgroup = cgroupV2PathOfProcess();
+ if (current_cgroup.empty())
+ return {};
/// Return the bottom-most nested current memory file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
@@ -138,7 +140,7 @@ std::optional getCgroupsV2Path()
{
const auto current_path = current_cgroup / "memory.current";
const auto stat_path = current_cgroup / "memory.stat";
- if (std::filesystem::exists(current_path) && std::filesystem::exists(stat_path))
+ if (fs::exists(current_path) && fs::exists(stat_path))
return {current_cgroup};
current_cgroup = current_cgroup.parent_path();
}
@@ -148,7 +150,7 @@ std::optional getCgroupsV2Path()
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
- if (!std::filesystem::exists(path))
+ if (!fs::exists(path))
return {};
return {default_cgroups_mount / "memory"};
}
diff --git a/src/Common/getNumberOfPhysicalCPUCores.cpp b/src/Common/getNumberOfPhysicalCPUCores.cpp
index 7e18a93e6ed..34a1add2f0e 100644
--- a/src/Common/getNumberOfPhysicalCPUCores.cpp
+++ b/src/Common/getNumberOfPhysicalCPUCores.cpp
@@ -37,12 +37,12 @@ uint32_t getCGroupLimitedCPUCores(unsigned default_cpu_count)
/// cgroupsv2
if (cgroupsV2Enabled())
{
- /// First, we identify the cgroup the process belongs
- std::string cgroup = cgroupV2OfProcess();
- if (cgroup.empty())
+    /// First, we identify the path of the cgroup the process belongs to
+ std::filesystem::path cgroup_path = cgroupV2PathOfProcess();
+ if (cgroup_path.empty())
return default_cpu_count;
- auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+ auto current_cgroup = cgroup_path;
// Looking for cpu.max in directories from the current cgroup to the top level
// It does not stop on the first time since the child could have a greater value than parent
@@ -62,7 +62,7 @@ uint32_t getCGroupLimitedCPUCores(unsigned default_cpu_count)
}
current_cgroup = current_cgroup.parent_path();
}
- current_cgroup = default_cgroups_mount / cgroup;
+ current_cgroup = cgroup_path;
// Looking for cpuset.cpus.effective in directories from the current cgroup to the top level
while (current_cgroup != default_cgroups_mount.parent_path())
{
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 2391c02752c..59b4d211b27 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -502,6 +502,7 @@ class IColumn;
M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \
M(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, "Maximum size of batch for multi request to [Zoo]Keeper during backup or restore", 0) \
+ M(UInt64, backup_restore_s3_retry_attempts, 1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore.", 0) \
M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
\
M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \
diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp
index f26c9a701f0..c47679cb606 100644
--- a/src/Core/SettingsChangesHistory.cpp
+++ b/src/Core/SettingsChangesHistory.cpp
@@ -78,6 +78,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory::SettingsChanges>> settings_changes_history_initializer =
+            {"backup_restore_s3_retry_attempts", 1000, 1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."},
diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp
index fa8ebd2c0f0..0c7cad4360d 100644
--- a/src/Interpreters/HashJoin/HashJoin.cpp
+++ b/src/Interpreters/HashJoin/HashJoin.cpp
@@ -125,7 +125,7 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
if (isCrossOrComma(kind))
{
data->type = Type::CROSS;
- sample_block_with_columns_to_add = right_sample_block;
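+        /// materializeBlock converts constant columns in the sample block to full columns;
+        /// CROSS JOIN appends right-side columns to result blocks as-is, so the header must not contain constants.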
+ sample_block_with_columns_to_add = materializeBlock(right_sample_block);
}
else if (table_join->getClauses().empty())
{
diff --git a/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp b/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp
index b53f694bc5a..24411620c21 100644
--- a/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp
+++ b/src/Storages/RocksDB/EmbeddedRocksDBBulkSink.cpp
@@ -226,10 +226,11 @@ void EmbeddedRocksDBBulkSink::consume(Chunk chunk_)
if (chunks_to_write.empty())
return;
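+    /// Take the chunk count here: chunks_to_write is moved out on the next statement.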
+ size_t num_chunks = chunks_to_write.size();
auto [serialized_key_column, serialized_value_column]
= storage.ttl > 0 ? serializeChunks<true>(std::move(chunks_to_write)) : serializeChunks<false>(std::move(chunks_to_write));
auto sst_file_path = getTemporarySSTFilePath();
- LOG_DEBUG(getLogger("EmbeddedRocksDBBulkSink"), "Writing {} rows to SST file {}", serialized_key_column->size(), sst_file_path);
+ LOG_DEBUG(getLogger("EmbeddedRocksDBBulkSink"), "Writing {} rows from {} chunks to SST file {}", serialized_key_column->size(), num_chunks, sst_file_path);
if (auto status = buildSSTFile(sst_file_path, *serialized_key_column, *serialized_value_column); !status.ok())
throw Exception(ErrorCodes::ROCKSDB_ERROR, "RocksDB write error: {}", status.ToString());
diff --git a/src/Storages/transformQueryForExternalDatabase.cpp b/src/Storages/transformQueryForExternalDatabase.cpp
index f1b73e939b8..251470a17a8 100644
--- a/src/Storages/transformQueryForExternalDatabase.cpp
+++ b/src/Storages/transformQueryForExternalDatabase.cpp
@@ -420,7 +420,7 @@ String transformQueryForExternalDatabase(
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "No column names for query '{}' to external table '{}.{}'",
query_info.query_tree->formatASTForErrorMessage(), database, table);
- auto clone_query = getASTForExternalDatabaseFromQueryTree(query_info.query_tree);
+ auto clone_query = getASTForExternalDatabaseFromQueryTree(query_info.query_tree, query_info.table_expression);
return transformQueryForExternalDatabaseImpl(
clone_query,
diff --git a/src/Storages/transformQueryForExternalDatabaseAnalyzer.cpp b/src/Storages/transformQueryForExternalDatabaseAnalyzer.cpp
index 5e0bfdd5f2a..dc1749b3196 100644
--- a/src/Storages/transformQueryForExternalDatabaseAnalyzer.cpp
+++ b/src/Storages/transformQueryForExternalDatabaseAnalyzer.cpp
@@ -3,6 +3,7 @@
#include
#include
+#include <Analyzer/JoinNode.h>
#include
#include
@@ -10,7 +11,7 @@
#include
#include
#include
-
+#include <Parsers/ASTSelectQuery.h>
#include
@@ -20,6 +21,7 @@ namespace DB
namespace ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
+ extern const int LOGICAL_ERROR;
}
namespace
@@ -55,7 +57,7 @@ public:
}
-ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree)
+ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree, const QueryTreeNodePtr & table_expression)
{
auto new_tree = query_tree->clone();
@@ -63,6 +65,21 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
visitor.visit(new_tree);
const auto * query_node = new_tree->as<QueryNode>();
+ const auto & join_tree = query_node->getJoinTree();
+ bool allow_where = true;
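+    /// WHERE may be pushed down to the external table only when pre-filtering cannot change
+    /// the join result: the strictness must be ALL and the external table must be on the
+    /// row-preserving side of a LEFT/RIGHT JOIN, or the join must be INNER.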
+    if (const auto * join_node = join_tree->as<JoinNode>())
+ {
+ if (join_node->getStrictness() != JoinStrictness::All)
+ allow_where = false;
+
+    else if (join_node->getKind() == JoinKind::Left)
+ allow_where = join_node->getLeftTableExpression()->isEqual(*table_expression);
+ else if (join_node->getKind() == JoinKind::Right)
+ allow_where = join_node->getRightTableExpression()->isEqual(*table_expression);
+ else
+ allow_where = (join_node->getKind() == JoinKind::Inner);
+ }
+
auto query_node_ast = query_node->toAST({ .add_cast_for_constants = false, .fully_qualified_identifiers = false });
const IAST * ast = query_node_ast.get();
@@ -76,7 +93,13 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
if (union_ast->list_of_selects->children.size() != 1)
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "QueryNode AST is not a single ASTSelectQuery, got {}", union_ast->list_of_selects->children.size());
- return union_ast->list_of_selects->children.at(0);
+ ASTPtr select_query = union_ast->list_of_selects->children.at(0);
+    auto * select_query_typed = select_query->as<ASTSelectQuery>();
+ if (!select_query_typed)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ASTSelectQuery, got {}", select_query ? select_query->formatForErrorMessage() : "nullptr");
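+    /// When pushdown is unsafe, strip WHERE from the query sent to the external database;
+    /// ClickHouse applies the filter itself after reading the data.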
+ if (!allow_where)
+ select_query_typed->setExpression(ASTSelectQuery::Expression::WHERE, nullptr);
+ return select_query;
}
}
diff --git a/src/Storages/transformQueryForExternalDatabaseAnalyzer.h b/src/Storages/transformQueryForExternalDatabaseAnalyzer.h
index f8983619d1f..7d8bf99646b 100644
--- a/src/Storages/transformQueryForExternalDatabaseAnalyzer.h
+++ b/src/Storages/transformQueryForExternalDatabaseAnalyzer.h
@@ -6,6 +6,6 @@
namespace DB
{
-ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree);
+ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree, const QueryTreeNodePtr & table_expression);
}
diff --git a/tests/integration/test_mask_sensitive_info/configs/users.xml b/tests/integration/test_mask_sensitive_info/configs/users.xml
index f129a5bb3e3..f767216e907 100644
--- a/tests/integration/test_mask_sensitive_info/configs/users.xml
+++ b/tests/integration/test_mask_sensitive_info/configs/users.xml
@@ -2,6 +2,7 @@
        <s3_retry_attempts>5</s3_retry_attempts>
+        <backup_restore_s3_retry_attempts>5</backup_restore_s3_retry_attempts>
diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py
index d9f3a9917ab..ffcff36c47e 100644
--- a/tests/integration/test_storage_postgresql/test.py
+++ b/tests/integration/test_storage_postgresql/test.py
@@ -834,6 +834,60 @@ def test_literal_escaping(started_cluster):
cursor.execute(f"DROP TABLE escaping")
+def test_filter_pushdown(started_cluster):
+ cursor = started_cluster.postgres_conn.cursor()
+ cursor.execute("CREATE SCHEMA test_filter_pushdown")
+ cursor.execute(
+ "CREATE TABLE test_filter_pushdown.test_table (id integer, value integer)"
+ )
+ cursor.execute(
+ "INSERT INTO test_filter_pushdown.test_table VALUES (1, 10), (1, 110), (2, 0), (3, 33), (4, 0)"
+ )
+
+ node1.query(
+ """
+ CREATE TABLE test_filter_pushdown_pg_table (id UInt32, value UInt32)
+ ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_table', 'postgres', 'mysecretpassword', 'test_filter_pushdown');
+ """
+ )
+
+ node1.query(
+ """
+ CREATE TABLE test_filter_pushdown_local_table (id UInt32, value UInt32) ENGINE Memory AS SELECT * FROM test_filter_pushdown_pg_table
+ """
+ )
+
+ node1.query(
+ "CREATE TABLE ch_table (id UInt32, pg_id UInt32) ENGINE MergeTree ORDER BY id"
+ )
+ node1.query("INSERT INTO ch_table VALUES (1, 1), (2, 2), (3, 1), (4, 2), (5, 999)")
+
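+    # Each query below must return the same rows from the PostgreSQL-backed table
+    # (where the filter may be pushed down) and from the local copy.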
+ def compare_results(query, **kwargs):
+ result1 = node1.query(
+ query.format(pg_table="test_filter_pushdown_pg_table", **kwargs)
+ )
+ result2 = node1.query(
+ query.format(pg_table="test_filter_pushdown_local_table", **kwargs)
+ )
+ assert result1 == result2
+
+ for kind in ["INNER", "LEFT", "RIGHT", "FULL"]:
+ for value in [0, 10]:
+ compare_results(
+ "SELECT * FROM ch_table {kind} JOIN {pg_table} as p ON ch_table.pg_id = p.id WHERE value = {value} ORDER BY ALL",
+ kind=kind,
+ value=value,
+ )
+
+ compare_results(
+ "SELECT * FROM {pg_table} as p {kind} JOIN ch_table ON ch_table.pg_id = p.id WHERE value = {value} ORDER BY ALL",
+ kind=kind,
+ value=value,
+ )
+
+ cursor.execute("DROP SCHEMA test_filter_pushdown CASCADE")
+
+
if __name__ == "__main__":
cluster.start()
input("Cluster created, press any key to destroy...")
diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh
index b81bb75891d..bff85b3e29f 100755
--- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh
+++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh
@@ -30,7 +30,7 @@ $CLICKHOUSE_CLIENT --max_block_size 1 --min_insert_block_size_rows 1 --min_inser
for _ in {1..60}; do
$CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS"
- [[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 100)) ]] && break;
+ [[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 10)) ]] && break;
sleep 1
done
diff --git a/tests/queries/0_stateless/02443_detach_attach_partition.sh b/tests/queries/0_stateless/02443_detach_attach_partition.sh
index d72d771a150..6a47b7d8d61 100755
--- a/tests/queries/0_stateless/02443_detach_attach_partition.sh
+++ b/tests/queries/0_stateless/02443_detach_attach_partition.sh
@@ -73,7 +73,7 @@ kill -TERM $PID_1 && kill -TERM $PID_2 && kill -TERM $PID_3 && kill -TERM $PID_4
wait
$CLICKHOUSE_CLIENT -q "SELECT '$CLICKHOUSE_DATABASE', 'threads finished'"
-wait_for_queries_to_finish
+wait_for_queries_to_finish 60
$CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA alter_table0"
$CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA alter_table1"
diff --git a/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.reference b/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.reference
index 63610604ddd..858fbe98838 100644
--- a/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.reference
+++ b/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.reference
@@ -264,3 +264,44 @@ SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10;
9 \N 9
\N 0 20
\N 1 25
+CREATE TABLE test
+ENGINE = ReplacingMergeTree
+PRIMARY KEY id
+AS SELECT number AS id FROM numbers(100);
+SELECT id
+FROM test
+GROUP BY id
+ WITH CUBE
+HAVING id IN (
+ SELECT id
+ FROM test
+)
+FORMAT `Null`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+SELECT id
+FROM test
+FINAL
+GROUP BY id
+ WITH CUBE
+HAVING id IN (
+ SELECT DISTINCT id
+ FROM test
+ FINAL
+)
+FORMAT `Null`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+SELECT id
+FROM test
+FINAL
+GROUP BY
+ GROUPING SETS ((id))
+ORDER BY
+ id IN (
+ SELECT DISTINCT id
+ FROM test
+ FINAL
+ LIMIT 4
+ ) ASC
+LIMIT 256 BY id
+FORMAT `Null`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls=true;
diff --git a/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql b/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql
index a4d4f2f8bc9..4ae5df9629a 100644
--- a/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql
+++ b/tests/queries/0_stateless/02535_analyzer_group_by_use_nulls.sql
@@ -83,3 +83,48 @@ GROUP BY
)
ORDER BY 1, tuple(val)
SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10;
+
+CREATE TABLE test
+ENGINE = ReplacingMergeTree
+PRIMARY KEY id
+AS SELECT number AS id FROM numbers(100);
+
+SELECT id
+FROM test
+GROUP BY id
+ WITH CUBE
+HAVING id IN (
+ SELECT id
+ FROM test
+)
+FORMAT `Null`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+
+SELECT id
+FROM test
+FINAL
+GROUP BY id
+ WITH CUBE
+HAVING id IN (
+ SELECT DISTINCT id
+ FROM test
+ FINAL
+)
+FORMAT `Null`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+
+SELECT id
+FROM test
+FINAL
+GROUP BY
+ GROUPING SETS ((id))
+ORDER BY
+ id IN (
+ SELECT DISTINCT id
+ FROM test
+ FINAL
+ LIMIT 4
+ ) ASC
+LIMIT 256 BY id
+FORMAT `Null`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls=true;
diff --git a/tests/queries/0_stateless/02956_rocksdb_bulk_sink.reference b/tests/queries/0_stateless/02956_rocksdb_bulk_sink.reference
index 74c71827e6e..1f140df1d6b 100644
--- a/tests/queries/0_stateless/02956_rocksdb_bulk_sink.reference
+++ b/tests/queries/0_stateless/02956_rocksdb_bulk_sink.reference
@@ -3,7 +3,7 @@
1000
1
1000
-2
+1
1000000
1000
0 999001
diff --git a/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh b/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh
index f7111d0afe2..95c136584f0 100755
--- a/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh
+++ b/tests/queries/0_stateless/02956_rocksdb_bulk_sink.sh
@@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
# Must set both max_threads and max_insert_threads to 2 to make sure there is only two sinks
${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS max_threads = 2, max_insert_threads = 2, max_block_size = 10000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, insert_deduplication_token = '', optimize_trivial_insert_select = 1;"
-${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be 2 because default bulk sink size is ~1M rows / SST file
+${CLICKHOUSE_CLIENT} --query "SELECT sum(value) IN (1, 2) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be no more than 2 because the default bulk sink size is ~1M rows per SST file.
${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
# Testing insert with duplicated keys
diff --git a/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql b/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql
index 988d7058f21..2b10c72ae1b 100644
--- a/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql
+++ b/tests/queries/0_stateless/03001_consider_lwd_when_merge.sql
@@ -7,12 +7,14 @@ SETTINGS max_bytes_to_merge_at_max_space_in_pool = 80000, exclude_deleted_rows_for_part_size_in_merge = 0;
INSERT INTO lwd_merge SELECT number FROM numbers(10000);
INSERT INTO lwd_merge SELECT number FROM numbers(10000, 10000);
-OPTIMIZE TABLE lwd_merge;
+SET optimize_throw_if_noop = 1;
+
+OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
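+-- Expected to fail: the parts exceed max_bytes_to_merge_at_max_space_in_pool, so no merge can be assigned.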
SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1;
DELETE FROM lwd_merge WHERE id % 10 > 0;
-OPTIMIZE TABLE lwd_merge;
+OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1;
ALTER TABLE lwd_merge MODIFY SETTING exclude_deleted_rows_for_part_size_in_merge = 1;
diff --git a/tests/queries/0_stateless/03205_column_type_check.reference b/tests/queries/0_stateless/03205_column_type_check.reference
new file mode 100644
index 00000000000..3b6c93a0610
--- /dev/null
+++ b/tests/queries/0_stateless/03205_column_type_check.reference
@@ -0,0 +1,2 @@
+1 nan 1048575 2
+1 1 1 1 1
diff --git a/tests/queries/0_stateless/03205_column_type_check.sql b/tests/queries/0_stateless/03205_column_type_check.sql
new file mode 100644
index 00000000000..ab122821eb0
--- /dev/null
+++ b/tests/queries/0_stateless/03205_column_type_check.sql
@@ -0,0 +1,7 @@
+SELECT * FROM (SELECT toUInt256(1)) AS t, (SELECT greatCircleAngle(toLowCardinality(toNullable(toUInt256(1048575))), 257, -9223372036854775808, 1048576), 1048575, materialize(2)) AS u;
+
+
+SET join_algorithm='hash';
+SET allow_experimental_join_condition=1;
+SELECT * FROM ( SELECT 1 AS a, toLowCardinality(1), 1) AS t1 CROSS JOIN (SELECT toLowCardinality(1 AS a), 1 AS b) AS t2;
+