Merge branch 'master' into fix_profile_names_mismatch

commit 2684fc5b1b
Vasily Nemkov, 2022-11-25 12:16:02 +04:00 (committed by GitHub)
46 changed files with 485 additions and 93 deletions

@@ -388,6 +388,8 @@ else
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
# Turn on after 22.12
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
# it uses recently introduced settings which previous versions may not have
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:

@@ -164,7 +164,7 @@ SETTINGS index_granularity = 8192, index_granularity_bytes = 0;
<li><font face = "monospace">index_granularity</font>: explicitly set to its default value of 8192. This means that the primary index will have one entry per group of 8192 rows; for example, if the table contains 16384 rows, the index will have two entries.
</li>
<br/>
<li><font face = "monospace">index_granularity_bytes</font>: set to 0 to disable <a href="https://clickhouse.com/docs/en/whats-new/changelog/2019/#experimental-features-1" target="_blank"><font color="blue">adaptive index granularity</font></a>. Adaptive index granularity means that ClickHouse automatically creates one index entry for a group of n rows if either of the following is true:
<ul>
<li>n is less than 8192, but the combined row data size of those n rows is greater than or equal to 10 MB (the default value of index_granularity_bytes), or</li>
<li>n reaches 8192.</li>

@@ -155,7 +155,7 @@ public:
"Values for {} are expected to be Numeric, Float or Decimal, passed type {}",
getName(), value_type->getName()};
WhichDataType value_type_to_check(value_type);
WhichDataType value_type_to_check(value_type_without_nullable);
/// Do not promote Decimal because of implementation issues in this function's design
/// Currently we cannot get the result column type in the Decimal case, since we cannot get the decimal scale
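The new check classifies the value type with its Nullable wrapper stripped (the line computing value_type_without_nullable, presumably via removeNullable, is outside this hunk). A minimal standalone analogue of the idea, with std::optional standing in for Nullable:

#include <optional>
#include <type_traits>

// Strip one optional/"Nullable" layer before classifying the type.
template <typename T> struct remove_nullable { using type = T; };
template <typename T> struct remove_nullable<std::optional<T>> { using type = T; };

template <typename T>
constexpr bool is_summable_v = std::is_arithmetic_v<typename remove_nullable<T>::type>;

static_assert(is_summable_v<int>);                   // plain numeric: accepted
static_assert(is_summable_v<std::optional<double>>); // Nullable numeric: accepted after the fix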

@@ -5455,25 +5455,7 @@ void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node,
}
}
/// TODO: Special functions that can take query
/// TODO: Support qualified matchers for table function
for (auto & argument_node : table_function_node.getArguments().getNodes())
{
if (argument_node->getNodeType() == QueryTreeNodeType::MATCHER)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Matcher as table function argument is not supported {}. In scope {}",
join_tree_node->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());
}
auto * function_node = argument_node->as<FunctionNode>();
if (function_node && table_function_factory.hasNameOrAlias(function_node->getFunctionName()))
continue;
resolveExpressionNode(argument_node, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
}
resolveExpressionNodeList(table_function_node.getArgumentsNode(), scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
auto table_function_ast = table_function_node.toAST();
table_function_ptr->parseArguments(table_function_ast, scope_context);

@@ -524,11 +524,13 @@ void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t leng
size_t nested_offset = src_concrete.offsetAt(start);
size_t nested_length = src_concrete.getOffsets()[start + length - 1] - nested_offset;
Offsets & cur_offsets = getOffsets();
/// Reserve offsets beforehand to make this more exception safe (in case of MEMORY_LIMIT_EXCEEDED)
cur_offsets.reserve(cur_offsets.size() + length);
getData().insertRangeFrom(src_concrete.getData(), nested_offset, nested_length);
Offsets & cur_offsets = getOffsets();
const Offsets & src_offsets = src_concrete.getOffsets();
if (start == 0 && cur_offsets.empty())
{
cur_offsets.assign(src_offsets.begin(), src_offsets.begin() + length);
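The reserve-before-insert ordering added here (and repeated in the ColumnString hunk below) is what makes the operation safer against MEMORY_LIMIT_EXCEEDED: all destination capacity is acquired before any state is mutated. A generic standalone sketch of the pattern, using plain std::vector rather than ClickHouse's PODArray:

#include <cstddef>
#include <vector>

// Reserve first: if this allocation throws (e.g. on a memory limit),
// `dst` is left untouched. The subsequent insert cannot reallocate,
// so no element is appended unless all of them fit.
void append_range(std::vector<int> & dst, const std::vector<int> & src,
                  std::size_t start, std::size_t length)
{
    dst.reserve(dst.size() + length);
    dst.insert(dst.end(), src.begin() + start, src.begin() + start + length);
}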

@@ -124,6 +124,9 @@ void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t len
size_t nested_offset = src_concrete.offsetAt(start);
size_t nested_length = src_concrete.offsets[start + length - 1] - nested_offset;
/// Reserve offsets beforehand to make this more exception safe (in case of MEMORY_LIMIT_EXCEEDED)
offsets.reserve(offsets.size() + length);
size_t old_chars_size = chars.size();
chars.resize(old_chars_size + nested_length);
memcpy(&chars[old_chars_size], &src_concrete.chars[nested_offset], nested_length);

@@ -123,13 +123,16 @@ void ProgressIndication::writeFinalProgress()
if (progress.read_rows < 1000)
return;
std::cout << "Processed " << formatReadableQuantity(progress.read_rows) << " rows, "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes);
UInt64 processed_rows = progress.read_rows + progress.written_rows;
UInt64 processed_bytes = progress.read_bytes + progress.written_bytes;
std::cout << "Processed " << formatReadableQuantity(processed_rows) << " rows, "
<< formatReadableSizeWithDecimalSuffix(processed_bytes);
UInt64 elapsed_ns = getElapsedNanoseconds();
if (elapsed_ns)
std::cout << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.)";
std::cout << " (" << formatReadableQuantity(processed_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(processed_bytes * 1000000000.0 / elapsed_ns) << "/s.)";
else
std::cout << ". ";
}
@@ -164,16 +167,18 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
size_t prefix_size = message.count();
UInt64 processed_rows = progress.read_rows + progress.written_rows;
UInt64 processed_bytes = progress.read_bytes + progress.written_bytes;
message << indicator << " Progress: ";
message
<< formatReadableQuantity(progress.read_rows) << " rows, "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes);
<< formatReadableQuantity(processed_rows) << " rows, "
<< formatReadableSizeWithDecimalSuffix(processed_bytes);
UInt64 elapsed_ns = getElapsedNanoseconds();
if (elapsed_ns)
message << " ("
<< formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.) ";
<< formatReadableQuantity(processed_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(processed_bytes * 1000000000.0 / elapsed_ns) << "/s.) ";
else
message << ". ";
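Both writeFinalProgress and writeProgress now sum the read and written counters before computing throughput; since elapsed time is measured in nanoseconds, the rate carries the 1000000000.0 factor. A minimal sketch of that arithmetic (a hypothetical free function, not the actual ProgressIndication API):

#include <cstdint>
#include <cstdio>

// Rows/s from aggregated counters and elapsed nanoseconds; like the code
// above, skip the rate entirely when no time has been measured yet.
void print_rate(uint64_t read_rows, uint64_t written_rows, uint64_t elapsed_ns)
{
    const uint64_t processed_rows = read_rows + written_rows;
    if (elapsed_ns)
        std::printf("(%.2f rows/s.)\n", processed_rows * 1000000000.0 / elapsed_ns);
}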

@@ -1015,7 +1015,7 @@ void DatabaseCatalog::dropTableFinally(const TableMarkedAsDropped & table)
for (const auto & [disk_name, disk] : getContext()->getDisksMap())
{
String data_path = "store/" + getPathForUUID(table.table_id.uuid);
if (!disk->exists(data_path) || disk->isReadOnly())
if (disk->isReadOnly() || !disk->exists(data_path))
continue;
LOG_INFO(log, "Removing data directory {} of dropped table {} from disk {}", data_path, table.table_id.getNameForLogs(), disk_name);

@@ -13,6 +13,7 @@
# include <Interpreters/MySQL/InterpretersMySQLDDLQuery.h>
# include <Parsers/MySQL/ASTAlterQuery.h>
# include <Parsers/MySQL/ASTCreateQuery.h>
# include <Parsers/MySQL/ASTDropQuery.h>
#endif
namespace DB
@@ -44,7 +45,7 @@ BlockIO InterpreterExternalDDLQuery::execute()
if (arguments.size() != 2 || !arguments[0]->as<ASTIdentifier>() || !arguments[1]->as<ASTIdentifier>())
throw Exception("MySQL External requires two identifier arguments.", ErrorCodes::BAD_ARGUMENTS);
if (external_ddl_query.external_ddl->as<ASTDropQuery>())
if (external_ddl_query.external_ddl->as<MySQLParser::ASTDropQuery>())
return MySQLInterpreter::InterpreterMySQLDropQuery(
external_ddl_query.external_ddl, getContext(), getIdentifierName(arguments[0]),
getIdentifierName(arguments[1])).execute();

@@ -6,6 +6,7 @@
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTAlterQuery.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTIndexDeclaration.h>
#include <Parsers/MySQL/ASTCreateQuery.h>
@@ -543,15 +544,29 @@ void InterpreterDropImpl::validate(const InterpreterDropImpl::TQuery & /*query*/
ASTs InterpreterDropImpl::getRewrittenQueries(
const InterpreterDropImpl::TQuery & drop_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database)
{
const auto & database_name = resolveDatabase(drop_query.getDatabase(), mysql_database, mapped_to_database, context);
/// Skip drop database|view|dictionary
if (database_name != mapped_to_database || !drop_query.table || drop_query.is_view || drop_query.is_dictionary)
/// Skip drop database|view|dictionary|others
if (drop_query.kind != TQuery::Kind::Table)
return {};
ASTPtr rewritten_query = drop_query.clone();
rewritten_query->as<ASTDropQuery>()->setDatabase(mapped_to_database);
return ASTs{rewritten_query};
TQuery::QualifiedNames tables = drop_query.names;
ASTs rewritten_queries;
for (const auto & table: tables)
{
const auto & database_name = resolveDatabase(table.schema, mysql_database, mapped_to_database, context);
if (database_name != mapped_to_database)
continue;
auto rewritten_query = std::make_shared<ASTDropQuery>();
rewritten_query->setTable(table.shortName);
rewritten_query->setDatabase(mapped_to_database);
if (drop_query.is_truncate)
rewritten_query->kind = ASTDropQuery::Kind::Truncate;
else
rewritten_query->kind = ASTDropQuery::Kind::Drop;
rewritten_query->is_view = false;
/// To avoid failures we always set if_exists
rewritten_query->if_exists = true;
rewritten_queries.push_back(rewritten_query);
}
return rewritten_queries;
}
void InterpreterRenameImpl::validate(const InterpreterRenameImpl::TQuery & rename_query, ContextPtr /*context*/)
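In effect, a multi-table MySQL statement such as DROP TABLE t3, other_db.t4 is now split into one ClickHouse DROP (or TRUNCATE) per table that resolves to the mapped database, always with IF EXISTS set. A simplified standalone sketch of that loop, using plain strings instead of ASTs and treating an empty schema as the mapped database (the real code resolves it via resolveDatabase):

#include <iostream>
#include <string>
#include <vector>

struct QualifiedName { std::string schema, shortName; };

// One rewritten statement per table that belongs to the mapped database.
std::vector<std::string> rewriteDrop(const std::vector<QualifiedName> & names,
                                     const std::string & mapped_to_database,
                                     bool is_truncate)
{
    std::vector<std::string> rewritten;
    for (const auto & name : names)
    {
        if (!name.schema.empty() && name.schema != mapped_to_database)
            continue; // simplified stand-in for the resolveDatabase check
        rewritten.push_back(std::string(is_truncate ? "TRUNCATE" : "DROP")
                            + " TABLE IF EXISTS " + mapped_to_database + "." + name.shortName);
    }
    return rewritten;
}

int main()
{
    for (const auto & query : rewriteDrop({{"", "t3"}, {"other_db", "t4"}}, "dropddl", false))
        std::cout << query << '\n'; // only dropddl.t3 survives the filter
}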

@@ -2,11 +2,11 @@
#include <Interpreters/IInterpreter.h>
#include <Interpreters/executeQuery.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTRenameQuery.h>
#include <Parsers/IAST_fwd.h>
#include <Parsers/MySQL/ASTAlterQuery.h>
#include <Parsers/MySQL/ASTCreateQuery.h>
#include <Parsers/MySQL/ASTDropQuery.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTExpressionList.h>
@@ -17,7 +17,7 @@ namespace MySQLInterpreter
{
struct InterpreterDropImpl
{
using TQuery = ASTDropQuery;
using TQuery = MySQLParser::ASTDropQuery;
static void validate(const TQuery & query, ContextPtr context);

@@ -0,0 +1,119 @@
#include <Parsers/MySQL/ASTDropQuery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/CommonParsers.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/parseDatabaseAndTableName.h>
#include <Parsers/ExpressionListParsers.h>
namespace DB
{
namespace MySQLParser
{
ASTPtr ASTDropQuery::clone() const
{
auto res = std::make_shared<ASTDropQuery>(*this);
res->children.clear();
res->is_truncate = is_truncate;
res->if_exists = if_exists;
return res;
}
bool ParserDropQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
ParserKeyword s_drop("DROP");
ParserKeyword s_truncate("TRUNCATE");
ParserKeyword s_table("TABLE");
ParserKeyword s_database("DATABASE");
ParserKeyword s_if_exists("IF EXISTS");
ParserKeyword s_view("VIEW");
ParserKeyword s_on("ON");
ParserIdentifier name_p(false);
ParserKeyword s_event("EVENT");
ParserKeyword s_function("FUNCTION");
ParserKeyword s_index("INDEX");
ParserKeyword s_server("SERVER");
ParserKeyword s_trigger("TRIGGER");
auto query = std::make_shared<ASTDropQuery>();
node = query;
ASTDropQuery::QualifiedNames names;
bool if_exists = false;
bool is_truncate = false;
if (s_truncate.ignore(pos, expected) && s_table.ignore(pos, expected))
{
is_truncate = true;
query->kind = ASTDropQuery::Kind::Table;
ASTDropQuery::QualifiedName name;
if (parseDatabaseAndTableName(pos, expected, name.schema, name.shortName))
names.push_back(name);
else
return false;
}
else if (s_drop.ignore(pos, expected))
{
if (s_database.ignore(pos, expected))
{
query->kind = ASTDropQuery::Kind::Database;
if (s_if_exists.ignore(pos, expected))
if_exists = true;
ASTPtr database;
if (!name_p.parse(pos, database, expected))
return false;
}
else
{
if (s_view.ignore(pos, expected))
query->kind = ASTDropQuery::Kind::View;
else if (s_table.ignore(pos, expected))
query->kind = ASTDropQuery::Kind::Table;
else if (s_index.ignore(pos, expected))
{
ASTPtr index;
query->kind = ASTDropQuery::Kind::Index;
if (!(name_p.parse(pos, index, expected) && s_on.ignore(pos, expected)))
return false;
}
else if (s_event.ignore(pos, expected) || s_function.ignore(pos, expected) || s_server.ignore(pos, expected)
|| s_trigger.ignore(pos, expected))
{
query->kind = ASTDropQuery::Kind::Other;
}
else
return false;
if (s_if_exists.ignore(pos, expected))
if_exists = true;
/// Parse the comma-separated list of qualified names
auto parse_element = [&]
{
ASTDropQuery::QualifiedName element;
if (parseDatabaseAndTableName(pos, expected, element.schema, element.shortName))
{
names.emplace_back(std::move(element));
return true;
}
return false;
};
if (!ParserList::parseUtil(pos, expected, parse_element, false))
return false;
}
}
else
return false;
query->if_exists = if_exists;
query->names = names;
query->is_truncate = is_truncate;
return true;
}
}
}

@@ -0,0 +1,64 @@
#pragma once
#include <Parsers/IParserBase.h>
#include <Parsers/MySQL/ASTDeclareIndex.h>
#include <Parsers/MySQL/ASTDeclareColumn.h>
#include <Parsers/MySQL/ASTDeclareTableOptions.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}
namespace MySQLParser
{
class ASTDropQuery : public IAST
{
public:
enum Kind
{
Table,
View,
Database,
Index,
/// TRIGGER, FUNCTION, EVENT and so on; no need to support these
Other,
};
Kind kind;
struct QualifiedName
{
String schema;
String shortName;
};
using QualifiedNames = std::vector<QualifiedName>;
QualifiedNames names;
bool if_exists{false};
/// Distinguishes DROP (false) from TRUNCATE (true)
bool is_truncate{false};
ASTPtr clone() const override;
String getID(char /*delim*/) const override { return "ASTDropQuery"; }
protected:
void formatImpl(const FormatSettings & /*settings*/, FormatState & /*state*/, FormatStateStacked /*frame*/) const override
{
throw Exception("Method formatImpl is not supported by MySQLParser::ASTDropQuery.", ErrorCodes::NOT_IMPLEMENTED);
}
};
class ParserDropQuery : public IParserBase
{
protected:
const char * getName() const override { return "DROP query"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
}
}

@@ -11,6 +11,7 @@
#if USE_MYSQL
# include <Parsers/MySQL/ASTAlterQuery.h>
# include <Parsers/MySQL/ASTCreateQuery.h>
# include <Parsers/MySQL/ASTDropQuery.h>
#endif
namespace DB
@@ -43,7 +44,7 @@ bool ParserExternalDDLQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expect
if (external_ddl_query->from->name == "MySQL")
{
#if USE_MYSQL
ParserDropQuery p_drop_query;
MySQLParser::ParserDropQuery p_drop_query;
ParserRenameQuery p_rename_query;
MySQLParser::ParserAlterQuery p_alter_query;
MySQLParser::ParserCreateQuery p_create_query;

@@ -2023,8 +2023,7 @@ std::optional<std::string> getIndexExtensionFromFilesystem(const IDataPartStorag
for (auto it = data_part_storage.iterate(); it->isValid(); it->next())
{
const auto & extension = fs::path(it->name()).extension();
if (extension == getIndexExtension(false)
|| extension == getIndexExtension(true))
if (extension == getIndexExtension(true))
return extension;
}
}
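For reference, a standalone analogue of the probe above. It assumes that getIndexExtension(true) is the compressed primary-key extension (".cidx" in ClickHouse at the time, versus ".idx" for the uncompressed one), so after this change only a compressed index file short-circuits the directory scan:

#include <filesystem>
#include <optional>
#include <string>

namespace fs = std::filesystem;

// Report an index extension only if the compressed variant is present;
// ".cidx" here is an assumption about getIndexExtension(true).
std::optional<std::string> indexExtensionOnDisk(const fs::path & part_dir)
{
    for (const auto & entry : fs::directory_iterator(part_dir))
        if (entry.path().extension() == ".cidx")
            return std::string{".cidx"};
    return std::nullopt;
}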

@@ -7645,7 +7645,15 @@ void StorageReplicatedMergeTree::createTableSharedID() const
return;
}
auto zookeeper = getZooKeeper();
/// We may call getTableSharedID while the table is being shut down. If an exception happens then,
/// the restarting thread is already turned off and nobody will reconnect our ZooKeeper session.
/// In that case we use the ZooKeeper connection from the context.
ZooKeeperPtr zookeeper;
if (shutdown_called.load())
zookeeper = getZooKeeperIfTableShutDown();
else
zookeeper = getZooKeeper();
String zookeeper_table_id_path = fs::path(zookeeper_path) / "table_shared_id";
String id;
if (!zookeeper->tryGet(zookeeper_table_id_path, id))

@@ -0,0 +1,6 @@
<clickhouse>
<merge_tree>
<compress_marks>true</compress_marks>
<compress_primary_key>true</compress_primary_key>
</merge_tree>
</clickhouse>

@@ -52,6 +52,7 @@ ln -sf $SRC_PATH/config.d/enable_zero_copy_replication.xml $DEST_SERVER_PATH/con
ln -sf $SRC_PATH/config.d/nlp.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/enable_keeper_map.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/display_name.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/compressed_marks_and_index.xml $DEST_SERVER_PATH/config.d/
# Not supported with fasttest.
if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ]

@@ -1,5 +1,7 @@
<clickhouse>
<merge_tree>
<min_bytes_for_wide_part>0</min_bytes_for_wide_part>
<compress_marks>0</compress_marks>
<compress_primary_key>0</compress_primary_key>
</merge_tree>
</clickhouse>

@@ -2184,3 +2184,44 @@ def savepoint(clickhouse_node, mysql_node, mysql_host):
mysql_node.query(f"INSERT INTO {db}.t1 VALUES (2)")
mysql_node.query("ROLLBACK TO savepoint_1")
mysql_node.query("COMMIT")
def dropddl(clickhouse_node, mysql_node, mysql_host):
db = "dropddl"
clickhouse_node.query(f"DROP DATABASE IF EXISTS {db}")
mysql_node.query(f"DROP DATABASE IF EXISTS {db}")
mysql_node.query(f"CREATE DATABASE {db}")
mysql_node.query(f"CREATE TABLE {db}.t1 (a INT PRIMARY KEY, b INT)")
mysql_node.query(f"CREATE TABLE {db}.t2 (a INT PRIMARY KEY, b INT)")
mysql_node.query(f"CREATE TABLE {db}.t3 (a INT PRIMARY KEY, b INT)")
mysql_node.query(f"CREATE TABLE {db}.t4 (a INT PRIMARY KEY, b INT)")
mysql_node.query(f"CREATE VIEW {db}.v1 AS SELECT * FROM {db}.t1")
mysql_node.query(f"INSERT INTO {db}.t1(a, b) VALUES(1, 1)")
clickhouse_node.query(
f"CREATE DATABASE {db} ENGINE = MaterializeMySQL('{mysql_host}:3306', '{db}', 'root', 'clickhouse')"
)
check_query(
clickhouse_node,
f"SELECT count() FROM system.tables where database = '{db}' FORMAT TSV",
"4\n",
)
check_query(clickhouse_node, f"SELECT * FROM {db}.t1 FORMAT TSV", "1\t1\n")
mysql_node.query(f"DROP EVENT IF EXISTS {db}.event_name")
mysql_node.query(f"DROP VIEW IF EXISTS {db}.view_name")
mysql_node.query(f"DROP FUNCTION IF EXISTS {db}.function_name")
mysql_node.query(f"DROP TRIGGER IF EXISTS {db}.trigger_name")
mysql_node.query(f"DROP INDEX `PRIMARY` ON {db}.t2")
mysql_node.query(f"DROP TABLE {db}.t3")
mysql_node.query(f"DROP TABLE IF EXISTS {db}.t3,{db}.t4")
mysql_node.query(f"TRUNCATE TABLE {db}.t1")
mysql_node.query(f"INSERT INTO {db}.t2(a, b) VALUES(1, 1)")
check_query(clickhouse_node, f"SELECT * FROM {db}.t2 FORMAT TSV", "1\t1\n")
check_query(clickhouse_node, f"SELECT count() FROM {db}.t1 FORMAT TSV", "0\n")
check_query(
clickhouse_node,
f"SELECT name FROM system.tables where database = '{db}' FORMAT TSV",
"t1\nt2\n",
)
mysql_node.query(f"DROP DATABASE {db}")
clickhouse_node.query(f"DROP DATABASE {db}")

@@ -516,3 +516,10 @@ def test_savepoint_query(
):
materialize_with_ddl.savepoint(clickhouse_node, started_mysql_8_0, "mysql80")
materialize_with_ddl.savepoint(clickhouse_node, started_mysql_5_7, "mysql57")
def test_materialized_database_mysql_drop_ddl(
started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node
):
materialize_with_ddl.dropddl(clickhouse_node, started_mysql_8_0, "mysql80")
materialize_with_ddl.dropddl(clickhouse_node, started_mysql_5_7, "mysql57")

@@ -763,7 +763,7 @@ def test_cache_setting_compatibility(cluster, node_name):
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
node.query(
"CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r';"
"CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r', compress_marks=false, compress_primary_key=false;"
)
node.query(
"INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 500"

@@ -1655,7 +1655,7 @@ def test_freeze(start_cluster):
) ENGINE = MergeTree
ORDER BY tuple()
PARTITION BY toYYYYMM(d)
SETTINGS storage_policy='small_jbod_with_external'
SETTINGS storage_policy='small_jbod_with_external', compress_marks=false, compress_primary_key=false
"""
)

@@ -36,7 +36,7 @@ def partition_table_simple(started_cluster):
q(
"CREATE TABLE test.partition_simple (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) "
"ENGINE=MergeTree PARTITION BY date SAMPLE BY sample_key ORDER BY (date,x,sample_key) "
"SETTINGS index_granularity=8192, index_granularity_bytes=0"
"SETTINGS index_granularity=8192, index_granularity_bytes=0, compress_marks=false, compress_primary_key=false"
)
q("INSERT INTO test.partition_simple ( x ) VALUES ( now() )")
q("INSERT INTO test.partition_simple ( x ) VALUES ( now()+1 )")
@@ -115,7 +115,7 @@ def partition_table_complex(started_cluster):
q("DROP TABLE IF EXISTS test.partition_complex")
q(
"CREATE TABLE test.partition_complex (p Date, k Int8, v1 Int8 MATERIALIZED k + 1) "
"ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0"
"ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0, compress_marks=false, compress_primary_key=false"
)
q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(31), 1)")
q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(1), 2)")
@@ -153,7 +153,7 @@ def test_partition_complex(partition_table_complex):
def cannot_attach_active_part_table(started_cluster):
q("DROP TABLE IF EXISTS test.attach_active")
q(
"CREATE TABLE test.attach_active (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 4) ORDER BY n"
"CREATE TABLE test.attach_active (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 4) ORDER BY n SETTINGS compress_marks=false, compress_primary_key=false"
)
q("INSERT INTO test.attach_active SELECT number FROM system.numbers LIMIT 16")
@@ -181,7 +181,7 @@ def attach_check_all_parts_table(started_cluster):
q("SYSTEM STOP MERGES")
q("DROP TABLE IF EXISTS test.attach_partition")
q(
"CREATE TABLE test.attach_partition (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n"
"CREATE TABLE test.attach_partition (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n SETTINGS compress_marks=false, compress_primary_key=false"
)
q(
"INSERT INTO test.attach_partition SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8"
@@ -259,7 +259,7 @@ def drop_detached_parts_table(started_cluster):
q("SYSTEM STOP MERGES")
q("DROP TABLE IF EXISTS test.drop_detached")
q(
"CREATE TABLE test.drop_detached (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n"
"CREATE TABLE test.drop_detached (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n SETTINGS compress_marks=false, compress_primary_key=false"
)
q(
"INSERT INTO test.drop_detached SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8"
@@ -329,9 +329,15 @@ def test_drop_detached_parts(drop_detached_parts_table):
def test_system_detached_parts(drop_detached_parts_table):
q("create table sdp_0 (n int, x int) engine=MergeTree order by n")
q("create table sdp_1 (n int, x int) engine=MergeTree order by n partition by x")
q("create table sdp_2 (n int, x String) engine=MergeTree order by n partition by x")
q(
"create table sdp_0 (n int, x int) engine=MergeTree order by n SETTINGS compress_marks=false, compress_primary_key=false"
)
q(
"create table sdp_1 (n int, x int) engine=MergeTree order by n partition by x SETTINGS compress_marks=false, compress_primary_key=false"
)
q(
"create table sdp_2 (n int, x String) engine=MergeTree order by n partition by x SETTINGS compress_marks=false, compress_primary_key=false"
)
q(
"create table sdp_3 (n int, x Enum('broken' = 0, 'all' = 1)) engine=MergeTree order by n partition by x"
)
@@ -449,7 +455,9 @@ def test_system_detached_parts(drop_detached_parts_table):
def test_detached_part_dir_exists(started_cluster):
q("create table detached_part_dir_exists (n int) engine=MergeTree order by n")
q(
"create table detached_part_dir_exists (n int) engine=MergeTree order by n SETTINGS compress_marks=false, compress_primary_key=false"
)
q("insert into detached_part_dir_exists select 1") # will create all_1_1_0
q(
"alter table detached_part_dir_exists detach partition id 'all'"
@@ -488,7 +496,7 @@ def test_detached_part_dir_exists(started_cluster):
def test_make_clone_in_detached(started_cluster):
q(
"create table clone_in_detached (n int, m String) engine=ReplicatedMergeTree('/clone_in_detached', '1') order by n"
"create table clone_in_detached (n int, m String) engine=ReplicatedMergeTree('/clone_in_detached', '1') order by n SETTINGS compress_marks=false, compress_primary_key=false"
)
path = path_to_data + "data/default/clone_in_detached/"

@@ -728,7 +728,7 @@ def test_polymorphic_parts_index(start_cluster):
"""
CREATE TABLE test_index.index_compact(a UInt32, s String)
ENGINE = MergeTree ORDER BY a
SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100"""
SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100, compress_marks=false, compress_primary_key=false"""
)
node1.query(

@@ -1,26 +1,70 @@
-- { echoOn }
DROP TABLE IF EXISTS sum_map;
CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = Log;
INSERT INTO sum_map VALUES ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
SELECT * FROM sum_map ORDER BY timeslot, statusMap.status, statusMap.requests;
2000-01-01 2000-01-01 00:00:00 [1,2,3] [10,10,10]
2000-01-01 2000-01-01 00:00:00 [3,4,5] [10,10,10]
2000-01-01 2000-01-01 00:01:00 [4,5,6] [10,10,10]
2000-01-01 2000-01-01 00:01:00 [6,7,8] [10,10,10]
SELECT sumMap(statusMap.status, statusMap.requests) FROM sum_map;
([1,2,3,4,5,6,7,8],[10,10,20,20,20,20,10,10])
SELECT sumMap((statusMap.status, statusMap.requests)) FROM sum_map;
([1,2,3,4,5,6,7,8],[10,10,20,20,20,20,10,10])
SELECT sumMapMerge(s) FROM (SELECT sumMapState(statusMap.status, statusMap.requests) AS s FROM sum_map);
([1,2,3,4,5,6,7,8],[10,10,20,20,20,20,10,10])
SELECT timeslot, sumMap(statusMap.status, statusMap.requests) FROM sum_map GROUP BY timeslot ORDER BY timeslot;
2000-01-01 00:00:00 ([1,2,3,4,5],[10,10,20,10,10])
2000-01-01 00:01:00 ([4,5,6,7,8],[10,10,20,10,10])
SELECT timeslot, sumMap(statusMap.status, statusMap.requests).1, sumMap(statusMap.status, statusMap.requests).2 FROM sum_map GROUP BY timeslot ORDER BY timeslot;
2000-01-01 00:00:00 [1,2,3,4,5] [10,10,20,10,10]
2000-01-01 00:01:00 [4,5,6,7,8] [10,10,20,10,10]
SELECT sumMapFiltered([1])(statusMap.status, statusMap.requests) FROM sum_map;
([1],[10])
SELECT sumMapFiltered([1, 4, 8])(statusMap.status, statusMap.requests) FROM sum_map;
([1,4,8],[10,20,10])
DROP TABLE sum_map;
DROP TABLE IF EXISTS sum_map_overflow;
CREATE TABLE sum_map_overflow(events Array(UInt8), counts Array(UInt8)) ENGINE = Log;
INSERT INTO sum_map_overflow VALUES ([1], [255]), ([1], [2]);
SELECT sumMap(events, counts) FROM sum_map_overflow;
([1],[257])
SELECT sumMapWithOverflow(events, counts) FROM sum_map_overflow;
([1],[1])
DROP TABLE sum_map_overflow;
select sumMap(val, cnt) from ( SELECT [ CAST(1, 'UInt64') ] as val, [1] as cnt );
([1],[1])
select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Float64') ] as val, [1] as cnt );
([1],[1])
select sumMap(val, cnt) from ( SELECT [ CAST('a', 'Enum16(\'a\'=1)') ] as val, [1] as cnt );
(['a'],[1])
select sumMap(val, cnt) from ( SELECT [ CAST(1, 'DateTime(\'Asia/Istanbul\')') ] as val, [1] as cnt );
(['1970-01-01 02:00:01'],[1])
select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Date') ] as val, [1] as cnt );
(['1970-01-02'],[1])
select sumMap(val, cnt) from ( SELECT [ CAST('01234567-89ab-cdef-0123-456789abcdef', 'UUID') ] as val, [1] as cnt );
(['01234567-89ab-cdef-0123-456789abcdef'],[1])
select sumMap(val, cnt) from ( SELECT [ CAST(1.01, 'Decimal(10,2)') ] as val, [1] as cnt );
([1.01],[1])
select sumMap(val, cnt) from ( SELECT [ CAST('a', 'FixedString(1)'), CAST('b', 'FixedString(1)' ) ] as val, [1, 2] as cnt );
(['a','b'],[1,2])
select sumMap(val, cnt) from ( SELECT [ CAST('abc', 'String'), CAST('ab', 'String'), CAST('a', 'String') ] as val, [1, 2, 3] as cnt );
(['a','ab','abc'],[3,2,1])
DROP TABLE IF EXISTS sum_map_decimal;
CREATE TABLE sum_map_decimal(
statusMap Nested(
goal_id UInt16,
revenue Decimal32(5)
)
) ENGINE = Log;
INSERT INTO sum_map_decimal VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]);
SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8])
SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8])
DROP TABLE sum_map_decimal;
CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) engine=Log;
INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]);
SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable;
([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8])
DROP TABLE sum_map_decimal_nullable;

@@ -1,5 +1,6 @@
SET send_logs_level = 'fatal';
-- { echoOn }
DROP TABLE IF EXISTS sum_map;
CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = Log;
@@ -54,3 +55,8 @@ SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
DROP TABLE sum_map_decimal;
CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) engine=Log;
INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]);
SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable;
DROP TABLE sum_map_decimal_nullable;

@@ -12,7 +12,7 @@ CREATE TABLE check_system_tables
ORDER BY name1
PARTITION BY name2
SAMPLE BY name1
SETTINGS min_bytes_for_wide_part = 0;
SETTINGS min_bytes_for_wide_part = 0, compress_marks=false, compress_primary_key=false;
SELECT name, partition_key, sorting_key, primary_key, sampling_key, storage_policy, total_rows
FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase()
@@ -36,7 +36,8 @@ CREATE TABLE check_system_tables
sign Int8
) ENGINE = VersionedCollapsingMergeTree(sign, version)
PARTITION BY date
ORDER BY date;
ORDER BY date
SETTINGS compress_marks=false, compress_primary_key=false;
SELECT name, partition_key, sorting_key, primary_key, sampling_key
FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase()

@@ -9,12 +9,12 @@ DROP TABLE IF EXISTS default_codec_synthetic;
CREATE TABLE delta_codec_synthetic
(
id UInt64 Codec(Delta, ZSTD(3))
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
CREATE TABLE default_codec_synthetic
(
id UInt64 Codec(ZSTD(3))
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
INSERT INTO delta_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
INSERT INTO default_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
@@ -47,12 +47,12 @@ DROP TABLE IF EXISTS default_codec_float;
CREATE TABLE delta_codec_float
(
id Float64 Codec(Delta, LZ4HC)
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
CREATE TABLE default_codec_float
(
id Float64 Codec(LZ4HC)
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
INSERT INTO delta_codec_float SELECT number FROM numbers(1547510400, 500000) WHERE number % 3 == 0 OR number % 5 == 0 OR number % 7 == 0 OR number % 11 == 0;
INSERT INTO default_codec_float SELECT * from delta_codec_float;
@@ -85,12 +85,12 @@ DROP TABLE IF EXISTS default_codec_string;
CREATE TABLE delta_codec_string
(
id Float64 Codec(Delta, LZ4)
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
CREATE TABLE default_codec_string
(
id Float64 Codec(LZ4)
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
INSERT INTO delta_codec_string SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000);
INSERT INTO default_codec_string SELECT * from delta_codec_string;

@@ -1,7 +1,7 @@
DROP TABLE IF EXISTS test_00961;
CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32)
ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0;
ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0, compress_marks=false, compress_primary_key=false;
INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789);

@@ -7,7 +7,7 @@ CREATE TABLE test_table
INDEX value_index value TYPE minmax GRANULARITY 1
)
Engine=MergeTree()
ORDER BY key;
ORDER BY key SETTINGS compress_marks=false;
INSERT INTO test_table VALUES (0, 'Value');
SELECT * FROM system.data_skipping_indices WHERE database = currentDatabase();

@@ -3,7 +3,7 @@
SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;

@@ -6,7 +6,7 @@ SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;

@@ -5,7 +5,7 @@ SET enable_filesystem_cache_on_write_operations=0;
SET skip_download_if_exceeds_query_cache=1;
SET max_query_cache_size=128;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;

@@ -8,7 +8,7 @@ SET skip_download_if_exceeds_query_cache=1;
SET max_query_cache_size=128;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;

@@ -4,7 +4,7 @@ Using storage policy: s3_cache
SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
@@ -19,7 +19,7 @@ SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesy
SYSTEM DROP FILESYSTEM CACHE;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_3', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
@@ -39,7 +39,7 @@ Using storage policy: local_cache
SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
@@ -54,7 +54,7 @@ SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesy
SYSTEM DROP FILESYSTEM CACHE;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache_3', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;

@@ -3,7 +3,7 @@ Using storage policy: s3_cache
SET enable_filesystem_cache_on_write_operations=1;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
SYSTEM DROP FILESYSTEM CACHE;
SELECT file_segment_range_begin, file_segment_range_end, size, state
@@ -129,7 +129,7 @@ Using storage policy: local_cache
SET enable_filesystem_cache_on_write_operations=1;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
SYSTEM DROP FILESYSTEM CACHE;
SELECT file_segment_range_begin, file_segment_range_end, size, state

@@ -6,7 +6,7 @@ SET enable_filesystem_cache_log=1;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
DROP TABLE IF EXISTS system.filesystem_cache_log;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100000);
SELECT 2240, 's3_cache', * FROM test FORMAT Null;
@@ -27,7 +27,7 @@ SET enable_filesystem_cache_log=1;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
DROP TABLE IF EXISTS system.filesystem_cache_log;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100000);
SELECT 2240, 'local_cache', * FROM test FORMAT Null;

@@ -1,7 +1,7 @@
-- Tags: no-backward-compatibility-check
drop table if exists test_02381;
create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b);
create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks=false, compress_primary_key=false;
insert into test_02381 select number, number * 10 from system.numbers limit 1000000;
drop table if exists test_02381_compress;

@@ -3,7 +3,7 @@
SET enable_filesystem_cache_on_write_operations=0;
SYSTEM DROP FILESYSTEM CACHE;
DROP TABLE IF EXISTS nopers;
CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES nopers;
INSERT INTO nopers SELECT number, toString(number) FROM numbers(10);
SELECT * FROM nopers FORMAT Null;
@@ -22,7 +22,7 @@ ORDER BY file, cache, size;
data.bin 0 114
data.mrk3 0 80
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
@@ -47,7 +47,7 @@ data.bin 0 746
data.mrk3 0 80
data.mrk3 0_persistent 80
DROP TABLE IF EXISTS test2;
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test2;
INSERT INTO test2 SELECT number, toString(number) FROM numbers(100000);
SELECT * FROM test2 FORMAT Null;

@@ -7,7 +7,7 @@ SET enable_filesystem_cache_on_write_operations=0;
SYSTEM DROP FILESYSTEM CACHE;
DROP TABLE IF EXISTS nopers;
CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES nopers;
INSERT INTO nopers SELECT number, toString(number) FROM numbers(10);
@@ -26,7 +26,7 @@ ON data_paths.cache_path = caches.cache_path
ORDER BY file, cache, size;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
@@ -49,7 +49,7 @@ ON data_paths.cache_path = caches.cache_path
ORDER BY file, cache, size;
DROP TABLE IF EXISTS test2;
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test2;
INSERT INTO test2 SELECT number, toString(number) FROM numbers(100000);

@@ -0,0 +1,28 @@
0
--
0
1
--
1
2
--
(1) 0
--
(0,1) 0
--
(1,2) 1
(1,2) 2
--
(1) 0
--
(0,1) 0
--
(1,2) 1
(1,2) 2
--
('1') 0
--
('0','1') 0
--
('1','2') 1
('1','2') 2

@@ -0,0 +1,47 @@
SET allow_experimental_analyzer = 1;
SELECT number FROM numbers(untuple(tuple(1)));
SELECT '--';
SELECT number FROM numbers(untuple(tuple(0, 2)));
SELECT '--';
SELECT number FROM numbers(untuple(tuple(1, 2)));
SELECT '--';
SELECT cast(tuple(1), 'Tuple(value UInt64)') AS value, number FROM numbers(untuple(value));
SELECT '--';
SELECT cast(tuple(0, 1), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(untuple(value));
SELECT '--';
SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(untuple(value));
SELECT '--';
SELECT cast(tuple(1), 'Tuple(value UInt64)') AS value, number FROM numbers(value.*);
SELECT '--';
SELECT cast(tuple(0, 1), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(value.*);
SELECT '--';
SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(value.*);
SELECT '--';
SELECT cast(tuple('1'), 'Tuple(value String)') AS value, number FROM numbers(value.* APPLY x -> toUInt64(x));
SELECT '--';
SELECT cast(tuple('0', '1'), 'Tuple(value_1 String, value_2 String)') AS value, number FROM numbers(value.* APPLY x -> toUInt64(x));
SELECT '--';
SELECT cast(tuple('1', '2'), 'Tuple(value_1 String, value_2 String)') AS value, number FROM numbers(value.* APPLY x -> toUInt64(x));

@@ -3,7 +3,7 @@
SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
@@ -18,7 +18,7 @@ SYSTEM DROP FILESYSTEM CACHE;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy_3', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;

@@ -3,7 +3,7 @@
SET enable_filesystem_cache_on_write_operations=1;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
SYSTEM DROP FILESYSTEM CACHE;
SELECT file_segment_range_begin, file_segment_range_end, size, state

@@ -6,7 +6,7 @@ SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
DROP TABLE IF EXISTS system.filesystem_cache_log;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
SYSTEM STOP MERGES test;
INSERT INTO test SELECT number, toString(number) FROM numbers(100000);