Merge branch 'master' into master
commit a0e490e152
@@ -388,6 +388,8 @@ else
 rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
 rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
 
+# Turn on after 22.12
+rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
 # it uses recently introduced settings which previous versions may not have
 rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:
 
@@ -155,7 +155,7 @@ public:
                 "Values for {} are expected to be Numeric, Float or Decimal, passed type {}",
                 getName(), value_type->getName()};
 
-        WhichDataType value_type_to_check(value_type);
+        WhichDataType value_type_to_check(value_type_without_nullable);
 
         /// Do not promote decimal because of implementation issues of this function design
         /// Currently we cannot get result column type in case of decimal we cannot get decimal scale
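Note: the fix above strips Nullable from the value type before the Numeric/Float/Decimal check. The nullable sumMap test added at the bottom of this diff exercises exactly that path; as a rough sketch of what now works (result computed by hand for a single row, so treat it as illustrative):

CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) ENGINE = Log;
INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]);
SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable;
-- expected: ([1,2,3],[1,2,3])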
@@ -13,6 +13,7 @@
 # include <Interpreters/MySQL/InterpretersMySQLDDLQuery.h>
 # include <Parsers/MySQL/ASTAlterQuery.h>
 # include <Parsers/MySQL/ASTCreateQuery.h>
+# include <Parsers/MySQL/ASTDropQuery.h>
 #endif
 
 namespace DB
@@ -44,7 +45,7 @@ BlockIO InterpreterExternalDDLQuery::execute()
     if (arguments.size() != 2 || !arguments[0]->as<ASTIdentifier>() || !arguments[1]->as<ASTIdentifier>())
         throw Exception("MySQL External require two identifier arguments.", ErrorCodes::BAD_ARGUMENTS);
 
-    if (external_ddl_query.external_ddl->as<ASTDropQuery>())
+    if (external_ddl_query.external_ddl->as<MySQLParser::ASTDropQuery>())
         return MySQLInterpreter::InterpreterMySQLDropQuery(
             external_ddl_query.external_ddl, getContext(), getIdentifierName(arguments[0]),
             getIdentifierName(arguments[1])).execute();
@@ -6,6 +6,7 @@
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTAlterQuery.h>
 #include <Parsers/ASTCreateQuery.h>
+#include <Parsers/ASTDropQuery.h>
 #include <Parsers/ASTColumnDeclaration.h>
 #include <Parsers/ASTIndexDeclaration.h>
 #include <Parsers/MySQL/ASTCreateQuery.h>
@@ -543,15 +544,29 @@ void InterpreterDropImpl::validate(const InterpreterDropImpl::TQuery & /*query*/
 ASTs InterpreterDropImpl::getRewrittenQueries(
     const InterpreterDropImpl::TQuery & drop_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database)
 {
-    const auto & database_name = resolveDatabase(drop_query.getDatabase(), mysql_database, mapped_to_database, context);
-    /// Skip drop database|view|dictionary
-    if (database_name != mapped_to_database || !drop_query.table || drop_query.is_view || drop_query.is_dictionary)
+    /// Skip drop database|view|dictionary|others
+    if (drop_query.kind != TQuery::Kind::Table)
         return {};
 
-    ASTPtr rewritten_query = drop_query.clone();
-    rewritten_query->as<ASTDropQuery>()->setDatabase(mapped_to_database);
-    return ASTs{rewritten_query};
+    TQuery::QualifiedNames tables = drop_query.names;
+    ASTs rewritten_querys;
+    for (const auto & table: tables)
+    {
+        const auto & database_name = resolveDatabase(table.schema, mysql_database, mapped_to_database, context);
+        if (database_name != mapped_to_database)
+            continue;
+        auto rewritten_query = std::make_shared<ASTDropQuery>();
+        rewritten_query->setTable(table.shortName);
+        rewritten_query->setDatabase(mapped_to_database);
+        if (drop_query.is_truncate)
+            rewritten_query->kind = ASTDropQuery::Kind::Truncate;
+        else
+            rewritten_query->kind = ASTDropQuery::Kind::Drop;
+        rewritten_query->is_view = false;
+        //To avoid failure, we always set exists
+        rewritten_query->if_exists = true;
+        rewritten_querys.push_back(rewritten_query);
+    }
+    return rewritten_querys;
 }
 
 void InterpreterRenameImpl::validate(const InterpreterRenameImpl::TQuery & rename_query, ContextPtr /*context*/)
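Sketch of the new rewrite behavior above: a single MySQL DROP or TRUNCATE naming several tables becomes one ClickHouse query per table that resolves to the mapped database, with IF EXISTS always forced on. Here `mapped_db` is a placeholder for whatever database MaterializeMySQL maps to:

-- MySQL side, one statement naming two tables:
DROP TABLE IF EXISTS db.t3, db.t4;
-- Rewritten ClickHouse side, one query per matching table:
DROP TABLE IF EXISTS mapped_db.t3;
DROP TABLE IF EXISTS mapped_db.t4;
-- A TRUNCATE TABLE db.t1 takes the same path with Kind::Truncate instead of Kind::Drop.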
@@ -2,11 +2,11 @@
 
 #include <Interpreters/IInterpreter.h>
 #include <Interpreters/executeQuery.h>
-#include <Parsers/ASTDropQuery.h>
 #include <Parsers/ASTRenameQuery.h>
 #include <Parsers/IAST_fwd.h>
 #include <Parsers/MySQL/ASTAlterQuery.h>
 #include <Parsers/MySQL/ASTCreateQuery.h>
+#include <Parsers/MySQL/ASTDropQuery.h>
 #include <Parsers/queryToString.h>
 #include <Parsers/ASTExpressionList.h>
 
@@ -17,7 +17,7 @@ namespace MySQLInterpreter
 {
 struct InterpreterDropImpl
 {
-    using TQuery = ASTDropQuery;
+    using TQuery = MySQLParser::ASTDropQuery;
 
     static void validate(const TQuery & query, ContextPtr context);
 
src/Parsers/MySQL/ASTDropQuery.cpp (new file, +119)
@@ -0,0 +1,119 @@
+#include <Parsers/MySQL/ASTDropQuery.h>
+
+#include <Parsers/ASTIdentifier.h>
+#include <Parsers/CommonParsers.h>
+#include <Parsers/ExpressionElementParsers.h>
+#include <Parsers/parseDatabaseAndTableName.h>
+#include <Parsers/ExpressionListParsers.h>
+
+namespace DB
+{
+
+namespace MySQLParser
+{
+
+ASTPtr ASTDropQuery::clone() const
+{
+    auto res = std::make_shared<ASTDropQuery>(*this);
+    res->children.clear();
+    res->is_truncate = is_truncate;
+    res->if_exists = if_exists;
+    return res;
+}
+
+bool ParserDropQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & expected)
+{
+    ParserKeyword s_drop("DROP");
+    ParserKeyword s_truncate("TRUNCATE");
+    ParserKeyword s_table("TABLE");
+    ParserKeyword s_database("DATABASE");
+    ParserKeyword s_if_exists("IF EXISTS");
+    ParserKeyword s_view("VIEW");
+    ParserKeyword on("ON");
+    ParserIdentifier name_p(false);
+
+    ParserKeyword s_event("EVENT");
+    ParserKeyword s_function("FUNCTION");
+    ParserKeyword s_index("INDEX");
+    ParserKeyword s_server("SERVER");
+    ParserKeyword s_trigger("TRIGGER");
+
+    auto query = std::make_shared<ASTDropQuery>();
+    node = query;
+    ASTDropQuery::QualifiedNames names;
+    bool if_exists = false;
+    bool is_truncate = false;
+
+    if (s_truncate.ignore(pos, expected) && s_table.ignore(pos, expected))
+    {
+        is_truncate = true;
+        query->kind = ASTDropQuery::Kind::Table;
+        ASTDropQuery::QualifiedName name;
+        if (parseDatabaseAndTableName(pos, expected, name.schema, name.shortName))
+            names.push_back(name);
+        else
+            return false;
+    }
+    else if (s_drop.ignore(pos, expected))
+    {
+        if (s_database.ignore(pos, expected))
+        {
+            query->kind = ASTDropQuery::Kind::Database;
+            if (s_if_exists.ignore(pos, expected))
+                if_exists = true;
+            ASTPtr database;
+            if (!name_p.parse(pos, database, expected))
+                return false;
+        }
+        else
+        {
+            if (s_view.ignore(pos, expected))
+                query->kind = ASTDropQuery::Kind::View;
+            else if (s_table.ignore(pos, expected))
+                query->kind = ASTDropQuery::Kind::Table;
+            else if (s_index.ignore(pos, expected))
+            {
+                ASTPtr index;
+                query->kind = ASTDropQuery::Kind::Index;
+                if (!(name_p.parse(pos, index, expected) && on.ignore(pos, expected)))
+                    return false;
+            }
+            else if (s_event.ignore(pos, expected) || s_function.ignore(pos, expected) || s_server.ignore(pos, expected)
+                || s_trigger.ignore(pos, expected))
+            {
+                query->kind = ASTDropQuery::Kind::Other;
+            }
+            else
+                return false;
+
+            if (s_if_exists.ignore(pos, expected))
+                if_exists = true;
+            //parse name
+            auto parse_element = [&]
+            {
+                ASTDropQuery::QualifiedName element;
+                if (parseDatabaseAndTableName(pos, expected, element.schema, element.shortName))
+                {
+                    names.emplace_back(std::move(element));
+                    return true;
+                }
+                return false;
+            };
+
+            if (!ParserList::parseUtil(pos, expected, parse_element, false))
+                return false;
+        }
+    }
+    else
+        return false;
+
+    query->if_exists = if_exists;
+    query->names = names;
+    query->is_truncate = is_truncate;
+
+    return true;
+}
+
+}
+
+}
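For orientation, the keyword set registered in parseImpl above corresponds to statement shapes like the following (the same shapes the dropddl integration test further down sends to MySQL; comments note the resulting Kind):

TRUNCATE TABLE db.t1;               -- Kind::Table, is_truncate = true
DROP DATABASE IF EXISTS db;         -- Kind::Database
DROP TABLE IF EXISTS db.t3, db.t4;  -- Kind::Table, one QualifiedName per table
DROP VIEW db.v1;                    -- Kind::View
DROP INDEX `PRIMARY` ON db.t2;      -- Kind::Index
DROP TRIGGER db.trigger_name;       -- Kind::Other, skipped by the interpreter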
src/Parsers/MySQL/ASTDropQuery.h (new file, +64)
@@ -0,0 +1,64 @@
+#pragma once
+
+#include <Parsers/IParserBase.h>
+#include <Parsers/MySQL/ASTDeclareIndex.h>
+#include <Parsers/MySQL/ASTDeclareColumn.h>
+#include <Parsers/MySQL/ASTDeclareTableOptions.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NOT_IMPLEMENTED;
+}
+
+namespace MySQLParser
+{
+
+class ASTDropQuery : public IAST
+{
+public:
+    enum Kind
+    {
+        Table,
+        View,
+        Database,
+        Index,
+        /// TRIGGER,FUNCTION,EVENT and so on, No need for support
+        Other,
+    };
+    Kind kind;
+    struct QualifiedName
+    {
+        String schema;
+        String shortName;
+    };
+
+    using QualifiedNames = std::vector<QualifiedName>;
+    QualifiedNames names;
+    bool if_exists{false};
+    //drop or truncate
+    bool is_truncate{false};
+
+    ASTPtr clone() const override;
+    String getID(char /*delim*/) const override {return "ASTDropQuery" ;}
+
+protected:
+    void formatImpl(const FormatSettings & /*settings*/, FormatState & /*state*/, FormatStateStacked /*frame*/) const override
+    {
+        throw Exception("Method formatImpl is not supported by MySQLParser::ASTDropQuery.", ErrorCodes::NOT_IMPLEMENTED);
+    }
+};
+
+class ParserDropQuery : public IParserBase
+{
+protected:
+    const char * getName() const override { return "DROP query"; }
+
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
+};
+
+}
+
+}
@@ -11,6 +11,7 @@
 #if USE_MYSQL
 # include <Parsers/MySQL/ASTAlterQuery.h>
 # include <Parsers/MySQL/ASTCreateQuery.h>
+# include <Parsers/MySQL/ASTDropQuery.h>
 #endif
 
 namespace DB
@@ -43,7 +44,7 @@ bool ParserExternalDDLQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expect
     if (external_ddl_query->from->name == "MySQL")
     {
 #if USE_MYSQL
-        ParserDropQuery p_drop_query;
+        MySQLParser::ParserDropQuery p_drop_query;
         ParserRenameQuery p_rename_query;
         MySQLParser::ParserAlterQuery p_alter_query;
         MySQLParser::ParserCreateQuery p_create_query;
@@ -2023,8 +2023,7 @@ std::optional<std::string> getIndexExtensionFromFilesystem(const IDataPartStorag
     for (auto it = data_part_storage.iterate(); it->isValid(); it->next())
     {
         const auto & extension = fs::path(it->name()).extension();
-        if (extension == getIndexExtension(false)
-            || extension == getIndexExtension(true))
+        if (extension == getIndexExtension(true))
             return extension;
     }
 }
tests/config/config.d/compressed_marks_and_index.xml (new file, +6)
@@ -0,0 +1,6 @@
+<clickhouse>
+    <merge_tree>
+        <compress_marks>true</compress_marks>
+        <compress_primary_key>true</compress_primary_key>
+    </merge_tree>
+</clickhouse>
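This config enables mark and primary-key compression by default on the test server. Tests that read the raw .mrk/.idx files opt back out per table, which is the pattern repeated throughout the test changes below:

CREATE TABLE t (key UInt32, value String)
ENGINE = MergeTree() ORDER BY key
SETTINGS compress_marks = false, compress_primary_key = false;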
@@ -52,6 +52,7 @@ ln -sf $SRC_PATH/config.d/enable_zero_copy_replication.xml $DEST_SERVER_PATH/con
 ln -sf $SRC_PATH/config.d/nlp.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/enable_keeper_map.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/display_name.xml $DEST_SERVER_PATH/config.d/
+ln -sf $SRC_PATH/config.d/compressed_marks_and_index.xml $DEST_SERVER_PATH/config.d/
 
 # Not supported with fasttest.
 if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ]
@@ -1,5 +1,7 @@
 <clickhouse>
     <merge_tree>
         <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+        <compress_marks>0</compress_marks>
+        <compress_primary_key>0</compress_primary_key>
     </merge_tree>
 </clickhouse>
@@ -2184,3 +2184,44 @@ def savepoint(clickhouse_node, mysql_node, mysql_host):
     mysql_node.query(f"INSERT INTO {db}.t1 VALUES (2)")
     mysql_node.query("ROLLBACK TO savepoint_1")
     mysql_node.query("COMMIT")
+
+
+def dropddl(clickhouse_node, mysql_node, mysql_host):
+    db = "dropddl"
+    clickhouse_node.query(f"DROP DATABASE IF EXISTS {db}")
+    mysql_node.query(f"DROP DATABASE IF EXISTS {db}")
+    mysql_node.query(f"CREATE DATABASE {db}")
+    mysql_node.query(f"CREATE TABLE {db}.t1 (a INT PRIMARY KEY, b INT)")
+    mysql_node.query(f"CREATE TABLE {db}.t2 (a INT PRIMARY KEY, b INT)")
+    mysql_node.query(f"CREATE TABLE {db}.t3 (a INT PRIMARY KEY, b INT)")
+    mysql_node.query(f"CREATE TABLE {db}.t4 (a INT PRIMARY KEY, b INT)")
+    mysql_node.query(f"CREATE VIEW {db}.v1 AS SELECT * FROM {db}.t1")
+    mysql_node.query(f"INSERT INTO {db}.t1(a, b) VALUES(1, 1)")
+
+    clickhouse_node.query(
+        f"CREATE DATABASE {db} ENGINE = MaterializeMySQL('{mysql_host}:3306', '{db}', 'root', 'clickhouse')"
+    )
+    check_query(
+        clickhouse_node,
+        f"SELECT count() FROM system.tables where database = '{db}' FORMAT TSV",
+        "4\n",
+    )
+    check_query(clickhouse_node, f"SELECT * FROM {db}.t1 FORMAT TSV", "1\t1\n")
+    mysql_node.query(f"DROP EVENT IF EXISTS {db}.event_name")
+    mysql_node.query(f"DROP VIEW IF EXISTS {db}.view_name")
+    mysql_node.query(f"DROP FUNCTION IF EXISTS {db}.function_name")
+    mysql_node.query(f"DROP TRIGGER IF EXISTS {db}.trigger_name")
+    mysql_node.query(f"DROP INDEX `PRIMARY` ON {db}.t2")
+    mysql_node.query(f"DROP TABLE {db}.t3")
+    mysql_node.query(f"DROP TABLE if EXISTS {db}.t3,{db}.t4")
+    mysql_node.query(f"TRUNCATE TABLE {db}.t1")
+    mysql_node.query(f"INSERT INTO {db}.t2(a, b) VALUES(1, 1)")
+    check_query(clickhouse_node, f"SELECT * FROM {db}.t2 FORMAT TSV", "1\t1\n")
+    check_query(clickhouse_node, f"SELECT count() FROM {db}.t1 FORMAT TSV", "0\n")
+    check_query(
+        clickhouse_node,
+        f"SELECT name FROM system.tables where database = '{db}' FORMAT TSV",
+        "t1\nt2\n",
+    )
+    mysql_node.query(f"DROP DATABASE {db}")
+    clickhouse_node.query(f"DROP DATABASE {db}")
@@ -516,3 +516,10 @@ def test_savepoint_query(
 ):
     materialize_with_ddl.savepoint(clickhouse_node, started_mysql_8_0, "mysql80")
     materialize_with_ddl.savepoint(clickhouse_node, started_mysql_5_7, "mysql57")
+
+
+def test_materialized_database_mysql_drop_ddl(
+    started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node
+):
+    materialize_with_ddl.dropddl(clickhouse_node, started_mysql_8_0, "mysql80")
+    materialize_with_ddl.dropddl(clickhouse_node, started_mysql_5_7, "mysql57")
@@ -763,7 +763,7 @@ def test_cache_setting_compatibility(cluster, node_name):
     node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
 
     node.query(
-        "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r';"
+        "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r', compress_marks=false, compress_primary_key=false;"
     )
     node.query(
         "INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 500"
@@ -1655,7 +1655,7 @@ def test_freeze(start_cluster):
         ) ENGINE = MergeTree
         ORDER BY tuple()
         PARTITION BY toYYYYMM(d)
-        SETTINGS storage_policy='small_jbod_with_external'
+        SETTINGS storage_policy='small_jbod_with_external', compress_marks=false, compress_primary_key=false
         """
     )
 
@@ -36,7 +36,7 @@ def partition_table_simple(started_cluster):
     q(
         "CREATE TABLE test.partition_simple (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) "
         "ENGINE=MergeTree PARTITION BY date SAMPLE BY sample_key ORDER BY (date,x,sample_key) "
-        "SETTINGS index_granularity=8192, index_granularity_bytes=0"
+        "SETTINGS index_granularity=8192, index_granularity_bytes=0, compress_marks=false, compress_primary_key=false"
     )
     q("INSERT INTO test.partition_simple ( x ) VALUES ( now() )")
     q("INSERT INTO test.partition_simple ( x ) VALUES ( now()+1 )")
@@ -115,7 +115,7 @@ def partition_table_complex(started_cluster):
     q("DROP TABLE IF EXISTS test.partition_complex")
     q(
         "CREATE TABLE test.partition_complex (p Date, k Int8, v1 Int8 MATERIALIZED k + 1) "
-        "ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0"
+        "ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0, compress_marks=false, compress_primary_key=false"
     )
     q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(31), 1)")
     q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(1), 2)")
@@ -153,7 +153,7 @@ def test_partition_complex(partition_table_complex):
 def cannot_attach_active_part_table(started_cluster):
     q("DROP TABLE IF EXISTS test.attach_active")
     q(
-        "CREATE TABLE test.attach_active (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 4) ORDER BY n"
+        "CREATE TABLE test.attach_active (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 4) ORDER BY n SETTINGS compress_marks=false, compress_primary_key=false"
     )
     q("INSERT INTO test.attach_active SELECT number FROM system.numbers LIMIT 16")
 
@@ -181,7 +181,7 @@ def attach_check_all_parts_table(started_cluster):
     q("SYSTEM STOP MERGES")
     q("DROP TABLE IF EXISTS test.attach_partition")
     q(
-        "CREATE TABLE test.attach_partition (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n"
+        "CREATE TABLE test.attach_partition (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n SETTINGS compress_marks=false, compress_primary_key=false"
    )
     q(
         "INSERT INTO test.attach_partition SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8"
@@ -259,7 +259,7 @@ def drop_detached_parts_table(started_cluster):
     q("SYSTEM STOP MERGES")
     q("DROP TABLE IF EXISTS test.drop_detached")
     q(
-        "CREATE TABLE test.drop_detached (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n"
+        "CREATE TABLE test.drop_detached (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n SETTINGS compress_marks=false, compress_primary_key=false"
     )
     q(
         "INSERT INTO test.drop_detached SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8"
@@ -329,9 +329,15 @@ def test_drop_detached_parts(drop_detached_parts_table):
 
 
 def test_system_detached_parts(drop_detached_parts_table):
-    q("create table sdp_0 (n int, x int) engine=MergeTree order by n")
-    q("create table sdp_1 (n int, x int) engine=MergeTree order by n partition by x")
-    q("create table sdp_2 (n int, x String) engine=MergeTree order by n partition by x")
+    q(
+        "create table sdp_0 (n int, x int) engine=MergeTree order by n SETTINGS compress_marks=false, compress_primary_key=false"
+    )
+    q(
+        "create table sdp_1 (n int, x int) engine=MergeTree order by n partition by x SETTINGS compress_marks=false, compress_primary_key=false"
+    )
+    q(
+        "create table sdp_2 (n int, x String) engine=MergeTree order by n partition by x SETTINGS compress_marks=false, compress_primary_key=false"
+    )
     q(
         "create table sdp_3 (n int, x Enum('broken' = 0, 'all' = 1)) engine=MergeTree order by n partition by x"
     )
@@ -449,7 +455,9 @@ def test_system_detached_parts(drop_detached_parts_table):
 
 
 def test_detached_part_dir_exists(started_cluster):
-    q("create table detached_part_dir_exists (n int) engine=MergeTree order by n")
+    q(
+        "create table detached_part_dir_exists (n int) engine=MergeTree order by n SETTINGS compress_marks=false, compress_primary_key=false"
+    )
     q("insert into detached_part_dir_exists select 1")  # will create all_1_1_0
     q(
         "alter table detached_part_dir_exists detach partition id 'all'"
@@ -488,7 +496,7 @@ def test_detached_part_dir_exists(started_cluster):
 
 def test_make_clone_in_detached(started_cluster):
     q(
-        "create table clone_in_detached (n int, m String) engine=ReplicatedMergeTree('/clone_in_detached', '1') order by n"
+        "create table clone_in_detached (n int, m String) engine=ReplicatedMergeTree('/clone_in_detached', '1') order by n SETTINGS compress_marks=false, compress_primary_key=false"
     )
 
     path = path_to_data + "data/default/clone_in_detached/"
@@ -728,7 +728,7 @@ def test_polymorphic_parts_index(start_cluster):
         """
         CREATE TABLE test_index.index_compact(a UInt32, s String)
         ENGINE = MergeTree ORDER BY a
-        SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100"""
+        SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100, compress_marks=false, compress_primary_key=false"""
     )
 
     node1.query(
@@ -1,26 +1,70 @@
+-- { echoOn }
+DROP TABLE IF EXISTS sum_map;
+CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = Log;
+INSERT INTO sum_map VALUES ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
+SELECT * FROM sum_map ORDER BY timeslot, statusMap.status, statusMap.requests;
 2000-01-01 2000-01-01 00:00:00 [1,2,3] [10,10,10]
 2000-01-01 2000-01-01 00:00:00 [3,4,5] [10,10,10]
 2000-01-01 2000-01-01 00:01:00 [4,5,6] [10,10,10]
 2000-01-01 2000-01-01 00:01:00 [6,7,8] [10,10,10]
+SELECT sumMap(statusMap.status, statusMap.requests) FROM sum_map;
 ([1,2,3,4,5,6,7,8],[10,10,20,20,20,20,10,10])
+SELECT sumMap((statusMap.status, statusMap.requests)) FROM sum_map;
 ([1,2,3,4,5,6,7,8],[10,10,20,20,20,20,10,10])
+SELECT sumMapMerge(s) FROM (SELECT sumMapState(statusMap.status, statusMap.requests) AS s FROM sum_map);
 ([1,2,3,4,5,6,7,8],[10,10,20,20,20,20,10,10])
+SELECT timeslot, sumMap(statusMap.status, statusMap.requests) FROM sum_map GROUP BY timeslot ORDER BY timeslot;
 2000-01-01 00:00:00 ([1,2,3,4,5],[10,10,20,10,10])
 2000-01-01 00:01:00 ([4,5,6,7,8],[10,10,20,10,10])
+SELECT timeslot, sumMap(statusMap.status, statusMap.requests).1, sumMap(statusMap.status, statusMap.requests).2 FROM sum_map GROUP BY timeslot ORDER BY timeslot;
 2000-01-01 00:00:00 [1,2,3,4,5] [10,10,20,10,10]
 2000-01-01 00:01:00 [4,5,6,7,8] [10,10,20,10,10]
+SELECT sumMapFiltered([1])(statusMap.status, statusMap.requests) FROM sum_map;
 ([1],[10])
+SELECT sumMapFiltered([1, 4, 8])(statusMap.status, statusMap.requests) FROM sum_map;
 ([1,4,8],[10,20,10])
+DROP TABLE sum_map;
+DROP TABLE IF EXISTS sum_map_overflow;
+CREATE TABLE sum_map_overflow(events Array(UInt8), counts Array(UInt8)) ENGINE = Log;
+INSERT INTO sum_map_overflow VALUES ([1], [255]), ([1], [2]);
+SELECT sumMap(events, counts) FROM sum_map_overflow;
 ([1],[257])
+SELECT sumMapWithOverflow(events, counts) FROM sum_map_overflow;
 ([1],[1])
+DROP TABLE sum_map_overflow;
+select sumMap(val, cnt) from ( SELECT [ CAST(1, 'UInt64') ] as val, [1] as cnt );
 ([1],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Float64') ] as val, [1] as cnt );
 ([1],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST('a', 'Enum16(\'a\'=1)') ] as val, [1] as cnt );
 (['a'],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST(1, 'DateTime(\'Asia/Istanbul\')') ] as val, [1] as cnt );
 (['1970-01-01 02:00:01'],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Date') ] as val, [1] as cnt );
 (['1970-01-02'],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST('01234567-89ab-cdef-0123-456789abcdef', 'UUID') ] as val, [1] as cnt );
 (['01234567-89ab-cdef-0123-456789abcdef'],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST(1.01, 'Decimal(10,2)') ] as val, [1] as cnt );
 ([1.01],[1])
+select sumMap(val, cnt) from ( SELECT [ CAST('a', 'FixedString(1)'), CAST('b', 'FixedString(1)' ) ] as val, [1, 2] as cnt );
 (['a','b'],[1,2])
+select sumMap(val, cnt) from ( SELECT [ CAST('abc', 'String'), CAST('ab', 'String'), CAST('a', 'String') ] as val, [1, 2, 3] as cnt );
 (['a','ab','abc'],[3,2,1])
+DROP TABLE IF EXISTS sum_map_decimal;
+CREATE TABLE sum_map_decimal(
+    statusMap Nested(
+        goal_id UInt16,
+        revenue Decimal32(5)
+    )
+) ENGINE = Log;
+INSERT INTO sum_map_decimal VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]);
+SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
 ([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8])
+SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
 ([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8])
+DROP TABLE sum_map_decimal;
+CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) engine=Log;
+INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]);
+SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable;
+([1,2,3,4,5,6,7,8],[1,2,6,8,10,12,7,8])
+DROP TABLE sum_map_decimal_nullable;
@@ -1,5 +1,6 @@
 SET send_logs_level = 'fatal';
 
+-- { echoOn }
 DROP TABLE IF EXISTS sum_map;
 CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = Log;
 
@@ -54,3 +55,8 @@ SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
 SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal;
 
 DROP TABLE sum_map_decimal;
+
+CREATE TABLE sum_map_decimal_nullable (`statusMap` Array(Tuple(goal_id UInt16, revenue Nullable(Decimal(9, 5))))) engine=Log;
+INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]);
+SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable;
+DROP TABLE sum_map_decimal_nullable;
@@ -12,7 +12,7 @@ CREATE TABLE check_system_tables
 ORDER BY name1
 PARTITION BY name2
 SAMPLE BY name1
-SETTINGS min_bytes_for_wide_part = 0;
+SETTINGS min_bytes_for_wide_part = 0, compress_marks=false, compress_primary_key=false;
 
 SELECT name, partition_key, sorting_key, primary_key, sampling_key, storage_policy, total_rows
 FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase()
@@ -36,7 +36,8 @@ CREATE TABLE check_system_tables
     sign Int8
 ) ENGINE = VersionedCollapsingMergeTree(sign, version)
 PARTITION BY date
-ORDER BY date;
+ORDER BY date
+SETTINGS compress_marks=false, compress_primary_key=false;
 
 SELECT name, partition_key, sorting_key, primary_key, sampling_key
 FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase()
@@ -9,12 +9,12 @@ DROP TABLE IF EXISTS default_codec_synthetic;
 CREATE TABLE delta_codec_synthetic
 (
     id UInt64 Codec(Delta, ZSTD(3))
-) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
 
 CREATE TABLE default_codec_synthetic
 (
     id UInt64 Codec(ZSTD(3))
-) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
+) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
 
 INSERT INTO delta_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
 INSERT INTO default_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
|
|||||||
CREATE TABLE delta_codec_float
|
CREATE TABLE delta_codec_float
|
||||||
(
|
(
|
||||||
id Float64 Codec(Delta, LZ4HC)
|
id Float64 Codec(Delta, LZ4HC)
|
||||||
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
|
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
|
||||||
|
|
||||||
CREATE TABLE default_codec_float
|
CREATE TABLE default_codec_float
|
||||||
(
|
(
|
||||||
id Float64 Codec(LZ4HC)
|
id Float64 Codec(LZ4HC)
|
||||||
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
|
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
|
||||||
|
|
||||||
INSERT INTO delta_codec_float SELECT number FROM numbers(1547510400, 500000) WHERE number % 3 == 0 OR number % 5 == 0 OR number % 7 == 0 OR number % 11 == 0;
|
INSERT INTO delta_codec_float SELECT number FROM numbers(1547510400, 500000) WHERE number % 3 == 0 OR number % 5 == 0 OR number % 7 == 0 OR number % 11 == 0;
|
||||||
INSERT INTO default_codec_float SELECT * from delta_codec_float;
|
INSERT INTO default_codec_float SELECT * from delta_codec_float;
|
||||||
@ -85,12 +85,12 @@ DROP TABLE IF EXISTS default_codec_string;
|
|||||||
CREATE TABLE delta_codec_string
|
CREATE TABLE delta_codec_string
|
||||||
(
|
(
|
||||||
id Float64 Codec(Delta, LZ4)
|
id Float64 Codec(Delta, LZ4)
|
||||||
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
|
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
|
||||||
|
|
||||||
CREATE TABLE default_codec_string
|
CREATE TABLE default_codec_string
|
||||||
(
|
(
|
||||||
id Float64 Codec(LZ4)
|
id Float64 Codec(LZ4)
|
||||||
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
|
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key=false;
|
||||||
|
|
||||||
INSERT INTO delta_codec_string SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000);
|
INSERT INTO delta_codec_string SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000);
|
||||||
INSERT INTO default_codec_string SELECT * from delta_codec_string;
|
INSERT INTO default_codec_string SELECT * from delta_codec_string;
|
||||||
|
@@ -1,7 +1,7 @@
 DROP TABLE IF EXISTS test_00961;
 
 CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32)
-ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0;
+ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0, compress_marks=false, compress_primary_key=false;
 
 INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789);
 
@@ -7,7 +7,7 @@ CREATE TABLE test_table
     INDEX value_index value TYPE minmax GRANULARITY 1
 )
 Engine=MergeTree()
-ORDER BY key;
+ORDER BY key SETTINGS compress_marks=false;
 
 INSERT INTO test_table VALUES (0, 'Value');
 SELECT * FROM system.data_skipping_indices WHERE database = currentDatabase();
@@ -3,7 +3,7 @@
 SYSTEM DROP FILESYSTEM CACHE;
 SET enable_filesystem_cache_on_write_operations=0;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
@@ -6,7 +6,7 @@ SYSTEM DROP FILESYSTEM CACHE;
 SET enable_filesystem_cache_on_write_operations=0;
 
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 
 SELECT * FROM test FORMAT Null;
@@ -5,7 +5,7 @@ SET enable_filesystem_cache_on_write_operations=0;
 SET skip_download_if_exceeds_query_cache=1;
 SET max_query_cache_size=128;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
@@ -8,7 +8,7 @@ SET skip_download_if_exceeds_query_cache=1;
 SET max_query_cache_size=128;
 
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 
 SELECT * FROM test FORMAT Null;
@@ -4,7 +4,7 @@ Using storage policy: s3_cache
 SYSTEM DROP FILESYSTEM CACHE;
 SET enable_filesystem_cache_on_write_operations=0;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
@@ -19,7 +19,7 @@ SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesy
 SYSTEM DROP FILESYSTEM CACHE;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_3', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
@@ -39,7 +39,7 @@ Using storage policy: local_cache
 SYSTEM DROP FILESYSTEM CACHE;
 SET enable_filesystem_cache_on_write_operations=0;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
@@ -54,7 +54,7 @@ SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesy
 SYSTEM DROP FILESYSTEM CACHE;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache_3', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
@@ -3,7 +3,7 @@ Using storage policy: s3_cache
 
 SET enable_filesystem_cache_on_write_operations=1;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 SYSTEM DROP FILESYSTEM CACHE;
 SELECT file_segment_range_begin, file_segment_range_end, size, state
|
|||||||
|
|
||||||
SET enable_filesystem_cache_on_write_operations=1;
|
SET enable_filesystem_cache_on_write_operations=1;
|
||||||
DROP TABLE IF EXISTS test;
|
DROP TABLE IF EXISTS test;
|
||||||
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760;
|
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
|
||||||
SYSTEM STOP MERGES test;
|
SYSTEM STOP MERGES test;
|
||||||
SYSTEM DROP FILESYSTEM CACHE;
|
SYSTEM DROP FILESYSTEM CACHE;
|
||||||
SELECT file_segment_range_begin, file_segment_range_end, size, state
|
SELECT file_segment_range_begin, file_segment_range_end, size, state
|
||||||
|
@@ -6,7 +6,7 @@ SET enable_filesystem_cache_log=1;
 SET enable_filesystem_cache_on_write_operations=0;
 DROP TABLE IF EXISTS test;
 DROP TABLE IF EXISTS system.filesystem_cache_log;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100000);
 SELECT 2240, 's3_cache', * FROM test FORMAT Null;
@@ -27,7 +27,7 @@ SET enable_filesystem_cache_log=1;
 SET enable_filesystem_cache_on_write_operations=0;
 DROP TABLE IF EXISTS test;
 DROP TABLE IF EXISTS system.filesystem_cache_log;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='local_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100000);
 SELECT 2240, 'local_cache', * FROM test FORMAT Null;
@@ -1,7 +1,7 @@
 -- Tags: no-backward-compatibility-check
 
 drop table if exists test_02381;
-create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b);
+create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks=false, compress_primary_key=false;
 insert into test_02381 select number, number * 10 from system.numbers limit 1000000;
 
 drop table if exists test_02381_compress;
@@ -3,7 +3,7 @@
 SET enable_filesystem_cache_on_write_operations=0;
 SYSTEM DROP FILESYSTEM CACHE;
 DROP TABLE IF EXISTS nopers;
-CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES nopers;
 INSERT INTO nopers SELECT number, toString(number) FROM numbers(10);
 SELECT * FROM nopers FORMAT Null;
@@ -22,7 +22,7 @@ ORDER BY file, cache, size;
 data.bin 0 114
 data.mrk3 0 80
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
@ -47,7 +47,7 @@ data.bin 0 746
|
|||||||
data.mrk3 0 80
|
data.mrk3 0 80
|
||||||
data.mrk3 0_persistent 80
|
data.mrk3 0_persistent 80
|
||||||
DROP TABLE IF EXISTS test2;
|
DROP TABLE IF EXISTS test2;
|
||||||
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
|
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
|
||||||
SYSTEM STOP MERGES test2;
|
SYSTEM STOP MERGES test2;
|
||||||
INSERT INTO test2 SELECT number, toString(number) FROM numbers(100000);
|
INSERT INTO test2 SELECT number, toString(number) FROM numbers(100000);
|
||||||
SELECT * FROM test2 FORMAT Null;
|
SELECT * FROM test2 FORMAT Null;
|
||||||
|
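
The expected-output lines such as data.mrk3 0 80 assert exact cached sizes of the uncompressed .mrk3 files, which is why compression has to be pinned off here. The hunk headers (ON data_paths.cache_path = caches.cache_path, ORDER BY file, cache, size) point at a join between the remote-path and cache system tables; a sketch of that shape, assuming system.remote_data_paths exposes cache_paths and local_path as in 22.x:

    SELECT
        extract(data_paths.local_path, '([^/]+)$') AS file,
        caches.size AS size
    FROM
    (
        SELECT arrayJoin(cache_paths) AS cache_path, local_path
        FROM system.remote_data_paths
    ) AS data_paths
    INNER JOIN system.filesystem_cache AS caches
        ON data_paths.cache_path = caches.cache_path
    ORDER BY file, size;
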
@@ -7,7 +7,7 @@ SET enable_filesystem_cache_on_write_operations=0;
 SYSTEM DROP FILESYSTEM CACHE;
 
 DROP TABLE IF EXISTS nopers;
-CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
+CREATE TABLE nopers (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES nopers;
 
 INSERT INTO nopers SELECT number, toString(number) FROM numbers(10);
@@ -26,7 +26,7 @@ ON data_paths.cache_path = caches.cache_path
 ORDER BY file, cache, size;
 
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
@@ -49,7 +49,7 @@ ON data_paths.cache_path = caches.cache_path
 ORDER BY file, cache, size;
 
 DROP TABLE IF EXISTS test2;
-CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_small', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test2;
 
 INSERT INTO test2 SELECT number, toString(number) FROM numbers(100000);
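
Both variants of this test start from a clean slate so the later size assertions are deterministic. A minimal sketch of that reset step, using only statements that appear in these tests plus an illustrative count check:

    SYSTEM DROP FILESYSTEM CACHE;
    -- on an otherwise idle server the cache should now be empty
    SELECT count() FROM system.filesystem_cache;
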
@@ -3,7 +3,7 @@
 SYSTEM DROP FILESYSTEM CACHE;
 SET enable_filesystem_cache_on_write_operations=0;
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 
@@ -18,7 +18,7 @@ SYSTEM DROP FILESYSTEM CACHE;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache;
 
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy_3', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100);
 SELECT * FROM test FORMAT Null;
 SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
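
The '_storagePolicy' and '_storagePolicy_3' names read as placeholders that a test template expands once per configured storage policy. Assuming the s3_cache policy seen earlier in this change, one expansion would look like:

    CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key
    SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760,
             compress_marks=false, compress_primary_key=false;
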
@@ -3,7 +3,7 @@
 SET enable_filesystem_cache_on_write_operations=1;
 
 DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 SYSTEM DROP FILESYSTEM CACHE;
 SELECT file_segment_range_begin, file_segment_range_end, size, state
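
With enable_filesystem_cache_on_write_operations=1 the INSERT itself should populate the cache, so the test can read segment state without any prior SELECT. A sketch of that check (the DOWNLOADED state name is an assumption about the cache's segment states):

    INSERT INTO test SELECT number, toString(number) FROM numbers(100);
    SELECT file_segment_range_begin, file_segment_range_end, size, state
    FROM system.filesystem_cache;
    -- segments written through the cache are expected in DOWNLOADED state
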
@@ -6,7 +6,7 @@ SET enable_filesystem_cache_on_write_operations=0;
 
 DROP TABLE IF EXISTS test;
 DROP TABLE IF EXISTS system.filesystem_cache_log;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760;
+CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='_storagePolicy', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
 SYSTEM STOP MERGES test;
 INSERT INTO test SELECT number, toString(number) FROM numbers(100000);
 