Merge branch 'master' into parallel-s3-downloading

Antonio Andelic committed cbd20af706 on 2022-03-23 16:35:05 +00:00
5 changed files with 110 additions and 8 deletions

View File

@@ -7,6 +7,7 @@ env:
"on":
  schedule:
    - cron: '13 3 * * *'
  workflow_dispatch:
jobs:
  DockerHubPushAarch64:

View File

@@ -41,6 +41,57 @@ namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
    extern const int LOGICAL_ERROR;
    extern const int NOT_IMPLEMENTED;
}

namespace
{

class StorageWithComment : public IAST
{
public:
    ASTPtr storage;
    ASTPtr comment;

    String getID(char) const override { return "Storage with comment definition"; }

    ASTPtr clone() const override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method clone is not supported");
    }

    void formatImpl(const FormatSettings &, FormatState &, FormatStateStacked) const override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method formatImpl is not supported");
    }
};

class ParserStorageWithComment : public IParserBase
{
protected:
    const char * getName() const override { return "storage definition with comment"; }
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
    {
        ParserStorage storage_p;
        ASTPtr storage;
        if (!storage_p.parse(pos, storage, expected))
            return false;

        ParserKeyword s_comment("COMMENT");
        ParserStringLiteral string_literal_parser;
        ASTPtr comment;

        if (s_comment.ignore(pos, expected))
            string_literal_parser.parse(pos, comment, expected);

        auto storage_with_comment = std::make_shared<StorageWithComment>();
        storage_with_comment->storage = std::move(storage);
        storage_with_comment->comment = std::move(comment);

        node = storage_with_comment;
        return true;
    }
};

}

namespace
@@ -102,8 +153,9 @@ std::shared_ptr<TSystemLog> createSystemLog(
            engine += " TTL " + ttl;
        engine += " ORDER BY (event_date, event_time)";
    }

    // Validate engine definition grammatically to prevent some configuration errors
-    ParserStorage storage_parser;
+    ParserStorageWithComment storage_parser;
    parseQuery(storage_parser, engine.data(), engine.data() + engine.size(),
            "Storage to create table for " + config_prefix, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
@@ -450,7 +502,6 @@ void SystemLog<LogElement>::prepareTable()
    is_prepared = true;
}

template <typename LogElement>
ASTPtr SystemLog<LogElement>::getCreateTableQuery()
{
@@ -465,11 +516,16 @@ ASTPtr SystemLog<LogElement>::getCreateTableQuery()
    new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns));
    create->set(create->columns_list, new_columns_list);

-    ParserStorage storage_parser;
-    ASTPtr storage_ast = parseQuery(
+    ParserStorageWithComment storage_parser;
+    ASTPtr storage_with_comment_ast = parseQuery(
        storage_parser, storage_def.data(), storage_def.data() + storage_def.size(),
        "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

-    create->set(create->storage, storage_ast);
+    StorageWithComment & storage_with_comment = storage_with_comment_ast->as<StorageWithComment &>();
+    create->set(create->storage, storage_with_comment.storage);
+    create->set(create->comment, storage_with_comment.comment);

    /// Write additional (default) settings for MergeTree engine to make it possible to compare ASTs
    /// and recreate tables on settings changes.
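
For orientation, a minimal sketch (not part of the commit) of how the new parser is driven end to end; ParserStorageWithComment, StorageWithComment, parseQuery and DBMS_DEFAULT_MAX_PARSER_DEPTH are taken from the diff above, while the engine string and the "engine definition" description are invented examples:

    /// Parse an engine definition that may carry a trailing COMMENT clause.
    ParserStorageWithComment parser;
    String definition = "ENGINE = MergeTree ORDER BY (event_date, event_time) COMMENT 'query log storage'";
    ASTPtr ast = parseQuery(parser, definition.data(), definition.data() + definition.size(),
        "engine definition", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

    /// The result bundles both parts: storage holds the ENGINE/ORDER BY clause,
    /// comment holds the string literal (or nullptr when no COMMENT was given).
    auto & with_comment = ast->as<StorageWithComment &>();

createSystemLog uses this parse only to validate the configured engine string, while getCreateTableQuery attaches both parts to the CREATE query it builds.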

View File

@@ -179,8 +179,9 @@ std::unique_ptr<ReadBuffer> createReadBuffer(
        method = chooseCompressionMethod(current_path, compression_method);
    }

-    /// For clickhouse-local add progress callback to display progress bar.
-    if (context->getApplicationType() == Context::ApplicationType::LOCAL)
+    /// For clickhouse-local and clickhouse-client add progress callback to display progress bar.
+    if (context->getApplicationType() == Context::ApplicationType::LOCAL
+        || context->getApplicationType() == Context::ApplicationType::CLIENT)
    {
        auto & in = static_cast<ReadBufferFromFileDescriptor &>(*nested_buffer);
        in.setProgressCallback(context);
@@ -643,7 +644,9 @@ Pipe StorageFile::read(
    /// Set total number of bytes to process. For progress bar.
    auto progress_callback = context->getFileProgressCallback();

-    if (context->getApplicationType() == Context::ApplicationType::LOCAL && progress_callback)
+    if ((context->getApplicationType() == Context::ApplicationType::LOCAL
+         || context->getApplicationType() == Context::ApplicationType::CLIENT)
+        && progress_callback)
        progress_callback(FileProgress(0, total_bytes_to_read));

    for (size_t i = 0; i < num_streams; ++i)
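
Both hunks in this file gate progress reporting on the same pair of application types. Purely as an illustration (not something this commit adds), that repeated check reads as a single predicate; the helper name below is invented, Context::ApplicationType and getApplicationType() come from the diff, and ContextPtr is assumed to be the usual ClickHouse context alias:

    /// True when the file is read by clickhouse-local or clickhouse-client,
    /// i.e. when a terminal progress bar makes sense.
    static bool shouldReportFileProgress(const ContextPtr & context)
    {
        const auto type = context->getApplicationType();
        return type == Context::ApplicationType::LOCAL
            || type == Context::ApplicationType::CLIENT;
    }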

View File

@@ -0,0 +1,42 @@
# pylint: disable=line-too-long
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node_default', stay_alive=True)


@pytest.fixture(scope='module', autouse=True)
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_system_logs_comment():
    node.exec_in_container(['bash', '-c', f"""echo "
        <clickhouse>
            <query_log>
                <engine>ENGINE = MergeTree
                        PARTITION BY (event_date)
                        ORDER BY (event_time)
                        TTL event_date + INTERVAL 14 DAY DELETE
                        SETTINGS ttl_only_drop_parts=1
                        COMMENT 'test_comment'
                </engine>
                <partition_by remove='remove'/>
            </query_log>
        </clickhouse>
        " > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
        """])
    node.restart_clickhouse()

    node.query("select 1")
    node.query("system flush logs")

    comment = node.query("SELECT comment FROM system.tables WHERE name = 'query_log'")
    assert comment == 'test_comment\n'