Merge branch 'master' into parallel-s3-downloading
commit cbd20af706
.github/workflows/nightly.yml
vendored
1
.github/workflows/nightly.yml
vendored
@ -7,6 +7,7 @@ env:
|
|||||||
"on":
|
"on":
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '13 3 * * *'
|
- cron: '13 3 * * *'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
DockerHubPushAarch64:
|
DockerHubPushAarch64:
|
||||||
|
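The new `workflow_dispatch:` trigger (with no inputs) lets the nightly workflow be started manually from the Actions tab, in addition to the existing 03:13 UTC cron schedule.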
src/Interpreters/SystemLog.cpp

@@ -41,6 +41,57 @@ namespace ErrorCodes
 {
     extern const int BAD_ARGUMENTS;
     extern const int LOGICAL_ERROR;
+    extern const int NOT_IMPLEMENTED;
+}
+
+namespace
+{
+class StorageWithComment : public IAST
+{
+public:
+    ASTPtr storage;
+    ASTPtr comment;
+
+    String getID(char) const override { return "Storage with comment definition"; }
+
+    ASTPtr clone() const override
+    {
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method clone is not supported");
+    }
+
+    void formatImpl(const FormatSettings &, FormatState &, FormatStateStacked) const override
+    {
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method formatImpl is not supported");
+    }
+};
+
+class ParserStorageWithComment : public IParserBase
+{
+protected:
+    const char * getName() const override { return "storage definition with comment"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
+    {
+        ParserStorage storage_p;
+        ASTPtr storage;
+
+        if (!storage_p.parse(pos, storage, expected))
+            return false;
+
+        ParserKeyword s_comment("COMMENT");
+        ParserStringLiteral string_literal_parser;
+        ASTPtr comment;
+
+        if (s_comment.ignore(pos, expected))
+            string_literal_parser.parse(pos, comment, expected);
+
+        auto storage_with_comment = std::make_shared<StorageWithComment>();
+        storage_with_comment->storage = std::move(storage);
+        storage_with_comment->comment = std::move(comment);
+
+        node = storage_with_comment;
+        return true;
+    }
+};
 }
 
 namespace
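Taken together: StorageWithComment is a throwaway AST holder (hence the NOT_IMPLEMENTED clone/formatImpl), and ParserStorageWithComment delegates to the stock ParserStorage before optionally consuming a trailing COMMENT string literal. A minimal usage sketch; the engine string and variable names here are illustrative, not from the commit:

    // Sketch only: splitting a storage definition from its optional comment.
    String engine = "ENGINE = MergeTree ORDER BY (event_date, event_time) COMMENT 'query log'";

    ParserStorageWithComment parser;
    ASTPtr ast = parseQuery(parser, engine.data(), engine.data() + engine.size(),
                            "storage definition", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

    auto & result = ast->as<StorageWithComment &>();
    // result.storage holds the ENGINE ... ORDER BY ... part of the definition;
    // result.comment holds the 'query log' literal, or stays nullptr when no COMMENT is given.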
@@ -102,8 +153,9 @@ std::shared_ptr<TSystemLog> createSystemLog(
             engine += " TTL " + ttl;
         engine += " ORDER BY (event_date, event_time)";
     }
+
     // Validate engine definition grammatically to prevent some configuration errors
-    ParserStorage storage_parser;
+    ParserStorageWithComment storage_parser;
     parseQuery(storage_parser, engine.data(), engine.data() + engine.size(),
                "Storage to create table for " + config_prefix, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
 
@@ -450,7 +502,6 @@ void SystemLog<LogElement>::prepareTable()
     is_prepared = true;
 }
 
-
 template <typename LogElement>
 ASTPtr SystemLog<LogElement>::getCreateTableQuery()
 {
@@ -465,11 +516,16 @@ ASTPtr SystemLog<LogElement>::getCreateTableQuery()
     new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns));
     create->set(create->columns_list, new_columns_list);
 
-    ParserStorage storage_parser;
-    ASTPtr storage_ast = parseQuery(
+    ParserStorageWithComment storage_parser;
+
+    ASTPtr storage_with_comment_ast = parseQuery(
         storage_parser, storage_def.data(), storage_def.data() + storage_def.size(),
         "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
-    create->set(create->storage, storage_ast);
+
+    StorageWithComment & storage_with_comment = storage_with_comment_ast->as<StorageWithComment &>();
+
+    create->set(create->storage, storage_with_comment.storage);
+    create->set(create->comment, storage_with_comment.comment);
 
     /// Write additional (default) settings for MergeTree engine to make it possible to compare ASTs
     /// and recreate tables on settings changes.
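Because the comment is now attached to the CREATE query via `create->set(create->comment, ...)`, it participates in the AST comparison that `prepareTable()` performs; changing the `<engine>` comment in the server config therefore takes the same recreate-the-table path as any other storage-definition change, which is exactly why the default MergeTree settings are written out for comparison in the context lines above.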
src/Storages/StorageFile.cpp

@@ -179,8 +179,9 @@ std::unique_ptr<ReadBuffer> createReadBuffer(
         method = chooseCompressionMethod(current_path, compression_method);
     }
 
-    /// For clickhouse-local add progress callback to display progress bar.
-    if (context->getApplicationType() == Context::ApplicationType::LOCAL)
+    /// For clickhouse-local and clickhouse-client add progress callback to display progress bar.
+    if (context->getApplicationType() == Context::ApplicationType::LOCAL
+        || context->getApplicationType() == Context::ApplicationType::CLIENT)
     {
         auto & in = static_cast<ReadBufferFromFileDescriptor &>(*nested_buffer);
         in.setProgressCallback(context);
@@ -643,7 +644,9 @@ Pipe StorageFile::read(
 
     /// Set total number of bytes to process. For progress bar.
     auto progress_callback = context->getFileProgressCallback();
-    if (context->getApplicationType() == Context::ApplicationType::LOCAL && progress_callback)
+    if ((context->getApplicationType() == Context::ApplicationType::LOCAL
+        || context->getApplicationType() == Context::ApplicationType::CLIENT)
+        && progress_callback)
         progress_callback(FileProgress(0, total_bytes_to_read));
 
     for (size_t i = 0; i < num_streams; ++i)
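The two hunks cooperate: createReadBuffer() wires per-read progress into the file descriptor's buffer, while StorageFile::read() reports the total size once up front so the bar has a denominator. A condensed sketch of the interaction, using the names from the diff:

    // In createReadBuffer(): report incremental progress for every read on the descriptor.
    auto & in = static_cast<ReadBufferFromFileDescriptor &>(*nested_buffer);
    in.setProgressCallback(context);

    // In StorageFile::read(): announce the total byte count once, before reading starts.
    if (auto progress_callback = context->getFileProgressCallback())
        progress_callback(FileProgress(0, total_bytes_to_read));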
tests/integration/test_system_logs_comment/test.py (new file, 42 lines)

@@ -0,0 +1,42 @@
+# pylint: disable=line-too-long
+# pylint: disable=unused-argument
+# pylint: disable=redefined-outer-name
+
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+node = cluster.add_instance('node_default', stay_alive=True)
+
+@pytest.fixture(scope='module', autouse=True)
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_system_logs_comment():
+    node.exec_in_container(['bash', '-c', f"""echo "
+        <clickhouse>
+            <query_log>
+                <engine>ENGINE = MergeTree
+                        PARTITION BY (event_date)
+                        ORDER BY (event_time)
+                        TTL event_date + INTERVAL 14 DAY DELETE
+                        SETTINGS ttl_only_drop_parts=1
+                        COMMENT 'test_comment'
+                </engine>
+                <partition_by remove='remove'/>
+            </query_log>
+        </clickhouse>
+        " > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
+        """])
+    node.restart_clickhouse()
+
+    node.query("select 1")
+    node.query("system flush logs")
+
+    comment = node.query("SELECT comment FROM system.tables WHERE name = 'query_log'")
+    assert comment == 'test_comment\n'
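Two details of the test worth noting: node.query() returns the client's raw output, hence the trailing newline in the expected value 'test_comment\n'; and the <partition_by remove='remove'/> element drops the default partition_by setting, which would otherwise conflict with the PARTITION BY clause already given inside the custom <engine> definition.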