Merge branch 'master' of github.com:ClickHouse/ClickHouse into insert-cluster

This commit is contained in:
feng lv 2021-01-10 03:47:14 +00:00
commit 09d71029c2
17 changed files with 101 additions and 14 deletions

View File

@ -1,6 +1,6 @@
---
name: Question
about: Ask question about ClickHouse
about: Ask a question about ClickHouse
title: ''
labels: question
assignees: ''

View File

@ -1,6 +1,6 @@
---
name: Unexpected behaviour
about: Create a report to help us improve ClickHouse
about: Some feature is working in a non-obvious way
title: ''
labels: unexpected behaviour
assignees: ''

View File

@ -0,0 +1,30 @@
---
name: Incomplete implementation
about: Implementation of existing feature is not finished
title: ''
labels: unfinished code
assignees: ''
---
(you don't have to strictly follow this form)
**Describe the unexpected behaviour**
A clear and concise description of what does not work as it is supposed to.
**How to reproduce**
* Which ClickHouse server version to use
* Which interface to use, if it matters
* Non-default settings, if any
* `CREATE TABLE` statements for all tables involved
* Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
* Queries to run that lead to unexpected result
**Expected behavior**
A clear and concise description of what you expected to happen.
**Error message and/or stacktrace**
If applicable, add the error message and/or stacktrace to help explain the problem.
**Additional context**
Add any other context about the problem here.

View File

@ -1,6 +1,6 @@
---
name: Usability issue
about: Create a report to help us improve ClickHouse
about: Report something that can be made more convenient to use
title: ''
labels: usability
assignees: ''

View File

@ -1,6 +1,6 @@
---
name: Backward compatibility issue
about: Create a report to help us improve ClickHouse
about: Report the case when the behaviour of a new version can break existing use cases
title: ''
labels: backward compatibility
assignees: ''

View File

@ -0,0 +1,16 @@
---
name: Assertion found via fuzzing
about: Potential issue has been found via Fuzzer or Stress tests
title: ''
labels: fuzz
assignees: ''
---
(you don't have to strictly follow this form)
**Describe the bug**
A link to the report
**How to reproduce**
Try to reproduce the report and copy the tables and queries involved.

View File

@ -801,7 +801,8 @@ private:
connection->setDefaultDatabase(connection_parameters.default_database);
ReadBufferFromFile in(queries_file);
readStringUntilEOF(text, in);
processMultiQuery(text);
if (!processMultiQuery(text))
break;
}
return;
}
@ -984,7 +985,8 @@ private:
if (query_fuzzer_runs)
{
processWithFuzzing(full_query);
if (!processWithFuzzing(full_query))
return false;
}
else
{
@ -1034,7 +1036,8 @@ private:
}
void processWithFuzzing(const String & text)
/// Returns false when server is not available.
bool processWithFuzzing(const String & text)
{
ASTPtr orig_ast;
@ -1052,7 +1055,7 @@ private:
if (!orig_ast)
{
// Can't continue after a parsing error
return;
return true;
}
// Don't repeat inserts, the tables grow too big. Also don't repeat
@ -1147,7 +1150,7 @@ private:
// Probably the server is dead because we found an assertion
// failure. Fail fast.
fmt::print(stderr, "Lost connection to the server\n");
return;
return false;
}
// The server is still alive so we're going to continue fuzzing.
@ -1173,6 +1176,8 @@ private:
fuzz_base = ast_to_process;
}
}
return true;
}
void processTextAsSingleQuery(const String & text_)

View File

@ -33,7 +33,7 @@ template <typename T>
struct AggregateFunctionWindowFunnelData
{
using TimestampEvent = std::pair<T, UInt8>;
using TimestampEvents = PODArray<TimestampEvent, 64>;
using TimestampEvents = PODArrayWithStackMemory<TimestampEvent, 64>;
using Comparator = ComparePairFirst;
bool sorted = true;

View File

@ -80,6 +80,24 @@ TEST(TransformQueryForExternalDatabase, InWithSingleElement)
state.context, state.columns);
}
TEST(TransformQueryForExternalDatabase, InWithTable)
{
const State & state = State::instance();
check("SELECT column FROM test.table WHERE 1 IN external_table",
R"(SELECT "column" FROM "test"."table")",
state.context, state.columns);
check("SELECT column FROM test.table WHERE 1 IN (x)",
R"(SELECT "column" FROM "test"."table")",
state.context, state.columns);
check("SELECT column, field, value FROM test.table WHERE column IN (field, value)",
R"(SELECT "column", "field", "value" FROM "test"."table" WHERE "column" IN ("field", "value"))",
state.context, state.columns);
check("SELECT column FROM test.table WHERE column NOT IN hello AND column = 123",
R"(SELECT "column" FROM "test"."table" WHERE ("column" = 123))",
state.context, state.columns);
}
TEST(TransformQueryForExternalDatabase, Like)
{
const State & state = State::instance();

View File

@ -138,6 +138,12 @@ bool isCompatible(const IAST & node)
if (name == "tuple" && function->arguments->children.size() <= 1)
return false;
/// If the right hand side of IN is an identifier (example: x IN table), then it's not compatible.
if ((name == "in" || name == "notIn")
&& (function->arguments->children.size() != 2
|| function->arguments->children[1]->as<ASTIdentifier>()))
return false;
for (const auto & expr : function->arguments->children)
if (!isCompatible(*expr))
return false;

View File

@ -0,0 +1,12 @@
<test>
<create_query>CREATE TABLE action(uid UInt64, event String, time DateTime) ENGINE = MergeTree ORDER BY uid</create_query>
<fill_query>INSERT INTO action SELECT arrayJoin(groupArray(number)), 'a', now() from numbers(1000000)</fill_query>
<fill_query>INSERT INTO action SELECT arrayJoin(groupArray(number)), 'b', now() + INTERVAL 6 hour from numbers(1000000)</fill_query>
<fill_query>INSERT INTO action SELECT arrayJoin(groupArray(number)), 'c', now() + INTERVAL 12 hour from numbers(1000000)</fill_query>
<fill_query>INSERT INTO action SELECT arrayJoin(groupArray(number)), 'd', now() + INTERVAL 18 hour from numbers(1000000)</fill_query>
<query>SELECT level, count() from (select windowFunnel(86400)(time, event='a', event='b', event='c', event='d') level from action group by uid) group by level FORMAT Null</query>
<drop_query>DROP TABLE IF EXISTS action</drop_query>
</test>

View File

@ -14,10 +14,10 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
"""
args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "stress": stress, "parallel": parallel}
Feature(test=load("example.regression", "regression"))(**args)
Feature(test=load("ldap.regression", "regression"))(**args)
Feature(test=load("rbac.regression", "regression"))(**args)
Feature(test=load("aes_encryption.regression", "regression"))(**args)
# Feature(test=load("example.regression", "regression"))(**args)
# Feature(test=load("ldap.regression", "regression"))(**args)
# Feature(test=load("rbac.regression", "regression"))(**args)
# Feature(test=load("aes_encryption.regression", "regression"))(**args)
if main():
regression()