Merge remote-tracking branch 'origin/master' into less-bloat

Alexey Milovidov 2019-12-19 02:14:10 +03:00
commit be7794d4da
10 changed files with 77 additions and 12 deletions

@@ -287,10 +287,13 @@ void TaskStatsInfoGetter::getStat(::taskstats & out_stats, pid_t tid)
 }
 
+static thread_local pid_t current_tid = 0;
 pid_t TaskStatsInfoGetter::getCurrentTID()
 {
-    /// This call is always successful. - man gettid
-    return static_cast<pid_t>(syscall(SYS_gettid));
+    if (!current_tid)
+        current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
+
+    return current_tid;
 }

@@ -65,7 +65,7 @@ public:
         if (arguments.size() >= 1)
         {
             const auto & argument = arguments[0];
-            if (!isInteger(argument.type) || !isColumnConst(*argument.column))
+            if (!isInteger(argument.type) || !argument.column || !isColumnConst(*argument.column))
                 throw Exception("Illegal type " + argument.type->getName() +
                                 " of 0" +
                                 " argument of function " + getName() +
@@ -81,13 +81,9 @@ public:
     void executeImpl(Block & block, const ColumnNumbers & /*arguments*/, size_t result, size_t input_rows_count) override
     {
         auto & result_col = block.getByPosition(result);
-        UInt32 scale = DataTypeDateTime64::default_scale;
-        if (const auto * dt64 = assert_cast<const DataTypeDateTime64 *>(result_col.type.get()))
-        {
-            scale = dt64->getScale();
-        }
+        const UInt32 scale = assert_cast<const DataTypeDateTime64 *>(result_col.type.get())->getScale();
 
-        result_col.column = DataTypeDateTime64(scale).createColumnConst(input_rows_count, nowSubsecond(scale));
+        result_col.column = result_col.type->createColumnConst(input_rows_count, nowSubsecond(scale));
     }
 };

@@ -9,6 +9,7 @@
 #include <DataTypes/DataTypeSet.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeFunction.h>
+#include <DataTypes/DataTypeString.h>
 #include <DataTypes/DataTypeTuple.h>
 #include <DataTypes/DataTypeLowCardinality.h>
 #include <DataTypes/FieldToDataType.h>
@@ -37,6 +38,7 @@
 #include <Interpreters/convertFieldToType.h>
 #include <Interpreters/interpretSubquery.h>
 #include <Interpreters/DatabaseAndTableWithAlias.h>
+#include <Interpreters/IdentifierSemantic.h>
 
 namespace DB
 {
@@ -392,6 +394,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
             auto child_column_name = child->getColumnName();
 
             const auto * lambda = child->as<ASTFunction>();
+            const auto * identifier = child->as<ASTIdentifier>();
             if (lambda && lambda->name == "lambda")
             {
                 /// If the argument is a lambda expression, just remember its approximate type.
@@ -435,6 +438,23 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
                 argument_types.push_back(column.type);
                 argument_names.push_back(column.name);
             }
+            else if (identifier && node.name == "joinGet" && arg == 0)
+            {
+                String database_name;
+                String table_name;
+                std::tie(database_name, table_name) = IdentifierSemantic::extractDatabaseAndTable(*identifier);
+                if (database_name.empty())
+                    database_name = data.context.getCurrentDatabase();
+                auto column_string = ColumnString::create();
+                column_string->insert(database_name + "." + table_name);
+                ColumnWithTypeAndName column(
+                    ColumnConst::create(std::move(column_string), 1),
+                    std::make_shared<DataTypeString>(),
+                    getUniqueName(data.getSampleBlock(), "__joinGet"));
+                data.addAction(ExpressionAction::addColumn(column));
+                argument_types.push_back(column.type);
+                argument_names.push_back(column.name);
+            }
             else
             {
                 /// If the argument is not a lambda expression, call it recursively and find out its type.

@@ -42,6 +42,14 @@ void MarkTableIdentifiersMatcher::visit(const ASTFunction & func, ASTPtr &, Data
             if (!data.aliases.count(*opt_name))
                 setIdentifierSpecial(ast);
     }
+
+    // first argument of joinGet can be a table identifier
+    if (func.name == "joinGet")
+    {
+        auto & ast = func.arguments->children.at(0);
+        if (auto opt_name = tryGetIdentifierName(ast))
+            setIdentifierSpecial(ast);
+    }
 }
 
 }

@@ -16,6 +16,8 @@
 #include <Parsers/ASTSubquery.h>
 #include <Parsers/ASTIdentifier.h>
+
+#include <cassert>
 
 namespace DB
 {
@@ -1095,10 +1097,14 @@ bool KeyCondition::mayBeTrueInParallelogram(const std::vector<Range> & parallelo
         }
         else if (element.function == RPNElement::FUNCTION_NOT)
         {
+            assert(!rpn_stack.empty());
+
             rpn_stack.back() = !rpn_stack.back();
         }
         else if (element.function == RPNElement::FUNCTION_AND)
         {
+            assert(!rpn_stack.empty());
+
             auto arg1 = rpn_stack.back();
             rpn_stack.pop_back();
             auto arg2 = rpn_stack.back();
@@ -1106,6 +1112,8 @@ bool KeyCondition::mayBeTrueInParallelogram(const std::vector<Range> & parallelo
         }
         else if (element.function == RPNElement::FUNCTION_OR)
         {
+            assert(!rpn_stack.empty());
+
             auto arg1 = rpn_stack.back();
             rpn_stack.pop_back();
             auto arg2 = rpn_stack.back();
@@ -1124,7 +1132,7 @@ bool KeyCondition::mayBeTrueInParallelogram(const std::vector<Range> & parallelo
     }
 
     if (rpn_stack.size() != 1)
-        throw Exception("Unexpected stack size in KeyCondition::mayBeTrueInRange", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Unexpected stack size in KeyCondition::mayBeTrueInParallelogram", ErrorCodes::LOGICAL_ERROR);
 
     return rpn_stack[0].can_be_true;
 }
@@ -1223,6 +1231,8 @@ bool KeyCondition::alwaysUnknownOrTrue() const
         }
         else if (element.function == RPNElement::FUNCTION_AND)
         {
+            assert(!rpn_stack.empty());
+
             auto arg1 = rpn_stack.back();
             rpn_stack.pop_back();
             auto arg2 = rpn_stack.back();
@@ -1230,6 +1240,8 @@ bool KeyCondition::alwaysUnknownOrTrue() const
         }
         else if (element.function == RPNElement::FUNCTION_OR)
        {
+            assert(!rpn_stack.empty());
+
             auto arg1 = rpn_stack.back();
             rpn_stack.pop_back();
             auto arg2 = rpn_stack.back();
@@ -1239,6 +1251,9 @@ bool KeyCondition::alwaysUnknownOrTrue() const
             throw Exception("Unexpected function type in KeyCondition::RPNElement", ErrorCodes::LOGICAL_ERROR);
     }
 
+    if (rpn_stack.size() != 1)
+        throw Exception("Unexpected stack size in KeyCondition::alwaysUnknownOrTrue", ErrorCodes::LOGICAL_ERROR);
+
     return rpn_stack[0];
 }

@@ -21,7 +21,7 @@ $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d
 # Wait for it to be replaced
 wait
 
-${CLICKHOUSE_CLIENT} --user=readonly --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' &
+${CLICKHOUSE_CLIENT_BINARY} --user=readonly --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' &
 wait_for_query_to_start '42'
 
 # Trying to run another query with the same query_id

@@ -9,6 +9,14 @@ def [1,2] 2
 abc [0] 1
 --------joinGet--------
 abc
 def
 \N
+abc
+def
+abc
+def

@@ -37,6 +37,13 @@ SELECT '';
 SELECT joinGet('join_any_left_null', 's', number) FROM numbers(3);
 SELECT '';
+
+-- Using identifier as the first argument
+SELECT joinGet(join_any_left, 's', number) FROM numbers(3);
+SELECT '';
+SELECT joinGet(test.join_any_left_null, 's', number) FROM numbers(3);
+SELECT '';
+
 CREATE TABLE test.join_string_key (s String, x Array(UInt8), k UInt64) ENGINE = Join(ANY, LEFT, s);
 INSERT INTO test.join_string_key VALUES ('abc', [0], 1), ('def', [1, 2], 2);
 SELECT joinGet('join_string_key', 'x', 'abc'), joinGet('join_string_key', 'k', 'abc');

@@ -6,7 +6,15 @@ SELECT CAST(1 as DateTime64('abc')); -- { serverError 43 } # Invalid scale param
 SELECT CAST(1 as DateTime64(100)); -- { serverError 69 } # too big scale
 SELECT CAST(1 as DateTime64(-1)); -- { serverError 43 } # signed scale parameter type
 SELECT CAST(1 as DateTime64(3, 'qqq')); -- { serverError 1000 } # invalid timezone
+
+SELECT toDateTime64('2019-09-16 19:20:11.234', 'abc'); -- { serverError 43 } # invalid scale
+SELECT toDateTime64('2019-09-16 19:20:11.234', 100); -- { serverError 69 } # too big scale
+SELECT toDateTime64('2019-09-16 19:20:11.234', 3, 'qqq'); -- { serverError 1000 } # invalid timezone
+
+SELECT ignore(now64(gccMurmurHash())); -- { serverError 43 } # Illegal argument type
+SELECT ignore(now64('abcd')); -- { serverError 43 } # Illegal argument type
+SELECT ignore(now64(number)) FROM system.numbers LIMIT 10; -- { serverError 43 } # Illegal argument type
 
 SELECT toDateTime64('2019-09-16 19:20:11', 3, 'UTC'); -- this now works OK and produces timestamp with no subsecond part
 
 CREATE TABLE A(t DateTime64(3, 'UTC')) ENGINE = MergeTree() ORDER BY t;

@@ -13,7 +13,7 @@ cd build/build_docker
 ccache --show-stats ||:
 ccache --zero-stats ||:
 rm -f CMakeCache.txt
-cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS
+cmake .. -LA -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSANITIZE=$SANITIZER $CMAKE_FLAGS
 ninja
 ccache --show-stats ||:
 mv ./dbms/programs/clickhouse* /output