Merge branch 'master' of github.com:ClickHouse/ClickHouse

BayoNet 2019-11-12 12:42:57 +03:00
commit 03b5225aa8
28 changed files with 139 additions and 137 deletions

View File

@@ -219,7 +219,8 @@ public:
        return std::make_shared<DataTypeUInt64>();
    }

-    void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
+    /// ALWAYS_INLINE is required to have better code layout for uniqHLL12 function
+    void ALWAYS_INLINE add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        detail::OneAdder<T, Data>::add(this->data(place), *columns[0], row_num);
    }
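For context: ALWAYS_INLINE here is the usual forced-inlining attribute macro. A minimal sketch of the idea, assuming the GCC/Clang attribute (the macro's real definition lives in a common header not shown in this diff):

```cpp
// Sketch only: assumed definition of an ALWAYS_INLINE-style macro.
#if defined(__GNUC__) || defined(__clang__)
    #define ALWAYS_INLINE __attribute__((__always_inline__))
#else
    #define ALWAYS_INLINE
#endif

// Forcing inlining keeps the per-row `add` hot path free of call overhead,
// which is what "better code layout" refers to above.
struct Adder
{
    unsigned long long sum = 0;
    void ALWAYS_INLINE add(unsigned long long x) { sum += x; }
};
```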

View File

@@ -48,7 +48,8 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData
    }

    /// threshold - for how many elements there is room in a `data`.
-    void insert(T x, UInt8 threshold)
+    /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function
+    void ALWAYS_INLINE insert(T x, UInt8 threshold)
    {
        /// The state is already full - nothing needs to be done.
        if (count > threshold)
@@ -100,7 +101,8 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData
        rb.read(reinterpret_cast<char *>(data), count * sizeof(data[0]));
    }

-    void add(const IColumn & column, size_t row_num, UInt8 threshold)
+    /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function
+    void ALWAYS_INLINE add(const IColumn & column, size_t row_num, UInt8 threshold)
    {
        insert(assert_cast<const ColumnVector<T> &>(column).getData()[row_num], threshold);
    }
@@ -111,7 +113,8 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData
template <>
struct AggregateFunctionUniqUpToData<String> : AggregateFunctionUniqUpToData<UInt64>
{
-    void add(const IColumn & column, size_t row_num, UInt8 threshold)
+    /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function
+    void ALWAYS_INLINE add(const IColumn & column, size_t row_num, UInt8 threshold)
    {
        /// Keep in mind that calculations are approximate.
        StringRef value = column.getDataAt(row_num);
@@ -122,7 +125,8 @@ struct AggregateFunctionUniqUpToData<String> : AggregateFunctionUniqUpToData<UIn
template <>
struct AggregateFunctionUniqUpToData<UInt128> : AggregateFunctionUniqUpToData<UInt64>
{
-    void add(const IColumn & column, size_t row_num, UInt8 threshold)
+    /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function
+    void ALWAYS_INLINE add(const IColumn & column, size_t row_num, UInt8 threshold)
    {
        UInt128 value = assert_cast<const ColumnVector<UInt128> &>(column).getData()[row_num];
        insert(sipHash64(value), threshold);
@@ -155,7 +159,8 @@ public:
        return std::make_shared<DataTypeUInt64>();
    }

-    void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
+    /// ALWAYS_INLINE is required to have better code layout for uniqUpTo function
+    void ALWAYS_INLINE add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        this->data(place).add(*columns[0], row_num, threshold);
    }
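Behaviorally, uniqUpTo(N) counts distinct values exactly while at most N have been seen and saturates at N + 1 afterwards. A self-contained sketch of that contract (simplified; the real state is the packed struct with inline storage shown in the hunks above):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Simplified model of AggregateFunctionUniqUpToData: exact distinct count
// up to `threshold`, saturated at threshold + 1 ("more than N") afterwards.
struct UniqUpToSketch
{
    std::vector<uint64_t> data;
    uint8_t count = 0;

    void insert(uint64_t x, uint8_t threshold)
    {
        if (count > threshold)   /// The state is already full - nothing needs to be done.
            return;
        if (std::find(data.begin(), data.end(), x) != data.end())
            return;              /// Already seen.
        data.push_back(x);
        ++count;                 /// May become threshold + 1, i.e. saturated.
    }
};
```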

View File

@@ -119,56 +119,34 @@ public:
      */
    virtual bool isState() const { return false; }

-    using AddFunc = void (*)(const IAggregateFunction *, AggregateDataPtr, const IColumn **, size_t, Arena *);
-
-    /** Contains a loop with calls to "add" function. You can collect arguments into array "places"
-      * and do a single call to "addBatch" for devirtualization and inlining. When offsets is not
-      * null, behave like AddBatchArrayFunc (it's used to work around unknown regressions).
-      */
-    using AddBatchFunc = void (*)(
-        const IAggregateFunction *,
-        size_t batch_size,
-        AggregateDataPtr * places,
-        size_t place_offset,
-        const IColumn ** columns,
-        const UInt64 * offsets,
-        Arena * arena);
-
-    /** The same for single place.
-      */
-    using AddBatchSinglePlaceFunc
-        = void (*)(const IAggregateFunction *, size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena);
-
-    /** In addition to the above method, this variant accepts an array of "offsets" which allows
-      * collecting multiple rows of arguments into array "places" as long as they are between
-      * offsets[i-1] and offsets[i]. It is used for arrayReduce and might be used generally to
-      * break data dependency when array "places" contains a large number of same values
-      * consecutively.
-      */
-    using AddBatchArrayFunc = void (*)(
-        const IAggregateFunction *,
-        size_t batch_size,
-        AggregateDataPtr * places,
-        size_t place_offset,
-        const IColumn ** columns,
-        const UInt64 * offsets,
-        Arena * arena);
-
-    struct AddFuncs
-    {
-        AddFunc add;
-        AddBatchFunc add_batch;
-        AddBatchSinglePlaceFunc add_batch_single_place;
-        AddBatchArrayFunc add_batch_array;
-    };
-
    /** The inner loop that uses the function pointer is better than using the virtual function.
      * The reason is that in the case of virtual functions GCC 5.1.2 generates code,
      * which, at each iteration of the loop, reloads the function address (the offset value in the virtual function table) from memory to the register.
      * This gives a performance drop on simple queries around 12%.
      * After the appearance of better compilers, the code can be removed.
      */
-    virtual AddFuncs getAddressOfAddFunctions() const = 0;
+    using AddFunc = void (*)(const IAggregateFunction *, AggregateDataPtr, const IColumn **, size_t, Arena *);
+    virtual AddFunc getAddressOfAddFunction() const = 0;
+
+    /** Contains a loop with calls to "add" function. You can collect arguments into array "places"
+      * and do a single call to "addBatch" for devirtualization and inlining.
+      */
+    virtual void
+    addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena)
+        const = 0;
+
+    /** The same for single place.
+      */
+    virtual void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const = 0;
+
+    /** In addition to addBatch, this method collects multiple rows of arguments into array "places"
+      * as long as they are between offsets[i-1] and offsets[i]. This is used for arrayReduce and
+      * -Array combinator. It might also be used generally to break data dependency when array
+      * "places" contains a large number of same values consecutively.
+      */
+    virtual void
+    addBatchArray(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
+        const = 0;

    /** This is used for runtime code generation to determine, which header files to include in generated source.
      * Always implement it as
@@ -195,58 +173,37 @@ private:
        static_cast<const Derived &>(*that).add(place, columns, row_num, arena);
    }

-    static void addBatch(
-        const IAggregateFunction * that,
-        size_t batch_size,
-        AggregateDataPtr * places,
-        size_t place_offset,
-        const IColumn ** columns,
-        const UInt64 * offsets,
-        Arena * arena)
-    {
-        if (offsets)
-        {
-            size_t current_offset = 0;
-            for (size_t i = 0; i < batch_size; ++i)
-            {
-                size_t next_offset = offsets[i];
-                for (size_t j = current_offset; j < next_offset; ++j)
-                    static_cast<const Derived *>(that)->add(places[i] + place_offset, columns, j, arena);
-                current_offset = next_offset;
-            }
-        }
-        else
-            for (size_t i = 0; i < batch_size; ++i)
-                static_cast<const Derived *>(that)->add(places[i] + place_offset, columns, i, arena);
-    }
-
-    static void
-    addBatchSinglePlaceFree(const IAggregateFunction * that, size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena)
-    {
-        for (size_t i = 0; i < batch_size; ++i)
-            static_cast<const Derived *>(that)->add(place, columns, i, arena);
-    }
-
-    /// TODO: We cannot use this function directly as it slows down aggregate functions like uniqCombined due to unknown reasons.
-    static void addBatchArrayFree(const IAggregateFunction * that,
-        size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
-    {
-        size_t current_offset = 0;
-        for (size_t i = 0; i < batch_size; ++i)
-        {
-            size_t next_offset = offsets[i];
-            for (size_t j = current_offset; j < next_offset; ++j)
-                static_cast<const Derived *>(that)->add(places[i] + place_offset, columns, j, arena);
-            current_offset = next_offset;
-        }
-    }
-
public:
    IAggregateFunctionHelper(const DataTypes & argument_types_, const Array & parameters_)
        : IAggregateFunction(argument_types_, parameters_) {}

-    /// If we return addBatchArrayFree instead of nullptr, it leads to regression.
-    AddFuncs getAddressOfAddFunctions() const override { return {&addFree, &addBatch, &addBatchSinglePlaceFree, nullptr}; }
+    AddFunc getAddressOfAddFunction() const override { return &addFree; }
+
+    void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const override
+    {
+        for (size_t i = 0; i < batch_size; ++i)
+            static_cast<const Derived *>(this)->add(places[i] + place_offset, columns, i, arena);
+    }
+
+    void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const override
+    {
+        for (size_t i = 0; i < batch_size; ++i)
+            static_cast<const Derived *>(this)->add(place, columns, i, arena);
+    }
+
+    void addBatchArray(
+        size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena)
+        const override
+    {
+        size_t current_offset = 0;
+        for (size_t i = 0; i < batch_size; ++i)
+        {
+            size_t next_offset = offsets[i];
+            for (size_t j = current_offset; j < next_offset; ++j)
+                static_cast<const Derived *>(this)->add(places[i] + place_offset, columns, j, arena);
+            current_offset = next_offset;
+        }
+    }
};
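The replaced AddFuncs table and the new virtual addBatch family solve the same problem: one dispatch per batch instead of one per row, so the statically typed Derived::add can be inlined inside the loop. A minimal self-contained sketch of the CRTP pattern used here (toy types, not the real interfaces):

```cpp
#include <cstddef>

// Toy interface: one virtual call per batch instead of one per row.
struct IFunc
{
    virtual ~IFunc() = default;
    virtual void add(long & state, long value) const = 0;
    virtual void addBatch(long & state, const long * values, size_t n) const = 0;
};

// CRTP helper: inside the loop the callee is statically known, so the
// compiler can devirtualize and inline Derived::add.
template <typename Derived>
struct FuncHelper : IFunc
{
    void addBatch(long & state, const long * values, size_t n) const override
    {
        for (size_t i = 0; i < n; ++i)
            static_cast<const Derived *>(this)->add(state, values[i]);
    }
};

struct Sum : FuncHelper<Sum>
{
    void add(long & state, long value) const override { state += value; }
};
```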

View File

@@ -293,7 +293,8 @@ private:
public:
    using value_type = Value;

-    void insert(Value value)
+    /// ALWAYS_INLINE is required to have better code layout for uniqCombined function
+    void ALWAYS_INLINE insert(Value value)
    {
        HashValueType hash = getHash(value);
@@ -420,7 +421,8 @@ private:
    }

    /// Update maximum rank for current bucket.
-    void update(HashValueType bucket, UInt8 rank)
+    /// ALWAYS_INLINE is required to have better code layout for uniqCombined function
+    void ALWAYS_INLINE update(HashValueType bucket, UInt8 rank)
    {
        typename RankStore::Locus content = rank_store[bucket];
        UInt8 cur_rank = static_cast<UInt8>(content);

View File

@@ -56,7 +56,8 @@ public:
        delete large;
    }

-    void insert(Key value)
+    /// ALWAYS_INLINE is required to have better code layout for uniqHLL12 function
+    void ALWAYS_INLINE insert(Key value)
    {
        if (!isLarge())
        {

View File

@@ -83,7 +83,7 @@ private:
    SimpleAggregateDescription(const AggregateFunctionPtr & function_, const size_t column_number_) : function(function_), column_number(column_number_)
    {
-        add_function = function->getAddressOfAddFunctions().add;
+        add_function = function->getAddressOfAddFunction();
        state.reset(function->sizeOfData(), function->alignOfData());
    }
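The pattern in this constructor: fetch the add-function pointer once, then push rows through it, so the per-row path pays no virtual dispatch. Schematically (hypothetical simplified types, not the real ClickHouse ones):

```cpp
// Hypothetical sketch of the cached function-pointer call pattern.
using AddFunc = void (*)(const void * func, long & state, long value);

struct SimpleDescriptionSketch
{
    const void * function = nullptr;
    AddFunc add_function = nullptr;   // cached once, e.g. in the constructor

    void addRow(long & state, long value) const
    {
        add_function(function, state, value);  // direct call, no vtable load per row
    }
};
```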

View File

@@ -84,7 +84,7 @@ private:
    void init(const char * function_name, const DataTypes & argument_types)
    {
        function = AggregateFunctionFactory::instance().get(function_name, argument_types);
-        add_function = function->getAddressOfAddFunctions().add;
+        add_function = function->getAddressOfAddFunction();
        state.reset(function->sizeOfData(), function->alignOfData());
    }

View File

@@ -183,8 +183,7 @@ void FunctionArrayReduce::executeImpl(Block & block, const ColumnNumbers & argum
        while (auto func = typeid_cast<AggregateFunctionState *>(that))
            that = func->getNestedFunction().get();

-        that->getAddressOfAddFunctions().add_batch(
-            that, input_rows_count, places.data(), 0, aggregate_arguments, offsets->data(), arena.get());
+        that->addBatchArray(input_rows_count, places.data(), 0, aggregate_arguments, offsets->data(), arena.get());
    }

    for (size_t i = 0; i < input_rows_count; ++i)

View File

@@ -138,15 +138,9 @@ public:
        Int64 length_value = 0;

        if (column_start_const)
-        {
            start_value = column_start_const->getInt(0);
-        }
        if (column_length_const)
-        {
            length_value = column_length_const->getInt(0);
-            if (length_value < 0)
-                throw Exception("Third argument provided for function substring could not be negative.", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
-        }

        if constexpr (is_utf8)
        {

View File

@@ -452,7 +452,7 @@ void NO_INLINE Aggregator::executeImplCase(

        /// Add values to the aggregate functions.
        for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
-            (*inst->funcs.add)(inst->that, value + inst->state_offset, inst->arguments, i, aggregates_pool);
+            (*inst->func)(inst->that, value + inst->state_offset, inst->arguments, i, aggregates_pool);
    }
}
@@ -495,8 +495,10 @@ void NO_INLINE Aggregator::executeImplBatch(
    /// Add values to the aggregate functions.
    for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
    {
-        (*inst->batch_funcs.add_batch)(
-            inst->batch_that, rows, places.data(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool);
+        if (inst->offsets)
+            inst->batch_that->addBatchArray(rows, places.data(), inst->state_offset, inst->batch_arguments, inst->offsets, aggregates_pool);
+        else
+            inst->batch_that->addBatch(rows, places.data(), inst->state_offset, inst->batch_arguments, aggregates_pool);
    }
}
@@ -511,10 +513,10 @@ void NO_INLINE Aggregator::executeWithoutKeyImpl(
    for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
    {
        if (inst->offsets)
-            (*inst->batch_funcs.add_batch_single_place)(
-                inst->batch_that, inst->offsets[static_cast<ssize_t>(rows - 1)], res + inst->state_offset, inst->batch_arguments, arena);
+            inst->batch_that->addBatchSinglePlace(
+                inst->offsets[static_cast<ssize_t>(rows - 1)], res + inst->state_offset, inst->batch_arguments, arena);
        else
-            (*inst->batch_funcs.add_batch_single_place)(inst->batch_that, rows, res + inst->state_offset, inst->batch_arguments, arena);
+            inst->batch_that->addBatchSinglePlace(rows, res + inst->state_offset, inst->batch_arguments, arena);
    }
}
@@ -598,7 +600,7 @@ bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedData
        while (auto func = typeid_cast<const AggregateFunctionState *>(that))
            that = func->getNestedFunction().get();
        aggregate_functions_instructions[i].that = that;
-        aggregate_functions_instructions[i].funcs = that->getAddressOfAddFunctions();
+        aggregate_functions_instructions[i].func = that->getAddressOfAddFunction();

        if (auto func = typeid_cast<const AggregateFunctionArray *>(that))
        {
@@ -615,7 +617,6 @@ bool Aggregator::executeOnBlock(Columns columns, UInt64 num_rows, AggregatedData
        aggregate_functions_instructions[i].batch_arguments = aggregate_columns[i].data();

        aggregate_functions_instructions[i].batch_that = that;
-        aggregate_functions_instructions[i].batch_funcs = that->getAddressOfAddFunctions();
    }

    if (isCancelled())

View File

@@ -1005,11 +1005,10 @@ protected:
    struct AggregateFunctionInstruction
    {
        const IAggregateFunction * that;
-        IAggregateFunction::AddFuncs funcs;
+        IAggregateFunction::AddFunc func;
        size_t state_offset;
        const IColumn ** arguments;
        const IAggregateFunction * batch_that;
-        IAggregateFunction::AddFuncs batch_funcs;
        const IColumn ** batch_arguments;
        const UInt64 * offsets = nullptr;
    };

View File

@@ -11,6 +11,7 @@
 #include <TableFunctions/TableFunctionFactory.h>
 #include <common/logger_useful.h>
+#include <DataStreams/ConvertingBlockInputStream.h>

 namespace ProfileEvents
@@ -66,7 +67,7 @@ SelectStreamFactory::SelectStreamFactory(
namespace
{

-BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Context & context, QueryProcessingStage::Enum processed_stage)
+BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Block & header, const Context & context, QueryProcessingStage::Enum processed_stage)
{
    checkStackSize();
@@ -83,7 +84,7 @@ BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Context &
      */
    /// return std::make_shared<MaterializingBlockInputStream>(stream);

-    return stream;
+    return std::make_shared<ConvertingBlockInputStream>(context, stream, header, ConvertingBlockInputStream::MatchColumnsMode::Name);
}

static String formattedAST(const ASTPtr & ast)
@@ -109,7 +110,7 @@ void SelectStreamFactory::createForShard(
    auto emplace_local_stream = [&]()
    {
-        res.emplace_back(createLocalStream(modified_query_ast, context, processed_stage));
+        res.emplace_back(createLocalStream(modified_query_ast, header, context, processed_stage));
    };

    String modified_query = formattedAST(modified_query_ast);
@@ -249,7 +250,7 @@ void SelectStreamFactory::createForShard(
        }

        if (try_results.empty() || local_delay < max_remote_delay)
-            return createLocalStream(modified_query_ast, context, stage);
+            return createLocalStream(modified_query_ast, header, context, stage);
        else
        {
            std::vector<IConnectionPool::Entry> connections;
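The net effect of this file's changes: a stream produced locally is now wrapped so its output header matches the header expected by the distributed query, with columns matched by name (MatchColumnsMode::Name). A toy sketch of by-name matching with stand-in types (not the real ConvertingBlockInputStream API):

```cpp
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Stand-ins for Block / column: just names with payloads.
struct Column { std::string name; std::vector<long> data; };
using Block = std::vector<Column>;

// Reorder `produced` to follow `header`, matching columns by name.
Block convertByName(const Block & produced, const Block & header)
{
    std::map<std::string, const Column *> by_name;
    for (const auto & col : produced)
        by_name[col.name] = &col;

    Block result;
    for (const auto & expected : header)
    {
        auto it = by_name.find(expected.name);
        if (it == by_name.end())
            throw std::runtime_error("Missing column: " + expected.name);
        result.push_back(*it->second);
    }
    return result;
}
```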

View File

@@ -1814,12 +1814,12 @@ void InterpreterSelectQuery::executeFetchColumns(
        }

        /// Pin sources for merge tree tables.
-        bool pin_sources = dynamic_cast<const MergeTreeData *>(storage.get()) != nullptr;
-        if (pin_sources)
-        {
-            for (size_t i = 0; i < pipes.size(); ++i)
-                pipes[i].pinSources(i);
-        }
+//        bool pin_sources = dynamic_cast<const MergeTreeData *>(storage.get()) != nullptr;
+//        if (pin_sources)
+//        {
+//            for (size_t i = 0; i < pipes.size(); ++i)
+//                pipes[i].pinSources(i);
+//        }

        pipeline.init(std::move(pipes));
    }

View File

@@ -10,6 +10,7 @@
 #include <boost/lockfree/queue.hpp>
 #include <Common/Stopwatch.h>
 #include <Processors/ISource.h>
+#include <Common/setThreadName.h>

 namespace DB
 {
@@ -750,6 +751,8 @@ void PipelineExecutor::executeImpl(size_t num_threads)
        {
            /// ThreadStatus thread_status;

+            setThreadName("QueryPipelineEx");
+
            if (thread_group)
                CurrentThread::attachTo(thread_group);
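setThreadName presumably wraps the platform thread-naming call; on Linux that is pthread_setname_np, which limits names to 15 characters plus the terminating NUL — note that "QueryPipelineEx" is exactly 15 characters. A sketch under that assumption:

```cpp
#include <pthread.h>
#include <cstring>
#include <stdexcept>

// Assumed shape of a setThreadName helper on Linux: the kernel caps
// thread names at 15 characters + NUL (TASK_COMM_LEN = 16).
static void setThreadNameSketch(const char * name)
{
    if (std::strlen(name) > 15)
        throw std::logic_error("Thread name is too long");

    if (0 != pthread_setname_np(pthread_self(), name))
        throw std::runtime_error("Cannot set thread name");
}
```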

View File

@@ -131,7 +131,7 @@ void FilterTransform::transform(Chunk & chunk)
    size_t first_non_constant_column = num_columns;
    for (size_t i = 0; i < num_columns; ++i)
    {
-        if (!isColumnConst(*columns[i]))
+        if (i != filter_column_position && !isColumnConst(*columns[i]))
        {
            first_non_constant_column = i;
            break;

View File

@@ -105,7 +105,7 @@ namespace
    template <typename BridgeHelperMixin>
    void registerXDBCStorage(StorageFactory & factory, const std::string & name)
    {
-        factory.registerStorage(name, [&name](const StorageFactory::Arguments & args)
+        factory.registerStorage(name, [name](const StorageFactory::Arguments & args)
        {
            ASTs & engine_args = args.engine_args;
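The one-character capture change above fixes a lifetime bug: `name` is a parameter of registerXDBCStorage, and the factory stores the lambda long after that function returns, so `[&name]` leaves a dangling reference while `[name]` gives the lambda its own copy. A minimal reproduction of the hazard (hypothetical registry, not the real StorageFactory):

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical registry that stores callbacks for later invocation,
// the way StorageFactory stores creator lambdas.
std::vector<std::function<void()>> registry;

void registerStorage(const std::string & name)
{
    // BAD:  [&name] would capture the parameter by reference; it dangles
    //       as soon as registerStorage() returns (undefined behavior).
    // GOOD: [name] copies the string into the closure.
    registry.push_back([name] { std::cout << "creating " << name << '\n'; });
}

int main()
{
    registerStorage("ODBC");
    registerStorage("JDBC");
    for (auto & create : registry)
        create();  // safe: each closure owns its copy of `name`
}
```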

View File

@@ -502,13 +502,13 @@ if __name__ == '__main__':
    parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
    parser.add_argument('--skip', nargs='+', help="Skip these tests")
    parser.add_argument('--no-long', action='store_false', dest='no_long', help='Do not run long tests')
+    parser.add_argument('--client-option', nargs='+', help='Specify additional client argument')

    group=parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--zookeeper', action='store_true', default=None, dest='zookeeper', help='Run zookeeper related tests')
    group.add_argument('--no-zookeeper', action='store_false', default=None, dest='zookeeper', help='Do not run zookeeper related tests')
    group=parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--shard', action='store_true', default=None, dest='shard', help='Run sharding related tests (required to clickhouse-server listen 127.0.0.2 127.0.0.3)')
    group.add_argument('--no-shard', action='store_false', default=None, dest='shard', help='Do not run shard related tests')
-    group.add_argument('--client-option', nargs='+', help='Specify additional client argument')

    args = parser.parse_args()

View File

@@ -54,3 +54,4 @@
 2018-08-01 -1
 2018-08-01 1
 2018-08-01 1
+2018-08-01 1

View File

@@ -80,7 +80,7 @@ SELECT '--------------Implicit type conversion------------';
 SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') ORDER BY value;
 SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE date = '2018-08-01' ORDER BY value;
 SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE _table = 'test_u64_distributed' ORDER BY value;
-SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE value = 1; -- { serverError 171 }
+SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE value = 1;

 DROP TABLE IF EXISTS test_u64_local;
 DROP TABLE IF EXISTS test_s64_local;

View File

@@ -1,4 +1,4 @@
 SELECT substring('hello', []); -- { serverError 43 }
 SELECT substring('hello', 1, []); -- { serverError 43 }
-SELECT substring(materialize('hello'), -1, -1); -- { serverError 69 }
+SELECT substring(materialize('hello'), -1, -1);
 SELECT substring(materialize('hello'), 0); -- { serverError 135 }

View File

@@ -6,39 +6,39 @@ SELECT 'remote(system.one)';
 SELECT * FROM remote('127.0.0.1', system.one);
 SELECT * FROM remote('127.0.0.{1,2}', system.one);
 SELECT _shard_num, * FROM remote('127.0.0.1', system.one);
-SELECT _shard_num, * FROM remote('127.0.0.{1,2}', system.one);
+SELECT _shard_num, * FROM remote('127.0.0.{1,2}', system.one) order by _shard_num;
 SELECT _shard_num, * FROM remote('127.0.0.{1,2}', system.one) WHERE _shard_num = 1;

 -- dist_1 using test_shard_localhost
 SELECT 'dist_1';
 CREATE TABLE mem1 (key Int) Engine=Memory();
 CREATE TABLE dist_1 AS mem1 Engine=Distributed(test_shard_localhost, currentDatabase(), mem1);
-SELECT _shard_num FROM dist_1;
+SELECT _shard_num FROM dist_1 order by _shard_num;

 INSERT INTO mem1 VALUES (10);
-SELECT _shard_num FROM dist_1;
-SELECT _shard_num, key FROM dist_1;
+SELECT _shard_num FROM dist_1 order by _shard_num;
+SELECT _shard_num, key FROM dist_1 order by _shard_num;
 SELECT key FROM dist_1;

 INSERT INTO dist_1 VALUES (20);
-SELECT _shard_num FROM dist_1;
-SELECT _shard_num, key FROM dist_1;
+SELECT _shard_num FROM dist_1 order by _shard_num;
+SELECT _shard_num, key FROM dist_1 order by _shard_num, key;
 SELECT key FROM dist_1;

 -- dist_2 using test_cluster_two_shards_localhost
 SELECT 'dist_2';
 CREATE TABLE mem2 (key Int) Engine=Memory();
 CREATE TABLE dist_2 AS mem2 Engine=Distributed(test_cluster_two_shards_localhost, currentDatabase(), mem2);
-SELECT _shard_num FROM dist_2;
+SELECT _shard_num FROM dist_2 order by _shard_num;

 INSERT INTO mem2 VALUES (100);
-SELECT _shard_num FROM dist_2;
-SELECT _shard_num, key FROM dist_2;
+SELECT _shard_num FROM dist_2 order by _shard_num;
+SELECT _shard_num, key FROM dist_2 order by _shard_num, key;
 SELECT key FROM dist_2;

 -- multiple _shard_num
 SELECT 'remote(Distributed)';
-SELECT _shard_num, key FROM remote('127.0.0.1', currentDatabase(), dist_2);
+SELECT _shard_num, key FROM remote('127.0.0.1', currentDatabase(), dist_2) order by _shard_num, key;

 -- JOIN system.clusters
 SELECT 'JOIN system.clusters';
@@ -68,4 +68,4 @@ CREATE TABLE mem3 (key Int, _shard_num String) Engine=Memory();
 CREATE TABLE dist_3 AS mem3 Engine=Distributed(test_shard_localhost, currentDatabase(), mem3);
 INSERT INTO mem3 VALUES (100, 'foo');
 SELECT * FROM dist_3;
-SELECT _shard_num, * FROM dist_3;
+SELECT _shard_num, * FROM dist_3 order by _shard_num;

View File

@@ -0,0 +1 @@
+CREATE TABLE default.BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC(\'DSN=pgconn;Database=postgres\', somedb, bannerdict)

View File

@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS BannerDict;
+CREATE TABLE BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC('DSN=pgconn;Database=postgres', bannerdict); -- {serverError 42}
+CREATE TABLE BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC('DSN=pgconn;Database=postgres', somedb, bannerdict);
+SHOW CREATE TABLE BannerDict;
+DROP TABLE IF EXISTS BannerDict;

View File

@@ -0,0 +1,8 @@
+lickhous
+lickhous
+lickhous
+lickhous
+lickhous
+lickhous
+lickhous
+lickhous

View File

@@ -0,0 +1,8 @@
+SELECT substr('clickhouse', 2, -2);
+SELECT substr(materialize('clickhouse'), 2, -2);
+SELECT substr('clickhouse', materialize(2), -2);
+SELECT substr(materialize('clickhouse'), materialize(2), -2);
+SELECT substr('clickhouse', 2, materialize(-2));
+SELECT substr(materialize('clickhouse'), 2, materialize(-2));
+SELECT substr('clickhouse', materialize(2), materialize(-2));
+SELECT substr(materialize('clickhouse'), materialize(2), materialize(-2));

View File

@@ -205,6 +205,17 @@ Result:
 └───────────────────────────┘
 ```

+## javaHashUTF16LE
+
+The same as [JavaHash](#hash_functions-javahash), but for UTF-16LE code points. It assumes the string contains bytes representing UTF-16LE encoded text; if that assumption does not hold, it still returns some result (an exception is thrown only in some cases).
+
+**Example**
+
+```sql
+SELECT javaHashUTF16LE(convertCharset('Hello, world!', 'utf-8', 'utf-16le'))
+```
+
 ## hiveHash {#hash_functions-hivehash}

 Calculates `HiveHash` from a string.