mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-28 02:21:59 +00:00

Miscellaneous [#CLICKHOUSE-2]

This commit is contained in:
parent d5f6c8365e
commit 0bfea2f52c
@@ -338,7 +338,8 @@ protected:
    std::stack<backtrack_info> back_stack;

    /// backtrack if possible
-   const auto do_backtrack = [&] {
+   const auto do_backtrack = [&]
+   {
        while (!back_stack.empty())
        {
            auto & top = back_stack.top();
@@ -210,7 +210,8 @@ private:
    if (sorted)
        return;
    sorted = true;
-   std::sort(samples.begin(), samples.end(), [] (const std::pair<T, UInt32> & lhs, const std::pair<T, UInt32> & rhs) {
+   std::sort(samples.begin(), samples.end(), [] (const std::pair<T, UInt32> & lhs, const std::pair<T, UInt32> & rhs)
+   {
        return lhs.first < rhs.first;
    });
}
@@ -516,10 +516,8 @@ private:
    }

private:
    /// Maximum rank.
    static constexpr int max_rank = sizeof(HashValueType) * 8 - precision + 1;

    /// Rank storage.
    RankStore rank_store;

    /// Expression's denominator for HyperLogLog algorithm.
@@ -62,7 +62,8 @@ public:

    void free(const void * ptr)
    {
-       union {
+       union
+       {
            const void * p_v;
            Block * block;
        };
@@ -74,7 +74,8 @@ void formatIPv6(const unsigned char * src, char *& dst, UInt8 zeroed_tail_bytes_

    for (const auto i : ext::range(0, words.size()))
    {
-       if (words[i] == 0) {
+       if (words[i] == 0)
+       {
            if (cur.base == -1)
                cur.base = i, cur.len = 1;
            else
@@ -30,7 +30,8 @@ TEST(Common, RWLockFIFO_1)
    static thread_local std::random_device rd;
    static thread_local pcg64 gen(rd());

-   auto func = [&] (size_t threads, int round) {
+   auto func = [&] (size_t threads, int round)
+   {
        for (int i = 0; i < cycles; ++i)
        {
            auto type = (std::uniform_int_distribution<>(0, 9)(gen) >= round) ? RWLockFIFO::Read : RWLockFIFO::Write;
@@ -92,7 +93,8 @@ TEST(Common, RWLockFIFO_Recursive)
    static thread_local std::random_device rd;
    static thread_local pcg64 gen(rd());

-   std::thread t1([&] () {
+   std::thread t1([&] ()
+   {
        for (int i = 0; i < 2 * cycles; ++i)
        {
            auto lock = fifo_lock->getLock(RWLockFIFO::Write);
@@ -102,7 +104,8 @@ TEST(Common, RWLockFIFO_Recursive)
        }
    });

-   std::thread t2([&] () {
+   std::thread t2([&] ()
+   {
        for (int i = 0; i < cycles; ++i)
        {
            auto lock1 = fifo_lock->getLock(RWLockFIFO::Read);
@@ -47,7 +47,8 @@ try
    }

    // <defunct> hunting:
-   for (int i = 0; i < 1000; ++i) {
+   for (int i = 0; i < 1000; ++i)
+   {
        auto command = ShellCommand::execute("echo " + std::to_string(i));
        //command->wait(); // now automatic
    }
@@ -66,7 +66,7 @@ std::ostream & operator<<(std::ostream & stream, const IFunction & what)
std::ostream & operator<<(std::ostream & stream, const Block & what)
{
    stream << "Block("
-       << "size = " << what.columns()
+       << "num_columns = " << what.columns()
        << "){" << what.dumpStructure() << "}";
    return stream;
}
@@ -36,7 +36,8 @@ CapnProtoRowInputStream::NestedField split(const Block & header, size_t i)

Field convertNodeToField(capnp::DynamicValue::Reader value)
{
-   switch (value.getType()) {
+   switch (value.getType())
+   {
        case capnp::DynamicValue::UNKNOWN:
            throw Exception("Unknown field type");
        case capnp::DynamicValue::VOID:
@@ -172,8 +173,10 @@ bool CapnProtoRowInputStream::read(MutableColumns & columns)

    for (auto action : actions)
    {
-       switch (action.type) {
-           case Action::READ: {
+       switch (action.type)
+       {
+           case Action::READ:
+           {
                auto & col = columns[action.column];
                Field value = convertNodeToField(stack.back().get(action.field));
                col->insert(value);
@@ -1,27 +0,0 @@
-#pragma once
-
-#include <DataStreams/IBlockOutputStream.h>
-#include <Common/Exception.h>
-
-
-namespace DB
-{
-
-namespace ErrorCodes
-{
-    extern const int CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM;
-}
-
-/** When trying to write blocks to this stream of blocks, throws an exception.
-  * Used where, in general, you need to pass a stream of blocks, but in some cases, it should not be used.
-  */
-class EmptyBlockOutputStream : public IBlockOutputStream
-{
-public:
-    void write(const Block & block) override
-    {
-        throw Exception("Cannot write to EmptyBlockOutputStream", ErrorCodes::CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM);
-    }
-};
-
-}
@@ -1,74 +0,0 @@
-#pragma once
-
-#include <DataStreams/QueueBlockIOStream.h>
-
-
-namespace DB
-{
-
-
-/** Allows you to make several sources from one.
-  * Used for single-pass execution of several queries at once.
-  *
-  * Multiple received sources should be read from different threads!
-  * Uses O(1) RAM (does not buffer all data).
-  * For this, readings from different sources are synchronized:
-  * reading of next block is blocked until all sources have read the current block.
-  */
-class ForkBlockInputStreams : private boost::noncopyable
-{
-public:
-    ForkBlockInputStreams(const BlockInputStreamPtr & source_) : source(source_) {}
-
-    /// Create a source. Call the function as many times as many forked sources you need.
-    BlockInputStreamPtr createInput()
-    {
-        destinations.emplace_back(std::make_shared<QueueBlockIOStream>(1));
-        return destinations.back();
-    }
-
-    /// Before you can read from the sources you have to "run" this construct.
-    void run()
-    {
-        while (1)
-        {
-            if (destinations.empty())
-                return;
-
-            Block block = source->read();
-
-            for (Destinations::iterator it = destinations.begin(); it != destinations.end();)
-            {
-                if ((*it)->isCancelled())
-                {
-                    destinations.erase(it++);
-                }
-                else
-                {
-                    (*it)->write(block);
-                    ++it;
-                }
-            }
-
-            if (!block)
-                return;
-        }
-    }
-
-private:
-    /// From where to read.
-    BlockInputStreamPtr source;
-
-    /** Forked sources.
-      * Made on the basis of a queue of small length.
-      * A block from `source` is put in each queue.
-      */
-    using Destination = std::shared_ptr<QueueBlockIOStream>;
-    using Destinations = std::list<Destination>;
-    Destinations destinations;
-};
-
-using ForkPtr = std::shared_ptr<ForkBlockInputStreams>;
-using Forks = std::vector<ForkPtr>;
-
-}
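Aside: the deleted ForkBlockInputStreams/QueueBlockIOStream pair above fans one block stream out to several consumers that advance in lockstep over short bounded queues. A minimal standalone sketch of that pattern (plain C++ with hypothetical names, not the ClickHouse API; the capacity-1 queue mirrors QueueBlockIOStream(1)):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <optional>
#include <queue>
#include <thread>

/// Bounded blocking queue: push blocks when full, pop blocks when empty.
template <typename T>
class BoundedQueue
{
public:
    explicit BoundedQueue(size_t capacity_) : capacity(capacity_) {}

    void push(T value)
    {
        std::unique_lock<std::mutex> lock(mutex);
        not_full.wait(lock, [this] { return items.size() < capacity; });
        items.push(std::move(value));
        not_empty.notify_one();
    }

    T pop()
    {
        std::unique_lock<std::mutex> lock(mutex);
        not_empty.wait(lock, [this] { return !items.empty(); });
        T value = std::move(items.front());
        items.pop();
        not_full.notify_one();
        return value;
    }

private:
    size_t capacity;
    std::queue<T> items;
    std::mutex mutex;
    std::condition_variable not_empty;
    std::condition_variable not_full;
};

int main()
{
    /// Two forked "inputs" of one source; an empty optional marks end of stream.
    BoundedQueue<std::optional<int>> q1(1), q2(1);

    auto consumer = [](BoundedQueue<std::optional<int>> & queue, const char * name)
    {
        while (auto block = queue.pop())
            std::cout << name << " got " << *block << '\n';
    };

    std::thread t1(consumer, std::ref(q1), "in1");
    std::thread t2(consumer, std::ref(q2), "in2");

    /// The run() loop: each "block" is written to every destination queue, so
    /// producing the next one waits until every consumer has taken the current one.
    for (int block = 0; block < 5; ++block)
    {
        q1.push(block);
        q2.push(block);
    }
    q1.push(std::nullopt);
    q2.push(std::nullopt);

    t1.join();
    t2.join();
}

Because every queue must accept a block before the producer moves on, a slow consumer throttles the whole fork — the O(1)-memory synchronization the deleted header's comment describes.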
@@ -60,9 +60,6 @@ size_t IBlockInputStream::checkDepthImpl(size_t max_depth, size_t level) const


void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t multiplier)
{
-   /// We will not display the wrapper of the block stream in the AsynchronousBlockInputStream in the tree.
-   if (getName() != "Asynchronous")
-   {
        ostr << String(indent, ' ') << getName();
        if (multiplier > 1)
@@ -88,12 +85,6 @@ void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t mult
            }
        }
-   }
-   else
-   {
-       for (BlockInputStreams::iterator it = children.begin(); it != children.end(); ++it)
-           (*it)->dumpTree(ostr, indent, multiplier);
-   }
}


BlockInputStreams IBlockInputStream::getLeaves()
@@ -1,10 +1,20 @@
#include <Parsers/ASTInsertQuery.h>
#include <Interpreters/Context.h>
#include <IO/ConcatReadBuffer.h>
#include <IO/ReadBufferFromMemory.h>
#include <DataStreams/BlockIO.h>
#include <DataStreams/InputStreamFromASTInsertQuery.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}


InputStreamFromASTInsertQuery::InputStreamFromASTInsertQuery(
    const ASTPtr & ast, ReadBuffer & input_buffer_tail_part, const BlockIO & streams, Context & context)
{
@@ -1,4 +1,6 @@
#include <DataStreams/LimitByBlockInputStream.h>
#include <Common/SipHash.h>


namespace DB
{
@@ -1,69 +0,0 @@
-#pragma once
-
-#include <limits>
-
-#include <Common/ConcurrentBoundedQueue.h>
-
-#include <DataStreams/IProfilingBlockInputStream.h>
-#include <DataStreams/IBlockOutputStream.h>
-
-
-namespace DB
-{
-
-
-/** Is both an InputStream and an OutputStream.
-  * When writing, puts the blocks in the queue.
-  * When reading, it takes them out of the queue.
-  * A thread-safe queue is used.
-  * If the queue is empty, the read is blocked.
-  * If the queue is full, the write is blocked.
-  *
-  * Used to temporarily store the result somewhere, and later pass it further.
-  * Also used for synchronization, when you need to make several sources from one
-  * - for single-pass execution of several queries at once.
-  * It can also be used for parallelization: several threads put blocks in the queue, and one - takes out.
-  */
-
-class QueueBlockIOStream : public IProfilingBlockInputStream, public IBlockOutputStream
-{
-public:
-    QueueBlockIOStream(size_t queue_size_ = std::numeric_limits<int>::max())
-        : queue_size(queue_size_), queue(queue_size) {}
-
-    String getName() const override { return "QueueBlockIOStream"; }
-
-    String getID() const override
-    {
-        std::stringstream res;
-        res << this;
-        return res.str();
-    }
-
-    void write(const Block & block) override
-    {
-        queue.push(block);
-    }
-
-    void cancel() override
-    {
-        IProfilingBlockInputStream::cancel();
-        queue.clear();
-    }
-
-protected:
-    Block readImpl() override
-    {
-        Block res;
-        queue.pop(res);
-        return res;
-    }
-
-private:
-    size_t queue_size;
-
-    using Queue = ConcurrentBoundedQueue<Block>;
-    Queue queue;
-};
-
-}
@@ -1,63 +0,0 @@
-#include <set>
-
-#include <DataStreams/glueBlockInputStreams.h>
-
-
-namespace DB
-{
-
-
-using IDsMap = std::map<String, BlockInputStreams>;
-using ForksMap = std::map<String, ForkPtr>;
-
-
-static void createIDsMap(BlockInputStreamPtr & node, IDsMap & ids_map)
-{
-    ids_map[node->getID()].push_back(node);
-
-    BlockInputStreams & children = node->getChildren();
-    for (size_t i = 0, size = children.size(); i < size; ++i)
-        createIDsMap(children[i], ids_map);
-}
-
-
-static void glue(BlockInputStreamPtr & node, IDsMap & ids_map, ForksMap & forks_map)
-{
-    String id = node->getID();
-    if (ids_map.end() != ids_map.find(id) && ids_map[id].size() > 1)
-    {
-        /// Insert a "fork" or use an existing one.
-        if (forks_map.end() == forks_map.find(id))
-        {
-            forks_map[id] = std::make_shared<ForkBlockInputStreams>(node);
-            std::cerr << "Forking at " << id << std::endl;
-        }
-
-        std::cerr << "Replacing node with fork end" << std::endl;
-        node = forks_map[id]->createInput();
-    }
-    else
-    {
-        BlockInputStreams & children = node->getChildren();
-        for (size_t i = 0, size = children.size(); i < size; ++i)
-            glue(children[i], ids_map, forks_map);
-    }
-}
-
-
-void glueBlockInputStreams(BlockInputStreams & inputs, Forks & forks)
-{
-    IDsMap ids_map;
-    for (size_t i = 0, size = inputs.size(); i < size; ++i)
-        createIDsMap(inputs[i], ids_map);
-
-    ForksMap forks_map;
-    for (size_t i = 0, size = inputs.size(); i < size; ++i)
-        glue(inputs[i], ids_map, forks_map);
-
-    for (ForksMap::iterator it = forks_map.begin(); it != forks_map.end(); ++it)
-        forks.push_back(it->second);
-}
-
-
-}
@@ -1,17 +0,0 @@
-#pragma once
-
-#include <DataStreams/ForkBlockInputStreams.h>
-
-
-namespace DB
-{
-
-/** If passed sources (query execution pipelines) have the same parts,
-  * then glues these parts, replacing them with one source and inserting "forks" (multipliers).
-  * This is used for single-pass execution of multiple queries.
-  *
-  * To execute a glued pipeline, all `inputs` and `forks` must be used in different threads.
-  */
-void glueBlockInputStreams(BlockInputStreams & inputs, Forks & forks);
-
-}
@@ -28,9 +28,3 @@ target_link_libraries (union_stream2 dbms)

add_executable (collapsing_sorted_stream collapsing_sorted_stream.cpp ${SRCS})
target_link_libraries (collapsing_sorted_stream dbms)
-
-add_executable (fork_streams fork_streams.cpp ${SRCS})
-target_link_libraries (fork_streams dbms clickhouse_storages_system)
-
-add_executable (glue_streams glue_streams.cpp ${SRCS})
-target_link_libraries (glue_streams dbms)
@@ -1,110 +0,0 @@
-#include <iostream>
-#include <iomanip>
-#include <thread>
-
-#include <IO/WriteBufferFromOStream.h>
-
-#include <Storages/System/StorageSystemNumbers.h>
-
-#include <DataStreams/LimitBlockInputStream.h>
-#include <DataStreams/ExpressionBlockInputStream.h>
-#include <DataStreams/FilterBlockInputStream.h>
-#include <DataStreams/TabSeparatedRowOutputStream.h>
-#include <DataStreams/ForkBlockInputStreams.h>
-#include <DataStreams/copyData.h>
-
-#include <DataTypes/DataTypesNumber.h>
-
-#include <Parsers/ParserSelectQuery.h>
-#include <Parsers/parseQuery.h>
-#include <Parsers/formatAST.h>
-
-#include <Interpreters/ExpressionAnalyzer.h>
-#include <Interpreters/ExpressionActions.h>
-#include <Interpreters/Context.h>
-
-
-void thread1(DB::BlockInputStreamPtr in, DB::BlockOutputStreamPtr out, DB::WriteBuffer & out_buf)
-{
-    while (DB::Block block = in->read())
-    {
-        out->write(block);
-        out_buf.next();
-    }
-}
-
-void thread2(DB::BlockInputStreamPtr in, DB::BlockOutputStreamPtr out, DB::WriteBuffer & out_buf)
-{
-    while (DB::Block block = in->read())
-    {
-        out->write(block);
-        out_buf.next();
-    }
-}
-
-
-int main(int, char **)
-try
-{
-    using namespace DB;
-
-    std::string input = "SELECT number, number % 10000000 == 1";
-
-    ParserSelectQuery parser;
-    ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "");
-
-    formatAST(*ast, std::cerr);
-    std::cerr << std::endl;
-
-    Context context = Context::createGlobal();
-
-    ExpressionAnalyzer analyzer(ast, context, {}, {NameAndTypePair("number", std::make_shared<DataTypeUInt64>())});
-    ExpressionActionsChain chain;
-    analyzer.appendSelect(chain, false);
-    analyzer.appendProjectResult(chain);
-    chain.finalize();
-    ExpressionActionsPtr expression = chain.getLastActions();
-
-    StoragePtr table = StorageSystemNumbers::create("numbers", false);
-
-    Names column_names;
-    column_names.push_back("number");
-
-    QueryProcessingStage::Enum stage;
-
-    BlockInputStreamPtr in = table->read(column_names, {}, context, stage, 8192, 1)[0];
-
-    ForkBlockInputStreams fork(in);
-
-    BlockInputStreamPtr in1 = fork.createInput();
-    BlockInputStreamPtr in2 = fork.createInput();
-
-    in1 = std::make_shared<FilterBlockInputStream>(in1, expression, 1);
-    in1 = std::make_shared<LimitBlockInputStream>(in1, 10, 0);
-
-    in2 = std::make_shared<FilterBlockInputStream>(in2, expression, 1);
-    in2 = std::make_shared<LimitBlockInputStream>(in2, 20, 5);
-
-    Block out_sample = expression->getSampleBlock();
-
-    WriteBufferFromOStream ob1(std::cout);
-    WriteBufferFromOStream ob2(std::cerr);
-
-    BlockOutputStreamPtr out1 = context.getOutputFormat("TabSeparated", ob1, out_sample);
-    BlockOutputStreamPtr out2 = context.getOutputFormat("TabSeparated", ob2, out_sample);
-
-    std::thread thr1(std::bind(thread1, in1, out1, std::ref(ob1)));
-    std::thread thr2(std::bind(thread2, in2, out2, std::ref(ob2)));
-
-    fork.run();
-
-    thr1.join();
-    thr2.join();
-
-    return 0;
-}
-catch (const DB::Exception & e)
-{
-    std::cerr << e.what() << ", " << e.displayText() << std::endl;
-    throw;
-}
@@ -1,98 +0,0 @@
-#include <iostream>
-#include <iomanip>
-
-#include <common/ThreadPool.h>
-
-#include <IO/WriteBufferFromFileDescriptor.h>
-
-#include <Interpreters/Context.h>
-#include <Interpreters/loadMetadata.h>
-#include <Interpreters/executeQuery.h>
-
-#include <DataStreams/glueBlockInputStreams.h>
-
-
-using namespace DB;
-
-
-void inputThread(const BlockInputStreamPtr & in, BlockOutputStreamPtr out, WriteBuffer & wb, std::mutex & mutex)
-{
-    while (Block block = in->read())
-    {
-        std::lock_guard<std::mutex> lock(mutex);
-
-        out->write(block);
-        wb.next();
-    }
-}
-
-void forkThread(ForkPtr fork)
-{
-    fork->run();
-}
-
-
-int main(int, char **)
-try
-{
-    Context context = Context::createGlobal();
-
-    context.setGlobalContext(context);
-    context.setPath("./");
-
-    loadMetadata(context);
-
-    context.setCurrentDatabase("default");
-    context.setSetting("max_threads", UInt64(1));
-
-    BlockIO io1 = executeQuery(
-        "SELECT SearchPhrase, count()"
-        " FROM hits"
-        " WHERE SearchPhrase != ''"
-        " GROUP BY SearchPhrase"
-        " ORDER BY count() DESC"
-        " LIMIT 10",
-        context, false, QueryProcessingStage::Complete);
-
-    BlockIO io2 = executeQuery(
-        "SELECT count()"
-        " FROM hits"
-        " WHERE SearchPhrase != ''",
-        context, false, QueryProcessingStage::Complete);
-
-    WriteBufferFromFileDescriptor wb(STDOUT_FILENO);
-
-    BlockOutputStreamPtr out1 = context.getOutputFormat("TabSeparated", wb, io1.in_sample);
-    BlockOutputStreamPtr out2 = context.getOutputFormat("TabSeparated", wb, io2.in_sample);
-
-    BlockInputStreams inputs;
-    inputs.push_back(io1.in);
-    inputs.push_back(io2.in);
-
-    for (size_t i = 0; i < inputs.size(); ++i)
-        std::cerr << inputs[i]->getID() << std::endl;
-
-    Forks forks;
-
-    glueBlockInputStreams(inputs, forks);
-
-    std::cerr << forks.size() << std::endl;
-
-    std::mutex mutex;
-
-    ThreadPool pool(inputs.size() + forks.size());
-
-    pool.schedule(std::bind(inputThread, inputs[0], out1, std::ref(wb), std::ref(mutex)));
-    pool.schedule(std::bind(inputThread, inputs[1], out2, std::ref(wb), std::ref(mutex)));
-
-    for (size_t i = 0; i < forks.size(); ++i)
-        pool.schedule(std::bind(forkThread, forks[i]));
-
-    pool.wait();
-    return 0;
-}
-catch (const DB::Exception & e)
-{
-    std::cerr << e.what() << ", " << e.displayText() << std::endl;
-    throw;
-}
@@ -138,7 +138,8 @@ void DatabaseMemory::shutdown()
        tables_snapshot = tables;
    }

-   for (const auto & kv: tables_snapshot) {
+   for (const auto & kv: tables_snapshot)
+   {
        kv.second->shutdown();
    }

@@ -427,7 +427,8 @@ void DatabaseOrdinary::shutdown()
        tables_snapshot = tables;
    }

-   for (const auto & kv: tables_snapshot) {
+   for (const auto & kv: tables_snapshot)
+   {
        kv.second->shutdown();
    }

@@ -474,14 +474,16 @@ private:
    if (!outdated_keys.empty())
    {
        std::vector<size_t> required_rows(outdated_keys.size());
-       std::transform(std::begin(outdated_keys), std::end(outdated_keys), std::begin(required_rows), [](auto & pair) {
+       std::transform(std::begin(outdated_keys), std::end(outdated_keys), std::begin(required_rows), [](auto & pair)
+       {
            return pair.second.front();
        });

        update(key_columns,
            keys_array,
            required_rows,
-           [&](const StringRef key, const size_t cell_idx) {
+           [&](const StringRef key, const size_t cell_idx)
+           {
                const StringRef attribute_value = attribute_array[cell_idx];

                /// We must copy key and value to own memory, because it may be replaced with another
@@ -492,7 +494,8 @@ private:
                map[copied_key] = copied_value;
                total_length += (attribute_value.size + 1) * outdated_keys[key].size();
            },
-           [&](const StringRef key, const size_t) {
+           [&](const StringRef key, const size_t)
+           {
                for (const auto row : outdated_keys[key])
                    total_length += get_default(row).size + 1;
            });
@@ -244,12 +244,14 @@ void ComplexKeyHashedDictionary::loadData()
    element_count += rows;

    const auto key_column_ptrs = ext::map<Columns>(ext::range(0, keys_size),
-       [&] (const size_t attribute_idx) {
+       [&] (const size_t attribute_idx)
+       {
            return block.safeGetByPosition(attribute_idx).column;
        });

    const auto attribute_column_ptrs = ext::map<Columns>(ext::range(0, attributes_size),
-       [&] (const size_t attribute_idx) {
+       [&] (const size_t attribute_idx)
+       {
            return block.safeGetByPosition(keys_size + attribute_idx).column;
        });

@@ -41,7 +41,8 @@ public:
    GetColumnsFunction && get_key_columns_function,
    GetColumnsFunction && get_view_columns_function);

-   String getName() const override {
+   String getName() const override
+   {
        return "DictionaryBlockInputStream";
    }

@@ -47,7 +47,8 @@ private:
    struct language_alias { const char * const name; const Language lang; };
    static const language_alias * getLanguageAliases()
    {
-       static constexpr const language_alias language_aliases[] {
+       static constexpr const language_alias language_aliases[]
+       {
            { "ru", Language::RU },
            { "en", Language::EN },
            { "ua", Language::UA },
@@ -539,11 +539,8 @@ PaddedPODArray<FlatDictionary::Key> FlatDictionary::getIds() const

    PaddedPODArray<Key> ids;
    for (auto idx : ext::range(0, ids_count))
-   {
-       if (loaded_ids[idx]) {
+       if (loaded_ids[idx])
            ids.push_back(idx);
-       }
-   }
    return ids;
}

@@ -494,9 +494,9 @@ PaddedPODArray<HashedDictionary::Key> HashedDictionary::getIds(const Attribute &

    PaddedPODArray<Key> ids;
    ids.reserve(attr.size());
-   for (const auto & value : attr) {
+   for (const auto & value : attr)
        ids.push_back(value.first);
-   }

    return ids;
}

@@ -28,7 +28,8 @@ public:
    DictionatyPtr dictionary, size_t max_block_size, const Names & column_names, PaddedPODArray<Key> && ids,
    PaddedPODArray<UInt16> && start_dates, PaddedPODArray<UInt16> && end_dates);

-   String getName() const override {
+   String getName() const override
+   {
        return "RangeDictionaryBlockInputStream";
    }

@@ -140,13 +141,12 @@ void RangeDictionaryBlockInputStream<DictionaryType, Key>::addSpecialColumn(
    const PaddedPODArray<T> & values, ColumnsWithTypeAndName & columns) const
{
    std::string name = default_name;
-   if (attribute) {
+   if (attribute)
        name = attribute->name;
-   }
-   if (column_names.find(name) != column_names.end()) {
+
+   if (column_names.find(name) != column_names.end())
        columns.emplace_back(getColumnFromPODArray(values), type, name);
-   }
}

template <typename DictionaryType, typename Key>
PaddedPODArray<UInt16> RangeDictionaryBlockInputStream<DictionaryType, Key>::makeDateKey(
@@ -323,7 +323,8 @@ void RangeHashedDictionary::setAttributeValue(Attribute & attribute, const Key i
    auto & values = it->second;

    const auto insert_it = std::lower_bound(std::begin(values), std::end(values), range,
-       [] (const Value<StringRef> & lhs, const Range & range) {
+       [] (const Value<StringRef> & lhs, const Range & range)
+       {
            return lhs.range < range;
        });

@@ -608,7 +608,8 @@ Columns TrieDictionary::getKeyColumns() const
    auto mask_column = ColumnVector<UInt8>::create();

#if defined(__SIZEOF_INT128__)
-   auto getter = [& ip_column, & mask_column](__uint128_t ip, size_t mask) {
+   auto getter = [& ip_column, & mask_column](__uint128_t ip, size_t mask)
+   {
        UInt64 * ip_array = reinterpret_cast<UInt64 *>(&ip);
        ip_array[0] = Poco::ByteOrder::fromNetwork(ip_array[0]);
        ip_array[1] = Poco::ByteOrder::fromNetwork(ip_array[1]);
@@ -181,7 +181,8 @@ public:

private:

-   struct Ellipse {
+   struct Ellipse
+   {
        Float64 x;
        Float64 y;
        Float64 a;
@@ -835,7 +835,8 @@ struct ImplMetroHash64
    static auto Hash128to64(const uint128_t & x) { return CityHash_v1_0_2::Hash128to64(x); }
    static auto Hash64(const char * s, const size_t len)
    {
-       union {
+       union
+       {
            UInt64 u64;
            UInt8 u8[sizeof(u64)];
        };
@@ -1396,7 +1396,8 @@ public:
    AggregateFunctionPtr aggregate_function_ptr = column_with_states->getAggregateFunction();
    const IAggregateFunction & agg_func = *aggregate_function_ptr;

-   auto deleter = [&agg_func](char * ptr) {
+   auto deleter = [&agg_func](char * ptr)
+   {
        agg_func.destroy(ptr);
        free(ptr);
    };
@@ -1529,7 +1530,8 @@ public:
    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        DataTypePtr res;
-       dispatchForSourceType(*arguments[0], [&](auto field_type_tag) {
+       dispatchForSourceType(*arguments[0], [&](auto field_type_tag)
+       {
            res = std::make_shared<DataTypeNumber<DstFieldType<decltype(field_type_tag)>>>();
        });

@@ -11,8 +11,8 @@ namespace ProfileEvents
}


-namespace DB {
+namespace DB
+{

namespace Regexps
{

@@ -35,9 +35,8 @@ ReadBufferFromFile::ReadBufferFromFile(

#ifdef __APPLE__
    bool o_direct = (flags != -1) && (flags & O_DIRECT);
-   if (o_direct) {
+   if (o_direct)
        flags = flags & ~O_DIRECT;
-   }
#endif
    fd = open(file_name.c_str(), flags == -1 ? O_RDONLY : flags);

@@ -37,9 +37,8 @@ WriteBufferFromFile::WriteBufferFromFile(

#ifdef __APPLE__
    bool o_direct = (flags != -1) && (flags & O_DIRECT);
-   if (o_direct) {
+   if (o_direct)
        flags = flags & ~O_DIRECT;
-   }
#endif

    fd = open(file_name.c_str(), flags == -1 ? O_WRONLY | O_TRUNC | O_CREAT : flags, mode);

@@ -1,40 +0,0 @@
-#include <Interpreters/ClusterProxy/AlterStreamFactory.h>
-#include <Interpreters/InterpreterAlterQuery.h>
-#include <DataStreams/RemoteBlockInputStream.h>
-#include <DataStreams/LazyBlockInputStream.h>
-
-namespace DB
-{
-
-namespace ClusterProxy
-{
-
-void AlterStreamFactory::createForShard(
-    const Cluster::ShardInfo & shard_info,
-    const String & query, const ASTPtr & query_ast,
-    const Context & context, const ThrottlerPtr & throttler,
-    BlockInputStreams & res)
-{
-    if (shard_info.isLocal())
-    {
-        /// The ALTER query may be a part of a distributed job.
-        /// Since the latter heavily relies on synchronization among its participating
-        /// nodes, it is very important to defer the execution of a local query so as
-        /// to prevent any deadlock.
-        auto interpreter = std::make_shared<InterpreterAlterQuery>(query_ast, context);
-        res.emplace_back(std::make_shared<LazyBlockInputStream>(
-            [interpreter]() mutable
-            {
-                return interpreter->execute().in;
-            }));
-    }
-    else
-    {
-        auto stream = std::make_shared<RemoteBlockInputStream>(shard_info.pool, query, context, nullptr, throttler);
-        stream->setPoolMode(PoolMode::GET_ONE);
-        res.emplace_back(std::move(stream));
-    }
-}
-
-}
-}
@@ -1,25 +0,0 @@
-#pragma once
-
-#include <Interpreters/ClusterProxy/IStreamFactory.h>
-
-namespace DB
-{
-
-namespace ClusterProxy
-{
-
-class AlterStreamFactory final : public IStreamFactory
-{
-public:
-    AlterStreamFactory() = default;
-
-    virtual void createForShard(
-        const Cluster::ShardInfo & shard_info,
-        const String & query, const ASTPtr & query_ast,
-        const Context & context, const ThrottlerPtr & throttler,
-        BlockInputStreams & res) override;
-};
-
-}
-
-}
@@ -5,7 +5,6 @@
#include <Interpreters/Cluster.h>
#include <Interpreters/IInterpreter.h>
#include <Parsers/queryToString.h>
#include <DataStreams/RemoteBlockInputStream.h>
#include <Interpreters/ProcessList.h>


@@ -24,8 +23,7 @@ BlockInputStreams executeQuery(
    const std::string query = queryToString(query_ast);

    Settings new_settings = settings;
-   new_settings.queue_max_wait_ms = ConnectionTimeouts::saturate(new_settings.queue_max_wait_ms,
-       settings.limits.max_execution_time);
+   new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.limits.max_execution_time);

    /// Does not matter on remote servers, because queries are sent under different user.
    new_settings.max_concurrent_queries_for_user = 0;
@@ -1373,7 +1373,8 @@ void ExpressionAnalyzer::optimizeGroupBy()
    if (!(select_query && select_query->group_expression_list))
        return;

-   const auto is_literal = [] (const ASTPtr& ast) {
+   const auto is_literal = [] (const ASTPtr & ast)
+   {
        return typeid_cast<const ASTLiteral *>(ast.get());
    };

@@ -1620,10 +1621,10 @@ void ExpressionAnalyzer::makeSet(const ASTFunction * node, const Block & sample_

    /** Why is LazyBlockInputStream used?
      *
-     * The fact is that when processing a request of the form
+     * The fact is that when processing a query of the form
      *   SELECT ... FROM remote_test WHERE column GLOBAL IN (subquery),
      * if the distributed remote_test table contains localhost as one of the servers,
-     * the request will be interpreted locally again (and not sent over TCP, as in the case of a remote server).
+     * the query will be interpreted locally again (and not sent over TCP, as in the case of a remote server).
      *
      * The query execution pipeline will be:
      * CreatingSets
@@ -1632,7 +1633,7 @@ void ExpressionAnalyzer::makeSet(const ASTFunction * node, const Block & sample_
      *  reading from the table _data1, creating the set (2)
      *  read from the table subordinate to remote_test.
      *
-     * (The second part of the pipeline under CreateSets is a reinterpretation of the request inside StorageDistributed,
+     * (The second part of the pipeline under CreateSets is a reinterpretation of the query inside StorageDistributed,
      * the query differs in that the database name and tables are replaced with subordinates, and the subquery is replaced with _data1.)
      *
      * But when creating the pipeline, when creating the source (2), it will be found that the _data1 table is empty
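Aside: the comment above (and the deleted AlterStreamFactory earlier) both rely on LazyBlockInputStream, whose trick is to defer building the inner source until the first read. A minimal standalone sketch of that deferred-construction idea (plain C++ with hypothetical names, not the ClickHouse API):

#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <vector>

/// Minimal stream interface: read() returns a value or "end of stream".
struct Stream
{
    virtual std::optional<int> read() = 0;
    virtual ~Stream() = default;
};

/// A concrete source over a vector.
struct VectorStream : Stream
{
    explicit VectorStream(std::vector<int> data_) : data(std::move(data_)) {}

    std::optional<int> read() override
    {
        if (pos < data.size())
            return data[pos++];
        return std::nullopt;
    }

    std::vector<int> data;
    size_t pos = 0;
};

/// The "lazy" wrapper: the inner stream is created on the first read() call,
/// so a pipeline can be assembled before its inputs actually exist.
struct LazyStream : Stream
{
    explicit LazyStream(std::function<std::unique_ptr<Stream>()> generator_) : generator(std::move(generator_)) {}

    std::optional<int> read() override
    {
        if (!inner)
            inner = generator();    /// Deferred construction happens here.
        return inner->read();
    }

    std::function<std::unique_ptr<Stream>()> generator;
    std::unique_ptr<Stream> inner;
};

int main()
{
    LazyStream lazy([]() -> std::unique_ptr<Stream>
    {
        std::cout << "inner source created only now\n";
        return std::make_unique<VectorStream>(std::vector<int>{1, 2, 3});
    });

    while (auto value = lazy.read())
        std::cout << *value << '\n';
}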
@@ -14,10 +14,10 @@ namespace
    static ExternalLoaderConfigSettings settings;
    static std::once_flag flag;

-   std::call_once(flag, [] {
+   std::call_once(flag, []
+   {
        settings.external_config = "dictionary";
        settings.external_name = "name";

        settings.path_setting_name = "dictionaries_config";
    });

@@ -19,10 +19,10 @@ namespace
    static ExternalLoaderConfigSettings settings;
    static std::once_flag flag;

-   std::call_once(flag, [] {
+   std::call_once(flag, []
+   {
        settings.external_config = "model";
        settings.external_name = "name";

        settings.path_setting_name = "models_config";
    });

@@ -100,7 +100,8 @@ struct FastHash64
    UInt64 h = len * m;
    UInt64 v;

-   while (pos != end) {
+   while (pos != end)
+   {
        v = *pos++;
        h ^= mix(v);
        h *= m;
@@ -109,7 +110,8 @@ struct FastHash64
    pos2 = reinterpret_cast<const unsigned char*>(pos);
    v = 0;

-   switch (len & 7) {
+   switch (len & 7)
+   {
        case 7: v ^= static_cast<UInt64>(pos2[6]) << 48; [[fallthrough]];
        case 6: v ^= static_cast<UInt64>(pos2[5]) << 40; [[fallthrough]];
        case 5: v ^= static_cast<UInt64>(pos2[4]) << 32; [[fallthrough]];
@@ -113,7 +113,8 @@ struct FastHash64
    uint64_t h = len * m;
    uint64_t v;

-   while (pos != end) {
+   while (pos != end)
+   {
        v = *pos++;
        h ^= mix(v);
        h *= m;
@@ -122,7 +123,8 @@ struct FastHash64
    pos2 = reinterpret_cast<const unsigned char*>(pos);
    v = 0;

-   switch (len & 7) {
+   switch (len & 7)
+   {
        case 7: v ^= static_cast<uint64_t>(pos2[6]) << 48; [[fallthrough]];
        case 6: v ^= static_cast<uint64_t>(pos2[5]) << 40; [[fallthrough]];
        case 5: v ^= static_cast<uint64_t>(pos2[4]) << 32; [[fallthrough]];
@@ -324,7 +326,8 @@ struct MetroHash64
{
    size_t operator() (StringRef x) const
    {
-       union {
+       union
+       {
            uint64_t u64;
            std::uint8_t u8[sizeof(u64)];
        };
@@ -34,7 +34,8 @@ public:
        res->children.push_back(res->type);
    }

-   if (default_expression) {
+   if (default_expression)
+   {
        res->default_expression = default_expression->clone();
        res->children.push_back(res->default_expression);
    }
@@ -92,7 +92,8 @@ public:

private:
    using StringSet = std::unordered_set<String>;
-   StringSet exit_strings {
+   StringSet exit_strings
+   {
        "exit", "quit", "logout",
        "учше", "йгше", "дщпщге",
        "exit;", "quit;", "logout;",
@@ -385,9 +386,7 @@ private:
        : Protocol::Encryption::Disable;

    String host = config().getString("host", "localhost");
-   UInt16 port = config().getInt("port", config().getInt(
-       static_cast<bool>(encryption) ? "tcp_ssl_port" : "tcp_port",
-       static_cast<bool>(encryption) ? DBMS_DEFAULT_SECURE_PORT : DBMS_DEFAULT_PORT));
+   UInt16 port = config().getInt("port", config().getInt(static_cast<bool>(encryption) ? "tcp_ssl_port" : "tcp_port", static_cast<bool>(encryption) ? DBMS_DEFAULT_SECURE_PORT : DBMS_DEFAULT_PORT));
    String default_database = config().getString("database", "");
    String user = config().getString("user", "");
    String password = config().getString("password", "");
@@ -403,12 +402,11 @@ private:
        << (!user.empty() ? " as user " + user : "")
        << "." << std::endl;

-   ConnectionTimeouts timeouts(
-       Poco::Timespan(config().getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
-       Poco::Timespan(config().getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0),
-       Poco::Timespan(config().getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0));
-   connection = std::make_unique<Connection>(host, port, default_database, user, password, timeouts, "client",
-       compression, encryption);
+   connection = std::make_unique<Connection>(host, port, default_database, user, password, "client", compression,
+       encryption,
+       Poco::Timespan(config().getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
+       Poco::Timespan(config().getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0),
+       Poco::Timespan(config().getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0));

    String server_name;
    UInt64 server_version_major = 0;
@@ -1066,6 +1064,7 @@ private:
    void onProgress(const Progress & value)
    {
        progress.incrementPiecewiseAtomically(value);
+       if (block_out_stream)
            block_out_stream->onProgress(value);
        writeProgress();
    }
@@ -17,15 +17,19 @@ namespace ErrorCodes

#ifdef __APPLE__
// We only need to support timeout = {0, 0} at this moment
-static int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * /*timeout*/) {
+static int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec * /*timeout*/)
+{
    sigset_t pending;
    int signo;
    sigpending(&pending);

-   for (signo = 1; signo < NSIG; signo++) {
-       if (sigismember(set, signo) && sigismember(&pending, signo)) {
+   for (signo = 1; signo < NSIG; ++signo)
+   {
+       if (sigismember(set, signo) && sigismember(&pending, signo))
+       {
            sigwait(set, &signo);
-           if (info) {
+           if (info)
+           {
                memset(info, 0, sizeof *info);
                info->si_signo = signo;
            }
@@ -44,7 +44,8 @@ void MetricsTransmitter::run()
    const std::string thread_name = "MericsTrns " + std::to_string(interval) + "s";
    setThreadName(thread_name.c_str());

-   const auto get_next_time = [](size_t seconds) {
+   const auto get_next_time = [](size_t seconds)
+   {
        /// To avoid time drift and transmit values exactly each interval:
        ///  next time aligned to system seconds
        ///  (60s -> every minute at 00 seconds, 5s -> every minute:[00, 05, 15 ... 55]s, 3600 -> every hour:00:00
@@ -23,7 +23,6 @@
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFile.h>
-#include <IO/ConnectionTimeouts.h>
#include <Interpreters/Settings.h>
#include <common/ThreadPool.h>
#include <common/getMemoryAmount.h>
@@ -504,9 +503,8 @@ public:
    Strings && tests_names_,
    Strings && skip_names_,
    Strings && tests_names_regexp_,
-   Strings && skip_names_regexp_,
-   const ConnectionTimeouts & timeouts)
-   : connection(host_, port_, default_database_, user_, password_, timeouts),
+   Strings && skip_names_regexp_)
+   : connection(host_, port_, default_database_, user_, password_),
    gotSIGINT(false),
    lite_output(lite_output_),
    profiles_file(profiles_file_),
@@ -601,7 +599,8 @@ private:
    void removeConfigurationsIf(
        std::vector<XMLConfigurationPtr> & configs, FilterType filter_type, const Strings & values, bool leave = false)
    {
-       auto checker = [&filter_type, &values, &leave](XMLConfigurationPtr & config) {
+       auto checker = [&filter_type, &values, &leave](XMLConfigurationPtr & config)
+       {
            if (values.size() == 0)
                return false;

@@ -631,7 +630,8 @@ private:
        if (filter_type == FilterType::Name_regexp)
        {
            String config_name = config->getString("name", "");
-           auto regex_checker = [&config_name](const String & name_regexp) {
+           auto regex_checker = [&config_name](const String & name_regexp)
+           {
                std::regex pattern(name_regexp);
                return std::regex_search(config_name, pattern);
            };
@@ -917,7 +917,8 @@ private:

    auto queries_pre_format = queries;
    queries.clear();
-   for (const auto & query : queries_pre_format) {
+   for (const auto & query : queries_pre_format)
+   {
        auto formatted = formatQueries(query, substitutions);
        queries.insert(queries.end(), formatted.begin(), formatted.end());
    }
@@ -1483,8 +1484,6 @@ try
    Strings tests_names_regexp = options.count("names-regexp") ? options["names-regexp"].as<Strings>() : Strings({});
    Strings skip_names_regexp = options.count("skip-names-regexp") ? options["skip-names-regexp"].as<Strings>() : Strings({});

-   auto timeouts = DB::ConnectionTimeouts::getTCPTimeouts(DB::Settings());
-
    DB::PerformanceTest performanceTest(options["host"].as<String>(),
        options["port"].as<UInt16>(),
        options["database"].as<String>(),
@@ -1498,8 +1497,7 @@ try
        std::move(tests_names),
        std::move(skip_names),
        std::move(tests_names_regexp),
-       std::move(skip_names_regexp),
-       timeouts);
+       std::move(skip_names_regexp));

    return 0;
}
@@ -27,7 +27,8 @@ void AlterCommand::apply(
{
    if (type == ADD_COLUMN)
    {
-       const auto exists_in = [this] (const NamesAndTypesList & columns) {
+       const auto exists_in = [this] (const NamesAndTypesList & columns)
+       {
            return columns.end() != std::find_if(columns.begin(), columns.end(),
                std::bind(namesEqual, std::cref(column_name), std::placeholders::_1));
        };
@@ -85,7 +86,8 @@ void AlterCommand::apply(
    else if (type == DROP_COLUMN)
    {
        /// look for a column in list and remove it if present, also removing corresponding entry from column_defaults
-       const auto remove_column = [&column_defaults, this] (NamesAndTypesList & columns) {
+       const auto remove_column = [&column_defaults, this] (NamesAndTypesList & columns)
+       {
            auto removed = false;
            NamesAndTypesList::iterator column_it;

@@ -120,7 +122,8 @@ void AlterCommand::apply(
        materialized_columns : alias_columns;

    /// find column or throw exception
-   const auto find_column = [this] (NamesAndTypesList & columns) {
+   const auto find_column = [this] (NamesAndTypesList & columns)
+   {
        const auto it = std::find_if(columns.begin(), columns.end(),
            std::bind(namesEqual, std::cref(column_name), std::placeholders::_1) );
        if (it == columns.end())
@@ -32,7 +32,6 @@ public:
    ~MergeTreeBaseBlockInputStream() override;

protected:
-
    Block readImpl() override final;

    /// Creates new this->task, and initilizes readers
@@ -46,7 +45,6 @@ protected:
    void injectVirtualColumns(Block & block);

-protected:

    MergeTreeData & storage;

    ExpressionActionsPtr prewhere_actions;
@@ -5,8 +5,8 @@
#include <unordered_set>


-namespace DB {
+namespace DB
+{

NameSet injectRequiredColumns(const MergeTreeData & storage, const MergeTreeData::DataPartPtr & part, Names & columns)
{
@@ -91,7 +91,8 @@ void MergeTreeWhereOptimizer::optimizeConjunction(ASTSelectQuery & select, ASTFu
    auto & conditions = fun->arguments->children;

    /// remove condition by swapping it with the last one and calling ::pop_back()
-   const auto remove_condition_at_index = [&conditions] (const size_t idx) {
+   const auto remove_condition_at_index = [&conditions] (const size_t idx)
+   {
        if (idx < conditions.size() - 1)
            std::swap(conditions[idx], conditions.back());
        conditions.pop_back();
@@ -142,7 +143,8 @@ void MergeTreeWhereOptimizer::optimizeConjunction(ASTSelectQuery & select, ASTFu
        }
    }

-   const auto move_condition_to_prewhere = [&] (const size_t idx) {
+   const auto move_condition_to_prewhere = [&] (const size_t idx)
+   {
        select.prewhere_expression = conditions[idx];
        select.children.push_back(select.prewhere_expression);
        LOG_DEBUG(log, "MergeTreeWhereOptimizer: condition `" << select.prewhere_expression << "` moved to PREWHERE");
@@ -45,7 +45,8 @@ private:

    ColumnTypesMap getColumnTypesMap() const
    {
-       return {
+       return
+       {
            {"Target", DatasetColumnType::Target},
            {"Num", DatasetColumnType::Num},
            {"Categ", DatasetColumnType::Categ},
@@ -124,7 +124,8 @@ static Strings getAllGraphiteSections(const AbstractConfiguration & config)

StorageSystemGraphite::StorageSystemGraphite(const std::string & name_)
    : name(name_)
-   , columns {
+   , columns
+   {
        {"config_name", std::make_shared<DataTypeString>()},
        {"regexp", std::make_shared<DataTypeString>()},
        {"function", std::make_shared<DataTypeString>()},
@@ -30,7 +30,8 @@ public:
    template <typename Function>
    void registerFunction()
    {
-       auto creator = [] () -> TableFunctionPtr {
+       auto creator = [] () -> TableFunctionPtr
+       {
            return std::make_shared<Function>();
        };
        registerFunction(Function::name, std::move(creator));
@@ -17,7 +17,8 @@ struct DataHolder
    ClickHouseLibrary::ColumnsUInt64 columns;
};

-extern "C" {
+extern "C"
+{

void * ClickHouseDictionary_v1_loadIds(
    void * data_ptr, ClickHouseLibrary::CStrings * settings, ClickHouseLibrary::CStrings * columns, const struct ClickHouseLibrary::VectorUInt64 * ids)
@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
echo 'DROP TABLE IF EXISTS test.long_insert' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
echo 'CREATE TABLE test.long_insert (a String) ENGINE = Memory' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
for string_size in 1 10 100 1000 10000 100000 1000000; do
-    # Если не указать LC_ALL=C, то Perl будет ругаться на некоторых плохо настроенных системах.
+    # LC_ALL=C is needed because otherwise Perl will bark on bad tuned environment.
    LC_ALL=C perl -we 'for my $letter ("a" .. "z") { print(($letter x '$string_size') . "\n") }' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}?query=INSERT+INTO+test.long_insert+FORMAT+TabSeparated" --data-binary @-
    echo 'SELECT substring(a, 1, 1) AS c, length(a) AS l FROM test.long_insert ORDER BY c, l' | ${CLICKHOUSE_CURL} -sSg ${CLICKHOUSE_URL} -d @-
done