Merge branch 'master' of github.com:yandex/ClickHouse into fix_non_deterministic_compile
Commit: 64b1108989
@@ -1,3 +1,8 @@
+## ClickHouse release 18.14.10, 2018-10-23
+
+* The `compile_expressions` setting (JIT compilation of expressions) is disabled by default. [#3410](https://github.com/yandex/ClickHouse/pull/3410)
+* The `enable_optimize_predicate_expression` setting is disabled by default.
+
 ## ClickHouse release 18.14.9, 2018-10-16

 ### New features:
@@ -86,4 +86,4 @@ if (ENABLE_ODBC)
 endif ()
 endif ()

-message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}")
+message (STATUS "Using odbc=${ODBC_FOUND}: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}")
@@ -116,10 +116,10 @@ endif ()
 if (Poco_MongoDB_LIBRARY)
     set (USE_POCO_MONGODB 1)
 endif ()
-if (Poco_DataODBC_LIBRARY)
+if (Poco_DataODBC_LIBRARY AND ODBC_FOUND)
     set (USE_POCO_DATAODBC 1)
 endif ()
-if (Poco_SQLODBC_LIBRARY)
+if (Poco_SQLODBC_LIBRARY AND ODBC_FOUND)
     set (USE_POCO_SQLODBC 1)
 endif ()
@@ -76,7 +76,8 @@ struct ConnectionParameters
         timeouts = ConnectionTimeouts(
             Poco::Timespan(config.getInt("connect_timeout", DBMS_DEFAULT_CONNECT_TIMEOUT_SEC), 0),
             Poco::Timespan(config.getInt("receive_timeout", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC), 0),
-            Poco::Timespan(config.getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0));
+            Poco::Timespan(config.getInt("send_timeout", DBMS_DEFAULT_SEND_TIMEOUT_SEC), 0),
+            Poco::Timespan(config.getInt("tcp_keep_alive_timeout", 0), 0));
     }
 };
@@ -77,6 +77,17 @@ void Connection::connect()
         socket->setReceiveTimeout(timeouts.receive_timeout);
         socket->setSendTimeout(timeouts.send_timeout);
         socket->setNoDelay(true);
+        if (timeouts.tcp_keep_alive_timeout.totalSeconds())
+        {
+            socket->setKeepAlive(true);
+            socket->setOption(IPPROTO_TCP,
+#if defined(TCP_KEEPALIVE)
+                TCP_KEEPALIVE
+#else
+                TCP_KEEPIDLE  // __APPLE__
+#endif
+                , timeouts.tcp_keep_alive_timeout);
+        }

         in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
         out = std::make_shared<WriteBufferFromPocoSocket>(*socket);
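
The hunk above only enters the keep-alive branch when `tcp_keep_alive_timeout` is non-zero, and it picks `TCP_KEEPALIVE` on macOS and `TCP_KEEPIDLE` elsewhere. As a rough standalone illustration of what this asks the operating system to do (not ClickHouse code: the Poco `StreamSocket` wrappers are replaced by raw POSIX calls, and the socket descriptor and timeout value are placeholders):

```
// Standalone sketch: enable TCP keep-alive on a connected socket and set the
// idle time before the first probe, mirroring what Connection::connect() above
// does through Poco's StreamSocket::setOption(). Error handling is omitted.
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void enableKeepAlive(int fd, int idle_seconds)
{
    int enable = 1;
    /// Turn keep-alive probing on for the connection.
    setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &enable, sizeof(enable));

    /// Idle time before the kernel sends the first probe:
    /// Linux exposes it as TCP_KEEPIDLE, macOS as TCP_KEEPALIVE.
#if defined(TCP_KEEPIDLE)
    setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_seconds, sizeof(idle_seconds));
#elif defined(TCP_KEEPALIVE)
    setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &idle_seconds, sizeof(idle_seconds));
#endif
}

int main()
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);   /// placeholder; a real client would connect() first
    enableKeepAlive(fd, /* idle seconds */ 5);
}
```

Both constants control the same thing: how long a silent connection stays idle before the kernel starts sending keep-alive probes.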
@@ -784,6 +784,9 @@ public:
         }
         else if constexpr (to_decimal)
         {
+            if (!arguments[1].column)
+                throw Exception("Second argument for function " + getName() + " must be constant", ErrorCodes::ILLEGAL_COLUMN);
+
             UInt64 scale = extractToDecimalScale(arguments[1]);

             if constexpr (std::is_same_v<Name, NameToDecimal32>)
@@ -11,6 +11,7 @@ struct ConnectionTimeouts
     Poco::Timespan connection_timeout;
     Poco::Timespan send_timeout;
     Poco::Timespan receive_timeout;
+    Poco::Timespan tcp_keep_alive_timeout;

     ConnectionTimeouts() = default;

@@ -19,7 +20,19 @@ struct ConnectionTimeouts
                        const Poco::Timespan & receive_timeout_)
     : connection_timeout(connection_timeout_),
       send_timeout(send_timeout_),
-      receive_timeout(receive_timeout_)
+      receive_timeout(receive_timeout_),
+      tcp_keep_alive_timeout(0)
     {
     }

+    ConnectionTimeouts(const Poco::Timespan & connection_timeout_,
+                       const Poco::Timespan & send_timeout_,
+                       const Poco::Timespan & receive_timeout_,
+                       const Poco::Timespan & tcp_keep_alive_timeout_)
+    : connection_timeout(connection_timeout_),
+      send_timeout(send_timeout_),
+      receive_timeout(receive_timeout_),
+      tcp_keep_alive_timeout(tcp_keep_alive_timeout_)
+    {
+    }

@@ -35,19 +48,20 @@ struct ConnectionTimeouts
     {
         return ConnectionTimeouts(saturate(connection_timeout, limit),
                                   saturate(send_timeout, limit),
-                                  saturate(receive_timeout, limit));
+                                  saturate(receive_timeout, limit),
+                                  saturate(tcp_keep_alive_timeout, limit));
     }

     /// Timeouts for the case when we have just single attempt to connect.
     static ConnectionTimeouts getTCPTimeoutsWithoutFailover(const Settings & settings)
     {
-        return ConnectionTimeouts(settings.connect_timeout, settings.send_timeout, settings.receive_timeout);
+        return ConnectionTimeouts(settings.connect_timeout, settings.send_timeout, settings.receive_timeout, settings.tcp_keep_alive_timeout);
     }

     /// Timeouts for the case when we will try many addresses in a loop.
     static ConnectionTimeouts getTCPTimeoutsWithFailover(const Settings & settings)
     {
-        return ConnectionTimeouts(settings.connect_timeout_with_failover_ms, settings.send_timeout, settings.receive_timeout);
+        return ConnectionTimeouts(settings.connect_timeout_with_failover_ms, settings.send_timeout, settings.receive_timeout, settings.tcp_keep_alive_timeout);
     }

     static ConnectionTimeouts getHTTPTimeouts(const Settings & settings)
dbms/src/Interpreters/AddDefaultDatabaseVisitor.h (new file, 64 lines)
@@ -0,0 +1,64 @@
#pragma once

#include <Parsers/ASTQueryWithTableAndOutput.h>
#include <Parsers/ASTRenameQuery.h>

namespace DB
{

/// Visits AST nodes, add default database to DDLs if not set.
class AddDefaultDatabaseVisitor
{
public:
    AddDefaultDatabaseVisitor(const String & default_database_)
    :   default_database(default_database_)
    {}

    void visit(ASTPtr & ast) const
    {
        visitChildren(ast);

        if (!tryVisit<ASTQueryWithTableAndOutput>(ast) &&
            !tryVisit<ASTRenameQuery>(ast))
        {}
    }

private:
    const String default_database;

    void visit(ASTQueryWithTableAndOutput * node, ASTPtr &) const
    {
        if (node->database.empty())
            node->database = default_database;
    }

    void visit(ASTRenameQuery * node, ASTPtr &) const
    {
        for (ASTRenameQuery::Element & elem : node->elements)
        {
            if (elem.from.database.empty())
                elem.from.database = default_database;
            if (elem.to.database.empty())
                elem.to.database = default_database;
        }
    }

    void visitChildren(ASTPtr & ast) const
    {
        for (auto & child : ast->children)
            visit(child);
    }

    template <typename T>
    bool tryVisit(ASTPtr & ast) const
    {
        if (T * t = dynamic_cast<T *>(ast.get()))
        {
            visit(t, ast);
            return true;
        }
        return false;
    }
};

}
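
The new visitor walks children first and then rewrites the node itself, using `dynamic_cast` inside `tryVisit<T>` to pick out the node types that carry a database name. A toy, self-contained model of that pattern (invented types, not the real ClickHouse AST):

```
// Toy sketch of the AddDefaultDatabaseVisitor idea: walk a dynamically typed
// tree and fill in an empty database name on the node kinds that have one.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node
{
    virtual ~Node() = default;
    std::vector<std::shared_ptr<Node>> children;
};

struct QueryWithTable : Node
{
    std::string database;   // empty means "use the session default"
    std::string table;
};

class AddDefaultDatabase
{
public:
    explicit AddDefaultDatabase(std::string default_database_) : default_database(std::move(default_database_)) {}

    void visit(const std::shared_ptr<Node> & node) const
    {
        for (const auto & child : node->children)
            visit(child);

        /// dynamic_cast plays the role of tryVisit<T>: only matching node types are rewritten.
        if (auto * q = dynamic_cast<QueryWithTable *>(node.get()); q && q->database.empty())
            q->database = default_database;
    }

private:
    std::string default_database;
};

int main()
{
    auto query = std::make_shared<QueryWithTable>();
    query->table = "t1";

    AddDefaultDatabase(/* current database */ "default").visit(query);
    std::cout << query->database << "." << query->table << "\n";   // prints: default.t1
}
```

In the DDLWorker change further down, the real class is applied the same way: it is constructed with the current database and `visit` is called on the query AST before the query is serialized into the DDL log entry.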
@@ -12,6 +12,7 @@
 #include <DataStreams/IProfilingBlockInputStream.h>
 #include <Interpreters/executeQuery.h>
 #include <Interpreters/Cluster.h>
+#include <Interpreters/AddDefaultDatabaseVisitor.h>
 #include <Common/DNSResolver.h>
 #include <Common/Macros.h>
 #include <Common/getFQDNOrHostName.h>

@@ -39,6 +40,7 @@ namespace DB

 namespace ErrorCodes
 {
     extern const int LOGICAL_ERROR;
     extern const int UNKNOWN_ELEMENT_IN_CONFIG;
     extern const int INVALID_CONFIG_PARAMETER;
     extern const int UNKNOWN_FORMAT_VERSION;

@@ -1135,7 +1137,7 @@ private:
 };


-BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context, const NameSet & query_databases)
+BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & context, NameSet && query_databases)
 {
     /// Remove FORMAT <fmt> and INTO OUTFILE <file> if exists
     ASTPtr query_ptr = query_ptr_->clone();

@@ -1163,30 +1165,56 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, const Context & cont
     ClusterPtr cluster = context.getCluster(query->cluster);
     DDLWorker & ddl_worker = context.getDDLWorker();

-    DDLLogEntry entry;
-    entry.query = queryToString(query_ptr);
-    entry.initiator = ddl_worker.getCommonHostID();

     /// Check database access rights, assume that all servers have the same users config
-    NameSet databases_to_check_access_rights;
+    NameSet databases_to_access;
+    const String & current_database = context.getCurrentDatabase();

     Cluster::AddressesWithFailover shards = cluster->getShardsAddresses();

+    std::vector<HostID> hosts;
+    bool use_shard_default_db = false;
+    bool use_local_default_db = false;
     for (const auto & shard : shards)
     {
         for (const auto & addr : shard)
         {
-            entry.hosts.emplace_back(addr);
+            hosts.emplace_back(addr);

-            /// Expand empty database name to shards' default database name
+            /// Expand empty database name to shards' default (or current) database name
             for (const String & database : query_databases)
-                databases_to_check_access_rights.emplace(database.empty() ? addr.default_database : database);
+            {
+                if (database.empty())
+                {
+                    bool has_shard_default_db = !addr.default_database.empty();
+                    use_shard_default_db |= has_shard_default_db;
+                    use_local_default_db |= !has_shard_default_db;
+                    databases_to_access.emplace(has_shard_default_db ? addr.default_database : current_database);
+                }
+                else
+                    databases_to_access.emplace(database);
+            }
         }
     }

-    for (const String & database : databases_to_check_access_rights)
-        context.checkDatabaseAccessRights(database.empty() ? context.getCurrentDatabase() : database);
+    if (use_shard_default_db && use_local_default_db)
+        throw Exception("Mixed local default DB and shard default DB in DDL query", ErrorCodes::NOT_IMPLEMENTED);
+
+    if (databases_to_access.empty())
+        throw Exception("No databases to access in distributed DDL query", ErrorCodes::LOGICAL_ERROR);
+
+    for (const String & database : databases_to_access)
+        context.checkDatabaseAccessRights(database);
+
+    if (use_local_default_db)
+    {
+        AddDefaultDatabaseVisitor visitor(current_database);
+        visitor.visit(query_ptr);
+    }
+
+    DDLLogEntry entry;
+    entry.hosts = std::move(hosts);
+    entry.query = queryToString(query_ptr);
+    entry.initiator = ddl_worker.getCommonHostID();
     String node_path = ddl_worker.enqueueQuery(entry);

     BlockIO io;
@@ -20,7 +20,7 @@ struct DDLTask;


 /// Pushes distributed DDL query to the queue
-BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, const NameSet & query_databases);
+BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr, const Context & context, NameSet && query_databases);


 class DDLWorker
@@ -27,7 +27,7 @@ BlockIO InterpreterAlterQuery::execute()
     auto & alter = typeid_cast<ASTAlterQuery &>(*query_ptr);

     if (!alter.cluster.empty())
-        return executeDDLQueryOnCluster(query_ptr, context, {alter.table});
+        return executeDDLQueryOnCluster(query_ptr, context, {alter.database});

     const String & table_name = alter.table;
     String database_name = alter.database.empty() ? context.getCurrentDatabase() : alter.database;
@@ -487,7 +487,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
         if (!create.to_table.empty())
             databases.emplace(create.to_database);

-        return executeDDLQueryOnCluster(query_ptr, context, databases);
+        return executeDDLQueryOnCluster(query_ptr, context, std::move(databases));
     }

     String path = context.getPath();
@@ -47,7 +47,7 @@ BlockIO InterpreterRenameQuery::execute()
             databases.emplace(elem.to.database);
         }

-        return executeDDLQueryOnCluster(query_ptr, context, databases);
+        return executeDDLQueryOnCluster(query_ptr, context, std::move(databases));
     }

     String path = context.getPath();
@@ -49,6 +49,7 @@
 #include <Parsers/queryToString.h>
 #include <ext/map.h>
 #include <memory>
+#include <DataStreams/ConvertingBlockInputStream.h>


 namespace DB

@@ -1293,6 +1294,17 @@ void InterpreterSelectQuery::executeUnion(Pipeline & pipeline)
     /// If there are still several streams, then we combine them into one
     if (pipeline.hasMoreThanOneStream())
     {
+        /// Unify streams in case they have different headers.
+        auto first_header = pipeline.streams.at(0)->getHeader();
+        for (size_t i = 1; i < pipeline.streams.size(); ++i)
+        {
+            auto & stream = pipeline.streams[i];
+            auto header = stream->getHeader();
+            auto mode = ConvertingBlockInputStream::MatchColumnsMode::Name;
+            if (!blocksHaveEqualStructure(first_header, header))
+                stream = std::make_shared<ConvertingBlockInputStream>(context, stream, first_header, mode);
+        }
+
         pipeline.firstStream() = std::make_shared<UnionBlockInputStream<>>(pipeline.streams, pipeline.stream_with_non_joined_data, max_streams);
         pipeline.stream_with_non_joined_data = nullptr;
         pipeline.streams.resize(1);
@@ -49,6 +49,7 @@ struct Settings
     M(SettingMilliseconds, connect_timeout_with_failover_ms, DBMS_DEFAULT_CONNECT_TIMEOUT_WITH_FAILOVER_MS, "Connection timeout for selecting first healthy replica.") \
     M(SettingSeconds, receive_timeout, DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC, "") \
     M(SettingSeconds, send_timeout, DBMS_DEFAULT_SEND_TIMEOUT_SEC, "") \
+    M(SettingSeconds, tcp_keep_alive_timeout, 0, "") \
     M(SettingMilliseconds, queue_max_wait_ms, 5000, "The wait time in the request queue, if the number of concurrent requests exceeds the maximum.") \
     M(SettingUInt64, poll_interval, DBMS_DEFAULT_POLL_INTERVAL, "Block at the query wait loop on the server for the specified number of seconds.") \
     M(SettingUInt64, distributed_connections_pool_size, DBMS_DEFAULT_DISTRIBUTED_CONNECTIONS_POOL_SIZE, "Maximum number of connections with one remote server in the pool.") \
@@ -187,18 +187,6 @@ ASTPtr ASTAlterQuery::clone() const
     return res;
 }

-ASTPtr ASTAlterQuery::getRewrittenASTWithoutOnCluster(const std::string & new_database) const
-{
-    auto query_ptr = clone();
-    auto & query = static_cast<ASTAlterQuery &>(*query_ptr);
-
-    query.cluster.clear();
-    if (query.database.empty())
-        query.database = new_database;
-
-    return query_ptr;
-}
-
 void ASTAlterQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
 {
     frame.need_parens = false;
@@ -122,7 +122,10 @@ public:

     ASTPtr clone() const override;

-    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override;
+    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override
+    {
+        return removeOnCluster<ASTAlterQuery>(clone(), new_database);
+    }

 protected:
     void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
@@ -112,14 +112,7 @@ public:

     ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override
     {
-        auto query_ptr = clone();
-        ASTCreateQuery & query = static_cast<ASTCreateQuery &>(*query_ptr);
-
-        query.cluster.clear();
-        if (query.database.empty())
-            query.database = new_database;
-
-        return query_ptr;
+        return removeOnCluster<ASTCreateQuery>(clone(), new_database);
     }

 protected:
@@ -29,18 +29,6 @@ ASTPtr ASTDropQuery::clone() const
     return res;
 }

-ASTPtr ASTDropQuery::getRewrittenASTWithoutOnCluster(const std::string & new_database) const
-{
-    auto query_ptr = clone();
-    auto & query = static_cast<ASTDropQuery &>(*query_ptr);
-
-    query.cluster.clear();
-    if (query.database.empty())
-        query.database = new_database;
-
-    return query_ptr;
-}
-
 void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const
 {
     settings.ostr << (settings.hilite ? hilite_keyword : "");
@@ -26,7 +26,10 @@ public:
     String getID() const override;
     ASTPtr clone() const override;

-    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override;
+    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override
+    {
+        return removeOnCluster<ASTDropQuery>(clone(), new_database);
+    }

 protected:
     void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
@@ -8,16 +8,6 @@ String ASTKillQueryQuery::getID() const
     return "KillQueryQuery_" + (where_expression ? where_expression->getID() : "") + "_" + String(sync ? "SYNC" : "ASYNC");
 }

-ASTPtr ASTKillQueryQuery::getRewrittenASTWithoutOnCluster(const std::string & /*new_database*/) const
-{
-    auto query_ptr = clone();
-    ASTKillQueryQuery & query = static_cast<ASTKillQueryQuery &>(*query_ptr);
-
-    query.cluster.clear();
-
-    return query_ptr;
-}
-
 void ASTKillQueryQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
 {
     settings.ostr << (settings.hilite ? hilite_keyword : "") << "KILL QUERY ";
@@ -24,7 +24,10 @@ public:

     void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;

-    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override;
+    ASTPtr getRewrittenASTWithoutOnCluster(const std::string &) const override
+    {
+        return removeOnCluster<ASTKillQueryQuery>(clone());
+    }
 };

 }
@@ -3,19 +3,6 @@
 namespace DB
 {


-ASTPtr ASTOptimizeQuery::getRewrittenASTWithoutOnCluster(const std::string & new_database) const
-{
-    auto query_ptr = clone();
-    ASTOptimizeQuery & query = static_cast<ASTOptimizeQuery &>(*query_ptr);
-
-    query.cluster.clear();
-    if (query.database.empty())
-        query.database = new_database;
-
-    return query_ptr;
-}
-
 void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
 {
     settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "")
@@ -40,7 +40,10 @@ public:

     void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;

-    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override;
+    ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override
+    {
+        return removeOnCluster<ASTOptimizeQuery>(clone(), new_database);
+    }
 };

 }
@@ -28,6 +28,27 @@ public:
     static bool parse(Pos & pos, std::string & cluster_str, Expected & expected);

     virtual ~ASTQueryWithOnCluster() = default;

+protected:
+    template <typename T>
+    static ASTPtr removeOnCluster(ASTPtr query_ptr, const std::string & new_database)
+    {
+        T & query = static_cast<T &>(*query_ptr);
+
+        query.cluster.clear();
+        if (query.database.empty())
+            query.database = new_database;
+
+        return query_ptr;
+    }
+
+    template <typename T>
+    static ASTPtr removeOnCluster(ASTPtr query_ptr)
+    {
+        T & query = static_cast<T &>(*query_ptr);
+        query.cluster.clear();
+        return query_ptr;
+    }
 };

 }
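
These two protected helpers are what let the per-class `getRewrittenASTWithoutOnCluster` bodies in the hunks above collapse into one-line calls. A toy, self-contained sketch of the same idea (invented `Query`/`AlterQuery` types, not the real AST classes):

```
// Toy sketch: the duplicated "clone, clear cluster, default the database" logic
// moves into one protected static template on the common base class.
#include <iostream>
#include <memory>
#include <string>

struct Query
{
    std::string cluster;
    std::string database;
    virtual std::shared_ptr<Query> clone() const = 0;
    virtual ~Query() = default;
};

struct QueryWithOnCluster
{
protected:
    /// Shared helper: drop the ON CLUSTER clause and fill in the default database.
    template <typename T>
    static std::shared_ptr<Query> removeOnCluster(std::shared_ptr<Query> query_ptr, const std::string & new_database)
    {
        T & query = static_cast<T &>(*query_ptr);
        query.cluster.clear();
        if (query.database.empty())
            query.database = new_database;
        return query_ptr;
    }
};

struct AlterQuery : Query, QueryWithOnCluster
{
    std::shared_ptr<Query> clone() const override { return std::make_shared<AlterQuery>(*this); }

    std::shared_ptr<Query> rewrittenWithoutOnCluster(const std::string & new_database) const
    {
        return removeOnCluster<AlterQuery>(clone(), new_database);
    }
};

int main()
{
    AlterQuery q;
    q.cluster = "my_cluster";
    auto rewritten = q.rewrittenWithoutOnCluster("default");
    std::cout << "cluster='" << rewritten->cluster << "' database='" << rewritten->database << "'\n";
    // prints: cluster='' database='default'
}
```

The derived class only supplies its own type to the template; clearing the cluster and defaulting the database live in one place.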
@@ -1,4 +1,3 @@
-#include <Storages/MergeTree/RangesInDataPart.h>
 #include <Storages/MergeTree/MergeTreeReadPool.h>
 #include <ext/range.h>
 #include <Storages/MergeTree/MergeTreeBaseBlockInputStream.h>

@@ -10,6 +9,11 @@ namespace ProfileEvents
     extern const Event ReadBackoff;
 }

+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
 namespace DB
 {

@@ -22,8 +26,13 @@ MergeTreeReadPool::MergeTreeReadPool(
     const bool do_not_steal_tasks)
     : backoff_settings{backoff_settings}, backoff_state{threads}, data{data},
       column_names{column_names}, do_not_steal_tasks{do_not_steal_tasks},
-      predict_block_size_bytes{preferred_block_size_bytes > 0}, prewhere_info{prewhere_info}
+      predict_block_size_bytes{preferred_block_size_bytes > 0}, prewhere_info{prewhere_info}, parts_ranges{parts}
 {
+    /// reverse from right-to-left to left-to-right
+    /// because 'reverse' was done in MergeTreeDataSelectExecutor
+    for (auto & part_ranges : parts_ranges)
+        std::reverse(std::begin(part_ranges.ranges), std::end(part_ranges.ranges));
+
     /// parts don't contain duplicate MergeTreeDataPart's.
     const auto per_part_sum_marks = fillPerPartInfo(parts, prewhere_info, check_columns);
     fillPerThreadInfo(threads, sum_marks, per_part_sum_marks, parts, min_marks_for_concurrent_read);

@@ -120,6 +129,27 @@ MergeTreeReadTaskPtr MergeTreeReadPool::getTask(const size_t min_marks_to_read,
         prewhere_info && prewhere_info->remove_prewhere_column, per_part_should_reorder[part_idx], std::move(curr_task_size_predictor));
 }

+MarkRanges MergeTreeReadPool::getRestMarks(const std::string & part_path, const MarkRange & from) const
+{
+    MarkRanges all_part_ranges;
+    for (const auto & part_ranges : parts_ranges)
+    {
+        if (part_ranges.data_part->getFullPath() == part_path)
+        {
+            all_part_ranges = part_ranges.ranges;
+            break;
+        }
+    }
+    if (all_part_ranges.empty())
+        throw Exception("Trying to read marks range [" + std::to_string(from.begin) + ", " + std::to_string(from.end) + "] from part '"
+            + part_path + "' which has no ranges in this query", ErrorCodes::LOGICAL_ERROR);
+
+    auto begin = std::lower_bound(all_part_ranges.begin(), all_part_ranges.end(), from, [] (const auto & f, const auto & s) { return f.begin < s.begin; });
+    if (begin == all_part_ranges.end())
+        begin = std::prev(all_part_ranges.end());
+    begin->begin = from.begin;
+    return MarkRanges(begin, all_part_ranges.end());
+}
+
 Block MergeTreeReadPool::getHeader() const
 {
@@ -80,6 +80,9 @@ public:
     */
     void profileFeedback(const ReadBufferFromFileBase::ProfileInfo info);

+    /// This method tells which mark ranges we have to read if we start from @from mark range
+    MarkRanges getRestMarks(const std::string & part_path, const MarkRange & from) const;
+
     Block getHeader() const;

 private:

@@ -127,6 +130,8 @@ private:

     std::set<size_t> remaining_thread_tasks;

+    RangesInDataParts parts_ranges;
+
     mutable std::mutex mutex;

     Logger * log = &Logger::get("MergeTreeReadPool");
@@ -65,50 +65,49 @@ bool MergeTreeThreadBlockInputStream::getNewTask()
     }

     const std::string path = task->data_part->getFullPath();
-    size_t current_task_first_mark = task->mark_ranges[0].begin;
-    size_t current_task_end_mark = task->mark_ranges.back().end;

     /// Allows pool to reduce number of threads in case of too slow reads.
     auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info) { pool->profileFeedback(info); };

     if (!reader)
     {
+        auto rest_mark_ranges = pool->getRestMarks(path, task->mark_ranges[0]);
+
         if (use_uncompressed_cache)
             owned_uncompressed_cache = storage.context.getUncompressedCache();

        owned_mark_cache = storage.context.getMarkCache();

        reader = std::make_unique<MergeTreeReader>(
            path, task->data_part, task->columns, owned_uncompressed_cache.get(), owned_mark_cache.get(), save_marks_in_cache,
-           storage, task->mark_ranges, min_bytes_to_use_direct_io, max_read_buffer_size, MergeTreeReader::ValueSizeMap{}, profile_callback);
+           storage, rest_mark_ranges, min_bytes_to_use_direct_io, max_read_buffer_size, MergeTreeReader::ValueSizeMap{}, profile_callback);

        if (prewhere_info)
            pre_reader = std::make_unique<MergeTreeReader>(
                path, task->data_part, task->pre_columns, owned_uncompressed_cache.get(), owned_mark_cache.get(), save_marks_in_cache,
-               storage, task->mark_ranges, min_bytes_to_use_direct_io,
+               storage, rest_mark_ranges, min_bytes_to_use_direct_io,
                max_read_buffer_size, MergeTreeReader::ValueSizeMap{}, profile_callback);
    }
    else
    {
-       /// in other case we can reuse readers, they stopped exactly at required position
-       if (last_task_end_mark != current_task_first_mark || path != last_readed_part_path)
+       /// in other case we can reuse readers, anyway they will be "seeked" to required mark
+       if (path != last_readed_part_path)
        {
+           auto rest_mark_ranges = pool->getRestMarks(path, task->mark_ranges[0]);
            /// retain avg_value_size_hints
            reader = std::make_unique<MergeTreeReader>(
                path, task->data_part, task->columns, owned_uncompressed_cache.get(), owned_mark_cache.get(), save_marks_in_cache,
-               storage, task->mark_ranges, min_bytes_to_use_direct_io, max_read_buffer_size,
+               storage, rest_mark_ranges, min_bytes_to_use_direct_io, max_read_buffer_size,
                reader->getAvgValueSizeHints(), profile_callback);

            if (prewhere_info)
                pre_reader = std::make_unique<MergeTreeReader>(
                    path, task->data_part, task->pre_columns, owned_uncompressed_cache.get(), owned_mark_cache.get(), save_marks_in_cache,
-                   storage, task->mark_ranges, min_bytes_to_use_direct_io,
+                   storage, rest_mark_ranges, min_bytes_to_use_direct_io,
                    max_read_buffer_size, pre_reader->getAvgValueSizeHints(), profile_callback);
        }
    }
    last_readed_part_path = path;
-   last_task_end_mark = current_task_end_mark;

    return true;
}
@@ -44,8 +44,6 @@ private:
     std::shared_ptr<MergeTreeReadPool> pool;
     size_t min_marks_to_read;

-    /// Last readed mark in task for this thread
-    size_t last_task_end_mark;
     /// Last part readed in this thread
     std::string last_readed_part_path;
     /// Names from header. Used in order to order columns in read blocks.
@@ -242,7 +242,7 @@ def main(args):
            stderr_element = et.Element("system-err")
            stderr_element.text = et.CDATA(stderr)
            report_testcase.append(stderr_element)
-           print(stderr)
+           print(stderr.encode('utf-8'))

        if args.stop and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) and not 'Received exception from server' in stderr:
            SERVER_DIED = True
@@ -0,0 +1,18 @@ (new file)
<yandex>
    <remote_servers>
        <two_shards>
            <shard>
                <replica>
                    <host>node1</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>node2</host>
                    <port>9000</port>
                </replica>
            </shard>
        </two_shards>
    </remote_servers>
</yandex>
dbms/tests/integration/test_union_header/test.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for node in (node1, node2):

            node.query('''
                CREATE TABLE default.t1_local
                (
                    event_date Date DEFAULT toDate(event_time),
                    event_time DateTime,
                    log_type UInt32,
                    account_id String
                )
                ENGINE = MergeTree(event_date, (event_time, account_id), 8192);
            ''')

            node.query('''
                CREATE TABLE default.t1 AS default.t1_local
                ENGINE = Distributed('two_shards', 'default', 't1_local', rand());
            ''')

        yield cluster

    finally:
        cluster.shutdown()


def test_read(started_cluster):
    assert node1.query('''SELECT event_date, event_time, log_type
                          FROM default.t1
                          WHERE (log_type = 30305) AND (account_id = '111111')
                          LIMIT 1''').strip() == ''
@@ -0,0 +1,28 @@ (new file)
<test>
    <name>read_from_hits_with_aio</name>
    <stop_conditions>
        <any_of>
            <average_speed_not_changing_for_ms>5000</average_speed_not_changing_for_ms>
            <total_time_ms>30000</total_time_ms>
        </any_of>
    </stop_conditions>
    <type>once</type>
    <metrics>
        <!-- For running one infinite query -->
        <max_rows_per_second />
        <max_bytes_per_second />
        <avg_rows_per_second />
        <avg_bytes_per_second />
    </metrics>

    <preconditions>
        <table_exists>hits_1000m_single</table_exists>
    </preconditions>

    <query>SELECT count() FROM hits_1000m_single where UserID=1234567890 SETTINGS max_threads = 1, min_bytes_to_use_direct_io = 1, max_read_buffer_size = 10485760;</query>
    <query>SELECT count() FROM hits_1000m_single where EventDate between toDate('2013-07-10') and toDate('2013-07-16') and UserID=123 SETTINGS max_threads = 1, min_bytes_to_use_direct_io = 1, max_read_buffer_size = 10485760;</query>

    <query>SELECT count() FROM hits_1000m_single where UserID=1234567890 SETTINGS max_threads = 1, min_bytes_to_use_direct_io = 0, max_read_buffer_size = 10485760;</query>
    <query>SELECT count() FROM hits_1000m_single where EventDate between toDate('2013-07-10') and toDate('2013-07-16') and UserID=123 SETTINGS max_threads = 1, min_bytes_to_use_direct_io = 0, max_read_buffer_size = 10485760;</query>

</test>
@@ -239,4 +239,8 @@ SELECT toUInt64('2147483649') AS x, toDecimal32(x, 0); -- { serverError 407 }
 SELECT toUInt64('9223372036854775807') AS x, toDecimal64(x, 0);
 SELECT toUInt64('9223372036854775809') AS x, toDecimal64(x, 0); -- { serverError 407 }

+SELECT toDecimal32(0, rowNumberInBlock()); -- { serverError 44 }
+SELECT toDecimal64(0, rowNumberInBlock()); -- { serverError 44 }
+SELECT toDecimal128(0, rowNumberInBlock()); -- { serverError 44 }
+
 DROP TABLE IF EXISTS test.decimal;
@@ -1,13 +1,13 @@
-SELECT toDate('2018-06-21') % 234 = toInt16(toDate('2018-06-21')) % 234;
-SELECT toDate('2018-06-21') % 23456 = toInt16(toDate('2018-06-21')) % 23456;
-SELECT toDate('2018-06-21') % 12376 = toInt16(toDate('2018-06-21')) % 12376;
-SELECT toDateTime('2018-06-21 12:12:12') % 234 = toInt32(toDateTime('2018-06-21 12:12:12')) % 234;
-SELECT toDateTime('2018-06-21 12:12:12') % 23456 = toInt32(toDateTime('2018-06-21 12:12:12')) % 23456;
-SELECT toDateTime('2018-06-21 12:12:12') % 12376 = toInt32(toDateTime('2018-06-21 12:12:12')) % 12376;
+SELECT toDate('2018-06-21') % 234 = toUInt16(toDate('2018-06-21')) % 234;
+SELECT toDate('2018-06-21') % 23456 = toUInt16(toDate('2018-06-21')) % 23456;
+SELECT toDate('2018-06-21') % 12376 = toUInt16(toDate('2018-06-21')) % 12376;
+SELECT toDateTime('2018-06-21 12:12:12') % 234 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 234;
+SELECT toDateTime('2018-06-21 12:12:12') % 23456 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 23456;
+SELECT toDateTime('2018-06-21 12:12:12') % 12376 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 12376;

-SELECT toDate('2018-06-21') % 234.8 = toInt16(toDate('2018-06-21')) % 234.8;
-SELECT toDate('2018-06-21') % 23456.8 = toInt16(toDate('2018-06-21')) % 23456.8;
-SELECT toDate('2018-06-21') % 12376.8 = toInt16(toDate('2018-06-21')) % 12376.8;
-SELECT toDateTime('2018-06-21 12:12:12') % 234.8 = toInt32(toDateTime('2018-06-21 12:12:12')) % 234.8;
-SELECT toDateTime('2018-06-21 12:12:12') % 23456.8 = toInt32(toDateTime('2018-06-21 12:12:12')) % 23456.8;
-SELECT toDateTime('2018-06-21 12:12:12') % 12376.8 = toInt32(toDateTime('2018-06-21 12:12:12')) % 12376.8;
+SELECT toDate('2018-06-21') % 234.8 = toUInt16(toDate('2018-06-21')) % 234.8;
+SELECT toDate('2018-06-21') % 23456.8 = toUInt16(toDate('2018-06-21')) % 23456.8;
+SELECT toDate('2018-06-21') % 12376.8 = toUInt16(toDate('2018-06-21')) % 12376.8;
+SELECT toDateTime('2018-06-21 12:12:12') % 234.8 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 234.8;
+SELECT toDateTime('2018-06-21 12:12:12') % 23456.8 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 23456.8;
+SELECT toDateTime('2018-06-21 12:12:12') % 12376.8 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 12376.8;
dbms/tests/queries/0_stateless/00746_sql_fuzzy.pl (new executable file, 151 lines)
File diff suppressed because one or more lines are too long

dbms/tests/queries/0_stateless/00746_sql_fuzzy.reference (new file, 1 line)
@@ -0,0 +1 @@
Still alive
dbms/tests/queries/0_stateless/00746_sql_fuzzy.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. $CURDIR/../shell_config.sh

export SQL_FUZZY_FILE_FUNCTIONS=${CLICKHOUSE_TMP}/clickhouse-functions
$CLICKHOUSE_CLIENT -q "select name from system.functions format TSV;" > $SQL_FUZZY_FILE_FUNCTIONS

export SQL_FUZZY_FILE_TABLE_FUNCTIONS=${CLICKHOUSE_TMP}/clickhouse-table_functions
$CLICKHOUSE_CLIENT -q "select name from system.table_functions format TSV;" > $SQL_FUZZY_FILE_TABLE_FUNCTIONS

# This is short run for ordinary tests.
# if you want long run use: env SQL_FUZZY_RUNS=100000 clickhouse-test sql_fuzzy

for SQL_FUZZY_RUN in $(seq ${SQL_FUZZY_RUNS:=10}); do
    env SQL_FUZZY_RUN=$SQL_FUZZY_RUN $CURDIR/00746_sql_fuzzy.pl | $CLICKHOUSE_CLIENT -n --ignore-error >/dev/null 2>&1
    if [[ `$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"` != 'Still alive' ]]; then
        break
    fi
done

$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"

# Query replay:
# cat clickhouse-server.log | grep -aF "<Debug> executeQuery: (from " | perl -lpe 's/^.*executeQuery: \(from \S+\) (.*)/$1;/' | clickhouse-client -n --ignore-error
dbms/tests/queries/bugs/fuzzy.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
SELECT __inner_restore_projection__(2.0885, -66.72488);
SELECT __inner_restore_projection__(-4, '');
SELECT __inner_restore_projection__(067274, 'vb\s');
SELECT sequenceCount((CAST((( SELECT NULL ) AS rg, ( SELECT ( SELECT [], '<e', caseWithExpr([NULL], -588755.149, []), retention(addWeeks((CAST((-7644612.39732) AS DateTime)), -23578040.02833), (CAST(([]) AS DateTime)), (CAST(([010977.08]) AS String))), emptyArrayToSingle('') ) , '\0', toUInt64([], 't3hw@'), '\0', toStartOfQuarter(-4230.1872, []) ) ) AS Date)));
SELECT extractURLParameter('?_', '\0');
SELECT extractURLParameter('ZiqSZeh?', '\0')
SELECT globalNotIn(['"wh'], [NULL]);
SELECT globalIn([''], [NULL])
SELECT ( SELECT toDecimal128([], rowNumberInBlock()) ) , lcm('', [[(CAST(('>A') AS String))]]);
@@ -53,6 +53,135 @@ FROM ontime
Selects the last value encountered.
The result is just as indeterminate as for the `any` function.

## groupBitAnd

Applies bitwise `AND` to a series of numbers.

```
groupBitAnd(expr)
```

**Parameters**

`expr` – An expression that results in a `UInt*` type.

**Return value**

Value of the `UInt*` type.

**Example**

Test data:

```
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

The query:

```
SELECT groupBitAnd(num) FROM t
```

Where `num` is the column with the test data.

Result:

```
binary     decimal
00000100 = 4
```

## groupBitOr

Applies bitwise `OR` to a series of numbers.

```
groupBitOr(expr)
```

**Parameters**

`expr` – An expression that results in a `UInt*` type.

**Return value**

Value of the `UInt*` type.

**Example**

Test data:

```
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

```
SELECT groupBitOr(num) FROM t
```

Where `num` is the column with the test data.

Result:

```
binary     decimal
01111101 = 125
```

## groupBitXor

Applies bitwise `XOR` to a series of numbers.

```
groupBitXor(expr)
```

**Parameters**

`expr` – An expression that results in a `UInt*` type.

**Return value**

Value of the `UInt*` type.

**Example**

Test data:

```
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

```
SELECT groupBitXor(num) FROM t
```

Where `num` is the column with the test data.

Result:

```
binary     decimal
01101000 = 104
```
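
As a quick standalone cross-check of the three documented results (not part of the documentation itself), folding the sample values {44, 28, 13, 85} with the corresponding C++ bit operations reproduces 4, 125 and 104:

```
// Verifies the groupBitAnd / groupBitOr / groupBitXor examples above by folding
// the sample values with the matching bitwise operators.
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    std::vector<unsigned> nums{44, 28, 13, 85};
    unsigned and_all = std::accumulate(nums.begin(), nums.end(), ~0u, std::bit_and<unsigned>());
    unsigned or_all  = std::accumulate(nums.begin(), nums.end(), 0u,  std::bit_or<unsigned>());
    unsigned xor_all = std::accumulate(nums.begin(), nums.end(), 0u,  std::bit_xor<unsigned>());
    std::printf("AND=%u OR=%u XOR=%u\n", and_all, or_all, xor_all);   // AND=4 OR=125 XOR=104
}
```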

## min(x)

Calculates the minimum.
@@ -1 +1,13 @@
-../../../en/operations/table_engines/index.md
+# Table engines
+The table engine (i.e. the type of table) determines:

+* How and where the data is stored: where it is written to and where it is read from.
+* Which queries are supported, and how.
+* Concurrent data access.
+* Use of indexes, if present.
+* Whether multithreaded query execution is possible.
+* Data replication parameters.

+When reading, the engine only has to return the requested columns, but in some cases the engine can partially process the data while answering the request.

+For most serious tasks, you should use engines from the MergeTree family.
@@ -1,3 +1,6 @@
 add_executable(clickhouse-zookeeper-cli zookeeper-cli.cpp)
 target_link_libraries(clickhouse-zookeeper-cli clickhouse_common_zookeeper ${LINE_EDITING_LIBS})
+if (READLINE_INCLUDE_DIR)
+    target_include_directories (clickhouse-zookeeper-cli SYSTEM PRIVATE ${READLINE_INCLUDE_DIR})
+endif ()
 INSTALL(TARGETS clickhouse-zookeeper-cli RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse-utils)