Merge branch 'master' into optimize_parquet_reader
Commit a12d87890f
@@ -48,6 +48,7 @@ RUN apt-get update \
        gdb \
        git \
        gperf \
+       libclang-rt-${LLVM_VERSION}-dev \
        lld-${LLVM_VERSION} \
        llvm-${LLVM_VERSION} \
        llvm-${LLVM_VERSION}-dev \
@@ -76,7 +76,7 @@
#charts
{
    height: 100%;
-   display: flex;
+   display: none;
    flex-flow: row wrap;
    gap: 1rem;
}
@@ -170,6 +170,14 @@
    background: var(--button-background-color);
}
+
+#auth-error {
+    color: var(--error-color);
+
+    display: flex;
+    flex-flow: row nowrap;
+    justify-content: center;
+}

form {
    display: inline;
}
@@ -293,6 +301,7 @@
        </div>
    </form>
</div>
+<div id="auth-error"></div>
<div id="charts"></div>
<script>
@@ -322,6 +331,11 @@ if (location.protocol != 'file:') {
    user = 'default';
}

+const errorCodeRegex = /Code: (\d+)/
+const errorCodeMessageMap = {
+    516: 'Error authenticating with database. Please check your connection params and try again.'
+}
+
/// This is just a demo configuration of the dashboard.

let queries = [
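The hunk above introduces the dashboard's error-code mapping: raw ClickHouse errors are probed with errorCodeRegex, and code 516 (authentication failed) is swapped for a friendly message. A minimal standalone sketch of the same lookup, written in C++ purely for illustration (the sample error string and all names here are made up, not part of this commit):

    #include <iostream>
    #include <map>
    #include <regex>
    #include <string>

    int main()
    {
        // Map of ClickHouse error codes to user-facing messages, mirroring errorCodeMessageMap.
        const std::map<int, std::string> error_code_messages = {
            {516, "Error authenticating with database. Please check your connection params and try again."}};

        const std::string error = "Code: 516. DB::Exception: default: Authentication failed";
        std::smatch match;
        if (std::regex_search(error, match, std::regex("Code: (\\d+)")))
        {
            const auto it = error_code_messages.find(std::stoi(match[1].str()));
            if (it != error_code_messages.end())
                std::cout << it->second << '\n';  // friendly message instead of the raw error
        }
    }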
@@ -597,6 +611,11 @@ function insertChart(i) {
    query_editor_confirm.value = 'Ok';
    query_editor_confirm.className = 'edit-confirm';

+   function getCurrentIndex() {
+       /// Indices may change after deletion of other element, hence captured "i" may become incorrect.
+       return [...charts.querySelectorAll('.chart')].findIndex(child => chart == child);
+   }
+
    function editConfirm() {
        query_editor.style.display = 'none';
        query_error.style.display = 'none';
@@ -605,7 +624,8 @@ function insertChart(i) {
        title_text.data = '';
        findParamsInQuery(q.query, params);
        buildParams();
-       draw(i, chart, getParamsForURL(), q.query);
+       const idx = getCurrentIndex();
+       draw(idx, chart, getParamsForURL(), q.query);
        saveState();
    }
@@ -649,8 +669,7 @@ function insertChart(i) {
    let trash_text = document.createTextNode('✕');
    trash.appendChild(trash_text);
    trash.addEventListener('click', e => {
-       /// Indices may change after deletion of other element, hence captured "i" may become incorrect.
-       let idx = [...charts.querySelectorAll('.chart')].findIndex(child => chart == child);
+       const idx = getCurrentIndex();
        if (plots[idx]) {
            plots[idx].destroy();
            plots[idx] = null;
@@ -796,6 +815,18 @@ async function draw(idx, chart, url_params, query) {
        error = e.toString();
    }

+   if (error) {
+       const errorMatch = error.match(errorCodeRegex)
+       if (errorMatch && errorMatch[1]) {
+           const code = errorMatch[1]
+           if (errorCodeMessageMap[code]) {
+               const authError = new Error(errorCodeMessageMap[code])
+               authError.code = code
+               throw authError
+           }
+       }
+   }
+
    if (!error) {
        if (!Array.isArray(data)) {
            error = "Query should return an array.";
@@ -853,16 +884,50 @@ async function draw(idx, chart, url_params, query) {
    sync.sub(plots[idx]);

    /// Set title
-   const title = queries[idx].title ? queries[idx].title.replaceAll(/\{(\w+)\}/g, (_, name) => params[name] ) : '';
+   const title = queries[idx] && queries[idx].title ? queries[idx].title.replaceAll(/\{(\w+)\}/g, (_, name) => params[name] ) : '';
    chart.querySelector('.title').firstChild.data = title;
}

+function showAuthError(message) {
+    const charts = document.querySelector('#charts');
+    charts.style.display = 'none';
+    const add = document.querySelector('#add');
+    add.style.display = 'none';
+
+    const authError = document.querySelector('#auth-error');
+    authError.textContent = message;
+    authError.style.display = 'flex';
+}
+
+function hideAuthError() {
+    const charts = document.querySelector('#charts');
+    charts.style.display = 'flex';
+    const add = document.querySelector('#add');
+    add.style.display = 'block';
+
+    const authError = document.querySelector('#auth-error');
+    authError.textContent = '';
+    authError.style.display = 'none';
+}
+
+let firstLoad = true;
+
async function drawAll() {
    let params = getParamsForURL();
    const charts = document.getElementsByClassName('chart');
-   for (let i = 0; i < queries.length; ++i) {
-       draw(i, charts[i], params, queries[i].query);
-   }
+
+   if (!firstLoad) {
+       hideAuthError();
+   }
+   await Promise.all([...Array(queries.length)].map(async (_, i) => {
+       return draw(i, charts[i], params, queries[i].query).catch((e) => {
+           if (!firstLoad) {
+               showAuthError(e.message);
+           }
+       });
+   })).then(() => {
+       firstLoad = false;
+   })
}

function resize() {
src/AggregateFunctions/AggregateFunctionVarianceMatrix.cpp (new file)
@@ -0,0 +1,40 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/Helpers.h>
#include <AggregateFunctions/FactoryHelpers.h>
#include <AggregateFunctions/AggregateFunctionVarianceMatrix.h>


namespace DB
{
struct Settings;

namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

namespace
{

template <typename FunctionTemplate>
AggregateFunctionPtr createAggregateFunctionVarianceMatrix(
    const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
    assertNoParameters(name, parameters);
    for (const auto & argument_type : argument_types)
        if (!isNativeNumber(argument_type))
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} only supports numerical types", name);

    return std::make_shared<FunctionTemplate>(argument_types);
}

}

void registerAggregateFunctionsVarianceMatrix(AggregateFunctionFactory & factory)
{
    factory.registerFunction("covarSampMatrix", createAggregateFunctionVarianceMatrix<AggregateFunctionCovarSampMatrix>);
    factory.registerFunction("covarPopMatrix", createAggregateFunctionVarianceMatrix<AggregateFunctionCovarPopMatrix>);
    factory.registerFunction("corrMatrix", createAggregateFunctionVarianceMatrix<AggregateFunctionCorrMatrix>);
}

}
src/AggregateFunctions/AggregateFunctionVarianceMatrix.h (new file)
@@ -0,0 +1,159 @@
#pragma once

#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <Common/PODArray.h>
#include <Common/PODArray_fwd.h>
#include <DataTypes/DataTypeArray.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/Moments.h>
#include <DataTypes/DataTypesNumber.h>

namespace DB
{
struct Settings;

enum class StatisticsMatrixFunctionKind
{
    covarPopMatrix,
    covarSampMatrix,
    corrMatrix
};

template <StatisticsMatrixFunctionKind _kind>
struct AggregateFunctionVarianceMatrixData
{
    using DataType = std::conditional_t<_kind == StatisticsMatrixFunctionKind::corrMatrix, CorrMoments<Float64>, CovarMoments<Float64>>;

    AggregateFunctionVarianceMatrixData() = default;

    explicit AggregateFunctionVarianceMatrixData(const size_t _num_args)
        : num_args(_num_args)
    {
        data_matrix.resize_fill(num_args * (num_args + 1) / 2, DataType());
    }

    void add(const IColumn ** column, const size_t row_num)
    {
        for (size_t i = 0; i < num_args; ++i)
            for (size_t j = 0; j <= i; ++j)
                data_matrix[i * (i + 1) / 2 + j].add(column[i]->getFloat64(row_num), column[j]->getFloat64(row_num));
    }

    void merge(const AggregateFunctionVarianceMatrixData & other)
    {
        for (size_t i = 0; i < num_args; ++i)
            for (size_t j = 0; j <= i; ++j)
                data_matrix[i * (i + 1) / 2 + j].merge(other.data_matrix[i * (i + 1) / 2 + j]);
    }

    void serialize(WriteBuffer & buf) const
    {
        for (size_t i = 0; i < num_args; ++i)
            for (size_t j = 0; j <= i; ++j)
                data_matrix[i * (i + 1) / 2 + j].write(buf);
    }

    void deserialize(ReadBuffer & buf)
    {
        for (size_t i = 0; i < num_args; ++i)
            for (size_t j = 0; j <= i; ++j)
                data_matrix[i * (i + 1) / 2 + j].read(buf);
    }

    void insertResultInto(IColumn & to) const
    {
        auto & data_to = assert_cast<ColumnFloat64 &>(assert_cast<ColumnArray &>(assert_cast<ColumnArray &>(to).getData()).getData()).getData();
        auto & root_offsets_to = assert_cast<ColumnArray &>(to).getOffsets();
        auto & nested_offsets_to = assert_cast<ColumnArray &>(assert_cast<ColumnArray &>(to).getData()).getOffsets();
        for (size_t i = 0; i < num_args; ++i)
        {
            for (size_t j = 0; j < num_args; ++j)
            {
                auto & data = i < j ? data_matrix[j * (j + 1) / 2 + i] : data_matrix[i * (i + 1) / 2 + j];
                if constexpr (kind == StatisticsMatrixFunctionKind::covarPopMatrix)
                    data_to.push_back(data.getPopulation());
                if constexpr (kind == StatisticsMatrixFunctionKind::covarSampMatrix)
                    data_to.push_back(data.getSample());
                if constexpr (kind == StatisticsMatrixFunctionKind::corrMatrix)
                    data_to.push_back(data.get());
            }
            nested_offsets_to.push_back(nested_offsets_to.back() + num_args);
        }
        root_offsets_to.push_back(root_offsets_to.back() + num_args);
    }

    static constexpr StatisticsMatrixFunctionKind kind = _kind;
    PaddedPODArray<DataType> data_matrix;
    size_t num_args;
};

template <typename Data>
class AggregateFunctionVarianceMatrix final
    : public IAggregateFunctionDataHelper<Data, AggregateFunctionVarianceMatrix<Data>>
{
public:
    explicit AggregateFunctionVarianceMatrix(const DataTypes & argument_types_)
        : IAggregateFunctionDataHelper<Data, AggregateFunctionVarianceMatrix<Data>>(argument_types_, {}, createResultType())
    {}

    AggregateFunctionVarianceMatrix(const IDataType &, const DataTypes & argument_types_)
        : IAggregateFunctionDataHelper<Data, AggregateFunctionVarianceMatrix<Data>>(argument_types_, {}, createResultType())
    {}

    String getName() const override
    {
        if constexpr (Data::kind == StatisticsMatrixFunctionKind::covarPopMatrix)
            return "covarPopMatrix";
        if constexpr (Data::kind == StatisticsMatrixFunctionKind::covarSampMatrix)
            return "covarSampMatrix";
        if constexpr (Data::kind == StatisticsMatrixFunctionKind::corrMatrix)
            return "corrMatrix";
        UNREACHABLE();
    }

    void create(AggregateDataPtr __restrict place) const override
    {
        new (place) Data(this->argument_types.size());
    }

    static DataTypePtr createResultType()
    {
        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeArray>(std::make_shared<DataTypeFloat64>()));
    }

    bool allocatesMemoryInArena() const override { return false; }

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        this->data(place).add(columns, row_num);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
        this->data(place).merge(this->data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        this->data(place).deserialize(buf);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        this->data(place).insertResultInto(to);
    }
};

using AggregateFunctionCovarPopMatrix = AggregateFunctionVarianceMatrix<AggregateFunctionVarianceMatrixData<StatisticsMatrixFunctionKind::covarPopMatrix>>;
using AggregateFunctionCovarSampMatrix = AggregateFunctionVarianceMatrix<AggregateFunctionVarianceMatrixData<StatisticsMatrixFunctionKind::covarSampMatrix>>;
using AggregateFunctionCorrMatrix = AggregateFunctionVarianceMatrix<AggregateFunctionVarianceMatrixData<StatisticsMatrixFunctionKind::corrMatrix>>;

}
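A note on the layout used by AggregateFunctionVarianceMatrixData above: the covariance/correlation matrix is symmetric, so data_matrix holds only the lower triangle, num_args * (num_args + 1) / 2 cells, and insertResultInto reads cell (i, j) from (j, i) whenever i < j. A self-contained sketch of that packed indexing (PackedSymmetricMatrix is an illustrative name, not from the commit):

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Symmetric n x n matrix stored as n*(n+1)/2 cells: cell (i, j) with j <= i
    // lives at index i*(i+1)/2 + j, exactly as data_matrix is addressed above.
    struct PackedSymmetricMatrix
    {
        explicit PackedSymmetricMatrix(std::size_t n_) : n(n_), cells(n_ * (n_ + 1) / 2) {}

        double & at(std::size_t i, std::size_t j)
        {
            if (i < j)
                std::swap(i, j);  // mirror the upper triangle onto the lower one
            return cells[i * (i + 1) / 2 + j];
        }

        std::size_t n;
        std::vector<double> cells;
    };

    int main()
    {
        PackedSymmetricMatrix m(4);  // 4x4 matrix in 10 cells instead of 16
        m.at(1, 3) = 42.0;
        assert(m.at(3, 1) == 42.0);  // symmetric access hits the same cell
    }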
@@ -40,6 +40,7 @@ void registerAggregateFunctionsMax(AggregateFunctionFactory &);
void registerAggregateFunctionsAny(AggregateFunctionFactory &);
void registerAggregateFunctionsStatisticsStable(AggregateFunctionFactory &);
void registerAggregateFunctionsStatisticsSimple(AggregateFunctionFactory &);
+void registerAggregateFunctionsVarianceMatrix(AggregateFunctionFactory &);
void registerAggregateFunctionSum(AggregateFunctionFactory &);
void registerAggregateFunctionSumCount(AggregateFunctionFactory &);
void registerAggregateFunctionSumMap(AggregateFunctionFactory &);

@@ -126,6 +127,7 @@ void registerAggregateFunctions()
    registerAggregateFunctionsAny(factory);
    registerAggregateFunctionsStatisticsStable(factory);
    registerAggregateFunctionsStatisticsSimple(factory);
+   registerAggregateFunctionsVarianceMatrix(factory);
    registerAggregateFunctionSum(factory);
    registerAggregateFunctionSumCount(factory);
    registerAggregateFunctionSumMap(factory);
@@ -216,7 +216,7 @@ struct ConvertImpl
    }
    else if constexpr (
        (std::is_same_v<FromDataType, DataTypeIPv4> != std::is_same_v<ToDataType, DataTypeIPv4>)
-       && !(is_any_of<FromDataType, DataTypeUInt8, DataTypeUInt16, DataTypeUInt32> || is_any_of<ToDataType, DataTypeUInt32, DataTypeUInt64, DataTypeUInt128, DataTypeUInt256>)
+       && !(is_any_of<FromDataType, DataTypeUInt8, DataTypeUInt16, DataTypeUInt32, DataTypeUInt64> || is_any_of<ToDataType, DataTypeUInt32, DataTypeUInt64, DataTypeUInt128, DataTypeUInt256>)
    )
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported",

@@ -303,7 +303,10 @@ struct ConvertImpl
    }
    else
    {
-       vec_to[i] = static_cast<ToFieldType>(vec_from[i]);
+       if constexpr (std::is_same_v<ToDataType, DataTypeIPv4> && std::is_same_v<FromDataType, DataTypeUInt64>)
+           vec_to[i] = static_cast<ToFieldType>(static_cast<IPv4::UnderlyingType>(vec_from[i]));
+       else
+           vec_to[i] = static_cast<ToFieldType>(vec_from[i]);
    }
}
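The new if constexpr branch makes an implicit UInt64 -> IPv4 conversion truncate to the low 32 bits (the IPv4::UnderlyingType) instead of failing. A standalone illustration of the resulting arithmetic (values taken from the 02551 test below):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        std::uint64_t raw = 22906492245ULL;                    // wider than 32 bits
        std::uint32_t ipv4 = static_cast<std::uint32_t>(raw);  // 22906492245 % 2^32 = 1431655765
        std::printf("%u.%u.%u.%u\n",
                    (unsigned)(ipv4 >> 24), (unsigned)((ipv4 >> 16) & 0xFF),
                    (unsigned)((ipv4 >> 8) & 0xFF), (unsigned)(ipv4 & 0xFF));
        // prints 85.85.85.85
    }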
@@ -374,7 +377,7 @@ struct ToDateTransform32Or64
    static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone)
    {
        // since converting to Date, no need in values outside of default LUT range.
-       return (from < DATE_LUT_MAX_DAY_NUM)
+       return (from <= DATE_LUT_MAX_DAY_NUM)
            ? from
            : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)));
    }

@@ -391,7 +394,7 @@ struct ToDateTransform32Or64Signed
        /// The function should be monotonic (better for query optimizations), so we saturate instead of overflow.
        if (from < 0)
            return 0;
-       return (from < DATE_LUT_MAX_DAY_NUM)
+       return (from <= DATE_LUT_MAX_DAY_NUM)
            ? static_cast<ToType>(from)
            : time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)));
    }
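The change from < to <= fixes an off-by-one: DATE_LUT_MAX_DAY_NUM is itself a valid day number, so 65535 must map to 2149-06-06 (the closed upper bound of the Date range) rather than fall through to the saturating branch. A quick standalone check of the boundary, assuming DATE_LUT_MAX_DAY_NUM == 65535 (the UInt16 maximum backing Date; the constant's value is not shown in this diff):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const std::uint32_t DATE_LUT_MAX_DAY_NUM = 65535;  // assumed value, matches UInt16 Date
        auto in_range = [&](std::uint64_t from) { return from <= DATE_LUT_MAX_DAY_NUM; };
        assert(in_range(65534));   // 2149-06-05
        assert(in_range(65535));   // 2149-06-06, the last representable Date
        assert(!in_range(65536));  // out of range; the 02540 test expects 1970-01-01
    }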
@@ -17,7 +17,7 @@
#include <Processors/QueryPlan/ExpressionStep.h>
#include <Processors/QueryPlan/FilterStep.h>
#include <Processors/QueryPlan/ReadFromPreparedSource.h>
-#include <Processors/Executors/PullingPipelineExecutor.h>
+#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
#include <Processors/Transforms/CheckSortedTransform.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTFunction.h>

@@ -197,7 +197,7 @@ bool isStorageTouchedByMutations(
    MergeTreeData::DataPartPtr source_part,
    const StorageMetadataPtr & metadata_snapshot,
    const std::vector<MutationCommand> & commands,
-   ContextMutablePtr context_copy)
+   ContextPtr context)
{
    if (commands.empty())
        return false;

@@ -210,7 +210,7 @@ bool isStorageTouchedByMutations(

    if (command.partition)
    {
-       const String partition_id = storage.getPartitionIDFromQuery(command.partition, context_copy);
+       const String partition_id = storage.getPartitionIDFromQuery(command.partition, context);
        if (partition_id == source_part->info.partition_id)
            all_commands_can_be_skipped = false;
    }

@@ -221,15 +221,7 @@ bool isStorageTouchedByMutations(
    if (all_commands_can_be_skipped)
        return false;

-   /// We must read with one thread because it guarantees that
-   /// output stream will be sorted after reading from MergeTree parts.
-   /// Disable all settings that can enable reading with several streams.
-   context_copy->setSetting("max_streams_to_max_threads_ratio", 1);
-   context_copy->setSetting("max_threads", 1);
-   context_copy->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false);
-   context_copy->setSetting("max_streams_for_merge_tree_reading", Field(0));
-
-   ASTPtr select_query = prepareQueryAffectedAST(commands, storage.shared_from_this(), context_copy);
+   ASTPtr select_query = prepareQueryAffectedAST(commands, storage.shared_from_this(), context);

    auto storage_from_part = std::make_shared<StorageFromMergeTreeDataPart>(source_part);

@@ -237,12 +229,12 @@ bool isStorageTouchedByMutations(
    /// For some reason it may copy context and give it into ExpressionTransform
    /// after that we will use context from destroyed stack frame in our stream.
    InterpreterSelectQuery interpreter(
-       select_query, context_copy, storage_from_part, metadata_snapshot, SelectQueryOptions().ignoreLimits().ignoreProjections());
+       select_query, context, storage_from_part, metadata_snapshot, SelectQueryOptions().ignoreLimits().ignoreProjections());
    auto io = interpreter.execute();
-   PullingPipelineExecutor executor(io.pipeline);
+   PullingAsyncPipelineExecutor executor(io.pipeline);

    Block block;
-   while (executor.pull(block)) {}
+   while (block.rows() == 0 && executor.pull(block));

    if (!block.rows())
        return false;
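The executor swap also changes the pull loop's meaning: the old while (executor.pull(block)) {} drained the whole pipeline and kept only the last block, whereas the async executor may emit empty blocks, so the new loop skips those and stops at the first block with rows, which is all this count()-style check needs. A simplified stand-in showing the two shapes (Executor and Block here are fakes, not ClickHouse's classes):

    #include <cstddef>
    #include <vector>

    struct Block { std::size_t rows_ = 0; std::size_t rows() const { return rows_; } };

    struct Executor
    {
        std::vector<Block> blocks;
        std::size_t pos = 0;
        bool pull(Block & b) { if (pos == blocks.size()) return false; b = blocks[pos++]; return true; }
    };

    int main()
    {
        Executor executor{{Block{0}, Block{1}, Block{0}}};
        Block block;
        // Old form: while (executor.pull(block)) {}  -- ends holding the (empty) last block.
        // New form stops as soon as a non-empty block arrives:
        while (block.rows() == 0 && executor.pull(block));
        return block.rows() ? 0 : 1;  // non-empty block -> the storage is touched
    }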
@@ -23,7 +23,7 @@ bool isStorageTouchedByMutations(
    MergeTreeData::DataPartPtr source_part,
    const StorageMetadataPtr & metadata_snapshot,
    const std::vector<MutationCommand> & commands,
-   ContextMutablePtr context_copy
+   ContextPtr context
);

ASTPtr getPartitionAndPredicateExpressionForMutationCommand(
@@ -1543,13 +1543,6 @@ bool MutateTask::prepare()

    auto context_for_reading = Context::createCopy(ctx->context);

-   /// We must read with one thread because it guarantees that output stream will be sorted.
-   /// Disable all settings that can enable reading with several streams.
-   context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1);
-   context_for_reading->setSetting("max_threads", 1);
-   context_for_reading->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false);
-   context_for_reading->setSetting("max_streams_for_merge_tree_reading", Field(0));
-
    /// Allow mutations to work when force_index_by_date or force_primary_key is on.
    context_for_reading->setSetting("force_index_by_date", false);
    context_for_reading->setSetting("force_primary_key", false);

@@ -1562,7 +1555,7 @@ bool MutateTask::prepare()
    }

    if (ctx->source_part->isStoredOnDisk() && !isStorageTouchedByMutations(
-       *ctx->data, ctx->source_part, ctx->metadata_snapshot, ctx->commands_for_part, Context::createCopy(context_for_reading)))
+       *ctx->data, ctx->source_part, ctx->metadata_snapshot, ctx->commands_for_part, context_for_reading))
    {
        NameSet files_to_copy_instead_of_hardlinks;
        auto settings_ptr = ctx->data->getSettings();

@@ -1597,6 +1590,15 @@ bool MutateTask::prepare()
        LOG_TRACE(ctx->log, "Mutating part {} to mutation version {}", ctx->source_part->name, ctx->future_part->part_info.mutation);
    }

+   /// We must read with one thread because it guarantees that output stream will be sorted.
+   /// Disable all settings that can enable reading with several streams.
+   /// NOTE: isStorageTouchedByMutations() above is done without this settings because it
+   /// should be ok to calculate count() with multiple streams.
+   context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1);
+   context_for_reading->setSetting("max_threads", 1);
+   context_for_reading->setSetting("allow_asynchronous_read_from_io_pool_for_merge_tree", false);
+   context_for_reading->setSetting("max_streams_for_merge_tree_reading", Field(0));
+
    MutationHelpers::splitMutationCommands(ctx->source_part, ctx->commands_for_part, ctx->for_interpreter, ctx->for_file_renames);

    ctx->stage_progress = std::make_unique<MergeStageProgress>(1.0);
@@ -208,6 +208,8 @@ Merge it only if you intend to backport changes to the target branch, otherwise
        self.cherrypick_pr.add_to_labels(Labels.CHERRYPICK)
        self.cherrypick_pr.add_to_labels(Labels.DO_NOT_TEST)
        self._assign_new_pr(self.cherrypick_pr)
+       # update cherrypick PR to get the state for PR.mergable
+       self.cherrypick_pr.update()

    def create_backport(self):
        assert self.cherrypick_pr is not None
@@ -953,6 +953,7 @@
"topKWeighted"
"stochasticLinearRegression"
"corr"
+"corrMatrix"
"uniqCombined64"
"intervalLengthSum"
"uniqCombined"

@@ -967,6 +968,7 @@
"quantiles"
"sum"
"covarPop"
+"covarPopMatrix"
"row_number"
"kurtPop"
"kurtSamp"

@@ -1021,6 +1023,7 @@
"quantilesTiming"
"welchTTest"
"covarSamp"
+"covarSampMatrix"
"varPopStable"
"quantileTiming"
"quantileExactInclusive"
@@ -0,0 +1,18 @@
[[nan]]
[[nan]]
[[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan]]
[[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan]]
[[1,-0.09561,0.24287,0.74554],[-0.09561,1,0.17303,0.10558],[0.24287,0.17303,1,0.25797],[0.74554,0.10558,0.25797,1]]
0 0 0
[[nan]]
[[nan]]
[[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan]]
[[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan]]
[[9.16667,-1.95556,4.5335,7.49776],[-1.95556,45.63378,7.20628,2.36899],[4.5335,7.20628,38.01103,5.28296],[7.49776,2.36899,5.28296,11.03352]]
0 0 0
[[nan]]
[[0]]
[[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan],[nan,nan,nan,nan]]
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
[[8.25,-1.76,4.08015,6.74799],[-1.76,41.0704,6.48565,2.13209],[4.08015,6.48565,34.20993,4.75467],[6.74799,2.13209,4.75467,9.93017]]
0 0 0
@@ -0,0 +1,41 @@
DROP TABLE IF EXISTS fh;

CREATE TABLE fh(a_value UInt32, b_value Float64, c_value Float64, d_value Float64) ENGINE = Memory;

INSERT INTO fh(a_value, b_value, c_value, d_value) VALUES (1, 5.6,-4.4, 2.6),(2, -9.6, 3, 3.3),(3, -1.3,-4, 1.2),(4, 5.3,9.7,2.3),(5, 4.4,0.037,1.222),(6, -8.6,-7.8,2.1233),(7, 5.1,9.3,8.1222),(8, 7.9,-3.6,9.837),(9, -8.2,0.62,8.43555),(10, -3,7.3,6.762);

SELECT corrMatrix(a_value) FROM (select a_value from fh limit 0);

SELECT corrMatrix(a_value) FROM (select a_value from fh limit 1);

SELECT corrMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0);

SELECT corrMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1);

SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), corrMatrix(a_value, b_value, c_value, d_value)) FROM fh;

SELECT round(abs(corr(x1,x2) - corrMatrix(x1,x2)[1][2]), 5), round(abs(corr(x1,x1) - corrMatrix(x1,x2)[1][1]), 5), round(abs(corr(x2,x2) - corrMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000));

SELECT covarSampMatrix(a_value) FROM (select a_value from fh limit 0);

SELECT covarSampMatrix(a_value) FROM (select a_value from fh limit 1);

SELECT covarSampMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0);

SELECT covarSampMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1);

SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), covarSampMatrix(a_value, b_value, c_value, d_value)) FROM fh;

SELECT round(abs(covarSamp(x1,x2) - covarSampMatrix(x1,x2)[1][2]), 5), round(abs(covarSamp(x1,x1) - covarSampMatrix(x1,x2)[1][1]), 5), round(abs(covarSamp(x2,x2) - covarSampMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000));

SELECT covarPopMatrix(a_value) FROM (select a_value from fh limit 0);

SELECT covarPopMatrix(a_value) FROM (select a_value from fh limit 1);

SELECT covarPopMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0);

SELECT covarPopMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1);

SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), covarPopMatrix(a_value, b_value, c_value, d_value)) FROM fh;

SELECT round(abs(covarPop(x1,x2) - covarPopMatrix(x1,x2)[1][2]), 5), round(abs(covarPop(x1,x1) - covarPopMatrix(x1,x2)[1][1]), 5), round(abs(covarPop(x2,x2) - covarPopMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000));
@@ -0,0 +1,27 @@
2149-06-06	65535
2149-06-06	toUInt16(65535)
2149-06-06	toInt32(65535)
2149-06-06	toUInt32(65535)
2149-06-06	toDate(65535)
2149-06-06	CAST(65535 as UInt16)
2149-06-06	CAST(65535 as Int32)
2149-06-06	CAST(65535 as UInt32)
2149-06-06	CAST(65535 as Date)
2149-06-05	65534
2149-06-05	toUInt16(65534)
2149-06-05	toInt32(65534)
2149-06-05	toUInt32(65534)
2149-06-05	toDate(65534)
2149-06-05	CAST(65534 as UInt16)
2149-06-05	CAST(65534 as Int32)
2149-06-05	CAST(65534 as UInt32)
2149-06-05	CAST(65534 as Date)
1970-01-01	65536
1970-01-01	toUInt16(65536)
1970-01-01	toInt32(65536)
1970-01-01	toUInt32(65536)
1970-01-01	toDate(65536)
1970-01-01	CAST(65536 as UInt16)
1970-01-01	CAST(65536 as Int32)
1970-01-01	CAST(65536 as UInt32)
1970-01-01	CAST(65536 as Date)
@@ -0,0 +1,72 @@
DROP TABLE IF EXISTS 02540_date;
CREATE TABLE 02540_date (txt String, x Date) engine=Memory;

-- Date: Supported range of values: [1970-01-01, 2149-06-06].
--                                  ^----closed interval---^

INSERT INTO 02540_date VALUES('65535', 65535);
INSERT INTO 02540_date VALUES('toUInt16(65535)', toUInt16(65535)); -- #43370 weird one -> used to be 1970-01-01
INSERT INTO 02540_date VALUES('toInt32(65535)', toInt32(65535));
INSERT INTO 02540_date VALUES('toUInt32(65535)', toUInt32(65535));
INSERT INTO 02540_date VALUES('toDate(65535)', toDate(65535));

INSERT INTO 02540_date VALUES('CAST(65535 as UInt16)', CAST(65535 as UInt16));
INSERT INTO 02540_date VALUES('CAST(65535 as Int32)', CAST(65535 as Int32));
INSERT INTO 02540_date VALUES('CAST(65535 as UInt32)', CAST(65535 as UInt32));
INSERT INTO 02540_date VALUES('CAST(65535 as Date)', CAST(65535 as Date));

INSERT INTO 02540_date VALUES('65534', 65534);
INSERT INTO 02540_date VALUES('toUInt16(65534)', toUInt16(65534));
INSERT INTO 02540_date VALUES('toInt32(65534)', toInt32(65534));
INSERT INTO 02540_date VALUES('toUInt32(65534)', toUInt32(65534));
INSERT INTO 02540_date VALUES('toDate(65534)', toDate(65534));

INSERT INTO 02540_date VALUES('CAST(65534 as UInt16)', CAST(65534 as UInt16));
INSERT INTO 02540_date VALUES('CAST(65534 as Int32)', CAST(65534 as Int32));
INSERT INTO 02540_date VALUES('CAST(65534 as UInt32)', CAST(65534 as UInt32));
INSERT INTO 02540_date VALUES('CAST(65534 as Date)', CAST(65534 as Date));

INSERT INTO 02540_date VALUES('65536', 65536);
INSERT INTO 02540_date VALUES('toUInt16(65536)', toUInt16(65536));
INSERT INTO 02540_date VALUES('toInt32(65536)', toInt32(65536));
INSERT INTO 02540_date VALUES('toUInt32(65536)', toUInt32(65536));
INSERT INTO 02540_date VALUES('toDate(65536)', toDate(65536));

INSERT INTO 02540_date VALUES('CAST(65536 as UInt16)', CAST(65536 as UInt16));
INSERT INTO 02540_date VALUES('CAST(65536 as Int32)', CAST(65536 as Int32));
INSERT INTO 02540_date VALUES('CAST(65536 as UInt32)', CAST(65536 as UInt32));
INSERT INTO 02540_date VALUES('CAST(65536 as Date)', CAST(65536 as Date));


SELECT x, txt FROM 02540_date WHERE txt == '65535';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65535)';
SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65535)';

SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as UInt16)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as Int32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as UInt32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as Date)';

SELECT x, txt FROM 02540_date WHERE txt == '65534';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65534)';
SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65534)';

SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as UInt16)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as Int32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as UInt32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as Date)';

SELECT x, txt FROM 02540_date WHERE txt == '65536';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65536)';
SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65536)';
SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65536)';
SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65536)';

SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65536 as UInt16)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65536 as Int32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65536 as UInt32)';
SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65536 as Date)';
@@ -0,0 +1,2 @@
85.85.85.85
138.68.230.86
tests/queries/0_stateless/02551_ipv4_implicit_uint64.sql (new file)
@@ -0,0 +1,4 @@
CREATE TABLE ip4test (ip IPv4) ENGINE=Memory;
INSERT INTO ip4test VALUES (22906492245), (2319771222);
SELECT * FROM ip4test;
DROP TABLE ip4test;
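The expected 02551 output checks out arithmetically: 22906492245 exceeds 2^32 and truncates to 22906492245 mod 4294967296 = 1431655765 = 0x55555555, i.e. 85.85.85.85, while 2319771222 already fits in 32 bits (0x8A44E656) and round-trips unchanged as 138.68.230.86.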