Merge branch 'master' into jepsen-label

Mikhail f. Shiryaev 2022-07-29 22:20:12 +02:00 committed by GitHub
commit d86f07d7ac
8 changed files with 79 additions and 5 deletions

View File

@@ -237,7 +237,11 @@ def parse_env_variables(
result.append("CCACHE_BASEDIR=/build")
result.append("CCACHE_NOHASHDIR=true")
result.append("CCACHE_COMPILERCHECK=content")
result.append("CCACHE_MAXSIZE=15G")
cache_maxsize = "15G"
if clang_tidy:
# 15G is not enough for a clang-tidy build
cache_maxsize = "25G"
result.append(f"CCACHE_MAXSIZE={cache_maxsize}")
# result.append("CCACHE_UMASK=777")
if distcc_hosts:

View File

@@ -245,6 +245,11 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ContextPtr context_, ASTDropQue
DatabaseCatalog::instance().tryRemoveLoadingDependencies(table_id, getContext()->getSettingsRef().check_table_dependencies,
is_drop_or_detach_database);
database->dropTable(context_, table_id.table_name, query.sync);
/// We have to drop the mmapped file cache when dropping a table from an Ordinary database
/// to avoid reading old data if a new table with the same name is created
if (database->getUUID() == UUIDHelpers::Nil)
context_->dropMMappedFileCache();
}
db = database;
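
The gate on database->getUUID() == UUIDHelpers::Nil matters because the mmapped file cache is keyed (among other things) by file path, and a table in an Ordinary database keeps its data under a path derived from the database and table names, so a re-created table with the same name reuses the same paths; an Atomic database stores data under a UUID-based path that is never reused. The following stand-alone sketch (hypothetical types and a plain std::map, not ClickHouse's MMappedFileCache API) illustrates why stale path-keyed entries must be dropped:

#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for a path-keyed mmap cache.
struct CachedMapping { std::string contents; };
std::map<std::string, CachedMapping> cache;   // file path -> cached mapping

std::string readWithCache(const std::string & path, const std::string & on_disk)
{
    auto it = cache.find(path);
    if (it != cache.end())
        return it->second.contents;           // served from cache, possibly stale
    cache.emplace(path, CachedMapping{on_disk});
    return on_disk;
}

int main()
{
    // Table `t` in an Ordinary database stores data under a name-derived path.
    readWithCache("data/db/t/col.bin", "old data");
    // DROP TABLE t; CREATE TABLE t ... -- the new table reuses the same path,
    // so unless the cached entry is dropped the next read returns stale data.
    std::cout << readWithCache("data/db/t/col.bin", "new data") << '\n';   // prints "old data"
}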

View File

@@ -8,6 +8,20 @@ namespace ErrorCodes
extern const int SET_SIZE_LIMIT_EXCEEDED;
}
static void handleAllColumnsConst(Chunk & chunk)
{
const size_t rows = chunk.getNumRows();
IColumn::Filter filter(rows);
Chunk res_chunk;
std::fill(filter.begin(), filter.end(), 0);
filter[0] = 1;
for (const auto & column : chunk.getColumns())
res_chunk.addColumn(column->filter(filter, -1));
chunk = std::move(res_chunk);
}
DistinctSortedTransform::DistinctSortedTransform(
Block header_, SortDescription sort_description, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns)
: ISimpleTransform(header_, header_, true)
@@ -23,9 +37,12 @@ DistinctSortedTransform::DistinctSortedTransform(
for (size_t i = 0; i < num_columns; ++i)
{
auto pos = column_names.empty() ? i : header.getPositionByName(column_names[i]);
const auto & col = header.getByPosition(pos).column;
if (col && !isColumnConst(*col))
const auto & column = header.getByPosition(pos).column;
if (column && !isColumnConst(*column))
{
column_positions.emplace_back(pos);
all_columns_const = false;
}
}
column_ptrs.reserve(column_positions.size());
@@ -52,6 +69,14 @@ void DistinctSortedTransform::transform(Chunk & chunk)
if (unlikely(!chunk.hasRows()))
return;
/// special case - all columns are constant
if (unlikely(all_columns_const))
{
handleAllColumnsConst(chunk);
stopReading();
return;
}
/// get DISTINCT columns from chunk
column_ptrs.clear();
for (const auto pos : column_positions)
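
Taken together, the all_columns_const path says: if every DISTINCT column is constant, every row of the chunk is identical, so a filter mask that keeps only row 0 produces the correct single-row result, and stopReading() signals that no further input needs to be processed. Below is a self-contained sketch of that mask idea, using std::vector in place of IColumn and Chunk (illustrative only, not the real interfaces):

#include <cstdint>
#include <iostream>
#include <vector>

// Keep only the rows whose mask entry is non-zero, mirroring the role of IColumn::filter.
std::vector<int> filterColumn(const std::vector<int> & column, const std::vector<uint8_t> & mask)
{
    std::vector<int> result;
    for (size_t i = 0; i < column.size(); ++i)
        if (mask[i])
            result.push_back(column[i]);
    return result;
}

int main()
{
    std::vector<int> const_column(5, 42);            // 5 rows, all equal (a "constant" column)
    std::vector<uint8_t> mask(const_column.size(), 0);
    mask[0] = 1;                                     // keep only the first row
    for (int value : filterColumn(const_column, mask))
        std::cout << value << '\n';                  // prints a single 42
}

In the real code the same mask is applied to every column of the chunk, so the one surviving row stays aligned across all columns.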

View File

@@ -66,6 +66,7 @@ private:
/// Restrictions on the maximum size of the output data.
SizeLimits set_size_limits;
bool all_columns_const = true;
};
}

View File

@@ -77,4 +77,30 @@
2 2
1 1
0 0
-- distinct with constant columns
-- { echoOn }
select distinct 1 as x, 2 as y from distinct_in_order;
1 2
select distinct 1 as x, 2 as y from distinct_in_order order by x;
1 2
select distinct 1 as x, 2 as y from distinct_in_order order by x, y;
1 2
select distinct a, 1 as x from distinct_in_order order by x;
0 1
select distinct a, 1 as x, 2 as y from distinct_in_order order by a;
0 1 2
select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
0 0 1 2
0 1 1 2
0 2 1 2
0 3 1 2
0 4 1 2
select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y;
1 2
select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by b;
0 0 1 2
0 1 1 2
0 2 1 2
0 3 1 2
0 4 1 2
-- check that distinct in order has the same result as ordinary distinct

View File

@@ -43,6 +43,18 @@ select distinct b,c from distinct_in_order order by c;
select '-- distinct with non-key prefix and non-sorted column, order by non-sorted desc';
select distinct b,c from distinct_in_order order by c desc;
select '-- distinct with constant columns';
-- { echoOn }
select distinct 1 as x, 2 as y from distinct_in_order;
select distinct 1 as x, 2 as y from distinct_in_order order by x;
select distinct 1 as x, 2 as y from distinct_in_order order by x, y;
select distinct a, 1 as x from distinct_in_order order by x;
select distinct a, 1 as x, 2 as y from distinct_in_order order by a;
select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y;
select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by b;
-- { echoOff }
drop table if exists distinct_in_order sync;
select '-- check that distinct in order has the same result as ordinary distinct';

View File

@@ -1,3 +1,2 @@
ASCII text
ASCII text
ASCII text

View File

@@ -26,6 +26,8 @@ EOF
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' 2>$file_name"
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=$file_name"
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name"
# This query may fail due to a bug in clickhouse-client.
# run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name"
rm -f "$file_name"