Merge branch 'master' into system_fetches_table

alesapin 2020-10-31 18:45:17 +03:00
commit ae53092536
73 changed files with 262 additions and 101 deletions

View File

@ -16,8 +16,4 @@ endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
# FIXME: move this check into tools.cmake
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()

View File

@ -84,3 +84,9 @@ if (LINKER_NAME)
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()
if (ARCH_PPC64LE)
if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
endif ()
endif ()

View File

@ -2148,7 +2148,34 @@ Result:
└───────────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
## output_format_pretty_row_numbers {#output_format_pretty_row_numbers}
Adds row numbers to output in the [Pretty](../../interfaces/formats.md#pretty) format.
Possible values:
- 0 — Output without row numbers.
- 1 — Output with row numbers.
Default value: `0`.
**Example**
Query:
```sql
SET output_format_pretty_row_numbers = 1;
SELECT TOP 3 name, value FROM system.settings;
```
Result:
```text
┌─name────────────────────┬─value───┐
1. │ min_compress_block_size │ 65536 │
2. │ max_compress_block_size │ 1048576 │
3. │ max_block_size │ 65505 │
└─────────────────────────┴─────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
@ -2160,3 +2187,5 @@ Possible values:
- 0 — The bigint data type is disabled.
Default value: `0`.
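**Example**

A minimal sketch of enabling the setting (the table name is hypothetical; `Int256` is assumed to be among the gated big integer types):

```sql
SET allow_experimental_bigint_types = 1;

-- With the setting disabled, this CREATE TABLE is rejected.
CREATE TABLE test_bigint (x Int256) ENGINE = Memory;
```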
[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->

View File

@ -23,8 +23,6 @@ SELECT
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```
Only time zones that differ from UTC by a whole number of hours are supported.
## toTimeZone {#totimezone}
Converts a time or date-and-time value to the specified time zone.
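A minimal illustration (the target time zone is arbitrary):

```sql
SELECT
    now() AS server_time,
    toTimeZone(now(), 'UTC') AS utc_time;
```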

View File

@ -1977,6 +1977,48 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
└───────────────┘
```
## output_format_pretty_row_numbers {#output_format_pretty_row_numbers}
Enables row numbering in the output of queries that use the [Pretty](../../interfaces/formats.md#pretty) format.
Possible values:
- 0 — Output without row numbers.
- 1 — Output with row numbers.
Default value: `0`.
**Example**
Query:
```sql
SET output_format_pretty_row_numbers = 1;
SELECT TOP 3 name, value FROM system.settings;
```
Result:
```text
┌─name────────────────────┬─value───┐
1. │ min_compress_block_size │ 65536 │
2. │ max_compress_block_size │ 1048576 │
3. │ max_block_size │ 65505 │
└─────────────────────────┴─────────┘
```
## allow_experimental_bigint_types {#allow_experimental_bigint_types}
Enables or disables support for integer values that exceed the maximum allowed for the `int` type.
Possible values:
- 1 — Big integer values are supported.
- 0 — Big integer values are not supported.
Default value: `0`.
## lock_acquire_timeout {#lock_acquire_timeout}
Sets the number of seconds the server waits to acquire a table lock.
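A minimal sketch (the timeout value is arbitrary):

```sql
-- Wait at most 10 seconds to acquire a table lock before failing the query.
SET lock_acquire_timeout = 10;
```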

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
AccessControlManager.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
AggregateFunctionAggThrow.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F GroupBitmap | sed 's/^\.\// /' | sort ?>

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
Connection.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -13,7 +13,6 @@ PEERDIR(
contrib/libs/pdqsort
)
CFLAGS(-g0)
SRCS(
Collator.cpp

View File

@ -172,7 +172,7 @@ protected:
void finalizeQueryProfiler();
void logToQueryThreadLog(QueryThreadLog & thread_log);
void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database);
void assertState(const std::initializer_list<int> & permitted_states, const char * description = nullptr) const;

View File

@ -21,7 +21,6 @@ PEERDIR(
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
CFLAGS(-g0)
SRCS(
ActionLock.cpp

View File

@ -20,7 +20,6 @@ PEERDIR(
INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -12,7 +12,6 @@ PEERDIR(
contrib/libs/zstd
)
CFLAGS(-g0)
SRCS(
CachedCompressedReadBuffer.cpp

View File

@ -11,7 +11,6 @@ PEERDIR(
contrib/libs/zstd
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -85,6 +85,9 @@ void GTIDSets::update(const GTID & other)
ErrorCodes::LOGICAL_ERROR);
}
/// Try to shrink the sequence interval.
GTIDSet::tryShirnk(set, i, current);
/// The new GTID continues the sequence; extend the interval.
if (other.seq_no == current.end)
{
@ -116,6 +119,16 @@ void GTIDSets::update(const GTID & other)
sets.emplace_back(set);
}
void GTIDSet::tryShirnk(GTIDSet & set, unsigned int i, GTIDSet::Interval & current)
{
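/// `current` was just extended by the caller; if it now reaches the start of the next interval, the two can be collapsed into one.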
if (i != set.intervals.size() - 1)
{
auto & next = set.intervals[i + 1];
if (current.end == next.start)
set.tryMerge(i);
}
}
String GTIDSets::toString() const
{
WriteBufferFromOwnString buffer;

View File

@ -26,6 +26,8 @@ public:
std::vector<Interval> intervals;
void tryMerge(size_t i);
static void tryShirnk(GTIDSet & set, unsigned int i, Interval & current);
};
class GTIDSets

View File

@ -260,6 +260,17 @@ int main(int argc, char ** argv)
"10662d71-9d91-11ea-bbc2-0242ac110003:6-7",
"20662d71-9d91-11ea-bbc2-0242ac110003:9",
"10662d71-9d91-11ea-bbc2-0242ac110003:6-7,20662d71-9d91-11ea-bbc2-0242ac110003:9"},
{"shirnk-sequence",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:7",
"10662d71-9d91-11ea-bbc2-0242ac110003:6",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-7"},
{"shirnk-sequence",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:10",
"10662d71-9d91-11ea-bbc2-0242ac110003:8",
"10662d71-9d91-11ea-bbc2-0242ac110003:1-5:8:10"
}
};
for (auto & tc : cases)

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/restricted/boost/libs
)
CFLAGS(-g0)
SRCS(
BackgroundSchedulePool.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/restricted/boost/libs
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -8,7 +8,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
AddingDefaultBlockOutputStream.cpp

View File

@ -7,7 +7,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -6,7 +6,6 @@ PEERDIR(
clickhouse/src/Formats
)
CFLAGS(-g0)
SRCS(
convertMySQLDataType.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Formats
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
DatabaseAtomic.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>

View File

@ -1467,7 +1467,6 @@ void SSDComplexKeyCacheDictionary::getItemsNumberImpl(
{
assert(dict_struct.key);
assert(key_columns.size() == key_types.size());
assert(key_columns.size() == dict_struct.key->size());
dict_struct.validateKeyTypes(key_types);

View File

@ -12,7 +12,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
CacheDictionary.cpp

View File

@ -11,7 +11,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F Trie | sed 's/^\.\// /' | sort ?>

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
DiskS3.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
createVolume.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F S3 | sed 's/^\.\// /' | sort ?>

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/protoc
)
CFLAGS(-g0)
SRCS(
FormatFactory.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/protoc
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -32,7 +32,6 @@ PEERDIR(
)
# "Arcadia" build is slightly deficient. It lacks many libraries that we need.
CFLAGS(-g0)
SRCS(
abs.cpp

View File

@ -31,7 +31,6 @@ PEERDIR(
)
# "Arcadia" build is slightly deficient. It lacks many libraries that we need.
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -i -v -P 'tests|Bitmap|sumbur|abtesting' | sed 's/^\.\// /' | sort ?>

View File

@ -533,6 +533,10 @@ ReturnType parseDateTimeBestEffortImpl(
}
}
/// If neither Date nor Time is parsed successfully, it should fail
if (!year && !month && !day_of_month && !has_time)
return on_error("Cannot read DateTime: neither Date nor Time was parsed successfully", ErrorCodes::CANNOT_PARSE_DATETIME);
if (!year)
year = 2000;
if (!month)

View File

@ -8,7 +8,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
AIOContextPool.cpp

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/poco/NetSSL_OpenSSL
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'S3|HDFS' | sed 's/^\.\// /' | sort ?>

View File

@ -38,6 +38,7 @@ Block QueryThreadLogElement::createBlock()
{std::make_shared<DataTypeString>(), "thread_name"},
{std::make_shared<DataTypeUInt64>(), "thread_id"},
{std::make_shared<DataTypeUInt64>(), "master_thread_id"},
{std::make_shared<DataTypeString>(), "current_database"},
{std::make_shared<DataTypeString>(), "query"},
{std::make_shared<DataTypeUInt8>(), "is_initial_query"},
@ -91,6 +92,7 @@ void QueryThreadLogElement::appendToBlock(MutableColumns & columns) const
columns[i++]->insert(thread_id);
columns[i++]->insert(master_thread_id);
columns[i++]->insertData(current_database.data(), current_database.size());
columns[i++]->insertData(query.data(), query.size());
QueryLogElement::appendClientInfo(client_info, columns, i);

View File

@ -39,7 +39,9 @@ struct QueryThreadLogElement
UInt64 thread_id{};
UInt64 master_thread_id{};
String current_database;
String query;
ClientInfo client_info;
std::shared_ptr<ProfileEvents::Counters> profile_counters;

View File

@ -243,7 +243,7 @@ void ThreadStatus::finalizePerformanceCounters()
const auto & settings = query_context->getSettingsRef();
if (settings.log_queries && settings.log_query_threads)
if (auto thread_log = global_context->getQueryThreadLog())
logToQueryThreadLog(*thread_log);
logToQueryThreadLog(*thread_log, query_context->getCurrentDatabase());
}
}
catch (...)
@ -322,7 +322,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
#endif
}
void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log)
void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database)
{
QueryThreadLogElement elem;
@ -350,6 +350,7 @@ void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log)
elem.thread_name = getThreadName();
elem.thread_id = thread_id;
elem.current_database = current_database;
if (thread_group)
{
{

View File

@ -14,7 +14,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
ActionLocksManager.cpp

View File

@ -13,7 +13,6 @@ PEERDIR(
NO_COMPILER_WARNINGS()
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -F JIT | sed 's/^\.\// /' | sort ?>

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
ASTAlterQuery.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | sed 's/^\.\// /' | sort ?>

View File

@ -1,4 +1,5 @@
#include "AvroRowInputFormat.h"
#include "DataTypes/DataTypeLowCardinality.h"
#if USE_AVRO
#include <numeric>
@ -174,7 +175,8 @@ static std::string nodeName(avro::NodePtr node)
AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::NodePtr root_node, DataTypePtr target_type)
{
WhichDataType target(target_type);
const WhichDataType target = removeLowCardinality(target_type);
switch (root_node->type())
{
case avro::AVRO_STRING: [[fallthrough]];
@ -384,7 +386,8 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node
}
throw Exception(
"Type " + target_type->getName() + " is not compatible with Avro " + avro::toString(root_node->type()) + ":\n" + nodeToJson(root_node),
"Type " + target_type->getName() + " is not compatible with Avro " + avro::toString(root_node->type()) + ":\n"
+ nodeToJson(root_node),
ErrorCodes::ILLEGAL_COLUMN);
}

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/protobuf
)
CFLAGS(-g0)
SRCS(
Chunk.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/protobuf
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'Arrow|Avro|ORC|Parquet|CapnProto' | sed 's/^\.\// /' | sort ?>

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/poco/Util
)
CFLAGS(-g0)
SRCS(
HTTPHandler.cpp

View File

@ -5,7 +5,6 @@ PEERDIR(
contrib/libs/poco/Util
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | sed 's/^\.\// /' | sort ?>

View File

@ -965,13 +965,16 @@ void ReplicatedMergeTreeQueue::checkThereAreNoConflictsInRange(const MergeTreePa
}
bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & new_part_name, String & out_reason, std::lock_guard<std::mutex> & /* queue_lock */) const
bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & log_entry_name, const String & new_part_name,
String & out_reason, std::lock_guard<std::mutex> & /* queue_lock */) const
{
/// Let's check if the same part is now being created by another action.
if (future_parts.count(new_part_name))
{
out_reason = "Not executing log entry for part " + new_part_name
+ " because another log entry for the same part is being processed. This shouldn't happen often.";
const char * format_str = "Not executing log entry {} for part {} "
"because another log entry for the same part is being processed. This shouldn't happen often.";
LOG_INFO(log, format_str, log_entry_name, new_part_name);
out_reason = fmt::format(format_str, log_entry_name, new_part_name);
return false;
/** When the corresponding action is completed, the next call to `isNotCoveredByFuturePart` will succeed,
@ -992,8 +995,10 @@ bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & new_
if (future_part.contains(result_part))
{
out_reason = "Not executing log entry for part " + new_part_name + " because it is covered by part "
+ future_part_elem.first + " that is currently executing";
const char * format_str = "Not executing log entry {} for part {} "
"because it is covered by part {} that is currently executing.";
LOG_TRACE(log, format_str, log_entry_name, new_part_name, future_part_elem.first);
out_reason = fmt::format(format_str, log_entry_name, new_part_name, future_part_elem.first);
return false;
}
}
@ -1005,7 +1010,7 @@ bool ReplicatedMergeTreeQueue::addFuturePartIfNotCoveredByThem(const String & pa
{
std::lock_guard lock(state_mutex);
if (isNotCoveredByFuturePartsImpl(part_name, reject_reason, lock))
if (isNotCoveredByFuturePartsImpl(entry.znode_name, part_name, reject_reason, lock))
{
CurrentlyExecuting::setActualPartName(entry, part_name, *this);
return true;
@ -1030,12 +1035,8 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
for (const String & new_part_name : entry.getBlockingPartNames())
{
if (!isNotCoveredByFuturePartsImpl(new_part_name, out_postpone_reason, state_lock))
{
if (!out_postpone_reason.empty())
LOG_DEBUG(log, out_postpone_reason);
if (!isNotCoveredByFuturePartsImpl(entry.znode_name, new_part_name, out_postpone_reason, state_lock))
return false;
}
}
}
@ -1051,10 +1052,11 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
if (future_parts.count(name))
{
String reason = "Not merging into part " + entry.new_part_name
+ " because part " + name + " is not ready yet (log entry for that part is being processed).";
LOG_TRACE(log, reason);
out_postpone_reason = reason;
const char * format_str = "Not executing log entry {} of type {} for part {} "
"because part {} is not ready yet (log entry for that part is being processed).";
LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, name);
/// Copy-paste of above because we need structured logging (instead of an already formatted message).
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, name);
return false;
}
@ -1070,9 +1072,9 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (merger_mutator.merges_blocker.isCancelled())
{
String reason = "Not executing log entry for part " + entry.new_part_name + " because merges and mutations are cancelled now.";
LOG_DEBUG(log, reason);
out_postpone_reason = reason;
const char * format_str = "Not executing log entry {} of type {} for part {} because merges and mutations are cancelled now.";
LOG_DEBUG(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
return false;
}
@ -1094,17 +1096,19 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
if (merger_mutator.ttl_merges_blocker.isCancelled())
{
String reason = "Not executing log entry for part " + entry.new_part_name + " because merges with TTL are cancelled now.";
LOG_DEBUG(log, reason);
out_postpone_reason = reason;
const char * format_str = "Not executing log entry {} for part {} because merges with TTL are cancelled now.";
LOG_DEBUG(log, format_str,
entry.znode_name, entry.new_part_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.new_part_name);
return false;
}
size_t total_merges_with_ttl = data.getTotalMergesWithTTLInMergeList();
if (total_merges_with_ttl >= data_settings->max_number_of_merges_with_ttl_in_pool)
{
const char * format_str = "Not executing log entry for part {}"
const char * format_str = "Not executing log entry {} for part {}"
" because {} merges with TTL already executing, maximum {}.";
LOG_DEBUG(log, format_str, entry.new_part_name, total_merges_with_ttl,
LOG_DEBUG(log, format_str, entry.znode_name,
entry.new_part_name, total_merges_with_ttl,
data_settings->max_number_of_merges_with_ttl_in_pool);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.new_part_name, total_merges_with_ttl,
@ -1116,15 +1120,14 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (!ignore_max_size && sum_parts_size_in_bytes > max_source_parts_size)
{
const char * format_str = "Not executing log entry {} for part {}"
const char * format_str = "Not executing log entry {} of type {} for part {}"
" because source parts size ({}) is greater than the current maximum ({}).";
LOG_DEBUG(log, format_str,
LOG_DEBUG(log, format_str, entry.znode_name,
entry.typeToString(), entry.new_part_name,
ReadableSize(sum_parts_size_in_bytes), ReadableSize(max_source_parts_size));
/// Copy-paste of above because we need structured logging (instead of an already formatted message).
out_postpone_reason = fmt::format(format_str,
out_postpone_reason = fmt::format(format_str, entry.znode_name,
entry.typeToString(), entry.new_part_name,
ReadableSize(sum_parts_size_in_bytes), ReadableSize(max_source_parts_size));
@ -1139,9 +1142,9 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
if (!alter_sequence.canExecuteMetaAlter(entry.alter_version, state_lock))
{
int head_alter = alter_sequence.getHeadAlterVersion(state_lock);
out_postpone_reason = "Cannot execute alter metadata with version: " + std::to_string(entry.alter_version)
+ " because another alter " + std::to_string(head_alter)
+ " must be executed before";
const char * format_str = "Cannot execute alter metadata {} with version {} because another alter {} must be executed before";
LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version, head_alter);
return false;
}
}
@ -1153,11 +1156,17 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
{
int head_alter = alter_sequence.getHeadAlterVersion(state_lock);
if (head_alter == entry.alter_version)
out_postpone_reason = "Cannot execute alter data with version: "
+ std::to_string(entry.alter_version) + " because metadata still not altered";
{
const char * format_str = "Cannot execute alter data {} with version {} because metadata still not altered";
LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version);
}
else
out_postpone_reason = "Cannot execute alter data with version: " + std::to_string(entry.alter_version)
+ " because another alter " + std::to_string(head_alter) + " must be executed before";
{
const char * format_str = "Cannot execute alter data {} with version {} because another alter {} must be executed before";
LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version, head_alter);
}
return false;
}
@ -1170,7 +1179,14 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
/// Deadlock is possible if multiple DROP/REPLACE RANGE entries are executing in parallel and wait each other.
/// See also removePartProducingOpsInRange(...) and ReplicatedMergeTreeQueue::CurrentlyExecuting.
if (currently_executing_drop_or_replace_range)
{
const char * format_str = "Not executing log entry {} of type {} for part {} "
"because another DROP_RANGE or REPLACE_RANGE entry are currently executing.";
LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
return false;
}
}
return true;

View File

@ -205,6 +205,7 @@ private:
* Should be called under state_mutex.
*/
bool isNotCoveredByFuturePartsImpl(
const String & log_entry_name,
const String & new_part_name, String & out_reason,
std::lock_guard<std::mutex> & state_lock) const;

View File

@ -7,7 +7,6 @@ PEERDIR(
contrib/libs/poco/MongoDB
)
CFLAGS(-g0)
SRCS(
AlterCommands.cpp

View File

@ -6,7 +6,6 @@ PEERDIR(
contrib/libs/poco/MongoDB
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests | grep -v -P 'Kafka|RabbitMQ|S3|HDFS|Licenses|TimeZones' | sed 's/^\.\// /' | sort ?>

View File

@ -5,7 +5,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
ITableFunction.cpp

View File

@ -4,7 +4,6 @@ PEERDIR(
clickhouse/src/Common
)
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -P 'S3|HDFS' | sed 's/^\.\// /' | sort ?>

View File

@ -1,7 +1,7 @@
s a b
0 2000-01-01 00:00:00 2000-01-01 00:00:00
0000 2000-01-01 00:00:00 2000-01-01 00:00:00
0 ᴺᵁᴸᴸ 1970-01-01 00:00:00
0000 ᴺᵁᴸᴸ 1970-01-01 00:00:00
00:00:00 2000-01-01 00:00:00 2000-01-01 00:00:00
01:00:00 2000-01-01 01:00:00 2000-01-01 01:00:00
02/01/17 010203 MSK 2017-01-01 22:02:03 2017-01-01 22:02:03

View File

@ -97,4 +97,7 @@ timeout $TIMEOUT bash -c thread6 2>&1 | grep "was not completely removed from Zo
wait
for i in {0..9}; do $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS alter_table_$i"; done
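# drop the tables in parallel and wait for all of the drops to finish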
for i in {0..9}; do
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS alter_table_$i" &
done
wait

View File

@ -62,6 +62,8 @@ SELECT dictGetUInt64('database_for_dict.ssd_dict', 'a', tuple('10', toInt32(-20)
SELECT dictGetInt32('database_for_dict.ssd_dict', 'b', tuple('10', toInt32(-20)));
SELECT dictGetString('database_for_dict.ssd_dict', 'c', tuple('10', toInt32(-20)));
SELECT dictGetUInt64('database_for_dict.ssd_dict', 'a', tuple(toInt32(3))); --{serverError 53}
DROP DICTIONARY database_for_dict.ssd_dict;
DROP TABLE IF EXISTS database_for_dict.keys_table;

View File

@ -1,8 +1,8 @@
SELECT parseDateTimeBestEffort('<Empty>'); -- { serverError 6 }
SELECT parseDateTimeBestEffort('<Empty>'); -- { serverError 41 }
SELECT parseDateTimeBestEffortOrNull('<Empty>');
SELECT parseDateTimeBestEffortOrZero('<Empty>', 'UTC');
SELECT parseDateTime64BestEffort('<Empty>'); -- { serverError 6 }
SELECT parseDateTime64BestEffort('<Empty>'); -- { serverError 41 }
SELECT parseDateTime64BestEffortOrNull('<Empty>');
SELECT parseDateTime64BestEffortOrZero('<Empty>', 0, 'UTC');

View File

@ -15,7 +15,7 @@
2020-05-14 03:37:03.253 Nullable(DateTime64(3, \'UTC\'))
2020-05-14 06:37:03.253 Nullable(DateTime64(3, \'Europe/Minsk\'))
2020-05-14 03:37:03.253 Nullable(DateTime64(3, \'UTC\'))
1970-01-01 03:00:00.000 DateTime64(3)
1970-01-01 00:00:00.000 DateTime64(3, \'UTC\')
2020-05-14 03:37:03.000 DateTime64(3, \'UTC\')
2020-05-14 03:37:03.000 DateTime64(3, \'UTC\')
2020-05-14 03:37:03.253 DateTime64(3, \'UTC\')
@ -35,7 +35,7 @@
2020-05-14 03:37:03 Nullable(DateTime(\'UTC\'))
2020-05-14 06:37:03 Nullable(DateTime(\'Europe/Minsk\'))
2020-05-14 03:37:03 Nullable(DateTime(\'UTC\'))
1970-01-01 03:00:00 DateTime
1970-01-01 00:00:00 DateTime(\'UTC\')
2020-05-14 03:37:03 DateTime(\'UTC\')
2020-05-14 03:37:03 DateTime(\'UTC\')
2020-05-14 03:37:03 DateTime(\'UTC\')

View File

@ -12,7 +12,7 @@ SELECT CAST('2020-01-01 00:00:00', 'DateTime') AS a, toTypeName(a), CAST('2020-0
SELECT toDateTime32('2020-01-01 00:00:00') AS a, toTypeName(a);
SELECT parseDateTimeBestEffort('<Empty>', 3) AS a, toTypeName(a); -- {serverError 6}
SELECT parseDateTimeBestEffort('<Empty>', 3) AS a, toTypeName(a); -- {serverError 41}
SELECT parseDateTimeBestEffort('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTimeBestEffort('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTimeBestEffort('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, toTypeName(a);
@ -28,7 +28,7 @@ SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'UTC') AS
SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'Europe/Minsk') AS a, toTypeName(a);
SELECT parseDateTimeBestEffortOrNull(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTimeBestEffortOrZero('<Empty>', 3) AS a, toTypeName(a);
SELECT parseDateTimeBestEffortOrZero('<Empty>', 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTimeBestEffortOrZero('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, toTypeName(a);
@ -37,7 +37,7 @@ SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184Z', 3, 'Europe/M
SELECT parseDateTimeBestEffortOrZero(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffort('<Empty>') AS a, toTypeName(a); -- {serverError 6}
SELECT parseDateTime32BestEffort('<Empty>') AS a, toTypeName(a); -- {serverError 41}
SELECT parseDateTime32BestEffort('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffort('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffort('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a);
@ -53,7 +53,7 @@ SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184Z', 'UTC') AS
SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184Z', 'Europe/Minsk') AS a, toTypeName(a);
SELECT parseDateTime32BestEffortOrNull(materialize('2020-05-14T03:37:03.253184Z'), 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffortOrZero('<Empty>') AS a, toTypeName(a);
SELECT parseDateTime32BestEffortOrZero('<Empty>', 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffortOrZero('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a);
SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a);

View File

@ -35,7 +35,8 @@ function execute_tcp_one_session()
# one user's query in the background (to avoid resetting max_memory_usage_for_user)
# --max_block_size=1 to make it killable (check the state each 1 second, 1 row)
# (the test takes ~40 seconds in debug build, so 60 seconds is ok)
${CLICKHOUSE_CLIENT} --max_block_size=1 --format Null -q 'SELECT sleepEachRow(1) FROM numbers(600)' &
query_id=$$-$RANDOM-$SECONDS
${CLICKHOUSE_CLIENT} --max_block_size=1 --format Null --query_id $query_id -q 'SELECT sleepEachRow(1) FROM numbers(600)' &
# trap
sleep_query_pid=$!
function cleanup()
@ -43,6 +44,10 @@ function cleanup()
echo 'KILL sleep'
# if the timeout is not enough, it will trigger a "No such process" error/message
kill $sleep_query_pid
# wait for the query to finish
while ${CLICKHOUSE_CLIENT} -q "SELECT query_id FROM system.processes WHERE query_id = '$query_id'" | grep -xq "$query_id"; do
sleep 0.1
done
}
trap cleanup EXIT

View File

@ -0,0 +1,12 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS test_01543 (value LowCardinality(String)) ENGINE=Memory()"
$CLICKHOUSE_CLIENT --query "INSERT INTO test_01543 SELECT toString(number) FROM numbers(1000)"
$CLICKHOUSE_CLIENT -q "SELECT * FROM test_01543 FORMAT Avro" |
$CLICKHOUSE_CLIENT -q "INSERT INTO test_01543 FORMAT Avro";
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_01543"

View File

@ -0,0 +1,8 @@
2010-01-01 00:00:00
2010-01-01 01:01:01
2000-01-01 01:01:01
\N
\N
\N
\N
\N

View File

@ -0,0 +1,8 @@
SELECT parseDateTimeBestEffortOrNull('2010-01-01');
SELECT parseDateTimeBestEffortOrNull('2010-01-01 01:01:01');
SELECT parseDateTimeBestEffortOrNull('01:01:01');
SELECT parseDateTimeBestEffortOrNull('20100');
SELECT parseDateTimeBestEffortOrNull('0100:0100:0000');
SELECT parseDateTimeBestEffortOrNull('x');
SELECT parseDateTimeBestEffortOrNull('');
SELECT parseDateTimeBestEffortOrNull(' ');

View File

@ -0,0 +1,2 @@
1
1

View File

@ -0,0 +1,37 @@
--
-- This is a cleaner approach to writing a test that relies on system.query_log/query_thread_log.
--
-- It uses the current database, and since clickhouse-test generates a random
-- database for each run, you can run the test multiple times without worrying
-- about overlaps.
--
-- There is still an event_date/event_time filter for better performance
-- (even though this is not relevant for runs on CI).
--
set log_query_threads=1;
set log_queries_min_type='QUERY_FINISH';
set log_queries=1;
select '01547_query_log_current_database' from system.one format Null;
set log_queries=0;
set log_query_threads=0;
system flush logs;
select count()
from system.query_log
where
query like '%01547_query_log_current_database%'
and current_database = currentDatabase()
and event_date = today()
and event_time >= now() - interval 1 minute;
-- at least two threads for processing
-- (but one just waits for another, sigh)
select count() == 2
from system.query_thread_log
where
query like '%01547_query_log_current_database%'
and current_database = currentDatabase()
and event_date = today()
and event_time >= now() - interval 1 minute;

View File

@ -155,3 +155,4 @@
01509_dictionary_preallocate
01526_max_untracked_memory
01530_drop_database_atomic_sync
01547_query_log_current_database