Merge remote-tracking branch 'origin/disable-query-profiler-with-sanitizers' into add-test-for-deadlock-system-tables
commit a1a00b2001
@@ -520,7 +520,18 @@ int Server::main(const std::vector<std::string> & /*args*/)
     /// Init trace collector only after trace_log system table was created
     /// Disable it if we collect test coverage information, because it will work extremely slow.
-#if USE_UNWIND && !WITH_COVERAGE
+    ///
+    /// It also cannot work with sanitizers.
+    /// Sanitizers are using quick "frame walking" stack unwinding (this implies -fno-omit-frame-pointer)
+    /// And they do unwinding frequently (on every malloc/free, thread/mutex operations, etc).
+    /// They change %rbp during unwinding and it confuses libunwind if a signal comes during sanitizer unwinding
+    ///  and the query profiler decides to unwind the stack with libunwind at this moment.
+    ///
+    /// Symptoms: you'll get a silent Segmentation Fault - without a sanitizer message and without the usual ClickHouse diagnostics.
+    ///
+    /// Look at compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+    ///
+#if USE_UNWIND && !WITH_COVERAGE && !defined(SANITIZER)
     /// QueryProfiler cannot work reliably with any other libunwind or without PHDR cache.
     if (hasPHDRCache())
         global_context->initializeTraceCollector();
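The comment block added above explains why the signal-based query profiler is now compiled out under sanitizers: the sanitizers' own frame-pointer unwinder and libunwind both touch %rbp, so a profiling signal delivered mid-unwind ends in a silent segfault. The gate is the new `!defined(SANITIZER)` check; how that macro is actually defined is build-system detail not shown in this diff, so the following is only a rough, self-contained sketch of how such a guard could be derived from compiler detection macros (the `SANITIZER` definition below is an assumption for illustration, not ClickHouse's real build logic):

```cpp
/// Sketch only: derive a SANITIZER macro from compiler-provided detection
/// macros, then use it to decide whether a signal-based profiler may start.
/// In the real project the macro comes from the build configuration.
#include <cstdio>

#if defined(__has_feature) /// Clang-style feature detection
#    if __has_feature(address_sanitizer) || __has_feature(thread_sanitizer) \
        || __has_feature(memory_sanitizer)
#        define SANITIZER 1
#    endif
#endif
#if !defined(SANITIZER) && (defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__))
#    define SANITIZER 1 /// GCC spells the detection macros differently
#endif

int main()
{
#if defined(SANITIZER)
    std::puts("sanitizer build: leaving the query profiler disabled");
#else
    std::puts("regular build: safe to initialize the trace collector / profiler");
#endif
    return 0;
}
```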
@@ -124,6 +124,8 @@
 #endif
 #endif
 
+/// TODO Strange enough, there is no way to detect UB sanitizer.
+
 /// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
 /// It is useful in case when compiler cannot see (and exploit) it, but UBSan can.
 /// Example: multiplication of signed integers with possibility of overflow when both sides are from user input.
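The comment in this hunk covers the UBSan side: since there is no preprocessor check for UBSan itself, undefined behaviour is instead allowed per function via an attribute (for example, signed multiplication that may overflow on user-supplied operands). A minimal sketch of that technique, using a hypothetical macro name rather than whatever the project's common headers actually define:

```cpp
/// Illustration only: a function attribute that tells UBSan to skip a specific
/// function. The macro name ALLOW_UB_EXAMPLE is made up for this sketch.
#include <cstdint>
#include <cstdio>

#if defined(__clang__) || defined(__GNUC__)
#    define ALLOW_UB_EXAMPLE __attribute__((__no_sanitize__("undefined")))
#else
#    define ALLOW_UB_EXAMPLE
#endif

/// Example from the comment above: multiplication of signed integers that may
/// overflow when both operands come from user input. With the attribute,
/// UBSan does not instrument (and therefore does not abort in) this function.
ALLOW_UB_EXAMPLE
int64_t multiply_user_values(int64_t lhs, int64_t rhs)
{
    return lhs * rhs;
}

int main()
{
    /// Safe values here; under UBSan, overflowing inputs would also be
    /// tolerated for this one function because of the attribute.
    std::printf("%lld\n", static_cast<long long>(multiply_user_values(6, 7)));
    return 0;
}
```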
@@ -605,7 +605,7 @@ def test_kafka_produce_consume(kafka_cluster):
     assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
 
 
-@pytest.mark.timeout(180)
+@pytest.mark.timeout(300)
 def test_kafka_commit_on_block_write(kafka_cluster):
     instance.query('''
         DROP TABLE IF EXISTS test.view;
@@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --use_uncompressed_cache=1 --query_id="test-query-uncompresse
 sleep 1
 $CLICKHOUSE_CLIENT --query="SYSTEM FLUSH LOGS"
 
-$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 2) ORDER BY event_time DESC LIMIT 1"
+$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 2) AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1"
 
 $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table"
@@ -10,7 +10,7 @@ do
 
     ${CLICKHOUSE_CLIENT} --query="SYSTEM FLUSH LOGS"
     sleep 0.1;
-    if [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() > 0 FROM system.text_log WHERE position(system.text_log.message, 'SELECT 6103') > 0") == 1 ]]; then echo 1; exit; fi;
+    if [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() > 0 FROM system.text_log WHERE position(system.text_log.message, 'SELECT 6103') > 0 AND event_date >= yesterday()") == 1 ]]; then echo 1; exit; fi;
 
 done;