Merge branch 'master' into fix-distributed-analyzer

Alexey Milovidov 2024-07-24 02:52:53 +02:00
commit 6657808663
66 changed files with 1141 additions and 868 deletions

View File

@@ -272,7 +272,4 @@ jobs:
 cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
 ${{ toJson(needs) }}
 EOF
-echo "::group::Workflow results"
-python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-echo "::endgroup::"
 python3 ./tests/ci/ci_buddy.py --check-wf-status

View File

@@ -138,7 +138,4 @@ jobs:
 cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
 ${{ toJson(needs) }}
 EOF
-echo "::group::Workflow results"
-python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-echo "::endgroup::"
 python3 ./tests/ci/ci_buddy.py --check-wf-status

View File

@@ -111,7 +111,4 @@ jobs:
 cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
 ${{ toJson(needs) }}
 EOF
-echo "::group::Workflow results"
-python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-echo "::endgroup::"
 python3 ./tests/ci/ci_buddy.py --check-wf-status

View File

@@ -57,7 +57,4 @@ jobs:
 cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
 ${{ toJson(needs) }}
 EOF
-echo "::group::Workflow results"
-python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-echo "::endgroup::"
 python3 ./tests/ci/ci_buddy.py --check-wf-status

View File

@@ -171,9 +171,6 @@ jobs:
 cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
 ${{ toJson(needs) }}
 EOF
-echo "::group::Workflow results"
-python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-echo "::endgroup::"
 python3 ./tests/ci/ci_buddy.py --check-wf-status
 ################################# Stage Final #################################

View File

@@ -492,7 +492,5 @@ jobs:
 cat >> "$WORKFLOW_RESULT_FILE" << 'EOF'
 ${{ toJson(needs) }}
 EOF
-echo "::group::Workflow results"
-python3 -m json.tool "$WORKFLOW_RESULT_FILE"
-echo "::endgroup::"
 python3 ./tests/ci/ci_buddy.py --check-wf-status

View File

@@ -2,11 +2,11 @@
 # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54488)
+SET(VERSION_REVISION 54489)
 SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
-SET(VERSION_DESCRIBE v24.7.1.1-testing)
-SET(VERSION_STRING 24.7.1.1)
+SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af)
+SET(VERSION_DESCRIBE v24.8.1.1-testing)
+SET(VERSION_STRING 24.8.1.1)
 # end of autochange

contrib/libunwind vendored

@@ -1 +1 @@
-Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2
+Subproject commit fe854449e24bedfa26e38465b84374312dbd587f

View File

@@ -6,7 +6,7 @@ ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

 RUN apt-get update --yes \
-    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git default-jdk maven python3 --yes --no-install-recommends \
+    && env DEBIAN_FRONTEND=noninteractive apt-get install wget git python3 default-jdk maven --yes --no-install-recommends \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

View File

@@ -191,8 +191,8 @@ else
 ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
 clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
 clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
 else
@@ -200,7 +200,7 @@ else
 clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 fi
 clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
 fi
 clickhouse-client --query "SHOW TABLES FROM test"

View File

@@ -209,9 +209,9 @@ clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDat
 ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='$TEMP_POLICY'"
-clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
-clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
+clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16"
 clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
 clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"

View File

@ -11,6 +11,7 @@
#include <base/cgroupsv2.h> #include <base/cgroupsv2.h>
#include <base/getMemoryAmount.h> #include <base/getMemoryAmount.h>
#include <base/sleep.h> #include <base/sleep.h>
#include <fmt/ranges.h>
#include <cstdint> #include <cstdint>
#include <filesystem> #include <filesystem>
@ -45,26 +46,33 @@ namespace
/// kernel 5 /// kernel 5
/// rss 15 /// rss 15
/// [...] /// [...]
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key) using Metrics = std::map<std::string, uint64_t>;
Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
{ {
Metrics metrics;
while (!buf.eof()) while (!buf.eof())
{ {
std::string current_key; std::string current_key;
readStringUntilWhitespace(current_key, buf); readStringUntilWhitespace(current_key, buf);
if (current_key != key)
{
std::string dummy;
readStringUntilNewlineInto(dummy, buf);
buf.ignore();
continue;
}
assertChar(' ', buf); assertChar(' ', buf);
uint64_t value = 0; uint64_t value = 0;
readIntText(value, buf); readIntText(value, buf);
return value; assertChar('\n', buf);
auto [_, inserted] = metrics.emplace(std::move(current_key), value);
chassert(inserted, "Duplicate keys in stat file");
}
return metrics;
} }
uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
{
const auto all_metrics = readAllMetricsFromStatFile(buf);
if (const auto it = all_metrics.find(key); it != all_metrics.end())
return it->second;
throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName()); throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
} }
@@ -79,6 +87,13 @@ struct CgroupsV1Reader : ICgroupsReader
         return readMetricFromStatFile(buf, "rss");
     }

+    std::string dumpAllStats() override
+    {
+        std::lock_guard lock(mutex);
+        buf.rewind();
+        return fmt::format("{}", readAllMetricsFromStatFile(buf));
+    }
+
 private:
     std::mutex mutex;
     ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
@@ -106,6 +121,13 @@ struct CgroupsV2Reader : ICgroupsReader
         return mem_usage;
     }

+    std::string dumpAllStats() override
+    {
+        std::lock_guard lock(mutex);
+        stat_buf.rewind();
+        return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
+    }
+
 private:
     std::mutex mutex;
     ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
@@ -178,10 +200,7 @@ CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait
 {
     const auto [cgroup_path, version] = getCgroupsPath();

-    if (version == CgroupsVersion::V2)
-        cgroup_reader = std::make_unique<CgroupsV2Reader>(cgroup_path);
-    else
-        cgroup_reader = std::make_unique<CgroupsV1Reader>(cgroup_path);
+    cgroup_reader = createCgroupsReader(version, cgroup_path);

     LOG_INFO(
         log,
@@ -234,7 +253,12 @@ void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint
 # endif
     /// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
     uint64_t memory_usage = cgroup_reader->readMemoryUsage();
-    LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
+    LOG_TRACE(
+        log,
+        "Read current memory usage {} bytes ({}) from cgroups, full available stats: {}",
+        memory_usage,
+        ReadableSize(memory_usage),
+        cgroup_reader->dumpAllStats());
     MemoryTracker::setRSS(memory_usage, 0);

     LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
@@ -338,6 +362,13 @@ void CgroupsMemoryUsageObserver::runThread()
     }
 }

+std::unique_ptr<ICgroupsReader> createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path)
+{
+    if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
+        return std::make_unique<CgroupsV2Reader>(cgroup_path);
+    else
+        return std::make_unique<CgroupsV1Reader>(cgroup_path);
+}
+
 }

 #endif
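
For reference, a minimal Python sketch of what the new reader does — illustrative only, not part of the commit; file names and the v1/v2 formulas follow the unit test added later in this diff (v1 takes "rss" from memory.stat; v2 takes memory.current minus inactive_file):

def read_all_metrics_from_stat_file(path):
    # Each memory.stat line is "<key> <value>"; duplicate keys are treated as a format error,
    # mirroring the chassert in readAllMetricsFromStatFile.
    metrics = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, value = line.split()
            assert key not in metrics, "Duplicate keys in stat file"
            metrics[key] = int(value)
    return metrics

def read_memory_usage(cgroup_dir, v2):
    stat = read_all_metrics_from_stat_file(f"{cgroup_dir}/memory.stat")
    if not v2:
        return stat["rss"]  # cgroups v1
    with open(f"{cgroup_dir}/memory.current", encoding="utf-8") as f:
        return int(f.read()) - stat["inactive_file"]  # cgroups v2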

View File

@@ -14,6 +14,8 @@ struct ICgroupsReader
     virtual ~ICgroupsReader() = default;

     virtual uint64_t readMemoryUsage() = 0;
+
+    virtual std::string dumpAllStats() = 0;
 };

 /// Does two things:
@@ -81,6 +83,9 @@ private:
     bool quit = false;
 };

+std::unique_ptr<ICgroupsReader>
+createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path);
+
 #else
 class CgroupsMemoryUsageObserver
 {

View File

@@ -57,7 +57,8 @@ static struct InitFiu
     PAUSEABLE_ONCE(finish_clean_quorum_failed_parts) \
     PAUSEABLE(dummy_pausable_failpoint) \
     ONCE(execute_query_calling_empty_set_result_func_on_exception) \
-    ONCE(receive_timeout_on_table_status_response)
+    ONCE(receive_timeout_on_table_status_response) \
+    REGULAR(keepermap_fail_drop_data) \

 namespace FailPoints

View File

@@ -0,0 +1,178 @@
#if defined(OS_LINUX)
#include <gtest/gtest.h>
#include <cstdint>
#include <filesystem>
#include <IO/WriteBufferFromFile.h>
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/filesystemHelpers.h>
using namespace DB;
const std::string SAMPLE_FILE[2] = {
R"(cache 4673703936
rss 2232029184
rss_huge 0
shmem 0
mapped_file 344678400
dirty 4730880
writeback 135168
swap 0
pgpgin 2038569918
pgpgout 2036883790
pgfault 2055373287
pgmajfault 0
inactive_anon 2156335104
active_anon 0
inactive_file 2841305088
active_file 1653915648
unevictable 256008192
hierarchical_memory_limit 8589934592
hierarchical_memsw_limit 8589934592
total_cache 4673703936
total_rss 2232029184
total_rss_huge 0
total_shmem 0
total_mapped_file 344678400
total_dirty 4730880
total_writeback 135168
total_swap 0
total_pgpgin 2038569918
total_pgpgout 2036883790
total_pgfault 2055373287
total_pgmajfault 0
total_inactive_anon 2156335104
total_active_anon 0
total_inactive_file 2841305088
total_active_file 1653915648
total_unevictable 256008192
)",
R"(anon 10429399040
file 17410793472
kernel 1537789952
kernel_stack 3833856
pagetables 65441792
sec_pagetables 0
percpu 15232
sock 0
vmalloc 0
shmem 0
zswap 0
zswapped 0
file_mapped 344010752
file_dirty 2060857344
file_writeback 0
swapcached 0
anon_thp 0
file_thp 0
shmem_thp 0
inactive_anon 0
active_anon 10429370368
inactive_file 8693084160
active_file 8717561856
unevictable 0
slab_reclaimable 1460982504
slab_unreclaimable 5152864
slab 1466135368
workingset_refault_anon 0
workingset_refault_file 0
workingset_activate_anon 0
workingset_activate_file 0
workingset_restore_anon 0
workingset_restore_file 0
workingset_nodereclaim 0
pgscan 0
pgsteal 0
pgscan_kswapd 0
pgscan_direct 0
pgscan_khugepaged 0
pgsteal_kswapd 0
pgsteal_direct 0
pgsteal_khugepaged 0
pgfault 43026352
pgmajfault 36762
pgrefill 0
pgactivate 0
pgdeactivate 0
pglazyfree 259
pglazyfreed 0
zswpin 0
zswpout 0
thp_fault_alloc 0
thp_collapse_alloc 0
)"};
const std::string EXPECTED[2]
= {"{\"active_anon\": 0, \"active_file\": 1653915648, \"cache\": 4673703936, \"dirty\": 4730880, \"hierarchical_memory_limit\": "
"8589934592, \"hierarchical_memsw_limit\": 8589934592, \"inactive_anon\": 2156335104, \"inactive_file\": 2841305088, "
"\"mapped_file\": 344678400, \"pgfault\": 2055373287, \"pgmajfault\": 0, \"pgpgin\": 2038569918, \"pgpgout\": 2036883790, \"rss\": "
"2232029184, \"rss_huge\": 0, \"shmem\": 0, \"swap\": 0, \"total_active_anon\": 0, \"total_active_file\": 1653915648, "
"\"total_cache\": 4673703936, \"total_dirty\": 4730880, \"total_inactive_anon\": 2156335104, \"total_inactive_file\": 2841305088, "
"\"total_mapped_file\": 344678400, \"total_pgfault\": 2055373287, \"total_pgmajfault\": 0, \"total_pgpgin\": 2038569918, "
"\"total_pgpgout\": 2036883790, \"total_rss\": 2232029184, \"total_rss_huge\": 0, \"total_shmem\": 0, \"total_swap\": 0, "
"\"total_unevictable\": 256008192, \"total_writeback\": 135168, \"unevictable\": 256008192, \"writeback\": 135168}",
"{\"active_anon\": 10429370368, \"active_file\": 8717561856, \"anon\": 10429399040, \"anon_thp\": 0, \"file\": 17410793472, "
"\"file_dirty\": 2060857344, \"file_mapped\": 344010752, \"file_thp\": 0, \"file_writeback\": 0, \"inactive_anon\": 0, "
"\"inactive_file\": 8693084160, \"kernel\": 1537789952, \"kernel_stack\": 3833856, \"pagetables\": 65441792, \"percpu\": 15232, "
"\"pgactivate\": 0, \"pgdeactivate\": 0, \"pgfault\": 43026352, \"pglazyfree\": 259, \"pglazyfreed\": 0, \"pgmajfault\": 36762, "
"\"pgrefill\": 0, \"pgscan\": 0, \"pgscan_direct\": 0, \"pgscan_khugepaged\": 0, \"pgscan_kswapd\": 0, \"pgsteal\": 0, "
"\"pgsteal_direct\": 0, \"pgsteal_khugepaged\": 0, \"pgsteal_kswapd\": 0, \"sec_pagetables\": 0, \"shmem\": 0, \"shmem_thp\": 0, "
"\"slab\": 1466135368, \"slab_reclaimable\": 1460982504, \"slab_unreclaimable\": 5152864, \"sock\": 0, \"swapcached\": 0, "
"\"thp_collapse_alloc\": 0, \"thp_fault_alloc\": 0, \"unevictable\": 0, \"vmalloc\": 0, \"workingset_activate_anon\": 0, "
"\"workingset_activate_file\": 0, \"workingset_nodereclaim\": 0, \"workingset_refault_anon\": 0, \"workingset_refault_file\": 0, "
"\"workingset_restore_anon\": 0, \"workingset_restore_file\": 0, \"zswap\": 0, \"zswapped\": 0, \"zswpin\": 0, \"zswpout\": 0}"};
class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam<CgroupsMemoryUsageObserver::CgroupsVersion>
{
void SetUp() override
{
const uint8_t version = static_cast<uint8_t>(GetParam());
tmp_dir = fmt::format("./test_cgroups_{}", magic_enum::enum_name(GetParam()));
fs::create_directories(tmp_dir);
auto stat_file = WriteBufferFromFile(tmp_dir + "/memory.stat");
stat_file.write(SAMPLE_FILE[version].data(), SAMPLE_FILE[version].size());
stat_file.sync();
if (GetParam() == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
{
auto current_file = WriteBufferFromFile(tmp_dir + "/memory.current");
current_file.write("29645422592", 11);
current_file.sync();
}
}
protected:
std::string tmp_dir;
};
TEST_P(CgroupsMemoryUsageObserverFixture, ReadMemoryUsageTest)
{
const auto version = GetParam();
auto reader = createCgroupsReader(version, tmp_dir);
ASSERT_EQ(
reader->readMemoryUsage(),
version == CgroupsMemoryUsageObserver::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184
: /* value from memory.current - inactive_file */ 20952338432);
}
TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest)
{
const auto version = GetParam();
auto reader = createCgroupsReader(version, tmp_dir);
ASSERT_EQ(reader->dumpAllStats(), EXPECTED[static_cast<uint8_t>(version)]);
}
INSTANTIATE_TEST_SUITE_P(
CgroupsMemoryUsageObserverTests,
CgroupsMemoryUsageObserverFixture,
::testing::Values(CgroupsMemoryUsageObserver::CgroupsVersion::V1, CgroupsMemoryUsageObserver::CgroupsVersion::V2));
#endif

View File

@@ -1144,7 +1144,7 @@ std::optional<UInt64> MergeTreeData::totalRowsByPartitionPredicateImpl(
     auto metadata_snapshot = getInMemoryMetadataPtr();
     auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]});

-    auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr);
+    auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag->getOutputs().at(0), nullptr, /*allow_non_deterministic_functions=*/ false);
     if (!filter_dag)
         return {};

View File

@@ -37,6 +37,7 @@
 #include <Common/Base64.h>
 #include <Common/Exception.h>
+#include <Common/FailPoint.h>
 #include <Common/ZooKeeper/IKeeper.h>
 #include <Common/ZooKeeper/KeeperException.h>
 #include <Common/ZooKeeper/Types.h>
@@ -64,6 +65,11 @@
 namespace DB
 {

+namespace FailPoints
+{
+extern const char keepermap_fail_drop_data[];
+}
+
 namespace ErrorCodes
 {
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
@@ -411,18 +417,16 @@ StorageKeeperMap::StorageKeeperMap(
         auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent);

-        // tables_path was removed with drop
-        if (code == Coordination::Error::ZNONODE)
-        {
-            LOG_INFO(log, "Metadata nodes were removed by another server, will retry");
-            continue;
-        }
-        else if (code != Coordination::Error::ZOK)
-        {
-            throw zkutil::KeeperException(code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path);
-        }
+        /// A table on the same Keeper path already exists, we just appended our table id to subscribe as a new replica
+        /// We still don't know if the table matches the expected metadata so table_is_valid is not changed
+        /// It will be checked lazily on the first operation
+        if (code == Coordination::Error::ZOK)
             return;
+
+        if (code != Coordination::Error::ZNONODE)
+            throw zkutil::KeeperException(code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path);
+
+        /// ZNONODE means we dropped zk_tables_path but didn't finish drop completely
     }

     if (client->exists(zk_dropped_path))
@@ -473,6 +477,7 @@ StorageKeeperMap::StorageKeeperMap(

         table_is_valid = true;
+        /// we are the first table created for the specified Keeper path, i.e. we are the first replica
         return;
     }

@@ -561,6 +566,10 @@ void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, Cont

 bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock)
 {
+    fiu_do_on(FailPoints::keepermap_fail_drop_data,
+    {
+        throw zkutil::KeeperException(Coordination::Error::ZOPERATIONTIMEOUT, "Manually triggered operation timeout");
+    });
     zookeeper->removeChildrenRecursive(zk_data_path);

     bool completely_removed = false;
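
The new keepermap_fail_drop_data failpoint makes dropTable() throw ZOPERATIONTIMEOUT on demand. A hypothetical test sketch in Python — ClickHouse failpoints are toggled with SYSTEM ENABLE/DISABLE FAILPOINT; the table name here is illustrative, not from this commit:

import subprocess

def ch(query):
    # Run a query through clickhouse-client and return its stdout.
    return subprocess.run(
        ["clickhouse-client", "--query", query],
        check=False, capture_output=True, text=True,
    ).stdout

ch("SYSTEM ENABLE FAILPOINT keepermap_fail_drop_data")
# Dropping a KeeperMap table now hits the injected timeout inside dropTable().
print(ch("DROP TABLE IF EXISTS test_keeper_map SYNC"))
ch("SYSTEM DISABLE FAILPOINT keepermap_fail_drop_data")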

View File

@@ -505,18 +505,18 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context
         additional_info = fmt::format(" (TID: {}; TIDH: {})", current_tid, current_tid.getHash());
     }

-    Int64 version;
-    {
-        std::lock_guard lock(currently_processing_in_background_mutex);
-
     MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings());
-    version = increment.get();
+    Int64 version = increment.get();
     entry.commit(version);
     String mutation_id = entry.file_name;
     if (txn)
         txn->addMutation(shared_from_this(), mutation_id);
     bool alter_conversions_mutations_updated = updateAlterConversionsMutations(entry.commands, alter_conversions_mutations, /* remove= */ false);
+
+    {
+        std::lock_guard lock(currently_processing_in_background_mutex);
         bool inserted = current_mutations_by_version.try_emplace(version, std::move(entry)).second;
         if (!inserted)
         {
@@ -527,9 +527,9 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context
             }
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutation {} already exists, it's a bug", version);
         }
+    }
     LOG_INFO(log, "Added mutation: {}{}", mutation_id, additional_info);
-    }

     background_operations_assignee.trigger();
     return version;
 }

View File

@@ -3940,7 +3940,7 @@ void StorageReplicatedMergeTree::mergeSelectingTask()
         merge_selecting_task->schedule();
     else
     {
-        LOG_TRACE(log, "Scheduling next merge selecting task after {}ms", merge_selecting_sleep_ms);
+        LOG_TRACE(log, "Scheduling next merge selecting task after {}ms, current attempt status: {}", merge_selecting_sleep_ms, result);
         merge_selecting_task->scheduleAfter(merge_selecting_sleep_ms);
     }
 }

View File

@@ -1,5 +1,6 @@
 // autogenerated by tests/ci/version_helper.py
 const char * auto_contributors[] {
+    "0x01f",
     "0xflotus",
     "13DaGGeR",
     "1lann",
@@ -167,6 +168,7 @@ const char * auto_contributors[] {
     "AnneClickHouse",
     "Anselmo D. Adams",
     "Anthony N. Simon",
+    "AntiTopQuark",
     "Anton Ivashkin",
     "Anton Kobzev",
     "Anton Kozlov",
@@ -299,6 +301,7 @@ const char * auto_contributors[] {
     "Dan Wu",
     "DanRoscigno",
     "Dani Pozo",
+    "Daniel Anugerah",
     "Daniel Bershatsky",
     "Daniel Byta",
     "Daniel Dao",
@@ -370,6 +373,7 @@ const char * auto_contributors[] {
     "Elena",
     "Elena Baskakova",
     "Elena Torró",
+    "Elena Torró Martínez",
     "Elghazal Ahmed",
     "Eliot Hautefeuille",
     "Elizaveta Mironyuk",
@@ -415,6 +419,7 @@ const char * auto_contributors[] {
     "FgoDt",
     "Filatenkov Artur",
     "Filipe Caixeta",
+    "Filipp Bakanov",
     "Filipp Ozinov",
     "Filippov Denis",
     "Fille",
@@ -451,6 +456,7 @@ const char * auto_contributors[] {
     "Gleb Novikov",
     "Gleb-Tretyakov",
     "GoGoWen2021",
+    "Gosha Letov",
     "Gregory",
     "Grigorii Sokolik",
     "Grigory",
@@ -461,6 +467,7 @@ const char * auto_contributors[] {
     "Guillaume Tassery",
     "Guo Wangyang",
     "Guo Wei (William)",
+    "Guspan Tanadi",
     "Haavard Kvaalen",
     "Habibullah Oladepo",
     "HaiBo Li",
@@ -474,6 +481,7 @@ const char * auto_contributors[] {
     "HarryLeeIBM",
     "Hasitha Kanchana",
     "Hasnat",
+    "Haydn",
     "Heena Bansal",
     "HeenaBansal2009",
     "Hendrik M",
@@ -606,6 +614,7 @@ const char * auto_contributors[] {
     "Kevin Chiang",
     "Kevin Michel",
     "Kevin Mingtarja",
+    "Kevin Song",
     "Kevin Zhang",
     "KevinyhZou",
     "KinderRiven",
@@ -661,6 +670,7 @@ const char * auto_contributors[] {
     "Lewinma",
     "Li Shuai",
     "Li Yin",
+    "Linh Giang",
     "Lino Uruñuela",
     "Lirikl",
     "Liu Cong",
@@ -690,6 +700,7 @@ const char * auto_contributors[] {
     "Maksim Alekseev",
     "Maksim Buren",
     "Maksim Fedotov",
+    "Maksim Galkin",
     "Maksim Kita",
     "Maksym Sobolyev",
     "Mal Curtis",
@@ -724,6 +735,7 @@ const char * auto_contributors[] {
     "Max Akhmedov",
     "Max Bruce",
     "Max K",
+    "Max K.",
     "Max Kainov",
     "Max Vetrov",
     "MaxTheHuman",
@@ -811,6 +823,7 @@ const char * auto_contributors[] {
     "Nataly Merezhuk",
     "Natalya Chizhonkova",
     "Natasha Murashkina",
+    "Nathan Clevenger",
     "NeZeD [Mac Pro]",
     "Neeke Gao",
     "Neng Liu",
@@ -946,6 +959,7 @@ const char * auto_contributors[] {
     "Robert Coelho",
     "Robert Hodges",
     "Robert Schulze",
+    "Rodolphe Dugé de Bernonville",
     "RogerYK",
     "Rohit Agarwal",
     "Romain Neutron",
@@ -1107,6 +1121,7 @@ const char * auto_contributors[] {
     "Timur Solodovnikov",
     "TiunovNN",
     "Tobias Adamson",
+    "Tobias Florek",
     "Tobias Lins",
     "Tom Bombadil",
     "Tom Risse",
@@ -1231,11 +1246,13 @@ const char * auto_contributors[] {
     "Yingchun Lai",
     "Yingfan Chen",
     "Yinzheng-Sun",
+    "Yinzuo Jiang",
     "Yiğit Konur",
     "Yohann Jardin",
     "Yong Wang",
     "Yong-Hao Zou",
     "Youenn Lebras",
+    "Your Name",
     "Yu, Peng",
     "Yuko Takagi",
     "Yuntao Wu",
@@ -1250,6 +1267,7 @@ const char * auto_contributors[] {
     "Yury Stankevich",
     "Yusuke Tanaka",
     "Zach Naimon",
+    "Zawa-II",
     "Zheng Miao",
     "ZhiHong Zhang",
     "ZhiYong Wang",
@@ -1380,6 +1398,7 @@ const char * auto_contributors[] {
     "conicliu",
     "copperybean",
     "coraxster",
+    "cw5121",
     "cwkyaoyao",
     "d.v.semenov",
     "dalei2019",
@@ -1460,12 +1479,14 @@ const char * auto_contributors[] {
     "fuzzERot",
     "fyu",
     "g-arslan",
+    "gabrielmcg44",
     "ggerogery",
     "giordyb",
     "glockbender",
     "glushkovds",
     "grantovsky",
     "gulige",
+    "gun9nir",
     "guoleiyi",
     "guomaolin",
     "guov100",
@@ -1527,6 +1548,7 @@ const char * auto_contributors[] {
     "jferroal",
     "jiahui-97",
     "jianmei zhang",
+    "jiaosenvip",
     "jinjunzh",
     "jiyoungyoooo",
     "jktng",
@@ -1541,6 +1563,7 @@ const char * auto_contributors[] {
     "jun won",
     "jus1096",
     "justindeguzman",
+    "jwoodhead",
     "jyz0309",
     "karnevil13",
     "kashwy",
@@ -1633,10 +1656,12 @@ const char * auto_contributors[] {
     "mateng0915",
     "mateng915",
     "mauidude",
+    "max-vostrikov",
     "maxim",
     "maxim-babenko",
     "maxkuzn",
     "maxulan",
+    "maxvostrikov",
     "mayamika",
     "mehanizm",
     "melin",
@@ -1677,6 +1702,7 @@ const char * auto_contributors[] {
     "nathanbegbie",
     "nauta",
     "nautaa",
+    "nauu",
     "ndchikin",
     "nellicus",
     "nemonlou",
@@ -1975,6 +2001,7 @@ const char * auto_contributors[] {
     "张健",
     "张风啸",
     "徐炘",
+    "忒休斯~Theseus",
     "曲正鹏",
     "木木夕120",
     "未来星___费",

View File

@@ -271,7 +271,8 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node)
 static const ActionsDAG::Node * splitFilterNodeForAllowedInputs(
     const ActionsDAG::Node * node,
     const Block * allowed_inputs,
-    ActionsDAG::Nodes & additional_nodes)
+    ActionsDAG::Nodes & additional_nodes,
+    bool allow_non_deterministic_functions)
 {
     if (node->type == ActionsDAG::ActionType::FUNCTION)
     {
@@ -280,8 +281,14 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs(
             auto & node_copy = additional_nodes.emplace_back(*node);
             node_copy.children.clear();
             for (const auto * child : node->children)
-                if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes))
+                if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions))
                     node_copy.children.push_back(child_copy);
+                /// Expression like (not_allowed AND allowed) is not allowed if allow_non_deterministic_functions = false. This is important for
+                /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is
+                /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply
+                /// trivial count.
+                else if (!allow_non_deterministic_functions)
+                    return nullptr;

             if (node_copy.children.empty())
                 return nullptr;
@@ -307,7 +314,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs(
         {
             auto & node_copy = additional_nodes.emplace_back(*node);
             for (auto & child : node_copy.children)
-                if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes); !child)
+                if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_non_deterministic_functions); !child)
                     return nullptr;

             return &node_copy;
@@ -321,7 +328,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs(
             auto index_hint_dag = index_hint->getActions()->clone();
             ActionsDAG::NodeRawConstPtrs atoms;
             for (const auto & output : index_hint_dag->getOutputs())
-                if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes))
+                if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_non_deterministic_functions))
                     atoms.push_back(child_copy);

             if (!atoms.empty())
@@ -355,13 +362,13 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs(
     return node;
 }

-ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs)
+ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions)
 {
     if (!predicate)
         return nullptr;

     ActionsDAG::Nodes additional_nodes;
-    const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes);
+    const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_non_deterministic_functions);
     if (!res)
         return nullptr;

@@ -370,7 +377,7 @@ ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate,
 void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context)
 {
-    auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false);
+    auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_non_deterministic_functions=*/ false);
     if (dag)
         filterBlockWithDAG(dag, block, context);
 }

View File

@@ -32,7 +32,15 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context);
 bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node);

 /// Extract a part of predicate that can be evaluated using only columns from input_names.
-ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs);
+/// When allow_non_deterministic_functions is true then even if the predicate contains non-deterministic
+/// functions, we still allow to extract a part of the predicate, otherwise we return nullptr.
+/// allow_non_deterministic_functions must be false when we are going to use the result to filter parts in
+/// MergeTreeData::totalRowsByPartitionPredicateImpl. For example, if the query is
+/// `SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1`
+/// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is
+/// non-deterministic. If we still extract the part `_partition_id = '0'` for filtering parts, then trivial
+/// count optimization will be mistakenly applied to the query.
+ActionsDAGPtr splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_non_deterministic_functions = true);

 /// Extract from the input stream a set of `name` column values
 template <typename T>

View File

@@ -31,6 +31,7 @@ class CIBuddy:
         self.sha = pr_info.sha[:10]

     def check_workflow(self):
+        GHActions.print_workflow_results()
         res = GHActions.get_workflow_job_result(GHActions.ActionsNames.RunConfig)
         if res != GHActions.ActionStatuses.SUCCESS:
             self.post_job_error("Workflow Configuration Failed", critical=True)

View File

@@ -92,15 +92,33 @@ class GHActions:
         PENDING = "pending"
         SUCCESS = "success"

-    @staticmethod
-    def get_workflow_job_result(wf_job_name: str) -> Optional[str]:
+    @classmethod
+    def _get_workflow_results(cls):
         if not Path(Envs.WORKFLOW_RESULT_FILE).exists():
             print(
                 f"ERROR: Failed to get workflow results from file [{Envs.WORKFLOW_RESULT_FILE}]"
             )
-            return None
+            return {}
         with open(Envs.WORKFLOW_RESULT_FILE, "r", encoding="utf-8") as json_file:
+            try:
                 res = json.load(json_file)
+            except json.JSONDecodeError as e:
+                print(f"ERROR: json decoder exception {e}")
+                json_file.seek(0)
+                print(" File content:")
+                print(json_file.read())
+                return {}
+        return res
+
+    @classmethod
+    def print_workflow_results(cls):
+        res = cls._get_workflow_results()
+        results = [f"{job}: {data['result']}" for job, data in res.items()]
+        cls.print_in_group("Workflow results", results)
+
+    @classmethod
+    def get_workflow_job_result(cls, wf_job_name: str) -> Optional[str]:
+        res = cls._get_workflow_results()
         if wf_job_name in res:
             return res[wf_job_name]["result"]  # type: ignore
         else:
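
For context, the workflows at the top of this diff dump `${{ toJson(needs) }}` into $WORKFLOW_RESULT_FILE, so _get_workflow_results() returns the GitHub `needs` mapping. A sketch of the assumed file shape (the job names are made up):

workflow_results = {
    # job id -> GitHub needs-context entry
    "RunConfig": {"result": "success", "outputs": {}},
    "BuildDockers": {"result": "failure", "outputs": {}},
}
# print_workflow_results() would then log, inside a "Workflow results" group:
#   RunConfig: success
#   BuildDockers: failure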

View File

@@ -197,6 +197,10 @@ def get_instance_id():
     return _query_imds("latest/meta-data/instance-id")


+def get_instance_lifecycle():
+    return _query_imds("latest/meta-data/instance-life-cycle")
+
+
 def prepare_tests_results_for_clickhouse(
     pr_info: PRInfo,
     test_results: TestResults,
@@ -233,7 +237,7 @@ def prepare_tests_results_for_clickhouse(
         "head_ref": head_ref,
         "head_repo": head_repo,
         "task_url": pr_info.task_url,
-        "instance_type": get_instance_type(),
+        "instance_type": ",".join([get_instance_type(), get_instance_lifecycle()]),
         "instance_id": get_instance_id(),
     }
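
The instance-life-cycle metadata path resolves to "on-demand" or "spot", so the reported instance_type now carries both pieces. A small illustration (the instance type is an example, not from this commit):

# What get_instance_type() and get_instance_lifecycle() might yield on a spot runner:
instance_type = ",".join(["c6a.4xlarge", "spot"])
assert instance_type == "c6a.4xlarge,spot"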

View File

@@ -21,7 +21,7 @@ from env_helper import (
     TEMP_PATH,
 )
 from git_helper import Git
-from pr_info import PRInfo, EventType
+from pr_info import PRInfo
 from report import FAILURE, SUCCESS, JobReport, TestResult, TestResults
 from stopwatch import Stopwatch
 from tee_popen import TeePopen
@@ -375,25 +375,23 @@ def main():
     tags = gen_tags(args.version, args.release_type)
     repo_urls = {}
     direct_urls: Dict[str, List[str]] = {}
-    if pr_info.event_type == EventType.PULL_REQUEST:
-        release_or_pr = str(pr_info.number)
-        sha = pr_info.sha
-    elif pr_info.event_type == EventType.PUSH and pr_info.is_master:
-        release_or_pr = str(0)
-        sha = pr_info.sha
-    else:
-        release_or_pr = f"{args.version.major}.{args.version.minor}"
-        sha = args.sha
-    assert sha

     for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")):
-        if not args.bucket_prefix:
+        if args.bucket_prefix:
+            assert not args.allow_build_reuse
+            repo_urls[arch] = f"{args.bucket_prefix}/{build_name}"
+        elif args.sha:
+            # CreateRelease workflow only. TODO
+            version = args.version
             repo_urls[arch] = (
                 f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/"
-                f"{release_or_pr}/{sha}/{build_name}"
+                f"{version.major}.{version.minor}/{args.sha}/{build_name}"
             )
         else:
-            repo_urls[arch] = f"{args.bucket_prefix}/{build_name}"
+            # In all other cases urls must be fetched from build reports. TODO: script needs refactoring
+            repo_urls[arch] = ""
+            assert args.allow_build_reuse
         if args.allow_build_reuse:
             # read s3 urls from pre-downloaded build reports
             if "clickhouse-server" in image_repo:
@@ -431,7 +429,6 @@ def main():
             )
             if test_results[-1].status != "OK":
                 status = FAILURE
-    pr_info = pr_info or PRInfo()
     description = f"Processed tags: {', '.join(tags)}"
     JobReport(

View File

@@ -1 +0,0 @@
-generated_*init_runner.sh

View File

@@ -1,87 +0,0 @@
#!/usr/bin/env bash
set -e
usage() {
echo "Usage: $0 ENVIRONMENT" >&2
echo "Valid values for ENVIRONMENT: staging, production" >&2
exit 1
}
case "$1" in
staging|production)
ENVIRONMENT="$1" ;;
--help)
usage ;;
*)
echo "Invalid argument" >&2
usage ;;
esac
cd "$(dirname "$0")" || exit 1
SOURCE_SCRIPT='init_runner.sh'
check_response() {
# Are we even in the interactive shell?
[ -t 1 ] || return 1
local request
request="$1"
read -rp "$request (y/N): " response
case "$response" in
[Yy])
return 0
# Your code to continue goes here
;;
*)
return 1
;;
esac
}
check_dirty() {
if [ -n "$(git status --porcelain=v2 "$SOURCE_SCRIPT")" ]; then
echo "The $SOURCE_SCRIPT has uncommited changes, won't deploy it" >&2
exit 1
fi
}
GIT_HASH=$(git log -1 --format=format:%H)
header() {
cat << EOF
#!/usr/bin/env bash
echo 'The $ENVIRONMENT script is generated from $SOURCE_SCRIPT, commit $GIT_HASH'
EOF
}
body() {
local first_line
first_line=$(sed -n '/^# THE SCRIPT START$/{=;q;}' "$SOURCE_SCRIPT")
if [ -z "$first_line" ]; then
echo "The pattern '# THE SCRIPT START' is not found in $SOURCE_SCRIPT" >&2
exit 1
fi
tail "+$first_line" "$SOURCE_SCRIPT"
}
GENERATED_FILE="generated_${ENVIRONMENT}_${SOURCE_SCRIPT}"
{ header && body; } > "$GENERATED_FILE"
echo "The file $GENERATED_FILE is generated"
if check_response "Display the content of $GENERATED_FILE?"; then
if [ -z "$PAGER" ]; then
less "$GENERATED_FILE"
else
$PAGER "$GENERATED_FILE"
fi
fi
check_dirty
S3_OBJECT=${S3_OBJECT:-s3://github-runners-data/cloud-init/${ENVIRONMENT}.sh}
if check_response "Deploy the generated script to $S3_OBJECT?"; then
aws s3 mv "$GENERATED_FILE" "$S3_OBJECT"
fi

View File

@@ -1,406 +0,0 @@
#!/usr/bin/env bash
cat > /dev/null << 'EOF'
The following content is embedded into the s3 object via the script
deploy-runner-init.sh {staging,production}
with additional helping information
In the `user data` you should define as the following text
between `### COPY BELOW` and `### COPY ABOVE`
### COPY BELOW
Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0
--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"
#cloud-config
cloud_final_modules:
- [scripts-user, always]
--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"
#!/bin/bash
INSTANCE_ID=$(ec2metadata --instance-id)
INIT_ENVIRONMENT=$(/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='github:init-environment'].Value" --output text)
echo "Downloading and using $INIT_ENVIRONMENT cloud-init.sh"
aws s3 cp "s3://github-runners-data/cloud-init/${INIT_ENVIRONMENT:-production}.sh" /tmp/cloud-init.sh
chmod 0700 /tmp/cloud-init.sh
exec bash /tmp/cloud-init.sh
--//
### COPY ABOVE
EOF
# THE SCRIPT START
set -uo pipefail
####################################
# IMPORTANT! #
# EC2 instance should have #
# `github:runner-type` tag #
# set accordingly to a runner role #
####################################
echo "Running init v1.1"
export DEBIAN_FRONTEND=noninteractive
export RUNNER_HOME=/home/ubuntu/actions-runner
export RUNNER_ORG="ClickHouse"
export RUNNER_URL="https://github.com/${RUNNER_ORG}"
# Funny fact, but metadata service has fixed IP
INSTANCE_ID=$(ec2metadata --instance-id)
export INSTANCE_ID
bash /usr/local/share/scripts/init-network.sh
# combine labels
RUNNER_TYPE=$(/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='github:runner-type'].Value" --output text)
LABELS="self-hosted,Linux,$(uname -m),$RUNNER_TYPE"
export LABELS
echo "Instance Labels: $LABELS"
LIFE_CYCLE=$(curl -s --fail http://169.254.169.254/latest/meta-data/instance-life-cycle)
export LIFE_CYCLE
echo "Instance lifecycle: $LIFE_CYCLE"
INSTANCE_TYPE=$(ec2metadata --instance-type)
echo "Instance type: $INSTANCE_TYPE"
# Refresh CloudWatch agent config
aws ssm get-parameter --region us-east-1 --name AmazonCloudWatch-github-runners --query 'Parameter.Value' --output text > /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json
systemctl restart amazon-cloudwatch-agent.service
# Refresh teams ssh keys
TEAM_KEYS_URL=$(aws ssm get-parameter --region us-east-1 --name team-keys-url --query 'Parameter.Value' --output=text)
curl -s "${TEAM_KEYS_URL}" > /home/ubuntu/.ssh/authorized_keys2
chown ubuntu: /home/ubuntu/.ssh -R
# Create a pre-run script that will provide diagnostics info
mkdir -p /tmp/actions-hooks
cat > /tmp/actions-hooks/common.sh << 'EOF'
#!/bin/bash
EOF
terminate_delayed() {
# The function for post hook to gracefully finish the job and then tear down
# The very specific sleep time is used later to determine in the main loop if
# the instance is tearing down
# IF `sleep` IS CHANGED, CHANGE ANOTHER VALUE IN `pgrep`
sleep=13.14159265358979323846
echo "Going to terminate the runner's instance in $sleep seconds"
# We execute it with `at` to not have it as an orphan process, but launched independently
# GH Runners kill all remain processes
echo "sleep '$sleep'; aws ec2 terminate-instances --instance-ids $INSTANCE_ID" | at now || \
aws ec2 terminate-instances --instance-ids "$INSTANCE_ID" # workaround for complete out of space or non-installed `at`
exit 0
}
detect_delayed_termination() {
# The function look for very specific sleep with pi
if pgrep 'sleep 13.14159265358979323846'; then
echo 'The instance has delayed termination, sleep the same time to wait if it goes down'
sleep 14
fi
}
declare -f terminate_delayed >> /tmp/actions-hooks/common.sh
terminate_and_exit() {
# Terminate instance and exit from the script instantly
echo "Going to terminate the runner's instance"
aws ec2 terminate-instances --instance-ids "$INSTANCE_ID"
exit 0
}
terminate_decrease_and_exit() {
# Terminate instance and exit from the script instantly
echo "Going to terminate the runner's instance and decrease asg capacity"
aws autoscaling terminate-instance-in-auto-scaling-group --instance-id "$INSTANCE_ID" --should-decrement-desired-capacity
exit 0
}
declare -f terminate_and_exit >> /tmp/actions-hooks/common.sh
check_spot_instance_is_old() {
if [ "$LIFE_CYCLE" == "spot" ]; then
local UPTIME
UPTIME=$(< /proc/uptime)
UPTIME=${UPTIME%%.*}
if (( 3600 < UPTIME )); then
echo "The spot instance has uptime $UPTIME, it's time to shut it down"
return 0
fi
fi
return 1
}
check_proceed_spot_termination() {
# The function checks and proceeds spot instance termination if exists
# The event for spot instance termination
local FORCE
FORCE=${1:-}
if TERMINATION_DATA=$(curl -s --fail http://169.254.169.254/latest/meta-data/spot/instance-action); then
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-instance-termination-notices.html#instance-action-metadata
_action=$(jq '.action' -r <<< "$TERMINATION_DATA")
_time=$(jq '.time | fromdate' <<< "$TERMINATION_DATA")
_until_action=$((_time - $(date +%s)))
echo "Received the '$_action' event that will be effective in $_until_action seconds"
if (( _until_action <= 30 )) || [ "$FORCE" == "force" ]; then
echo "The action $_action will be done in $_until_action, killing the runner and exit"
local runner_pid
runner_pid=$(pgrep Runner.Listener)
if [ -n "$runner_pid" ]; then
# Kill the runner to not allow it cancelling the job
# shellcheck disable=SC2046
kill -9 "$runner_pid" $(list_children "$runner_pid")
fi
sudo -u ubuntu ./config.sh remove --token "$(get_runner_token)"
terminate_and_exit
fi
fi
}
no_terminating_metadata() {
# The function check that instance could continue work
# Returns 1 if any of termination events are received
# The event for rebalance recommendation. Not strict, so we have some room to make a decision here
if curl -s --fail http://169.254.169.254/latest/meta-data/events/recommendations/rebalance; then
echo 'Received recommendation to rebalance, checking the uptime'
local UPTIME
UPTIME=$(< /proc/uptime)
UPTIME=${UPTIME%%.*}
# We don't shutdown the instances younger than 30m
if (( 1800 < UPTIME )); then
# To avoid shutting everything down at once, let roughly 66% survive
if (( $((RANDOM % 3)) == 0 )); then
echo 'The instance is older than 30m and won the roulette'
return 1
fi
echo 'The instance is older than 30m, but was not chosen for rebalance'
else
echo 'The instance is younger than 30m, do not shut it down'
fi
fi
# Check if the ASG is in a lifecycle hook state
local ASG_STATUS
ASG_STATUS=$(curl -s http://169.254.169.254/latest/meta-data/autoscaling/target-lifecycle-state)
if [ "$ASG_STATUS" == "Terminated" ]; then
echo 'The instance is in ASG status Terminating:Wait'
return 1
fi
}
terminate_on_event() {
# If there is a rebalance event, the instance could die soon
# Let's not wait for it and terminate proactively
if curl -s --fail http://169.254.169.254/latest/meta-data/events/recommendations/rebalance; then
terminate_and_exit
fi
# Here we check if the autoscaling group marked the instance for termination and is waiting for the job to finish
ASG_STATUS=$(curl -s http://169.254.169.254/latest/meta-data/autoscaling/target-lifecycle-state)
if [ "$ASG_STATUS" == "Terminated" ]; then
INSTANCE_ID=$(ec2metadata --instance-id)
ASG_NAME=$(aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='aws:autoscaling:groupName'].Value" --output text)
LIFECYCLE_HOOKS=$(aws autoscaling describe-lifecycle-hooks --auto-scaling-group-name "$ASG_NAME" --query "LifecycleHooks[].LifecycleHookName" --output text)
for LCH in $LIFECYCLE_HOOKS; do
aws autoscaling complete-lifecycle-action --lifecycle-action-result CONTINUE \
--lifecycle-hook-name "$LCH" --auto-scaling-group-name "$ASG_NAME" \
--instance-id "$INSTANCE_ID"
true # autoformat issue
done
echo 'The runner is marked as "Terminated" by the autoscaling group, we are terminating'
terminate_and_exit
fi
}
cat > /tmp/actions-hooks/pre-run.sh << EOF
#!/bin/bash
set -uo pipefail
echo "Runner's public DNS: $(ec2metadata --public-hostname)"
echo "Runner's labels: ${LABELS}"
echo "Runner's instance type: $(ec2metadata --instance-type)"
EOF
# Create a post-run script that will clean up docker and restart its daemon if needed before the next job starts
cat > /tmp/actions-hooks/post-run.sh << 'EOF'
#!/bin/bash
set -xuo pipefail
source /tmp/actions-hooks/common.sh
# Free space on / in KiB and as a percentage
ROOT_STAT=($(df / | awk '/\// {print $4 " " int($4/$2 * 100)}'))
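# e.g. for a hypothetical "df /" data line "/dev/root 103880232 32115476 66452168 33% /"
# this yields ROOT_STAT=(66452168 63): 66452168 KiB free, 63% of the volume free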
if [[ ${ROOT_STAT[0]} -lt 3000000 ]] || [[ ${ROOT_STAT[1]} -lt 5 ]]; then
echo "The runner has ${ROOT_STAT[0]}KiB and ${ROOT_STAT[1]}% of free space on /"
terminate_delayed
fi
# shellcheck disable=SC2046
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
# shellcheck disable=SC2046
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
# If containers are still listed after the previous commands, the docker daemon is hung
# and should be restarted
if [ "$(docker ps --all --quiet)" ]; then
# Systemd service of docker has StartLimitBurst=3 and StartLimitInterval=60s,
# that's why we keep retrying the restart for a long time
for i in {1..25};
do
sudo systemctl restart docker && break || sleep 5
done
for i in {1..10}
do
docker info && break || sleep 2
done
# Last chance, otherwise we have to terminate the poor instance
docker info 1>/dev/null || { echo "Docker is unable to start"; terminate_delayed ; }
fi
EOF
get_runner_token() {
/usr/local/bin/aws ssm get-parameter --name github_runner_registration_token --with-decryption --output text --query Parameter.Value
}
is_job_assigned() {
local runner_pid
runner_pid=$(pgrep Runner.Listener)
if [ -z "$runner_pid" ]; then
# if the runner has finished, it's fine
return 0
fi
local log_file
log_file=$(lsof -p "$runner_pid" 2>/dev/null | grep -o "$RUNNER_HOME/_diag/Runner.*log")
if [ -z "$log_file" ]; then
# assume the process is over or has just started
return 0
fi
# So far it's the only solid way to determine that the job is starting
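# A hypothetical diag line the pattern matches:
#   [2024-07-24 00:00:00Z INFO Terminal] ... Running job: Style check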
grep -q 'Terminal] .* Running job:' "$log_file" \
&& return 0 \
|| return 1
}
list_children () {
local children
children=$(ps --ppid "$1" -o pid=)
if [ -z "$children" ]; then
return
fi
for pid in $children; do
list_children "$pid"
done
echo "$children"
}
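# Worked example with hypothetical PIDs: for a tree 100 -> {101, 102} and
# 101 -> {103}, `list_children 100` prints 103 before 101 and 102, i.e.
# grandchildren before children, so `kill -9 "$pid" $(list_children "$pid")`
# covers the whole process tree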
# There's a possibility that it fails because the runner's version is outdated,
# so after the first failure we'll try to launch it with autoupdate enabled.
#
# We'll fail and terminate after 10 consecutive failures.
ATTEMPT=0
# In `kill` 0 means "all processes in process group", -1 is "all but PID 1"
# We use `-2` so the initial `kill -0` check fails
RUNNER_PID=-2
while true; do
# Does not send a signal, but checks that the process $RUNNER_PID is running
if kill -0 -- $RUNNER_PID; then
ATTEMPT=0
echo "Runner is working with pid $RUNNER_PID, checking the metadata in background"
check_proceed_spot_termination
if ! is_job_assigned; then
RUNNER_AGE=$(( $(date +%s) - $(stat -c +%Y /proc/"$RUNNER_PID" 2>/dev/null || date +%s) ))
echo "The runner is launched $RUNNER_AGE seconds ago and still hasn't received a job"
if (( 60 < RUNNER_AGE )); then
echo "Attempt to delete the runner for a graceful shutdown"
sudo -u ubuntu ./config.sh remove --token "$(get_runner_token)" \
|| continue
echo "Runner didn't launch or have assigned jobs after ${RUNNER_AGE} seconds, shutting down"
terminate_decrease_and_exit
fi
fi
else
if [ "$RUNNER_PID" != "-2" ]; then
wait $RUNNER_PID \
&& echo "Runner with PID $RUNNER_PID successfully finished" \
|| echo "Attempt $((++ATTEMPT)) to start the runner"
fi
if (( ATTEMPT > 10 )); then
echo "The runner has failed to start after $ATTEMPT attempt. Give up and terminate it"
terminate_and_exit
fi
cd "$RUNNER_HOME" || terminate_and_exit
detect_delayed_termination
# If the runner is not active, check whether the instance is supposed to terminate itself
echo "Checking if the instance is supposed to terminate"
no_terminating_metadata || terminate_on_event
check_spot_instance_is_old && terminate_and_exit
check_proceed_spot_termination force
echo "Going to configure runner"
token_args=(--token "$(get_runner_token)")
config_args=(
"${token_args[@]}" --url "$RUNNER_URL"
--ephemeral --unattended --replace --runnergroup Default
--labels "$LABELS" --work _work --name "$INSTANCE_ID"
)
if (( ATTEMPT > 1 )); then
echo 'The runner failed to start at least once. Removing it and then configuring with autoupdate enabled.'
sudo -u ubuntu ./config.sh remove "${token_args[@]}"
sudo -u ubuntu ./config.sh "${config_args[@]}"
else
echo "Configure runner with disabled autoupdate"
config_args+=("--disableupdate")
sudo -u ubuntu ./config.sh "${config_args[@]}"
fi
echo "Another one check to avoid race between runner and infrastructure"
no_terminating_metadata || terminate_on_event
check_spot_instance_is_old && terminate_and_exit
check_proceed_spot_termination force
# There were some failures to start the job because of leftover files in _work
rm -rf _work
# https://github.com/actions/runner/issues/3266
# We're unable to know if the runner failed to start.
echo 'Monkey-patching run helpers to get genuine exit code of the runner'
for script in run.sh run-helper.sh.template; do
# shellcheck disable=SC2016
grep -q 'exit 0$' "$script" && \
sed 's/exit 0/exit $returnCode/' -i "$script" && \
echo "Script $script is patched"
done
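# The sed above turns the helpers' trailing `exit 0` into `exit $returnCode`
# (a variable presumably set by the helper itself), so the wrapper propagates
# the genuine exit code of Runner.Listener instead of always reporting success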
echo "Run"
sudo -u ubuntu \
ACTIONS_RUNNER_HOOK_JOB_STARTED=/tmp/actions-hooks/pre-run.sh \
ACTIONS_RUNNER_HOOK_JOB_COMPLETED=/tmp/actions-hooks/post-run.sh \
./run.sh &
RUNNER_PID=$!
sleep 10
fi
sleep 5
done
# vim:ts=4:sw=4

View File

@ -104,3 +104,24 @@ def test_keeper_map_without_zk(started_cluster):
node.query("DETACH TABLE test_keeper_map_without_zk") node.query("DETACH TABLE test_keeper_map_without_zk")
client.stop() client.stop()
def test_keeper_map_with_failed_drop(started_cluster):
run_query(
"CREATE TABLE test_keeper_map_with_failed_drop (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_with_failed_drop') PRIMARY KEY(key);"
)
run_query("INSERT INTO test_keeper_map_with_failed_drop VALUES (1, 11)")
run_query("SYSTEM ENABLE FAILPOINT keepermap_fail_drop_data")
node.query("DROP TABLE test_keeper_map_with_failed_drop SYNC")
zk_client = get_genuine_zk()
assert (
zk_client.get("/test_keeper_map/test_keeper_map_with_failed_drop/data")
is not None
)
run_query("SYSTEM DISABLE FAILPOINT keepermap_fail_drop_data")
run_query(
"CREATE TABLE test_keeper_map_with_failed_drop_another (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_with_failed_drop') PRIMARY KEY(key);"
)
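# With the failpoint enabled, DROP leaves the data behind in Keeper; the second
# CREATE presumably checks that a new table on the same
# '/test_keeper_map_with_failed_drop' path still works over the stale data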

View File

@ -41,7 +41,7 @@ function thread3()
function thread4()
{
-while true; do $CLICKHOUSE_CLIENT --receive_timeout=3 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done
+while true; do $CLICKHOUSE_CLIENT --receive_timeout=1 -q "OPTIMIZE TABLE alter_table0 FINAL" | grep -Fv "Timeout exceeded while receiving data from server"; done
}
function thread5()

View File

@ -1,126 +0,0 @@
12 4 21722 2209341 4 1415 2333 4 61 64 3
21 1134 11363 58484 1106 1458 1592 136 26 62 32
22 210 4504 5729 196 291 767 124 47 54 8
26 196 1327684 5221 195 4140 5661 161 28 49 19
28 5 2034378 7102 5 325 3255 2 53 60 4
29 53 45041 45189 45 1580 211 31 55 84 18
38 424 1600675 4653 424 562 5944 244 60 65 6
45 17 62743 674873 17 6239 6494 17 65 76 8
72 1862 1210073 6200 1677 2498 528 859 51 61 11
79 2 2255228 2255293 2 5495 7057 2 65 65 1
85 459 1051571 1829638 459 6402 7131 334 32 61 25
86 10 1748130 1754217 10 4374 7003 10 56 59 4
91 165 5718 5802 75 282 7113 112 41 63 22
94 20 1231916 2050003 20 4802 4917 19 53 59 7
99 2 3665 36667 2 497 697 2 70 71 2
103 1 2446615 2446615 1 2498 2498 1 58 58 1
106 72 6149 6699 67 527 826 40 61 61 1
111 43 2273186 5272 43 492 4923 4 54 72 15
120 3129 45117 6735 2868 1030 1625 561 59 64 6
138 2 49243 49374 2 1428 1519 2 47 48 2
143 100 23321 63639 100 1115 1624 88 51 51 1
145 1 2447976 2447976 1 6173 6173 1 44 44 1
153 16 13748 16881 16 1506 1636 16 54 68 9
159 19952 1525336 7131 12957 1280 6163 2668 24 66 39
171 5 15042 16698 5 1302 1608 5 65 65 1
179 6264 1362341 2686 6244 2554 7132 2705 61 67 7
192 1 1639623 1639623 1 3406 3406 1 32 32 1
193 1 1429969 1429969 1 7131 7131 1 45 45 1
207 12 23057 32500 12 1491 1726 12 32 46 7
221 5081 1366870 6649 3432 4527 5226 687 24 69 39
228 73 12281 17929 71 1328 2034 63 49 71 18
229 2 1617478 1723791 2 4590 5578 2 41 42 2
230 3916 1332729 6949 3668 1330 4703 845 62 65 4
238 25 2624456 2625673 24 2535 6465 25 58 75 14
241 154 2554929 2616444 154 2626 7131 148 34 57 17
248 276 15529 30987 274 1040 1222 136 37 79 27
254 3018 33966 6635 2837 1057 1622 539 24 60 33
255 20 1581774 1811334 20 6068 6301 18 33 57 10
256 5 5145 6841 5 367 376 5 58 58 1
270 2 2195579 2262119 2 7102 7123 2 33 34 2
281 32 2379460 616215 32 6042 6086 23 53 64 12
282 7 1292651 24244 7 1607 2455 6 46 55 5
286 123 1521935 5269 123 3793 3940 81 40 66 22
291 21 2419080 3567 21 297 4731 21 54 55 2
316 4 5221 5616 4 505 558 4 32 35 3
319 232 56480 63033 230 1599 313 50 33 64 26
327 15 51647 51894 14 1292 1585 14 47 57 7
332 24 23484 54948 24 1609 1726 16 32 49 11
333 1 14189 14189 1 1550 1550 1 63 63 1
342 49 2579220 2622432 49 4626 6933 48 34 54 14
344 1 6486 6486 1 509 509 1 24 24 1
346 1987 53016 6735 1823 1334 174 294 26 62 32
358 45 59058 60844 44 6746 722 40 57 84 15
363 1198 1260033 2568811 1196 5710 5790 82 55 80 26
384 150 2361175 476024 150 7008 7123 81 38 64 22
387 277 5200 6553 252 243 521 130 65 65 1
392 1877 1607428 2030850 1875 1416 7131 1379 54 66 13
396 8181 1380803 6186 7920 545 798 1743 24 67 39
398 3 5183 5213 2 291 352 3 53 59 3
399 62 51494 59203 61 7073 754 42 55 78 18
412 2141 1360120 2189792 2136 2491 5658 1371 71 75 5
413 2 2036037 2064917 2 3963 4666 2 43 45 2
431 33 2302331 2348449 33 4425 6516 32 69 69 1
447 59 25125 33094 59 1176 1817 56 53 58 6
456 1 53157 53157 1 1556 1556 1 26 26 1
462 5 5456 6280 5 348 4337 5 28 40 5
472 1 1443716 1443716 1 6122 6122 1 42 42 1
491 34 1066102 1183673 34 6606 6822 32 46 67 15
498 896 2230163 3054 895 537 7131 714 24 59 28
504 108 12281 25180 108 1318 1784 94 55 66 12
515 22 1588883 2640809 22 6554 6571 15 46 59 12
518 1 37743 37743 1 1558 1558 1 72 72 1
530 1 3033 3033 1 561 561 1 59 59 1
532 26 5721 6355 25 549 665 14 44 50 7
546 156 2577874 48517 156 1105 324 133 44 51 8
554 12 1665194 2640066 12 1817 2951 12 57 57 1
564 3865 2028049 2083433 3722 1115 985 2203 44 84 41
566 4432 50605 57509 3217 1191 267 459 26 72 39
567 8 5221 5893 7 333 558 8 27 35 4
582 1172 1320619 2019743 1172 5819 7131 757 26 63 30
584 43100 2500 5594 22561 134 4573 1660 48 84 37
589 28 6046 6068 19 345 564 27 55 62 8
595 139 1585165 1683606 138 2231 3598 132 54 84 28
615 3 1056081 1116230 3 5794 5796 2 59 62 3
619 7 1543114 5241 7 2442 3105 7 41 45 3
634 2722 1221058 4999 2686 2426 7131 1735 54 60 7
635 237 2119333 4667 237 561 5999 176 49 60 12
644 5 1774169 2056171 5 5591 6091 4 33 39 3
647 8 51632 64403 8 1457 1624 8 26 34 5
651 1325 1620565 6281 1301 528 792 815 62 63 2
665 13 4598 4789 13 511 558 11 39 46 7
679 1560 1613200 25940 1552 1569 3118 781 49 84 35
704 2 14226 15594 2 1086 1116 2 65 71 2
715 25 1199352 3490 25 5036 5112 23 34 55 13
716 1253 61989 6735 1050 1203 1625 397 52 65 14
730 2584 5560 6170 634 2421 627 293 56 69 14
736 8 1433153 4941 8 339 4594 8 28 36 5
749 2 1326176 1339862 2 4339 6213 2 49 50 2
753 1 53157 53157 1 1556 1556 1 26 26 1
761 63 1443230 6881 63 3154 3204 26 56 73 14
762 49 1449596 1968154 49 2437 3753 48 54 62 9
775 35107 5330 769436 2471 447 6607 656 70 81 12
789 1 1552458 1552458 1 2441 2441 1 62 62 1
794 158 5585 6585 155 495 929 67 24 50 20
839 9 29223 46530 9 1336 1465 9 52 52 1
844 5 2377545 2377635 5 5129 6321 5 53 69 5
846 50 2172273 2589295 50 1582 3053 48 64 68 5
847 2577 56656 63658 1582 1444 838 474 26 63 33
861 1333 5570 6909 839 457 489 37 33 70 34
873 2360 1519811 50487 2248 1310 1784 316 60 68 9
879 228 6704 6785 79 279 507 121 35 66 24
889 5130 2070007 39692 5040 1151 6791 2606 44 66 23
896 4 511246 859452 4 6554 6561 4 67 71 4
912 146 1322641 2238040 146 1366 6354 143 59 59 1
913 82 5495 6870 78 350 565 67 24 43 15
921 763 1580790 416881 763 6191 7131 509 63 64 2
925 318 2500952 5025 309 476 6114 182 32 56 21
931 12 4277 4809 12 238 256 9 63 83 9
942 954 1331 2228193 952 1121 5047 788 65 70 6
948 14 1785593 2600431 14 6550 6598 13 34 49 9
956 5 5755 6023 5 359 411 5 43 48 4
963 4 3812 3835 4 444 537 4 47 53 4
978 5 51632 58212 5 1127 1556 5 24 32 5
980 53 47201 59744 53 1537 1625 36 41 49 9
987 6033 2020131 763444 4306 256 792 1832 60 64 5
993 4 1615159 1718339 4 1570 3093 4 62 63 2

View File

@ -0,0 +1,15 @@
12 4 21722 2209341 4 1415 2333 4 61 64 3
21 1134 11363 58484 1106 1458 1592 136 26 62 32
22 210 4504 5729 196 291 767 124 47 54 8
26 196 1327684 5221 195 4140 5661 161 28 49 19
28 5 2034378 7102 5 325 3255 2 53 60 4
29 53 45041 45189 45 1580 211 31 55 84 18
38 424 1600675 4653 424 562 5944 244 60 65 6
45 17 62743 674873 17 6239 6494 17 65 76 8
72 1862 1210073 6200 1677 2498 528 859 51 61 11
79 2 2255228 2255293 2 5495 7057 2 65 65 1
85 459 1051571 1829638 459 6402 7131 334 32 61 25
86 10 1748130 1754217 10 4374 7003 10 56 59 4
91 165 5718 5802 75 282 7113 112 41 63 22
94 20 1231916 2050003 20 4802 4917 19 53 59 7
99 2 3665 36667 2 497 697 2 70 71 2

View File

@ -26,7 +26,7 @@ DETACH TABLE test;
ATTACH TABLE test;
"
-for i in {1..1000}
+for i in {1..100}
do
echo "
WITH ${i} AS try

View File

@ -0,0 +1,13 @@
912 146 1322641 2238040 146 1366 6354 143 59 59 1
913 82 5495 6870 78 350 565 67 24 43 15
921 763 1580790 416881 763 6191 7131 509 63 64 2
925 318 2500952 5025 309 476 6114 182 32 56 21
931 12 4277 4809 12 238 256 9 63 83 9
942 954 1331 2228193 952 1121 5047 788 65 70 6
948 14 1785593 2600431 14 6550 6598 13 34 49 9
956 5 5755 6023 5 359 411 5 43 48 4
963 4 3812 3835 4 444 537 4 47 53 4
978 5 51632 58212 5 1127 1556 5 24 32 5
980 53 47201 59744 53 1537 1625 36 41 49 9
987 6033 2020131 763444 4306 256 792 1832 60 64 5
993 4 1615159 1718339 4 1570 3093 4 62 63 2

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {901..1000}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,13 @@
103 1 2446615 2446615 1 2498 2498 1 58 58 1
106 72 6149 6699 67 527 826 40 61 61 1
111 43 2273186 5272 43 492 4923 4 54 72 15
120 3129 45117 6735 2868 1030 1625 561 59 64 6
138 2 49243 49374 2 1428 1519 2 47 48 2
143 100 23321 63639 100 1115 1624 88 51 51 1
145 1 2447976 2447976 1 6173 6173 1 44 44 1
153 16 13748 16881 16 1506 1636 16 54 68 9
159 19952 1525336 7131 12957 1280 6163 2668 24 66 39
171 5 15042 16698 5 1302 1608 5 65 65 1
179 6264 1362341 2686 6244 2554 7132 2705 61 67 7
192 1 1639623 1639623 1 3406 3406 1 32 32 1
193 1 1429969 1429969 1 7131 7131 1 45 45 1

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {101..200}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,16 @@
207 12 23057 32500 12 1491 1726 12 32 46 7
221 5081 1366870 6649 3432 4527 5226 687 24 69 39
228 73 12281 17929 71 1328 2034 63 49 71 18
229 2 1617478 1723791 2 4590 5578 2 41 42 2
230 3916 1332729 6949 3668 1330 4703 845 62 65 4
238 25 2624456 2625673 24 2535 6465 25 58 75 14
241 154 2554929 2616444 154 2626 7131 148 34 57 17
248 276 15529 30987 274 1040 1222 136 37 79 27
254 3018 33966 6635 2837 1057 1622 539 24 60 33
255 20 1581774 1811334 20 6068 6301 18 33 57 10
256 5 5145 6841 5 367 376 5 58 58 1
270 2 2195579 2262119 2 7102 7123 2 33 34 2
281 32 2379460 616215 32 6042 6086 23 53 64 12
282 7 1292651 24244 7 1607 2455 6 46 55 5
286 123 1521935 5269 123 3793 3940 81 40 66 22
291 21 2419080 3567 21 297 4731 21 54 55 2

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {201..300}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,16 @@
316 4 5221 5616 4 505 558 4 32 35 3
319 232 56480 63033 230 1599 313 50 33 64 26
327 15 51647 51894 14 1292 1585 14 47 57 7
332 24 23484 54948 24 1609 1726 16 32 49 11
333 1 14189 14189 1 1550 1550 1 63 63 1
342 49 2579220 2622432 49 4626 6933 48 34 54 14
344 1 6486 6486 1 509 509 1 24 24 1
346 1987 53016 6735 1823 1334 174 294 26 62 32
358 45 59058 60844 44 6746 722 40 57 84 15
363 1198 1260033 2568811 1196 5710 5790 82 55 80 26
384 150 2361175 476024 150 7008 7123 81 38 64 22
387 277 5200 6553 252 243 521 130 65 65 1
392 1877 1607428 2030850 1875 1416 7131 1379 54 66 13
396 8181 1380803 6186 7920 545 798 1743 24 67 39
398 3 5183 5213 2 291 352 3 53 59 3
399 62 51494 59203 61 7073 754 42 55 78 18

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {301..400}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,9 @@
412 2141 1360120 2189792 2136 2491 5658 1371 71 75 5
413 2 2036037 2064917 2 3963 4666 2 43 45 2
431 33 2302331 2348449 33 4425 6516 32 69 69 1
447 59 25125 33094 59 1176 1817 56 53 58 6
456 1 53157 53157 1 1556 1556 1 26 26 1
462 5 5456 6280 5 348 4337 5 28 40 5
472 1 1443716 1443716 1 6122 6122 1 42 42 1
491 34 1066102 1183673 34 6606 6822 32 46 67 15
498 896 2230163 3054 895 537 7131 714 24 59 28

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {401..500}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,14 @@
504 108 12281 25180 108 1318 1784 94 55 66 12
515 22 1588883 2640809 22 6554 6571 15 46 59 12
518 1 37743 37743 1 1558 1558 1 72 72 1
530 1 3033 3033 1 561 561 1 59 59 1
532 26 5721 6355 25 549 665 14 44 50 7
546 156 2577874 48517 156 1105 324 133 44 51 8
554 12 1665194 2640066 12 1817 2951 12 57 57 1
564 3865 2028049 2083433 3722 1115 985 2203 44 84 41
566 4432 50605 57509 3217 1191 267 459 26 72 39
567 8 5221 5893 7 333 558 8 27 35 4
582 1172 1320619 2019743 1172 5819 7131 757 26 63 30
584 43100 2500 5594 22561 134 4573 1660 48 84 37
589 28 6046 6068 19 345 564 27 55 62 8
595 139 1585165 1683606 138 2231 3598 132 54 84 28

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {501..600}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,9 @@
615 3 1056081 1116230 3 5794 5796 2 59 62 3
619 7 1543114 5241 7 2442 3105 7 41 45 3
634 2722 1221058 4999 2686 2426 7131 1735 54 60 7
635 237 2119333 4667 237 561 5999 176 49 60 12
644 5 1774169 2056171 5 5591 6091 4 33 39 3
647 8 51632 64403 8 1457 1624 8 26 34 5
651 1325 1620565 6281 1301 528 792 815 62 63 2
665 13 4598 4789 13 511 558 11 39 46 7
679 1560 1613200 25940 1552 1569 3118 781 49 84 35

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {601..700}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,12 @@
704 2 14226 15594 2 1086 1116 2 65 71 2
715 25 1199352 3490 25 5036 5112 23 34 55 13
716 1253 61989 6735 1050 1203 1625 397 52 65 14
730 2584 5560 6170 634 2421 627 293 56 69 14
736 8 1433153 4941 8 339 4594 8 28 36 5
749 2 1326176 1339862 2 4339 6213 2 49 50 2
753 1 53157 53157 1 1556 1556 1 26 26 1
761 63 1443230 6881 63 3154 3204 26 56 73 14
762 49 1449596 1968154 49 2437 3753 48 54 62 9
775 35107 5330 769436 2471 447 6607 656 70 81 12
789 1 1552458 1552458 1 2441 2441 1 62 62 1
794 158 5585 6585 155 495 929 67 24 50 20

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {701..800}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -0,0 +1,9 @@
839 9 29223 46530 9 1336 1465 9 52 52 1
844 5 2377545 2377635 5 5129 6321 5 53 69 5
846 50 2172273 2589295 50 1582 3053 48 64 68 5
847 2577 56656 63658 1582 1444 838 474 26 63 33
861 1333 5570 6909 839 457 489 37 33 70 34
873 2360 1519811 50487 2248 1310 1784 316 60 68 9
879 228 6704 6785 79 279 507 121 35 66 24
889 5130 2070007 39692 5040 1151 6791 2606 44 66 23
896 4 511246 859452 4 6554 6561 4 67 71 4

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} --multiquery "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11;
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/4)), round(pow(sipHash64(2, number), 1/6)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/3)), round(pow(sipHash64(2, number), 1/5)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
INSERT INTO test
SELECT round(pow(sipHash64(1, number), 1/5)), round(pow(sipHash64(2, number), 1/7)), round(pow(sipHash64(3, number), 1/10))
FROM numbers(100000);
DETACH TABLE test;
ATTACH TABLE test;
"
for i in {801..900}
do
echo "
WITH ${i} AS try
SELECT try, count(), min(a), max(a), uniqExact(a), min(b), max(b), uniqExact(b), min(c), max(c), uniqExact(c) FROM test
WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1)::String
AND a <= (a1 + round(pow(sipHash64(3, try), 1 / (3 + sipHash64(4, try) % 8))))::String
AND b >= (round(pow(sipHash64(5, try), 1 / (3 + sipHash64(6, try) % 8))) AS b1)::String
AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String
AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String
AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String
HAVING count() > 0;
"
done | ${CLICKHOUSE_CLIENT} --multiquery
${CLICKHOUSE_CLIENT} --multiquery "DROP TABLE test"

View File

@ -1,92 +0,0 @@
MergeTree compact + horizontal merge
test
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None
MergeTree wide + horizontal merge
test
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None
MergeTree compact + vertical merge
test
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None
MergeTree wide + vertical merge
test
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None

View File

@ -1,53 +0,0 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0"
function test()
{
echo "test"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
}
$CH_CLIENT -q "drop table if exists test;"
echo "MergeTree compact + horizontal merge"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;"
test
$CH_CLIENT -q "drop table test;"
echo "MergeTree wide + horizontal merge"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;"
test
$CH_CLIENT -q "drop table test;"
echo "MergeTree compact + vertical merge"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;"
test
$CH_CLIENT -q "drop table test;"
echo "MergeTree wide + vertical merge"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;"
test
$CH_CLIENT -q "drop table test;"

View File

@ -0,0 +1,21 @@
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0"
$CH_CLIENT -q "drop table if exists test;"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "drop table test;"

View File

@ -0,0 +1,21 @@
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0"
$CH_CLIENT -q "drop table if exists test;"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "drop table test;"

View File

@ -0,0 +1,21 @@
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0"
$CH_CLIENT -q "drop table if exists test;"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "drop table test;"

View File

@ -0,0 +1,21 @@
16667 Tuple(a Dynamic(max_types=3)):Date
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):String
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 UInt64:None
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 UInt64:None
16667 Tuple(a Dynamic(max_types=3)):DateTime
33333 Tuple(a Dynamic(max_types=3)):Array(UInt8)
50000 Tuple(a Dynamic(max_types=3)):UInt64
66667 Tuple(a Dynamic(max_types=3)):String
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
133333 Tuple(a Dynamic(max_types=3)):None
50000 Tuple(a Dynamic(max_types=3)):UInt64
100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64)
100000 UInt64:None
116667 Tuple(a Dynamic(max_types=3)):String
133333 Tuple(a Dynamic(max_types=3)):None

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Tags: long
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# reset --log_comment
CLICKHOUSE_LOG_COMMENT=
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_experimental_dynamic_type=1 --enable_named_columns_in_function_tuple=0"
$CH_CLIENT -q "drop table if exists test;"
$CH_CLIENT -q "create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;"
$CH_CLIENT -q "system stop merges test"
$CH_CLIENT -q "insert into test select number, number from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000)"
$CH_CLIENT -q "insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000)"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -nm -q "system start merges test; optimize table test final;"
$CH_CLIENT -q "select count(), dynamicType(d) || ':' || dynamicType(d.\`Tuple(a Dynamic(max_types=3))\`.a) as type from test group by type order by count(), type"
$CH_CLIENT -q "drop table test;"

View File

@ -0,0 +1,4 @@
CREATE TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x;
INSERT INTO t SELECT 0, number FROM numbers(10) SETTINGS max_block_size = 100;
SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 0;
SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 1;
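-- Presumably both analyzer settings must agree here: the insert forms a single
-- block of 10 rows, so exactly one row satisfies rowNumberInAllBlocks() = 1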

View File

@ -51,6 +51,14 @@ function check_replication_consistency()
table_name_prefix=$1
check_query_part=$2
# Try to kill some mutations because sometimes tests create too many of them (it's not guaranteed to kill all mutations, see below)
# Try multiple replicas, because queries are not finished yet, and "global" KILL MUTATION may fail due to another query (like DROP TABLE)
readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%'")
for t in "${tables_arr[@]}"
do
${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table='$t'" > /dev/null 2>/dev/null
done
# Wait for all queries to finish (query may still be running if thread is killed by timeout)
num_tries=0
while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE current_database=currentDatabase() AND query LIKE '%$table_name_prefix%'") -ne 1 ]]; do
@ -96,7 +104,7 @@ function check_replication_consistency()
some_table=$($CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database=currentDatabase() AND name like '$table_name_prefix%' ORDER BY rand() LIMIT 1")
$CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA $some_table PULL" 1>/dev/null 2>/dev/null ||:
-# Forcefully cancel mutations to avoid waiting for them to finish
+# Forcefully cancel mutations to avoid waiting for them to finish. Kills the remaining mutations
${CLICKHOUSE_CLIENT} -q "KILL MUTATION WHERE database=currentDatabase() AND table like '$table_name_prefix%'" > /dev/null
# SYNC REPLICA is not enough if some MUTATE_PARTs are not assigned yet

View File

@ -1,6 +1,6 @@
DROP TABLE IF EXISTS hits_none;
CREATE TABLE hits_none (Title String CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
-INSERT INTO hits_none SELECT Title FROM test.hits;
+INSERT INTO hits_none SELECT Title FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16;
SET min_bytes_to_use_mmap_io = 1;
SELECT sum(length(Title)) FROM hits_none;