diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 1e70213adf5..836421f34dd 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -79,13 +79,14 @@ jobs: - name: Set envs run: | cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse + BUILD_NAME=coverity CACHES_PATH=${{runner.temp}}/../ccaches CHECK_NAME=ClickHouse build check (actions) - BUILD_NAME=coverity + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + TEMP_PATH=${{runner.temp}}/build_check EOF + echo "COVERITY_TOKEN=${{ secrets.COVERITY_TOKEN }}" >> "$GITHUB_ENV" - name: Download changed images uses: actions/download-artifact@v2 with: @@ -105,12 +106,12 @@ jobs: sudo rm -fr "$TEMP_PATH" mkdir -p "$TEMP_PATH" cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME" "${{ secrets.COV_TOKEN }}" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$CHECK_NAME" "$BUILD_NAME" - name: Upload Coverity Analysis if: ${{ success() || failure() }} run: | - curl --form token='${{ secrets.COV_TOKEN }}' \ - --form email='${{ secrets.ROBOT_CLICKHOUSE_EMAIL }}' \ + curl --form token="${COVERITY_TOKEN}" \ + --form email='security+coverity@clickhouse.com' \ --form file="@$TEMP_PATH/$BUILD_NAME/clickhouse-scan.tgz" \ --form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \ --form description="Nightly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 269d3eb52c6..2bedb50dd40 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -27,7 +27,9 @@ cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUI if [ "coverity" == "$COMBINED_OUTPUT" ] then - wget --post-data "token=$COV_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1 + mkdir -p /opt/cov-analysis + + wget --post-data "token=$COVERITY_TOKEN&project=ClickHouse%2FClickHouse" -qO- https://scan.coverity.com/download/linux64 | tar xz -C /opt/cov-analysis --strip-components 1 export PATH=$PATH:/opt/cov-analysis/bin cov-configure --config ./coverity.config --template --comptype clangcc --compiler "$CC" SCAN_WRAPPER="cov-build --config ./coverity.config --dir cov-int" diff --git a/docker/packager/packager b/docker/packager/packager index 1a79b497fa2..9a72a16bd70 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -86,7 +86,6 @@ def parse_env_variables( additional_pkgs, with_coverage, with_binaries, - coverity_scan, ): DARWIN_SUFFIX = "-darwin" DARWIN_ARM_SUFFIX = "-darwin-aarch64" @@ -179,7 +178,7 @@ def parse_env_variables( cmake_flags.append("-DENABLE_TESTS=0") elif package_type == "coverity": result.append("COMBINED_OUTPUT=coverity") - result.append("COV_TOKEN={}".format(cov_token)) + result.append('COVERITY_TOKEN="$COVERITY_TOKEN"') elif split_binary: result.append("COMBINED_OUTPUT=shared_build") @@ -328,13 +327,16 @@ if __name__ == "__main__": parser.add_argument( "--docker-image-version", default="latest", help="docker image tag to use" ) - parser.add_argument("--cov_token", default="") args = parser.parse_args() if not os.path.isabs(args.output_dir): args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir)) - image_type = "binary" if args.package_type in ("performance",
"coverity") else args.package_type + image_type = ( + "binary" + if args.package_type in ("performance", "coverity") + else args.package_type + ) image_name = "clickhouse/binary-builder" if not os.path.isabs(args.clickhouse_repo_path): @@ -376,7 +378,6 @@ if __name__ == "__main__": args.additional_pkgs, args.with_coverage, args.with_binaries, - args.cov_token, ) run_docker_image_with_env( diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh index 861e17848a4..6aa9d88f5b4 100755 --- a/docker/test/stateful/run.sh +++ b/docker/test/stateful/run.sh @@ -96,7 +96,7 @@ else clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) 
ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits" + clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0" fi clickhouse-client --query "SHOW TABLES FROM test" diff --git a/docker/test/stress/stress b/docker/test/stress/stress index 86f8edf5980..10c6088af75 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -10,7 +10,7 @@ import logging import time -def get_options(i): +def get_options(i, backward_compatibility_check): options = [] client_options = [] if 0 < i: @@ -19,7 +19,7 @@ def get_options(i): if i % 3 == 1: options.append("--db-engine=Ordinary") - if i % 3 == 2: + if i % 3 == 2 and not backward_compatibility_check: options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i)) client_options.append('allow_experimental_database_replicated=1') @@ -57,7 +57,7 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_t pipes = [] for i in range(0, len(output_paths)): f = open(output_paths[i], 'w') - full_command = "{} {} {} {} {}".format(cmd, get_options(i), global_time_limit_option, skip_tests_option, backward_compatibility_check_option) + full_command = "{} {} {} {} {}".format(cmd, get_options(i, backward_compatibility_check), global_time_limit_option, skip_tests_option, backward_compatibility_check_option) logging.info("Run func tests '%s'", full_command) p = Popen(full_command, shell=True, stdout=f, stderr=f) pipes.append(p) diff --git a/src/Common/CurrentThread.h b/src/Common/CurrentThread.h index 9dbe8d355d6..4888adb511a 100644 --- a/src/Common/CurrentThread.h +++ b/src/Common/CurrentThread.h @@ -91,6 +91,7 @@ public: struct QueryScope { explicit QueryScope(ContextMutablePtr query_context); + explicit QueryScope(ContextPtr query_context); ~QueryScope(); void logPeakMemoryUsage(); diff --git a/src/Common/FileCache.cpp b/src/Common/FileCache.cpp index d648267b95d..05d32f5ffe4 100644 --- a/src/Common/FileCache.cpp +++ b/src/Common/FileCache.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -31,13 +32,11 @@ namespace IFileCache::IFileCache( const String & cache_base_path_, - size_t max_size_, - size_t max_element_size_, - size_t max_file_segment_size_) + const FileCacheSettings & cache_settings_) : cache_base_path(cache_base_path_) - , max_size(max_size_) - , max_element_size(max_element_size_) - , max_file_segment_size(max_file_segment_size_) + , max_size(cache_settings_.max_size) + , max_element_size(cache_settings_.max_elements) + , max_file_segment_size(cache_settings_.max_file_segment_size) { } @@ -58,7 +57,7 @@ String IFileCache::getPathInLocalCache(const Key & key) return fs::path(cache_base_path) / key_str.substr(0, 3) / key_str; } -bool IFileCache::shouldBypassCache() +bool IFileCache::isReadOnly() { return !CurrentThread::isInitialized() || !CurrentThread::get().getQueryContext() @@ -71,8 +70,8 @@ void IFileCache::assertInitialized() const throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cache not initialized"); } -LRUFileCache::LRUFileCache(const String & cache_base_path_, size_t max_size_, size_t max_element_size_, size_t max_file_segment_size_) - : IFileCache(cache_base_path_, max_size_, max_element_size_, max_file_segment_size_) 
+LRUFileCache::LRUFileCache(const String & cache_base_path_, const FileCacheSettings & cache_settings_) + : IFileCache(cache_base_path_, cache_settings_) , log(&Poco::Logger::get("LRUFileCache")) { } @@ -205,8 +204,8 @@ FileSegments LRUFileCache::getImpl( return result; } -FileSegments LRUFileCache::splitRangeIntoEmptyCells( - const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock) +FileSegments LRUFileCache::splitRangeIntoCells( + const Key & key, size_t offset, size_t size, FileSegment::State state, std::lock_guard & cache_lock) { assert(size > 0); @@ -222,9 +221,10 @@ FileSegments LRUFileCache::splitRangeIntoEmptyCells( current_cell_size = std::min(remaining_size, max_file_segment_size); remaining_size -= current_cell_size; - auto * cell = addCell(key, current_pos, current_cell_size, FileSegment::State::EMPTY, cache_lock); + auto * cell = addCell(key, current_pos, current_cell_size, state, cache_lock); if (cell) file_segments.push_back(cell->file_segment); + assert(cell); current_pos += current_cell_size; } @@ -250,7 +250,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t if (file_segments.empty()) { - file_segments = splitRangeIntoEmptyCells(key, offset, size, cache_lock); + file_segments = splitRangeIntoCells(key, offset, size, FileSegment::State::EMPTY, cache_lock); } else { @@ -295,7 +295,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t assert(current_pos < segment_range.left); auto hole_size = segment_range.left - current_pos; - file_segments.splice(it, splitRangeIntoEmptyCells(key, current_pos, hole_size, cache_lock)); + file_segments.splice(it, splitRangeIntoCells(key, current_pos, hole_size, FileSegment::State::EMPTY, cache_lock)); current_pos = segment_range.right + 1; ++it; @@ -309,7 +309,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t /// segmentN auto hole_size = range.right - current_pos + 1; - file_segments.splice(file_segments.end(), splitRangeIntoEmptyCells(key, current_pos, hole_size, cache_lock)); + file_segments.splice(file_segments.end(), splitRangeIntoCells(key, current_pos, hole_size, FileSegment::State::EMPTY, cache_lock)); } } @@ -354,6 +354,21 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell( return &(it->second); } +FileSegmentsHolder LRUFileCache::setDownloading(const Key & key, size_t offset, size_t size) +{ + std::lock_guard cache_lock(mutex); + + auto * cell = getCell(key, offset, cache_lock); + if (cell) + throw Exception( + ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, + "Cache cell already exists for key `{}` and offset {}", + keyToStr(key), offset); + + auto file_segments = splitRangeIntoCells(key, offset, size, FileSegment::State::DOWNLOADING, cache_lock); + return FileSegmentsHolder(std::move(file_segments)); +} + bool LRUFileCache::tryReserve( const Key & key_, size_t offset_, size_t size, std::lock_guard & cache_lock) { @@ -372,7 +387,8 @@ bool LRUFileCache::tryReserve( auto is_overflow = [&] { - return (current_size + size - removed_size > max_size) + /// max_size == 0 means unlimited cache size, max_element_size == 0 means unlimited number of cache elements. + return (max_size != 0 && current_size + size - removed_size > max_size) || (max_element_size != 0 && queue_size > max_element_size); }; @@ -484,6 +500,30 @@ void LRUFileCache::remove(const Key & key) fs::remove(key_path); } +void LRUFileCache::tryRemoveAll() +{ + /// Try to remove all cached files under cache_base_path. + /// Only releasable file segments are evicted.
+ + std::lock_guard cache_lock(mutex); + + for (auto it = queue.begin(); it != queue.end();) + { + auto & [key, offset] = *it++; + + auto * cell = getCell(key, offset, cache_lock); + if (cell->releasable()) + { + auto file_segment = cell->file_segment; + if (file_segment) + { + std::lock_guard segment_lock(file_segment->mutex); + remove(file_segment->key(), file_segment->offset(), cache_lock, segment_lock); + } + } + } +} + void LRUFileCache::remove( Key key, size_t offset, std::lock_guard & cache_lock, std::lock_guard & /* segment_lock */) @@ -668,6 +708,38 @@ bool LRUFileCache::isLastFileSegmentHolder( return cell->file_segment.use_count() == 2; } +FileSegments LRUFileCache::getSnapshot() const +{ + std::lock_guard cache_lock(mutex); + + FileSegments file_segments; + + for (const auto & [key, cells_by_offset] : files) + { + for (const auto & [offset, cell] : cells_by_offset) + file_segments.push_back(FileSegment::getSnapshot(cell.file_segment, cache_lock)); + } + + return file_segments; +} + +std::vector LRUFileCache::tryGetCachePaths(const Key & key) +{ + std::lock_guard cache_lock(mutex); + + std::vector cache_paths; + + const auto & cells_by_offset = files[key]; + + for (const auto & [offset, cell] : cells_by_offset) + { + if (cell.file_segment->state() == FileSegment::State::DOWNLOADED) + cache_paths.push_back(getPathInLocalCache(key, offset)); + } + + return cache_paths; +} + LRUFileCache::FileSegmentCell::FileSegmentCell(FileSegmentPtr file_segment_, LRUQueue & queue_) : file_segment(file_segment_) { @@ -685,12 +757,13 @@ LRUFileCache::FileSegmentCell::FileSegmentCell(FileSegmentPtr file_segment_, LRU break; } case FileSegment::State::EMPTY: + case FileSegment::State::DOWNLOADING: { break; } default: throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, - "Can create cell with either DOWNLOADED or EMPTY state, got: {}", + "Can create cell with either EMPTY, DOWNLOADED, DOWNLOADING state, got: {}", FileSegment::stateToString(file_segment->download_state)); } } diff --git a/src/Common/FileCache.h b/src/Common/FileCache.h index d58711cef0a..e706376bc89 100644 --- a/src/Common/FileCache.h +++ b/src/Common/FileCache.h @@ -33,9 +33,7 @@ public: IFileCache( const String & cache_base_path_, - size_t max_size_, - size_t max_element_size_, - size_t max_file_segment_size_); + const FileCacheSettings & cache_settings_); virtual ~IFileCache() = default; @@ -44,7 +42,9 @@ public: virtual void remove(const Key & key) = 0; - static bool shouldBypassCache(); + virtual void tryRemoveAll() = 0; + + static bool isReadOnly(); /// Cache capacity in bytes. size_t capacity() const { return max_size; } @@ -55,6 +55,10 @@ public: String getPathInLocalCache(const Key & key); + const String & getBasePath() const { return cache_base_path; } + + virtual std::vector tryGetCachePaths(const Key & key) = 0; + /** * Given an `offset` and `size` representing [offset, offset + size) bytes interval, * return list of cached non-overlapping non-empty @@ -68,6 +72,10 @@ public: */ virtual FileSegmentsHolder getOrSet(const Key & key, size_t offset, size_t size) = 0; + virtual FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) = 0; + + virtual FileSegments getSnapshot() const = 0; + /// For debug. 
virtual String dumpStructure(const Key & key) = 0; @@ -112,16 +120,22 @@ class LRUFileCache final : public IFileCache public: LRUFileCache( const String & cache_base_path_, - size_t max_size_, - size_t max_element_size_ = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS, - size_t max_file_segment_size_ = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE); + const FileCacheSettings & cache_settings_); FileSegmentsHolder getOrSet(const Key & key, size_t offset, size_t size) override; + FileSegments getSnapshot() const override; + + FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) override; + void initialize() override; void remove(const Key & key) override; + void tryRemoveAll() override; + + std::vector tryGetCachePaths(const Key & key) override; + private: using FileKeyAndOffset = std::pair; using LRUQueue = std::list; @@ -194,8 +208,8 @@ private: void loadCacheInfoIntoMemory(); - FileSegments splitRangeIntoEmptyCells( - const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock); + FileSegments splitRangeIntoCells( + const Key & key, size_t offset, size_t size, FileSegment::State state, std::lock_guard & cache_lock); String dumpStructureImpl(const Key & key_, std::lock_guard & cache_lock); diff --git a/src/Common/FileCacheFactory.cpp b/src/Common/FileCacheFactory.cpp index fc8dff0b26c..9eadea05547 100644 --- a/src/Common/FileCacheFactory.cpp +++ b/src/Common/FileCacheFactory.cpp @@ -15,28 +15,53 @@ FileCacheFactory & FileCacheFactory::instance() return ret; } -FileCachePtr FileCacheFactory::getImpl(const std::string & cache_base_path, std::lock_guard &) +FileCacheFactory::CacheByBasePath FileCacheFactory::getAll() +{ + std::lock_guard lock(mutex); + return caches; +} + +const FileCacheSettings & FileCacheFactory::getSettings(const std::string & cache_base_path) +{ + std::lock_guard lock(mutex); + + auto * cache_data = getImpl(cache_base_path, lock); + if (cache_data) + return cache_data->settings; + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path); +} + +FileCacheFactory::CacheData * FileCacheFactory::getImpl(const std::string & cache_base_path, std::lock_guard &) { auto it = caches.find(cache_base_path); if (it == caches.end()) return nullptr; - return it->second; + return &it->second; +} + +FileCachePtr FileCacheFactory::get(const std::string & cache_base_path) +{ + std::lock_guard lock(mutex); + + auto * cache_data = getImpl(cache_base_path, lock); + if (cache_data) + return cache_data->cache; + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path); } FileCachePtr FileCacheFactory::getOrCreate( - const std::string & cache_base_path, size_t max_size, size_t max_elements_size, size_t max_file_segment_size) + const std::string & cache_base_path, const FileCacheSettings & file_cache_settings) { std::lock_guard lock(mutex); - auto cache = getImpl(cache_base_path, lock); - if (cache) - { - if (cache->capacity() != max_size) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cache with path `{}` already exists, but has different max size", cache_base_path); - return cache; - } - cache = std::make_shared(cache_base_path, max_size, max_elements_size, max_file_segment_size); - caches.emplace(cache_base_path, cache); + auto * cache_data = getImpl(cache_base_path, lock); + if (cache_data) + return cache_data->cache; + + auto cache = std::make_shared(cache_base_path, file_cache_settings); + caches.emplace(cache_base_path, CacheData(cache, file_cache_settings)); return cache; } 
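Note on the factory refactoring above: for context, here is a minimal usage sketch of the new `FileCacheSettings`-based API. It mirrors what `getCachePtrForDisk` in `src/Disks/RemoteDisksCommon.cpp` does further down in this diff; the helper name `makeDiskCache` and the exact include paths are illustrative assumptions, not part of the change.

```cpp
#include <Common/FileCacheFactory.h>   // assumed include path
#include <Common/FileCacheSettings.h>  // assumed include path
#include <Poco/Util/AbstractConfiguration.h>

/// Hypothetical helper: build (or fetch) the file cache for one disk's config section.
DB::FileCachePtr makeDiskCache(
    const Poco::Util::AbstractConfiguration & config,
    const std::string & config_prefix,
    const std::string & cache_base_path)
{
    /// Reads data_cache_max_size, data_cache_max_elements, max_file_segment_size
    /// and cache_on_write_operations (see FileCacheSettings::loadFromConfig later in this diff).
    DB::FileCacheSettings settings;
    settings.loadFromConfig(config, config_prefix);

    /// One cache instance per base path; the settings are stored alongside the cache
    /// so FileCacheFactory::getSettings(cache_base_path) can return them later.
    auto cache = DB::FileCacheFactory::instance().getOrCreate(cache_base_path, settings);
    cache->initialize();
    return cache;
}
```

A design point worth noting: `getOrCreate` no longer throws when an existing cache has a different max size; it simply returns the cache already registered for that base path, and the stored `CacheData` keeps the settings it was created with.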
diff --git a/src/Common/FileCacheFactory.h b/src/Common/FileCacheFactory.h index f2432f03cae..3518f487b6d 100644 --- a/src/Common/FileCacheFactory.h +++ b/src/Common/FileCacheFactory.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -14,16 +15,32 @@ namespace DB */ class FileCacheFactory final : private boost::noncopyable { + struct CacheData + { + FileCachePtr cache; + FileCacheSettings settings; + + CacheData(FileCachePtr cache_, const FileCacheSettings & settings_) : cache(cache_), settings(settings_) {} + }; + + using CacheByBasePath = std::unordered_map; + public: static FileCacheFactory & instance(); - FileCachePtr getOrCreate(const std::string & cache_base_path, size_t max_size, size_t max_elements_size, size_t max_file_segment_size); + FileCachePtr getOrCreate(const std::string & cache_base_path, const FileCacheSettings & file_cache_settings); + + FileCachePtr get(const std::string & cache_base_path); + + CacheByBasePath getAll(); + + const FileCacheSettings & getSettings(const std::string & cache_base_path); private: - FileCachePtr getImpl(const std::string & cache_base_path, std::lock_guard &); + CacheData * getImpl(const std::string & cache_base_path, std::lock_guard &); std::mutex mutex; - std::unordered_map caches; + CacheByBasePath caches; }; } diff --git a/src/Common/FileCacheSettings.cpp b/src/Common/FileCacheSettings.cpp new file mode 100644 index 00000000000..f555de277b2 --- /dev/null +++ b/src/Common/FileCacheSettings.cpp @@ -0,0 +1,16 @@ +#include "FileCacheSettings.h" + +#include + +namespace DB +{ + +void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix) +{ + max_size = config.getUInt64(config_prefix + ".data_cache_max_size", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_CACHE_SIZE); + max_elements = config.getUInt64(config_prefix + ".data_cache_max_elements", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS); + max_file_segment_size = config.getUInt64(config_prefix + ".max_file_segment_size", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE); + cache_on_write_operations = config.getUInt64(config_prefix + ".cache_on_write_operations", false); +} + +} diff --git a/src/Common/FileCacheSettings.h b/src/Common/FileCacheSettings.h new file mode 100644 index 00000000000..0b34e1e3d82 --- /dev/null +++ b/src/Common/FileCacheSettings.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace Poco { namespace Util { class AbstractConfiguration; } } + +namespace DB +{ + +struct FileCacheSettings +{ + size_t max_size = 0; + size_t max_elements = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS; + size_t max_file_segment_size = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE; + bool cache_on_write_operations = false; + + void loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix); +}; + +} diff --git a/src/Common/FileCache_fwd.h b/src/Common/FileCache_fwd.h index cab1525600b..7448f0c8c89 100644 --- a/src/Common/FileCache_fwd.h +++ b/src/Common/FileCache_fwd.h @@ -4,10 +4,13 @@ namespace DB { +static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_CACHE_SIZE = 1024 * 1024 * 1024; static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE = 100 * 1024 * 1024; static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS = 1024 * 1024; class IFileCache; using FileCachePtr = std::shared_ptr; +struct FileCacheSettings; + } diff --git a/src/Common/FileSegment.cpp b/src/Common/FileSegment.cpp index ac89721683e..5a13ea7d207 100644 --- 
a/src/Common/FileSegment.cpp +++ b/src/Common/FileSegment.cpp @@ -31,10 +31,34 @@ FileSegment::FileSegment( , log(&Poco::Logger::get("FileSegment")) #endif { - if (download_state == State::DOWNLOADED) - reserved_size = downloaded_size = size_; - else if (download_state != State::EMPTY) - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Can create cell with either DOWNLOADED or EMPTY state"); + /// On creation, file segment state can be EMPTY, DOWNLOADED, DOWNLOADING. + switch (download_state) + { + /// EMPTY is used when file segment is not in cache and + /// someone will _potentially_ want to download it (after calling getOrSetDownloader()). + case (State::EMPTY): + { + break; + } + /// DOWNLOADED is used either on initial cache metadata load into memory on server startup + /// or on reduceSizeToDownloaded() -- when file segment object is updated. + case (State::DOWNLOADED): + { + reserved_size = downloaded_size = size_; + break; + } + /// DOWNLOADING is used only for write-through caching (e.g. getOrSetDownloader() is not + /// needed, downloader is set on file segment creation). + case (State::DOWNLOADING): + { + downloader_id = getCallerId(); + break; + } + default: + { + throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Can create cell with either EMPTY, DOWNLOADED, DOWNLOADING state"); + } + } } FileSegment::State FileSegment::state() const @@ -49,6 +73,12 @@ size_t FileSegment::getDownloadOffset() const return range().left + getDownloadedSize(segment_lock); } +size_t FileSegment::getDownloadedSize() const +{ + std::lock_guard segment_lock(mutex); + return getDownloadedSize(segment_lock); +} + size_t FileSegment::getDownloadedSize(std::lock_guard & /* segment_lock */) const { if (download_state == State::DOWNLOADED) @@ -60,24 +90,15 @@ size_t FileSegment::getDownloadedSize(std::lock_guard & /* segment_l String FileSegment::getCallerId() { - return getCallerIdImpl(false); + return getCallerIdImpl(); } -String FileSegment::getCallerIdImpl(bool allow_non_strict_checking) +String FileSegment::getCallerIdImpl() { - if (IFileCache::shouldBypassCache()) - { - /// getCallerId() can be called from completeImpl(), which can be called from complete(). - /// complete() is called from destructor of CachedReadBufferFromRemoteFS when there is no query id anymore. - /// Allow non strict checking in this case. This works correctly as if getCallerIdImpl() is called from destructor, - /// then we know that caller is not a downloader, because downloader is reset each nextImpl() call either - /// manually or via SCOPE_EXIT. - - if (allow_non_strict_checking) - return "None"; - - throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cannot use cache without query id"); - } + if (!CurrentThread::isInitialized() + || !CurrentThread::get().getQueryContext() + || CurrentThread::getQueryId().size == 0) + return "None:" + toString(getThreadId()); return CurrentThread::getQueryId().toString() + ":" + toString(getThreadId()); } @@ -136,7 +157,6 @@ String FileSegment::getDownloader() const bool FileSegment::isDownloader() const { std::lock_guard segment_lock(mutex); - LOG_TEST(log, "Checking for current downloader. 
Caller: {}, downloader: {}, current state: {}", getCallerId(), downloader_id, stateToString(download_state)); return getCallerId() == downloader_id; } @@ -221,15 +241,9 @@ void FileSegment::write(const char * from, size_t size, size_t offset_) { std::lock_guard segment_lock(mutex); - auto info = getInfoForLogImpl(segment_lock); - e.addMessage("while writing into cache, info: " + info); + wrapWithCacheInfo(e, "while writing into cache", segment_lock); - LOG_ERROR(log, "Failed to write to cache. File segment info: {}", info); - - download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION; - - cache_writer->finalize(); - cache_writer.reset(); + setDownloadFailed(segment_lock); cv.notify_all(); @@ -239,6 +253,77 @@ void FileSegment::write(const char * from, size_t size, size_t offset_) assert(getDownloadOffset() == offset_ + size); } +void FileSegment::writeInMemory(const char * from, size_t size) +{ + if (!size) + throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Attempt to write zero size cache file"); + + if (availableSize() < size) + throw Exception( + ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, + "Not enough space is reserved. Available: {}, expected: {}", availableSize(), size); + + std::lock_guard segment_lock(mutex); + + if (cache_writer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache writer already initialized"); + + auto download_path = cache->getPathInLocalCache(key(), offset()); + cache_writer = std::make_unique(download_path, size + 1); + + try + { + cache_writer->write(from, size); + } + catch (Exception & e) + { + wrapWithCacheInfo(e, "while writing into cache", segment_lock); + + setDownloadFailed(segment_lock); + + cv.notify_all(); + + throw; + } +} + +size_t FileSegment::finalizeWrite() +{ + std::lock_guard segment_lock(mutex); + + if (!cache_writer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache writer not initialized"); + + size_t size = cache_writer->offset(); + + if (size == 0) + throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Writing size is not allowed"); + + try + { + cache_writer->next(); + } + catch (Exception & e) + { + wrapWithCacheInfo(e, "while writing into cache", segment_lock); + + setDownloadFailed(segment_lock); + + cv.notify_all(); + + throw; + } + + downloaded_size += size; + + if (downloaded_size != range().size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected downloaded size to equal file segment size ({} == {})", downloaded_size, range().size()); + + setDownloaded(segment_lock); + + return size; +} + FileSegment::State FileSegment::wait() { std::unique_lock segment_lock(mutex); @@ -303,6 +388,20 @@ void FileSegment::setDownloaded(std::lock_guard & /* segment_lock */ { download_state = State::DOWNLOADED; is_downloaded = true; + downloader_id.clear(); + + if (cache_writer) + { + cache_writer->finalize(); + cache_writer.reset(); + remote_file_reader.reset(); + } +} + +void FileSegment::setDownloadFailed(std::lock_guard & /* segment_lock */) +{ + download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION; + downloader_id.clear(); if (cache_writer) { @@ -360,7 +459,7 @@ void FileSegment::complete(State state) } catch (...) { - if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true)) + if (!downloader_id.empty() && downloader_id == getCallerIdImpl()) downloader_id.clear(); cv.notify_all(); @@ -385,7 +484,7 @@ void FileSegment::complete(std::lock_guard & cache_lock) /// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the /// downloader or the only owner of the segment. 
- bool can_update_segment_state = downloader_id == getCallerIdImpl(true) + bool can_update_segment_state = downloader_id == getCallerIdImpl() || cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock); if (can_update_segment_state) @@ -394,11 +493,11 @@ void FileSegment::complete(std::lock_guard & cache_lock) try { - completeImpl(cache_lock, segment_lock, /* allow_non_strict_checking */true); + completeImpl(cache_lock, segment_lock); } catch (...) { - if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true)) + if (!downloader_id.empty() && downloader_id == getCallerIdImpl()) downloader_id.clear(); cv.notify_all(); @@ -408,7 +507,7 @@ void FileSegment::complete(std::lock_guard & cache_lock) cv.notify_all(); } -void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lock_guard & segment_lock, bool allow_non_strict_checking) +void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lock_guard & segment_lock) { bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock); @@ -444,7 +543,7 @@ void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lo { } } - if (!downloader_id.empty() && (downloader_id == getCallerIdImpl(allow_non_strict_checking) || is_last_holder)) + if (!downloader_id.empty() && (downloader_id == getCallerIdImpl() || is_last_holder)) { LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state)); downloader_id.clear(); @@ -471,6 +570,11 @@ String FileSegment::getInfoForLogImpl(std::lock_guard & segment_lock return info.str(); } +void FileSegment::wrapWithCacheInfo(Exception & e, const String & message, std::lock_guard & segment_lock) const +{ + e.addMessage(fmt::format("{}, current cache state: {}", message, getInfoForLogImpl(segment_lock))); +} + String FileSegment::stateToString(FileSegment::State state) { switch (state) @@ -504,6 +608,23 @@ void FileSegment::assertCorrectnessImpl(std::lock_guard & /* segment assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0); } +FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard & /* cache_lock */) +{ + auto snapshot = std::make_shared( + file_segment->offset(), + file_segment->range().size(), + file_segment->key(), + nullptr, + State::EMPTY); + + snapshot->hits_count = file_segment->getHitsCount(); + snapshot->ref_count = file_segment.use_count(); + snapshot->downloaded_size = file_segment->getDownloadedSize(); + snapshot->download_state = file_segment->state(); + + return snapshot; +} + FileSegmentsHolder::~FileSegmentsHolder() { /// In CacheableReadBufferFromRemoteFS file segment's downloader removes file segments from diff --git a/src/Common/FileSegment.h b/src/Common/FileSegment.h index b7501640913..615fd9a56de 100644 --- a/src/Common/FileSegment.h +++ b/src/Common/FileSegment.h @@ -97,6 +97,15 @@ public: void write(const char * from, size_t size, size_t offset_); + /** + * writeInMemory and finalizeWrite are used together to write a single file with delay. + * Both can be called only once, one after another. Used for writing cache via threadpool + * on write operations. TODO: this solution is temporary, until adding a separate cache layer.
+ */ + void writeInMemory(const char * from, size_t size); + + size_t finalizeWrite(); + RemoteFileReaderPtr getRemoteFileReader(); void setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_); @@ -117,14 +126,24 @@ public: size_t getDownloadOffset() const; + size_t getDownloadedSize() const; + void completeBatchAndResetDownloader(); void complete(State state); String getInfoForLog() const; + size_t getHitsCount() const { return hits_count; } + + size_t getRefCount() const { return ref_count; } + + void incrementHitsCount() { ++hits_count; } + void assertCorrectness() const; + static FileSegmentPtr getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard & cache_lock); + private: size_t availableSize() const { return reserved_size - downloaded_size; } @@ -133,6 +152,9 @@ private: void assertCorrectnessImpl(std::lock_guard & segment_lock) const; void setDownloaded(std::lock_guard & segment_lock); + void setDownloadFailed(std::lock_guard & segment_lock); + + void wrapWithCacheInfo(Exception & e, const String & message, std::lock_guard & segment_lock) const; bool lastFileSegmentHolder() const; @@ -144,9 +166,9 @@ private: void completeImpl( std::lock_guard & cache_lock, - std::lock_guard & segment_lock, bool allow_non_strict_checking = false); + std::lock_guard & segment_lock); - static String getCallerIdImpl(bool allow_non_strict_checking = false); + static String getCallerIdImpl(); void resetDownloaderImpl(std::lock_guard & segment_lock); @@ -180,6 +202,8 @@ private: bool detached = false; std::atomic is_downloaded{false}; + std::atomic hits_count = 0; /// cache hits. + std::atomic ref_count = 0; /// Used for getting snapshot state }; struct FileSegmentsHolder : private boost::noncopyable diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index f3920474111..3d7ec08cdaf 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -216,6 +216,11 @@ public: return query_context.lock(); } + auto getGlobalContext() const + { + return global_context.lock(); + } + void disableProfiling() { assert(!query_profiler_real && !query_profiler_cpu); diff --git a/src/Common/tests/gtest_lru_file_cache.cpp b/src/Common/tests/gtest_lru_file_cache.cpp index c08b12857a1..dfcf51ddf2f 100644 --- a/src/Common/tests/gtest_lru_file_cache.cpp +++ b/src/Common/tests/gtest_lru_file_cache.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -102,7 +103,10 @@ TEST(LRUFileCache, get) query_context->setCurrentQueryId("query_id"); DB::CurrentThread::QueryScope query_scope_holder(query_context); - auto cache = DB::LRUFileCache(cache_base_path, 30, 5); + DB::FileCacheSettings settings; + settings.max_size = 30; + settings.max_elements = 5; + auto cache = DB::LRUFileCache(cache_base_path, settings); cache.initialize(); auto key = cache.hash("key1"); @@ -472,7 +476,7 @@ TEST(LRUFileCache, get) { /// Test LRUCache::restore(). 
- auto cache2 = DB::LRUFileCache(cache_base_path, 30, 5); + auto cache2 = DB::LRUFileCache(cache_base_path, settings); cache2.initialize(); ASSERT_EQ(cache2.getStat().downloaded_size, 5); @@ -491,7 +495,9 @@ TEST(LRUFileCache, get) { /// Test max file segment size - auto cache2 = DB::LRUFileCache(caches_dir / "cache2", 30, 5, /* max_file_segment_size */10); + auto settings2 = settings; + settings2.max_file_segment_size = 10; + auto cache2 = DB::LRUFileCache(caches_dir / "cache2", settings2); cache2.initialize(); auto holder1 = cache2.getOrSet(key, 0, 25); /// Get [0, 24] diff --git a/src/Core/Settings.h b/src/Core/Settings.h index cd72f55fe91..5e6519e5f14 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -560,8 +560,10 @@ class IColumn; \ M(UInt64, remote_fs_read_max_backoff_ms, 10000, "Max wait time when trying to read data for remote disk", 0) \ M(UInt64, remote_fs_read_backoff_max_tries, 5, "Max attempts to read with backoff", 0) \ - M(Bool, remote_fs_enable_cache, true, "Use cache for remote filesystem. This setting does not turn on/off cache for disks (must be done via disk config), but allows to bypass cache for some queries if intended", 0) \ - M(UInt64, remote_fs_cache_max_wait_sec, 5, "Allow to wait at most this number of seconds for download of current remote_fs_buffer_size bytes, and skip cache if exceeded", 0) \ + M(Bool, enable_filesystem_cache, true, "Use cache for remote filesystem. This setting does not turn on/off cache for disks (must be done via disk config), but allows to bypass cache for some queries if intended", 0) \ + M(UInt64, filesystem_cache_max_wait_sec, 5, "Allow to wait at most this number of seconds for download of current remote_fs_buffer_size bytes, and skip cache if exceeded", 0) \ + M(Bool, enable_filesystem_cache_on_write_operations, false, "Write into cache on write operations. For this setting to take effect, cache must also be enabled in the disk config", 0) \ + M(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, "", 0) \ \ M(UInt64, http_max_tries, 10, "Max attempts to read via http.", 0) \ M(UInt64, http_retry_initial_backoff_ms, 100, "Min milliseconds for backoff, when retrying read via http", 0) \ diff --git a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp index fb07d8c356b..97e98fb3a3e 100644 --- a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp +++ b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp @@ -90,7 +90,8 @@ std::unique_ptr DiskAzureBlobStorage::readFile( std::unique_ptr DiskAzureBlobStorage::writeFile( const String & path, size_t buf_size, - WriteMode mode) + WriteMode mode, + const WriteSettings &) { auto blob_path = path + "_" + getRandomASCIIString(8); /// NOTE: path contains the tmp_* prefix in the blob name diff --git a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h index 63c3c735812..efc245e7eb3 100644 --- a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h +++ b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h @@ -56,7 +56,8 @@ public: std::unique_ptr writeFile( const String & path, size_t buf_size, - WriteMode mode) override; + WriteMode mode, + const WriteSettings & settings) override; DiskType getType() const override; diff --git a/src/Disks/DiskCacheWrapper.cpp b/src/Disks/DiskCacheWrapper.cpp index 3519b1212a4..cc2c330975a 100644 --- a/src/Disks/DiskCacheWrapper.cpp +++ b/src/Disks/DiskCacheWrapper.cpp @@ -150,7 +150,7 @@ DiskCacheWrapper::readFile( /// Note: enabling `threadpool` read requires a call to setReadUntilEnd(). current_read_settings.remote_fs_method = RemoteFSReadMethod::read; /// Disable data cache. - current_read_settings.remote_fs_enable_cache = false; + current_read_settings.enable_filesystem_cache = false; if (metadata->status == DOWNLOADING) { @@ -167,7 +167,11 @@ DiskCacheWrapper::readFile( auto tmp_path = path + ".tmp"; { auto src_buffer = DiskDecorator::readFile(path, current_read_settings, read_hint, file_size); - auto dst_buffer = cache_disk->writeFile(tmp_path, settings.local_fs_buffer_size, WriteMode::Rewrite); + + WriteSettings write_settings; + write_settings.enable_filesystem_cache_on_write_operations = false; + + auto dst_buffer = cache_disk->writeFile(tmp_path, settings.local_fs_buffer_size, WriteMode::Rewrite, write_settings); copyData(*src_buffer, *dst_buffer); } cache_disk->moveFile(tmp_path, path); @@ -196,10 +200,15 @@ DiskCacheWrapper::readFile( } std::unique_ptr -DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode) +DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) { if (!cache_file_predicate(path)) - return DiskDecorator::writeFile(path, buf_size, mode); + return DiskDecorator::writeFile(path, buf_size, mode, settings); + + WriteSettings current_settings = settings; + /// There are two different cache implementations. Disable the second one if the first is enabled. + /// The first will soon be removed; this disabling is temporary.
+ current_settings.enable_filesystem_cache_on_write_operations = false; LOG_TEST(log, "Write file {} to cache", backQuote(path)); @@ -208,15 +217,15 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode cache_disk->createDirectories(dir_path); return std::make_unique( - cache_disk->writeFile(path, buf_size, mode), + cache_disk->writeFile(path, buf_size, mode, current_settings), [this, path]() { /// Copy file from cache to actual disk when cached buffer is finalized. return cache_disk->readFile(path, ReadSettings(), /* read_hint= */ {}, /* file_size= */ {}); }, - [this, path, buf_size, mode]() + [this, path, buf_size, mode, current_settings]() { - return DiskDecorator::writeFile(path, buf_size, mode); + return DiskDecorator::writeFile(path, buf_size, mode, current_settings); }); } diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h index dc66333758f..e413a3742f3 100644 --- a/src/Disks/DiskCacheWrapper.h +++ b/src/Disks/DiskCacheWrapper.h @@ -40,7 +40,7 @@ public: std::optional read_hint, std::optional file_size) const override; - std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override; + std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &) override; void removeFile(const String & path) override; void removeFileIfExists(const String & path) override; diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp index 37911f16913..14f507af55d 100644 --- a/src/Disks/DiskDecorator.cpp +++ b/src/Disks/DiskDecorator.cpp @@ -121,9 +121,9 @@ DiskDecorator::readFile( } std::unique_ptr -DiskDecorator::writeFile(const String & path, size_t buf_size, WriteMode mode) +DiskDecorator::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) { - return delegate->writeFile(path, buf_size, mode); + return delegate->writeFile(path, buf_size, mode, settings); } void DiskDecorator::removeFile(const String & path) diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h index bace54ff22a..e5c9c7699bf 100644 --- a/src/Disks/DiskDecorator.h +++ b/src/Disks/DiskDecorator.h @@ -44,7 +44,8 @@ public: std::unique_ptr writeFile( const String & path, size_t buf_size, - WriteMode mode) override; + WriteMode mode, + const WriteSettings & settings) override; void removeFile(const String & path) override; void removeFileIfExists(const String & path) override; @@ -71,6 +72,9 @@ public: void shutdown() override; void startup() override; void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map) override; + String getCacheBasePath() const override { return delegate->getCacheBasePath(); } + std::vector getRemotePaths(const String & path) const override { return delegate->getRemotePaths(path); } + void getRemotePathsRecursive(const String & path, std::vector & paths_map) override { return delegate->getRemotePathsRecursive(path, paths_map); } DiskPtr getMetadataDiskIfExistsOrSelf() override { return delegate->getMetadataDiskIfExistsOrSelf(); } diff --git a/src/Disks/DiskEncrypted.cpp b/src/Disks/DiskEncrypted.cpp index 714264b7720..3cee205fafc 100644 --- a/src/Disks/DiskEncrypted.cpp +++ b/src/Disks/DiskEncrypted.cpp @@ -269,7 +269,7 @@ std::unique_ptr DiskEncrypted::readFile( return std::make_unique(settings.local_fs_buffer_size, std::move(buffer), key, header); } -std::unique_ptr DiskEncrypted::writeFile(const String & path, size_t buf_size, WriteMode 
mode) +std::unique_ptr DiskEncrypted::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &) { auto wrapped_path = wrappedPath(path); FileEncryption::Header header; diff --git a/src/Disks/DiskEncrypted.h b/src/Disks/DiskEncrypted.h index d99fe17457d..07a2ad81010 100644 --- a/src/Disks/DiskEncrypted.h +++ b/src/Disks/DiskEncrypted.h @@ -126,7 +126,8 @@ public: std::unique_ptr writeFile( const String & path, size_t buf_size, - WriteMode mode) override; + WriteMode mode, + const WriteSettings & settings) override; void removeFile(const String & path) override { diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index a91db508295..8aad42ab475 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -345,7 +345,7 @@ std::unique_ptr DiskLocal::readFile(const String & path, } std::unique_ptr -DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode) +DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &) { int flags = (mode == WriteMode::Append) ? (O_APPEND | O_CREAT | O_WRONLY) : -1; return std::make_unique(fs::path(disk_path) / path, buf_size, flags); @@ -624,7 +624,7 @@ bool DiskLocal::setup() pcg32_fast rng(randomSeed()); UInt32 magic_number = rng(); { - auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {}); writeIntBinary(magic_number, *buf); } disk_checker_magic_number = magic_number; diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h index 76d5a88a626..59dcf5e5c13 100644 --- a/src/Disks/DiskLocal.h +++ b/src/Disks/DiskLocal.h @@ -79,7 +79,8 @@ public: std::unique_ptr writeFile( const String & path, size_t buf_size, - WriteMode mode) override; + WriteMode mode, + const WriteSettings & settings) override; void removeFile(const String & path) override; void removeFileIfExists(const String & path) override; diff --git a/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp index abaea0846a5..4f0e881e079 100644 --- a/src/Disks/DiskMemory.cpp +++ b/src/Disks/DiskMemory.cpp @@ -326,7 +326,7 @@ std::unique_ptr DiskMemory::readFile(const String & path return std::make_unique(path, iter->second.data); } -std::unique_ptr DiskMemory::writeFile(const String & path, size_t buf_size, WriteMode mode) +std::unique_ptr DiskMemory::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &) { std::lock_guard lock(mutex); diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h index fe108f53c68..726be8bc3b5 100644 --- a/src/Disks/DiskMemory.h +++ b/src/Disks/DiskMemory.h @@ -71,7 +71,8 @@ public: std::unique_ptr writeFile( const String & path, size_t buf_size, - WriteMode mode) override; + WriteMode mode, + const WriteSettings & settings) override; void removeFile(const String & path) override; void removeFileIfExists(const String & path) override; diff --git a/src/Disks/DiskRestartProxy.cpp b/src/Disks/DiskRestartProxy.cpp index 43011a4cf72..8045a0e8c72 100644 --- a/src/Disks/DiskRestartProxy.cpp +++ b/src/Disks/DiskRestartProxy.cpp @@ -214,10 +214,10 @@ std::unique_ptr DiskRestartProxy::readFile( return std::make_unique(*this, std::move(impl)); } -std::unique_ptr DiskRestartProxy::writeFile(const String & path, size_t buf_size, WriteMode mode) +std::unique_ptr DiskRestartProxy::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) { ReadLock lock (mutex); - auto impl = 
DiskDecorator::writeFile(path, buf_size, mode); + auto impl = DiskDecorator::writeFile(path, buf_size, mode, settings); return std::make_unique(*this, std::move(impl)); } @@ -305,6 +305,24 @@ bool DiskRestartProxy::checkUniqueId(const String & id) const return DiskDecorator::checkUniqueId(id); } +String DiskRestartProxy::getCacheBasePath() const +{ + ReadLock lock (mutex); + return DiskDecorator::getCacheBasePath(); +} + +std::vector DiskRestartProxy::getRemotePaths(const String & path) const +{ + ReadLock lock (mutex); + return DiskDecorator::getRemotePaths(path); +} + +void DiskRestartProxy::getRemotePathsRecursive(const String & path, std::vector & paths_map) +{ + ReadLock lock (mutex); + return DiskDecorator::getRemotePathsRecursive(path, paths_map); +} + void DiskRestartProxy::restart() { /// Speed up processing unhealthy requests. diff --git a/src/Disks/DiskRestartProxy.h b/src/Disks/DiskRestartProxy.h index 30f553f4fe0..baa57386e68 100644 --- a/src/Disks/DiskRestartProxy.h +++ b/src/Disks/DiskRestartProxy.h @@ -48,7 +48,7 @@ public: const ReadSettings & settings, std::optional read_hint, std::optional file_size) const override; - std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override; + std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) override; void removeFile(const String & path) override; void removeFileIfExists(const String & path) override; void removeDirectory(const String & path) override; @@ -63,6 +63,9 @@ public: void truncateFile(const String & path, size_t size) override; String getUniqueId(const String & path) const override; bool checkUniqueId(const String & id) const override; + String getCacheBasePath() const override; + std::vector getRemotePaths(const String & path) const override; + void getRemotePathsRecursive(const String & path, std::vector & paths_map) override; void restart(); diff --git a/src/Disks/DiskWebServer.h b/src/Disks/DiskWebServer.h index e2da0b2a1e1..6341b582174 100644 --- a/src/Disks/DiskWebServer.h +++ b/src/Disks/DiskWebServer.h @@ -77,7 +77,6 @@ public: UInt64 getTotalSpace() const final override { return std::numeric_limits::max(); } UInt64 getAvailableSpace() const final override { return std::numeric_limits::max(); } - UInt64 getUnreservedSpace() const final override { return std::numeric_limits::max(); } /// Read-only part @@ -100,7 +99,7 @@ public: /// Write and modification part - std::unique_ptr writeFile(const String &, size_t, WriteMode) override + std::unique_ptr writeFile(const String &, size_t, WriteMode, const WriteSettings &) override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Disk {} is read-only", getName()); } @@ -165,6 +164,10 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Disk {} is read-only", getName()); } + std::vector getRemotePaths(const String &) const override { return {}; } + + void getRemotePathsRecursive(const String &, std::vector &) override {} + /// Create part void createFile(const String &) final override {} diff --git a/src/Disks/HDFS/DiskHDFS.cpp b/src/Disks/HDFS/DiskHDFS.cpp index 7f60b219a4b..f78ecd2669a 100644 --- a/src/Disks/HDFS/DiskHDFS.cpp +++ b/src/Disks/HDFS/DiskHDFS.cpp @@ -88,7 +88,7 @@ std::unique_ptr DiskHDFS::readFile(const String & path, } -std::unique_ptr DiskHDFS::writeFile(const String & path, size_t buf_size, WriteMode mode) +std::unique_ptr DiskHDFS::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &) { /// Path to store new HDFS object. 
auto file_name = getRandomName(); diff --git a/src/Disks/HDFS/DiskHDFS.h b/src/Disks/HDFS/DiskHDFS.h index 23a108507b4..eba58101bc4 100644 --- a/src/Disks/HDFS/DiskHDFS.h +++ b/src/Disks/HDFS/DiskHDFS.h @@ -60,7 +60,7 @@ public: std::optional read_hint, std::optional file_size) const override; - std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override; + std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) override; void removeFromRemoteFS(RemoteFSPathKeeperPtr fs_paths_keeper) override; diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 4fa73b8eba8..81cdf47e1fb 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,11 @@ namespace Poco namespace DB { +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + class IDiskDirectoryIterator; using DiskDirectoryIteratorPtr = std::unique_ptr; @@ -168,7 +174,8 @@ public: virtual std::unique_ptr writeFile( /// NOLINT const String & path, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, - WriteMode mode = WriteMode::Rewrite) = 0; + WriteMode mode = WriteMode::Rewrite, + const WriteSettings & settings = {}) = 0; /// Remove file. Throws exception if file doesn't exists or it's a directory. virtual void removeFile(const String & path) = 0; @@ -197,6 +204,24 @@ public: /// Second bool param is a flag to remove (true) or keep (false) shared data on S3 virtual void removeSharedFileIfExists(const String & path, bool) { removeFileIfExists(path); } + + virtual String getCacheBasePath() const { return ""; } + + /// Returns a list of paths because for Log family engines there might be + /// multiple files in remote fs for single clickhouse file. + virtual std::vector getRemotePaths(const String &) const + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getRemotePaths() not implemented for disk: {}`", getType()); + } + + /// For one local path there might be multiple remote paths in case of Log family engines. 
+ using LocalPathWithRemotePaths = std::pair>; + + virtual void getRemotePathsRecursive(const String &, std::vector &) + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getRemotePathsRecursive() not implemented for disk: {}`", getType()); + } + struct RemoveRequest { String path; diff --git a/src/Disks/IDiskRemote.cpp b/src/Disks/IDiskRemote.cpp index fa4189abc53..fb1c0ddc378 100644 --- a/src/Disks/IDiskRemote.cpp +++ b/src/Disks/IDiskRemote.cpp @@ -136,13 +136,15 @@ void IDiskRemote::Metadata::load() } catch (Exception & e) { + tryLogCurrentException(__PRETTY_FUNCTION__); + if (e.code() == ErrorCodes::UNKNOWN_FORMAT) throw; if (e.code() == ErrorCodes::MEMORY_LIMIT_EXCEEDED) throw; - throw Exception("Failed to read metadata file", e, ErrorCodes::UNKNOWN_FORMAT); + throw Exception("Failed to read metadata file: " + metadata_file_path, e, ErrorCodes::UNKNOWN_FORMAT); } } @@ -341,6 +343,30 @@ void IDiskRemote::removeMetadataRecursive(const String & path, RemoteFSPathKeepe } } +std::vector IDiskRemote::getRemotePaths(const String & local_path) const +{ + auto metadata = readMetadata(local_path); + + std::vector remote_paths; + for (const auto & [remote_path, _] : metadata.remote_fs_objects) + remote_paths.push_back(remote_path); + + return remote_paths; +} + +void IDiskRemote::getRemotePathsRecursive(const String & local_path, std::vector & paths_map) +{ + if (metadata_disk->isFile(local_path)) + { + paths_map.emplace_back(local_path, getRemotePaths(local_path)); + } + else + { + for (auto it = iterateDirectory(local_path); it->isValid(); it->next()) + IDiskRemote::getRemotePathsRecursive(fs::path(local_path) / it->name(), paths_map); + } +} + DiskPtr DiskRemoteReservation::getDisk(size_t i) const { if (i != 0) @@ -348,7 +374,6 @@ DiskPtr DiskRemoteReservation::getDisk(size_t i) const return disk; } - void DiskRemoteReservation::update(UInt64 new_size) { std::lock_guard lock(disk->reservation_mutex); @@ -402,6 +427,12 @@ IDiskRemote::IDiskRemote( } +String IDiskRemote::getCacheBasePath() const +{ + return cache ? cache->getBasePath() : ""; +} + + bool IDiskRemote::exists(const String & path) const { return metadata_disk->exists(path); diff --git a/src/Disks/IDiskRemote.h b/src/Disks/IDiskRemote.h index 82e76b8f68d..a8a299391bf 100644 --- a/src/Disks/IDiskRemote.h +++ b/src/Disks/IDiskRemote.h @@ -66,6 +66,12 @@ public: const String & getPath() const final override { return metadata_disk->getPath(); } + String getCacheBasePath() const final override; + + std::vector getRemotePaths(const String & local_path) const final override; + + void getRemotePathsRecursive(const String & local_path, std::vector & paths_map) override; + /// Methods for working with metadata. For some operations (like hardlink /// creation) metadata can be updated concurrently from multiple threads /// (file actually rewritten on disk). 
So additional RW lock is required for @@ -163,6 +169,7 @@ protected: const String remote_fs_root_path; DiskPtr metadata_disk; + FileCachePtr cache; private: diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp index de671e58687..16c1dd54f9f 100644 --- a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp +++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp @@ -122,10 +122,25 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getReadBufferForFileSegment( { auto range = file_segment->range(); - size_t wait_download_max_tries = settings.remote_fs_cache_max_wait_sec; + size_t wait_download_max_tries = settings.filesystem_cache_max_wait_sec; size_t wait_download_tries = 0; auto download_state = file_segment->state(); + + if (settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache) + { + if (download_state == FileSegment::State::DOWNLOADED) + { + read_type = ReadType::CACHED; + return getCacheReadBuffer(range.left); + } + else + { + read_type = ReadType::REMOTE_FS_READ_BYPASS_CACHE; + return getRemoteFSReadBuffer(file_segment, read_type); + } + } + while (true) { switch (download_state) @@ -375,6 +390,9 @@ bool CachedReadBufferFromRemoteFS::completeFileSegmentAndGetNext() implementation_buffer = getImplementationBuffer(*current_file_segment_it); + if (read_type == ReadType::CACHED) + (*current_file_segment_it)->incrementHitsCount(); + LOG_TEST(log, "New segment: {}", (*current_file_segment_it)->range().toString()); return true; } @@ -559,9 +577,6 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() { last_caller_id = FileSegment::getCallerId(); - if (IFileCache::shouldBypassCache()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Using cache when not allowed"); - if (!initialized) initialize(file_offset_of_buffer_end, getTotalSizeToRead()); @@ -606,6 +621,9 @@ bool CachedReadBufferFromRemoteFS::nextImplStep() else { implementation_buffer = getImplementationBuffer(*current_file_segment_it); + + if (read_type == ReadType::CACHED) + (*current_file_segment_it)->incrementHitsCount(); } assert(!internal_buffer.empty()); diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 0d50b24f7a5..7014b21e8b4 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -38,7 +38,7 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S current_path = path; auto cache = settings.remote_fs_cache; - bool with_cache = cache && settings.remote_fs_enable_cache && !IFileCache::shouldBypassCache(); + bool with_cache = cache && settings.enable_filesystem_cache; auto remote_file_reader_creator = [=, this]() { diff --git a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp index af545d15c0e..b1ae42d03d6 100644 --- a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp +++ b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp @@ -54,14 +54,14 @@ std::future ThreadPoolRemoteFSReader::submit(Reques { ThreadStatus thread_status; - /// Save query context if any, because cache implementation needs it. - if (query_context) - thread_status.attachQueryContext(query_context); - /// To be able to pass ProfileEvents. if (running_group) thread_status.attachQuery(running_group); + /// Save query context if any, because cache implementation needs it. 
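The bypass setting introduced in CachedReadBufferFromRemoteFS above collapses the download state machine into one decision: serve from cache only if the segment is already complete, otherwise go straight to remote FS without waiting. Restated as a compilable sketch (both enums are local stand-ins for FileSegment::State and the buffer's ReadType):

#include <optional>

enum class SegmentState { EMPTY, DOWNLOADING, DOWNLOADED };
enum class ReadType { CACHED, REMOTE_FS_READ_BYPASS_CACHE };

// Mirrors the early return added to getReadBufferForFileSegment();
// std::nullopt means "fall through to the normal wait-for-download loop".
std::optional<ReadType> tryBypass(SegmentState state, bool if_exists_otherwise_bypass_cache)
{
    if (!if_exists_otherwise_bypass_cache)
        return std::nullopt;
    return state == SegmentState::DOWNLOADED ? ReadType::CACHED
                                             : ReadType::REMOTE_FS_READ_BYPASS_CACHE;
}

DiskS3::readFile (later in the diff) turns this mode on whenever the cache is read-only, so such readers never block on downloads they are not allowed to perform.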
+ if (query_context) + thread_status.attachQueryContext(query_context); + setThreadName("VFSRead"); CurrentMetrics::Increment metric_increment{CurrentMetrics::Read}; @@ -83,12 +83,11 @@ std::future ThreadPoolRemoteFSReader::submit(Reques watch.stop(); - if (running_group) - CurrentThread::detachQuery(); - ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds()); ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, result.offset ? result.size - result.offset : result.size); + thread_status.detachQuery(/* if_not_detached */true); + return Result{ .size = result.size, .offset = result.offset }; }); diff --git a/src/Disks/RemoteDisksCommon.cpp b/src/Disks/RemoteDisksCommon.cpp index 36f2aed3e7c..da6ffed5f11 100644 --- a/src/Disks/RemoteDisksCommon.cpp +++ b/src/Disks/RemoteDisksCommon.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB { @@ -64,18 +65,23 @@ FileCachePtr getCachePtrForDisk( if (!fs::exists(cache_base_path)) fs::create_directories(cache_base_path); - LOG_INFO(&Poco::Logger::get("Disk(" + name + ")"), "Disk registered with cache path: {}", cache_base_path); - auto metadata_path = getDiskMetadataPath(name, config, config_prefix, context); if (metadata_path == cache_base_path) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Metadata path and cache base path must be different: {}", metadata_path); - size_t max_cache_size = config.getUInt64(config_prefix + ".data_cache_max_size", 1024*1024*1024); - size_t max_cache_elements = config.getUInt64(config_prefix + ".data_cache_max_elements", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS); - size_t max_file_segment_size = config.getUInt64(config_prefix + ".max_file_segment_size", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE); + FileCacheSettings file_cache_settings; + file_cache_settings.loadFromConfig(config, config_prefix); - auto cache = FileCacheFactory::instance().getOrCreate(cache_base_path, max_cache_size, max_cache_elements, max_file_segment_size); + auto cache = FileCacheFactory::instance().getOrCreate(cache_base_path, file_cache_settings); cache->initialize(); + + auto * log = &Poco::Logger::get("Disk(" + name + ")"); + LOG_INFO(log, "Disk registered with cache path: {}. Cache size: {}, max cache elements size: {}, max_file_segment_size: {}", + cache_base_path, + file_cache_settings.max_size ? toString(file_cache_settings.max_size) : "UNLIMITED", + file_cache_settings.max_elements ? 
toString(file_cache_settings.max_elements) : "UNLIMITED", + file_cache_settings.max_file_segment_size); + return cache; } diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index e05ccef74c0..d879953bd9e 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -227,7 +229,12 @@ std::unique_ptr DiskS3::readFile(const String & path, co ReadSettings disk_read_settings{read_settings}; if (cache) + { + if (IFileCache::isReadOnly()) + disk_read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true; + disk_read_settings.remote_fs_cache = cache; + } auto s3_impl = std::make_unique( path, settings->client, bucket, metadata, @@ -245,7 +252,7 @@ std::unique_ptr DiskS3::readFile(const String & path, co } } -std::unique_ptr DiskS3::writeFile(const String & path, size_t buf_size, WriteMode mode) +std::unique_ptr DiskS3::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & write_settings) { auto settings = current_settings.get(); @@ -265,6 +272,11 @@ std::unique_ptr DiskS3::writeFile(const String & path, LOG_TRACE(log, "{} to file by path: {}. S3 path: {}", mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + blob_name); + bool cache_on_write = cache + && fs::path(path).extension() != ".tmp" + && write_settings.enable_filesystem_cache_on_write_operations + && FileCacheFactory::instance().getSettings(getCacheBasePath()).cache_on_write_operations; + auto s3_buffer = std::make_unique( settings->client, bucket, @@ -274,7 +286,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, settings->s3_upload_part_size_multiply_parts_count_threshold, settings->s3_max_single_part_upload_size, std::move(object_metadata), - buf_size, threadPoolCallbackRunner(getThreadPoolWriter())); + buf_size, threadPoolCallbackRunner(getThreadPoolWriter()), blob_name, cache_on_write ? cache : nullptr); auto create_metadata_callback = [this, path, blob_name, mode] (size_t count) { diff --git a/src/Disks/S3/DiskS3.h b/src/Disks/S3/DiskS3.h index 7e39c9d9b3c..32eb9ee7aef 100644 --- a/src/Disks/S3/DiskS3.h +++ b/src/Disks/S3/DiskS3.h @@ -88,7 +88,8 @@ public: std::unique_ptr writeFile( const String & path, size_t buf_size, - WriteMode mode) override; + WriteMode mode, + const WriteSettings & settings) override; void removeFromRemoteFS(RemoteFSPathKeeperPtr keeper) override; diff --git a/src/Disks/tests/gtest_disk_encrypted.cpp b/src/Disks/tests/gtest_disk_encrypted.cpp index fd3cc1acbe5..da041437951 100644 --- a/src/Disks/tests/gtest_disk_encrypted.cpp +++ b/src/Disks/tests/gtest_disk_encrypted.cpp @@ -96,7 +96,7 @@ TEST_F(DiskEncryptedTest, WriteAndRead) /// Write a file. { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {}); writeString(std::string_view{"Some text"}, *buf); } @@ -122,7 +122,7 @@ TEST_F(DiskEncryptedTest, Append) /// Write a file (we use the append mode). { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append, {}); writeString(std::string_view{"Some text"}, *buf); } @@ -132,7 +132,7 @@ TEST_F(DiskEncryptedTest, Append) /// Append the file. 
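Just above, DiskS3::writeFile decides whether a write should be mirrored into the local cache. The four-way guard, extracted into a standalone predicate for readability (parameter names are descriptive substitutes for the member and settings reads in the diff):

#include <filesystem>
#include <string>

// Restates the cache_on_write predicate from DiskS3::writeFile: all four
// conditions must hold before WriteBufferFromS3 is handed a cache pointer.
bool shouldCacheOnWrite(bool disk_has_cache,
                        const std::string & path,
                        bool query_enables_it,     // enable_filesystem_cache_on_write_operations
                        bool cache_enables_it)     // cache_on_write_operations in the disk cache config
{
    return disk_has_cache
        && std::filesystem::path(path).extension() != ".tmp"  // never cache temporary files
        && query_enables_it
        && cache_enables_it;
}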
{ - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append, {}); writeString(std::string_view{" Another text"}, *buf); } @@ -148,7 +148,7 @@ TEST_F(DiskEncryptedTest, Truncate) /// Write a file (we use the append mode). { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append, {}); writeString(std::string_view{"Some text"}, *buf); } @@ -178,7 +178,7 @@ TEST_F(DiskEncryptedTest, ZeroFileSize) /// Write nothing to a file. { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {}); } EXPECT_EQ(encrypted_disk->getFileSize("a.txt"), 0); @@ -187,7 +187,7 @@ TEST_F(DiskEncryptedTest, ZeroFileSize) /// Append the file with nothing. { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append, {}); } EXPECT_EQ(encrypted_disk->getFileSize("a.txt"), 0); @@ -211,7 +211,7 @@ TEST_F(DiskEncryptedTest, AnotherFolder) /// Write a file. { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {}); writeString(std::string_view{"Some text"}, *buf); } @@ -231,11 +231,11 @@ TEST_F(DiskEncryptedTest, RandomIV) /// Write two files with the same contents. { - auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + auto buf = encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {}); writeString(std::string_view{"Some text"}, *buf); } { - auto buf = encrypted_disk->writeFile("b.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + auto buf = encrypted_disk->writeFile("b.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {}); writeString(std::string_view{"Some text"}, *buf); } @@ -277,7 +277,7 @@ TEST_F(DiskEncryptedTest, RemoveFileDuringWriting) std::thread t1{[&] { for (size_t i = 0; i != n; ++i) - encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append); + encrypted_disk->writeFile("a.txt", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append, {}); }}; std::thread t2{[&] diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index e321eecf104..92346615a7a 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -77,8 +77,9 @@ struct ReadSettings size_t remote_fs_read_max_backoff_ms = 10000; size_t remote_fs_read_backoff_max_tries = 4; - bool remote_fs_enable_cache = true; - size_t remote_fs_cache_max_wait_sec = 1; + bool enable_filesystem_cache = true; + size_t filesystem_cache_max_wait_sec = 1; + bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false; size_t remote_read_min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE; diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index eda7bb6f8ae..c85f3989531 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -2,25 +2,28 @@ #if USE_AWS_S3 -# include -# include +#include +#include -# include -# include -# include -# include -# include -# include +#include +#include +#include -# include +#include +#include +#include +#include +#include + +#include namespace ProfileEvents 
{ extern const Event S3WriteBytes; + extern const Event RemoteFSCacheDownloadBytes; } - namespace DB { // S3 protocol does not allow to have multipart upload with more than 10000 parts. @@ -32,6 +35,7 @@ const int S3_WARN_MAX_PARTS = 10000; namespace ErrorCodes { extern const int S3_ERROR; + extern const int LOGICAL_ERROR; } struct WriteBufferFromS3::UploadPartTask @@ -40,6 +44,7 @@ struct WriteBufferFromS3::UploadPartTask bool is_finised = false; std::string tag; std::exception_ptr exception; + std::optional cache_files; }; struct WriteBufferFromS3::PutObjectTask @@ -47,6 +52,7 @@ struct WriteBufferFromS3::PutObjectTask Aws::S3::Model::PutObjectRequest req; bool is_finised = false; std::exception_ptr exception; + std::optional cache_files; }; WriteBufferFromS3::WriteBufferFromS3( @@ -59,7 +65,9 @@ WriteBufferFromS3::WriteBufferFromS3( size_t max_single_part_upload_size_, std::optional> object_metadata_, size_t buffer_size_, - ScheduleFunc schedule_) + ScheduleFunc schedule_, + const String & blob_name_, + FileCachePtr cache_) : BufferWithOwnMemory(buffer_size_, nullptr, 0) , bucket(bucket_) , key(key_) @@ -70,6 +78,8 @@ WriteBufferFromS3::WriteBufferFromS3( , upload_part_size_multiply_threshold(upload_part_size_multiply_threshold_) , max_single_part_upload_size(max_single_part_upload_size_) , schedule(std::move(schedule_)) + , blob_name(blob_name_) + , cache(cache_) { allocateBuffer(); } @@ -83,7 +93,41 @@ void WriteBufferFromS3::nextImpl() if (temporary_buffer->tellp() == -1) allocateBuffer(); - temporary_buffer->write(working_buffer.begin(), offset()); + size_t size = offset(); + temporary_buffer->write(working_buffer.begin(), size); + + ThreadGroupStatusPtr running_group = CurrentThread::isInitialized() && CurrentThread::get().getThreadGroup() + ? 
CurrentThread::get().getThreadGroup() + : MainThreadStatus::getInstance().getThreadGroup(); + + if (cacheEnabled()) + { + if (blob_name.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty blob name"); + + auto cache_key = cache->hash(blob_name); + file_segments_holder.emplace(cache->setDownloading(cache_key, current_download_offset, size)); + current_download_offset += size; + + size_t remaining_size = size; + auto & file_segments = file_segments_holder->file_segments; + for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end(); ++file_segment_it) + { + auto & file_segment = *file_segment_it; + size_t current_size = std::min(file_segment->range().size(), remaining_size); + remaining_size -= current_size; + + if (file_segment->reserve(current_size)) + { + file_segment->writeInMemory(working_buffer.begin(), current_size); + } + else + { + file_segments.erase(file_segment_it, file_segments.end()); + break; + } + } + } ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset()); @@ -95,7 +139,6 @@ void WriteBufferFromS3::nextImpl() if (!multipart_upload_id.empty() && last_part_size > upload_part_size) { - writePart(); allocateBuffer(); @@ -126,6 +169,11 @@ WriteBufferFromS3::~WriteBufferFromS3() } } +bool WriteBufferFromS3::cacheEnabled() const +{ + return cache != nullptr; +} + void WriteBufferFromS3::preFinalize() { next(); @@ -213,6 +261,13 @@ void WriteBufferFromS3::writePart() } fillUploadRequest(task->req, part_number); + + if (file_segments_holder) + { + task->cache_files.emplace(std::move(*file_segments_holder)); + file_segments_holder.reset(); + } + schedule([this, task]() { try @@ -224,6 +279,15 @@ void WriteBufferFromS3::writePart() task->exception = std::current_exception(); } + try + { + finalizeCacheIfNeeded(task->cache_files); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + { std::lock_guard lock(bg_tasks_mutex); task->is_finised = true; @@ -240,8 +304,14 @@ void WriteBufferFromS3::writePart() { UploadPartTask task; fillUploadRequest(task.req, part_tags.size() + 1); + if (file_segments_holder) + { + task.cache_files.emplace(std::move(*file_segments_holder)); + file_segments_holder.reset(); + } processUploadRequest(task); part_tags.push_back(task.tag); + finalizeCacheIfNeeded(task.cache_files); } } @@ -328,7 +398,14 @@ void WriteBufferFromS3::makeSinglepartUpload() if (schedule) { put_object_task = std::make_unique(); + fillPutRequest(put_object_task->req); + if (file_segments_holder) + { + put_object_task->cache_files.emplace(std::move(*file_segments_holder)); + file_segments_holder.reset(); + } + schedule([this]() { try @@ -340,6 +417,15 @@ void WriteBufferFromS3::makeSinglepartUpload() put_object_task->exception = std::current_exception(); } + try + { + finalizeCacheIfNeeded(put_object_task->cache_files); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + { std::lock_guard lock(bg_tasks_mutex); put_object_task->is_finised = true; @@ -349,14 +435,19 @@ void WriteBufferFromS3::makeSinglepartUpload() /// Releasing lock and condvar notification. 
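Stepping back to the nextImpl() hunk above: the new segment loop is the core of cache-on-write. The upload path is untouched (the same bytes always reach temporary_buffer); in parallel, the flushed data is sliced across pre-allocated file segments, and a failed reservation merely truncates what gets cached. A toy model of that control flow, with Segment as a stand-in for FileSegment:

#include <algorithm>
#include <cstddef>
#include <vector>

struct Segment
{
    size_t capacity;
    size_t written = 0;
    bool reserve(size_t n) const { return written + n <= capacity; }
};

size_t writeThroughCache(std::vector<Segment> & segments, size_t size)
{
    size_t remaining = size;
    for (auto it = segments.begin(); it != segments.end(); ++it)
    {
        size_t chunk = std::min(it->capacity, remaining);
        if (!it->reserve(chunk))
        {
            segments.erase(it, segments.end());  // keep only the cached prefix
            break;
        }
        it->written += chunk;                    // stands in for FileSegment::writeInMemory()
        remaining -= chunk;
        if (remaining == 0)
            break;
    }
    return size - remaining;                     // bytes that actually landed in cache
}

finalizeCacheIfNeeded(), added further down, later settles whatever segments survived once the part or object upload completes.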
bg_tasks_condvar.notify_one(); } - }); } else { PutObjectTask task; fillPutRequest(task.req); + if (file_segments_holder) + { + task.cache_files.emplace(std::move(*file_segments_holder)); + file_segments_holder.reset(); + } processPutRequest(task); + finalizeCacheIfNeeded(task.cache_files); } } @@ -384,6 +475,28 @@ void WriteBufferFromS3::processPutRequest(PutObjectTask & task) throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR); } +void WriteBufferFromS3::finalizeCacheIfNeeded(std::optional & file_segments_holder) +{ + if (!file_segments_holder) + return; + + auto & file_segments = file_segments_holder->file_segments; + for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();) + { + try + { + size_t size = (*file_segment_it)->finalizeWrite(); + file_segment_it = file_segments.erase(file_segment_it); + + ProfileEvents::increment(ProfileEvents::RemoteFSCacheDownloadBytes, size); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } +} + void WriteBufferFromS3::waitForReadyBackGroundTasks() { if (schedule) diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index a4fbcbcdeeb..8e91bbc04da 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -4,16 +4,20 @@ #if USE_AWS_S3 -# include -# include -# include -# include -# include +#include +#include +#include +#include +#include -# include -# include +#include +#include +#include -# include +#include +#include + +#include namespace Aws::S3 { @@ -30,6 +34,7 @@ namespace DB { using ScheduleFunc = std::function)>; +class WriteBufferFromFile; /** * Buffer to write a data to a S3 object with specified bucket and key. @@ -51,7 +56,9 @@ public: size_t max_single_part_upload_size_, std::optional> object_metadata_ = std::nullopt, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, - ScheduleFunc schedule_ = {}); + ScheduleFunc schedule_ = {}, + const String & blob_name = "", + FileCachePtr cache_ = nullptr); ~WriteBufferFromS3() override; @@ -82,6 +89,8 @@ private: void waitForReadyBackGroundTasks(); void waitForAllBackGroundTasks(); + bool cacheEnabled() const; + String bucket; String key; std::optional> object_metadata; @@ -113,6 +122,12 @@ private: std::condition_variable bg_tasks_condvar; Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); + + const String blob_name; + FileCachePtr cache; + size_t current_download_offset = 0; + std::optional file_segments_holder; + static void finalizeCacheIfNeeded(std::optional &); }; } diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h new file mode 100644 index 00000000000..3464bb31664 --- /dev/null +++ b/src/IO/WriteSettings.h @@ -0,0 +1,12 @@ +#pragma once + +namespace DB +{ + +/// Settings to be passed to IDisk::writeFile() +struct WriteSettings +{ + bool enable_filesystem_cache_on_write_operations = false; +}; + +} diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 027b9e20881..40ed4d58993 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include @@ -3311,8 +3312,9 @@ ReadSettings Context::getReadSettings() const res.remote_fs_read_max_backoff_ms = settings.remote_fs_read_max_backoff_ms; res.remote_fs_read_backoff_max_tries = settings.remote_fs_read_backoff_max_tries; - res.remote_fs_enable_cache = settings.remote_fs_enable_cache; - res.remote_fs_cache_max_wait_sec = settings.remote_fs_cache_max_wait_sec; + res.enable_filesystem_cache = 
settings.enable_filesystem_cache; + res.filesystem_cache_max_wait_sec = settings.filesystem_cache_max_wait_sec; + res.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache; res.remote_read_min_bytes_for_seek = settings.remote_read_min_bytes_for_seek; @@ -3331,4 +3333,13 @@ ReadSettings Context::getReadSettings() const return res; } +WriteSettings Context::getWriteSettings() const +{ + WriteSettings res; + + res.enable_filesystem_cache_on_write_operations = settings.enable_filesystem_cache_on_write_operations; + + return res; +} + } diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index c258768d549..b53e3945188 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -123,6 +123,7 @@ struct PartUUIDs; using PartUUIDsPtr = std::shared_ptr; class KeeperDispatcher; class Session; +struct WriteSettings; class IInputFormat; class IOutputFormat; @@ -934,6 +935,9 @@ public: /** Get settings for reading from filesystem. */ ReadSettings getReadSettings() const; + /** Get settings for writing to filesystem. */ + WriteSettings getWriteSettings() const; + private: std::unique_lock getLock() const; diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 8c5b5b2bd07..30f0f892ca4 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include @@ -298,6 +300,21 @@ BlockIO InterpreterSystemQuery::execute() cache->reset(); break; #endif + case Type::DROP_FILESYSTEM_CACHE: + { + if (query.filesystem_cache_path.empty()) + { + auto caches = FileCacheFactory::instance().getAll(); + for (const auto & [_, cache_data] : caches) + cache_data.cache->tryRemoveAll(); + } + else + { + auto cache = FileCacheFactory::instance().get(query.filesystem_cache_path); + cache->tryRemoveAll(); + } + break; + } case Type::RELOAD_DICTIONARY: { getContext()->checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); @@ -762,6 +779,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() case Type::DROP_UNCOMPRESSED_CACHE: case Type::DROP_INDEX_MARK_CACHE: case Type::DROP_INDEX_UNCOMPRESSED_CACHE: + case Type::DROP_FILESYSTEM_CACHE: { required_access.emplace_back(AccessType::SYSTEM_DROP_CACHE); break; diff --git a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp index d6a00ba89b4..c43302e0de9 100644 --- a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp +++ b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp @@ -182,7 +182,6 @@ struct RewriteTablesVisitorData } }; -template bool needRewrite(ASTSelectQuery & select, std::vector & table_expressions) { if (!select.tables()) @@ -233,8 +232,6 @@ bool needRewrite(ASTSelectQuery & select, std::vectorchildren.empty()) + data.expression_list->children.emplace_back(std::make_shared()); select.setExpression(ASTSelectQuery::Expression::SELECT, std::move(data.expression_list)); + } data.done = true; } }; @@ -605,7 +606,7 @@ void JoinToSubqueryTransformMatcher::visit(ASTPtr & ast, Data & data) void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast, Data & data) { std::vector table_expressions; - if (!needRewrite<2>(select, table_expressions)) + if (!needRewrite(select, table_expressions)) return; auto & src_tables = select.tables()->children; diff --git a/src/Interpreters/ThreadStatusExt.cpp 
b/src/Interpreters/ThreadStatusExt.cpp index 2ea371d3d03..8fbbdb44c99 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -597,6 +597,16 @@ CurrentThread::QueryScope::QueryScope(ContextMutablePtr query_context) query_context->makeQueryContext(); } +CurrentThread::QueryScope::QueryScope(ContextPtr query_context) +{ + if (!query_context->hasQueryContext()) + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Cannot initialize query scope without query context"); + + CurrentThread::initializeQuery(); + CurrentThread::attachQueryContext(query_context); +} + void CurrentThread::QueryScope::logPeakMemoryUsage() { auto group = CurrentThread::getGroup(); diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 22488e35e12..600525f9abe 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -28,6 +28,7 @@ public: #if USE_EMBEDDED_COMPILER DROP_COMPILED_EXPRESSION_CACHE, #endif + DROP_FILESYSTEM_CACHE, STOP_LISTEN_QUERIES, START_LISTEN_QUERIES, RESTART_REPLICAS, @@ -88,6 +89,7 @@ public: String volume; String disk; UInt64 seconds{}; + String filesystem_cache_path; String getID(char) const override { return "SYSTEM query"; } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index e64abb3c982..e3146f169a7 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -230,7 +230,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() case MergeAlgorithm::Vertical : { ctx->rows_sources_file = createTemporaryFile(ctx->tmp_disk->getPath()); - ctx->rows_sources_uncompressed_write_buf = ctx->tmp_disk->writeFile(fileName(ctx->rows_sources_file->path())); + ctx->rows_sources_uncompressed_write_buf = ctx->tmp_disk->writeFile(fileName(ctx->rows_sources_file->path()), DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, global_ctx->context->getWriteSettings()); ctx->rows_sources_write_buf = std::make_unique(*ctx->rows_sources_uncompressed_write_buf); MergeTreeDataPartInMemory::ColumnToSize local_merged_column_to_size; @@ -262,7 +262,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() ctx->compression_codec, global_ctx->txn, /*reset_columns=*/ true, - ctx->blocks_are_granules_size); + ctx->blocks_are_granules_size, + global_ctx->context->getWriteSettings()); global_ctx->rows_written = 0; ctx->initial_reservation = global_ctx->space_reservation ? 
global_ctx->space_reservation->getSize() : 0; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 6f3768bb01d..2174f92edac 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -305,7 +305,7 @@ MergeTreeData::MergeTreeData( format_version = min_format_version; if (!version_file.second->isReadOnly()) { - auto buf = version_file.second->writeFile(version_file.first); + auto buf = version_file.second->writeFile(version_file.first, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, context_->getWriteSettings()); writeIntText(format_version.toUnderType(), *buf); if (getContext()->getSettingsRef().fsync_metadata) buf->sync(); @@ -3980,9 +3980,9 @@ private: for (const String & filename : filenames) { - auto backup_entry = backup->readFile(data_path_in_backup + part_name + "/" + filename); + auto backup_entry = backup->readFile(fs::path(data_path_in_backup) / part_name / filename); auto read_buffer = backup_entry->getReadBuffer(); - auto write_buffer = disk->writeFile(temp_part_dir + "/" + filename); + auto write_buffer = disk->writeFile(fs::path(temp_part_dir) / filename); copyData(*read_buffer, *write_buffer); reservation->update(reservation->getSize() - backup_entry->getSize()); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index d7b8f2c4165..6caff7c683f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -24,12 +24,14 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( , plain_file(data_part->volume->getDisk()->writeFile( part_path + MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION, settings.max_compress_block_size, - WriteMode::Rewrite)) + WriteMode::Rewrite, + settings_.query_write_settings)) , plain_hashing(*plain_file) , marks_file(data_part->volume->getDisk()->writeFile( part_path + MergeTreeDataPartCompact::DATA_FILE_NAME + marks_file_extension_, 4096, - WriteMode::Rewrite)) + WriteMode::Rewrite, + settings_.query_write_settings)) , marks(*marks_file) { const auto & storage_columns = metadata_snapshot->getColumns(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 8dca93f574f..6cba4db19e3 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -47,15 +47,16 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( const std::string & marks_path_, const std::string & marks_file_extension_, const CompressionCodecPtr & compression_codec_, - size_t max_compress_block_size_) : + size_t max_compress_block_size_, + const WriteSettings & query_write_settings) : escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, marks_file_extension{marks_file_extension_}, - plain_file(disk_->writeFile(data_path_ + data_file_extension, max_compress_block_size_, WriteMode::Rewrite)), + plain_file(disk_->writeFile(data_path_ + data_file_extension, max_compress_block_size_, WriteMode::Rewrite, query_write_settings)), plain_hashing(*plain_file), compressed_buf(plain_hashing, compression_codec_, max_compress_block_size_), compressed(compressed_buf), - marks_file(disk_->writeFile(marks_path_ + marks_file_extension, 4096, WriteMode::Rewrite)), marks(*marks_file) + marks_file(disk_->writeFile(marks_path_ + marks_file_extension, 4096, 
WriteMode::Rewrite, query_write_settings)), marks(*marks_file) { } @@ -156,7 +157,7 @@ void MergeTreeDataPartWriterOnDisk::initPrimaryIndex() { if (metadata_snapshot->hasPrimaryKey()) { - index_file_stream = data_part->volume->getDisk()->writeFile(part_path + "primary.idx", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite); + index_file_stream = data_part->volume->getDisk()->writeFile(part_path + "primary.idx", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, settings.query_write_settings); index_stream = std::make_unique(*index_file_stream); } } @@ -172,7 +173,7 @@ void MergeTreeDataPartWriterOnDisk::initSkipIndices() data_part->volume->getDisk(), part_path + stream_name, index_helper->getSerializedFileExtension(), part_path + stream_name, marks_file_extension, - default_codec, settings.max_compress_block_size)); + default_codec, settings.max_compress_block_size, settings.query_write_settings)); skip_indices_aggregators.push_back(index_helper->createIndexAggregator()); skip_index_accumulated_marks.push_back(0); } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 5af8cbc1650..67b51df7d56 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -55,7 +55,8 @@ public: const std::string & marks_path_, const std::string & marks_file_extension_, const CompressionCodecPtr & compression_codec_, - size_t max_compress_block_size_); + size_t max_compress_block_size_, + const WriteSettings & query_write_settings); String escaped_column_name; std::string data_file_extension; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index a3eec3e54bc..933814d27ba 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -115,7 +115,8 @@ void MergeTreeDataPartWriterWide::addStreams( part_path + stream_name, DATA_FILE_EXTENSION, part_path + stream_name, marks_file_extension, compression_codec, - settings.max_compress_block_size); + settings.max_compress_block_size, + settings.query_write_settings); }; ISerialization::SubstreamPath path; diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index b260000030e..e39ae7a4037 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -434,7 +434,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( const auto & index_factory = MergeTreeIndexFactory::instance(); auto out = std::make_unique(new_data_part, metadata_snapshot, columns, - index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, context->getCurrentTransaction()); + index_factory.getMany(metadata_snapshot->getSecondaryIndices()), compression_codec, + context->getCurrentTransaction(), false, false, context->getWriteSettings()); out->writeWithPermutation(block, perm_ptr); @@ -450,7 +451,11 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPart( temp_part.streams.emplace_back(std::move(stream)); } } - auto finalizer = out->finalizePartAsync(new_data_part, data_settings->fsync_after_insert); + auto finalizer = out->finalizePartAsync( + new_data_part, + data_settings->fsync_after_insert, + nullptr, nullptr, + context->getWriteSettings()); temp_part.part = new_data_part; temp_part.streams.emplace_back(TemporaryPart::Stream{.stream = 
std::move(out), .finalizer = std::move(finalizer)}); diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index 4fe424c98ff..85cf3e9eda6 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB @@ -30,6 +31,7 @@ struct MergeTreeWriterSettings MergeTreeWriterSettings( const Settings & global_settings, + const WriteSettings & query_write_settings_, const MergeTreeSettingsPtr & storage_settings, bool can_use_adaptive_granularity_, bool rewrite_primary_key_, @@ -42,6 +44,7 @@ struct MergeTreeWriterSettings , can_use_adaptive_granularity(can_use_adaptive_granularity_) , rewrite_primary_key(rewrite_primary_key_) , blocks_are_granules_size(blocks_are_granules_size_) + , query_write_settings(query_write_settings_) { } @@ -50,6 +53,7 @@ struct MergeTreeWriterSettings bool can_use_adaptive_granularity; bool rewrite_primary_key; bool blocks_are_granules_size; + WriteSettings query_write_settings; }; } diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp index 0d557ed36c2..bfa9129bd53 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.cpp +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.cpp @@ -45,7 +45,7 @@ UInt64 MergeTreeMutationEntry::parseFileName(const String & file_name_) } MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk_, const String & path_prefix_, UInt64 tmp_number, - const TransactionID & tid_) + const TransactionID & tid_, const WriteSettings & settings) : create_time(time(nullptr)) , commands(std::move(commands_)) , disk(std::move(disk_)) @@ -56,7 +56,7 @@ MergeTreeMutationEntry::MergeTreeMutationEntry(MutationCommands commands_, DiskP { try { - auto out = disk->writeFile(path_prefix + file_name); + auto out = disk->writeFile(std::filesystem::path(path_prefix) / file_name, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, settings); *out << "format version: 1\n" << "create time: " << LocalDateTime(create_time) << "\n"; *out << "commands: "; diff --git a/src/Storages/MergeTree/MergeTreeMutationEntry.h b/src/Storages/MergeTree/MergeTreeMutationEntry.h index 466ab0e3223..3d4c4d0c4a1 100644 --- a/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -37,8 +37,7 @@ struct MergeTreeMutationEntry /// Create a new entry and write it to a temporary file. 
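For orientation, the mutation entry that MergeTreeMutationEntry's constructor (shown earlier in this hunk) persists is a small plain-text record. A rough stand-alone sketch of the file's shape; the real code goes through disk->writeFile() with the caller's WriteSettings and formats the timestamp via LocalDateTime:

#include <ctime>
#include <fstream>
#include <string>

void writeMutationEntry(const std::string & path, const std::string & commands)
{
    std::ofstream out(path);
    out << "format version: 1\n";
    out << "create time: " << std::time(nullptr) << '\n';   // LocalDateTime in the real code
    out << "commands: " << commands << '\n';
}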
MergeTreeMutationEntry(MutationCommands commands_, DiskPtr disk, const String & path_prefix_, UInt64 tmp_number, - const TransactionID & tid_); - + const TransactionID & tid_, const WriteSettings & settings); MergeTreeMutationEntry(const MergeTreeMutationEntry &) = delete; MergeTreeMutationEntry(MergeTreeMutationEntry &&) = default; diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index 4955a63387c..ddbb536d7da 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -384,16 +384,18 @@ void MergeTreePartition::load(const MergeTreeData & storage, const PartMetadataM std::unique_ptr MergeTreePartition::store(const MergeTreeData & storage, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums) const { auto metadata_snapshot = storage.getInMemoryMetadataPtr(); - const auto & partition_key_sample = adjustPartitionKey(metadata_snapshot, storage.getContext()).sample_block; - return store(partition_key_sample, disk, part_path, checksums); + const auto & context = storage.getContext(); + const auto & partition_key_sample = adjustPartitionKey(metadata_snapshot, context).sample_block; + return store(partition_key_sample, disk, part_path, checksums, context->getWriteSettings()); } -std::unique_ptr MergeTreePartition::store(const Block & partition_key_sample, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums) const +std::unique_ptr MergeTreePartition::store( + const Block & partition_key_sample, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const { if (!partition_key_sample) return nullptr; - auto out = disk->writeFile(part_path + "partition.dat"); + auto out = disk->writeFile(std::filesystem::path(part_path) / "partition.dat", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, settings); HashingWriteBuffer out_hashing(*out); for (size_t i = 0; i < value.size(); ++i) { diff --git a/src/Storages/MergeTree/MergeTreePartition.h b/src/Storages/MergeTree/MergeTreePartition.h index e7328799e4c..1afb833498c 100644 --- a/src/Storages/MergeTree/MergeTreePartition.h +++ b/src/Storages/MergeTree/MergeTreePartition.h @@ -43,7 +43,7 @@ public: /// Store functions return write buffer with written but not finalized data. /// User must call finish() for returned object. 
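The comment above spells out a deliberate two-phase contract: store() hands back an open buffer so the caller can batch finalization (and fsyncs) across all files of a part. A minimal sketch of the same contract, with std::ofstream standing in for the disk write buffer and close() playing the role of finish():

#include <fstream>
#include <memory>
#include <string>

// Same shape as MergeTreePartition::store(): return a written-but-not-finalized
// buffer and let the caller decide when to finish it.
std::unique_ptr<std::ofstream> store(const std::string & part_path)
{
    auto out = std::make_unique<std::ofstream>(part_path + "/partition.dat");
    *out << "partition-value";   // buffered, not yet durable
    return out;
}

int main()
{
    auto buf = store(".");
    // ... the caller can open other part files first, then finalize everything:
    buf->close();                // stands in for finish()
}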
[[nodiscard]] std::unique_ptr store(const MergeTreeData & storage, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums) const; - [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums) const; + [[nodiscard]] std::unique_ptr store(const Block & partition_key_sample, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums, const WriteSettings & settings) const; void assign(const MergeTreePartition & other) { value = other.value; } diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 7066d7bb9cd..6acbfacd4c1 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -21,13 +21,15 @@ MergedBlockOutputStream::MergedBlockOutputStream( CompressionCodecPtr default_codec_, const MergeTreeTransactionPtr & txn, bool reset_columns_, - bool blocks_are_granules_size) + bool blocks_are_granules_size, + const WriteSettings & write_settings) : IMergedBlockOutputStream(data_part, metadata_snapshot_, columns_list_, reset_columns_) , columns_list(columns_list_) , default_codec(default_codec_) { MergeTreeWriterSettings writer_settings( storage.getContext()->getSettings(), + write_settings, storage.getSettings(), data_part->index_granularity_info.is_adaptive, /* rewrite_primary_key = */ true, @@ -131,7 +133,8 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( MergeTreeData::MutableDataPartPtr & new_part, bool sync, const NamesAndTypesList * total_columns_list, - MergeTreeData::DataPart::Checksums * additional_column_checksums) + MergeTreeData::DataPart::Checksums * additional_column_checksums, + const WriteSettings & write_settings) { /// Finish write and get checksums. 
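finalizePartOnDisk(), which follows, writes each small metadata file (count.txt, uuid, ttl.txt, columns.txt, checksums.txt) through the same idiom: a hashing wrapper over the raw buffer, so that file size and hash fall out of the write itself and can be recorded in the part's checksums. A stripped-down analogue of that wrapper; the mixing function here is a simple polynomial placeholder, not ClickHouse's real hash:

#include <cstdint>
#include <fstream>
#include <string>

class CountingHashStream
{
public:
    explicit CountingHashStream(std::ofstream & out_) : out(out_) {}

    void write(const std::string & data)
    {
        for (unsigned char c : data)
            hash = hash * 1099511628211ULL + c;   // placeholder mix
        bytes += data.size();
        out << data;
    }

    uint64_t count() const { return bytes; }      // -> checksums.files[name].file_size
    uint64_t getHash() const { return hash; }     // -> checksums.files[name].file_hash

private:
    std::ofstream & out;
    uint64_t bytes = 0;
    uint64_t hash = 0;
};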
MergeTreeData::DataPart::Checksums checksums; @@ -165,7 +168,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( auto finalizer = std::make_unique(*writer, new_part, files_to_remove_after_sync, sync); if (new_part->isStoredOnDisk()) - finalizer->written_files = finalizePartOnDisk(new_part, checksums); + finalizer->written_files = finalizePartOnDisk(new_part, checksums, write_settings); new_part->rows_count = rows_count; new_part->modification_time = time(nullptr); @@ -183,14 +186,15 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDisk( const MergeTreeData::DataPartPtr & new_part, - MergeTreeData::DataPart::Checksums & checksums) + MergeTreeData::DataPart::Checksums & checksums, + const WriteSettings & settings) { WrittenFiles written_files; if (new_part->isProjectionPart()) { if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING || isCompactPart(new_part)) { - auto count_out = volume->getDisk()->writeFile(part_path + "count.txt", 4096); + auto count_out = volume->getDisk()->writeFile(part_path + "count.txt", 4096, WriteMode::Rewrite, settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -204,7 +208,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { if (new_part->uuid != UUIDHelpers::Nil) { - auto out = volume->getDisk()->writeFile(fs::path(part_path) / IMergeTreeDataPart::UUID_FILE_NAME, 4096); + auto out = volume->getDisk()->writeFile(fs::path(part_path) / IMergeTreeDataPart::UUID_FILE_NAME, 4096, WriteMode::Rewrite, settings); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_part->uuid, out_hashing); checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -230,7 +234,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis } { - auto count_out = volume->getDisk()->writeFile(fs::path(part_path) / "count.txt", 4096); + auto count_out = volume->getDisk()->writeFile(fs::path(part_path) / "count.txt", 4096, WriteMode::Rewrite, settings); HashingWriteBuffer count_out_hashing(*count_out); writeIntText(rows_count, count_out_hashing); count_out_hashing.next(); @@ -244,7 +248,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->ttl_infos.empty()) { /// Write a file with ttl infos in json format. 
- auto out = volume->getDisk()->writeFile(fs::path(part_path) / "ttl.txt", 4096); + auto out = volume->getDisk()->writeFile(fs::path(part_path) / "ttl.txt", 4096, WriteMode::Rewrite, settings); HashingWriteBuffer out_hashing(*out); new_part->ttl_infos.write(out_hashing); checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -255,7 +259,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (!new_part->getSerializationInfos().empty()) { - auto out = volume->getDisk()->writeFile(part_path + IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096); + auto out = volume->getDisk()->writeFile(part_path + IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, WriteMode::Rewrite, settings); HashingWriteBuffer out_hashing(*out); new_part->getSerializationInfos().writeJSON(out_hashing); checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -266,7 +270,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write a file with a description of columns. - auto out = volume->getDisk()->writeFile(fs::path(part_path) / "columns.txt", 4096); + auto out = volume->getDisk()->writeFile(fs::path(part_path) / "columns.txt", 4096, WriteMode::Rewrite, settings); new_part->getColumns().writeText(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -274,7 +278,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis if (default_codec != nullptr) { - auto out = volume->getDisk()->writeFile(part_path + IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096); + auto out = volume->getDisk()->writeFile(part_path + IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, WriteMode::Rewrite, settings); DB::writeText(queryToString(default_codec->getFullCodecDesc()), *out); out->preFinalize(); written_files.emplace_back(std::move(out)); @@ -287,7 +291,7 @@ MergedBlockOutputStream::WrittenFiles MergedBlockOutputStream::finalizePartOnDis { /// Write file with checksums. 
- auto out = volume->getDisk()->writeFile(fs::path(part_path) / "checksums.txt", 4096); + auto out = volume->getDisk()->writeFile(fs::path(part_path) / "checksums.txt", 4096, WriteMode::Rewrite, settings); checksums.write(*out); out->preFinalize(); written_files.emplace_back(std::move(out)); diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.h b/src/Storages/MergeTree/MergedBlockOutputStream.h index 3f325524167..67dec1923e8 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.h +++ b/src/Storages/MergeTree/MergedBlockOutputStream.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -21,7 +22,8 @@ public: CompressionCodecPtr default_codec_, const MergeTreeTransactionPtr & txn, bool reset_columns_ = false, - bool blocks_are_granules_size = false); + bool blocks_are_granules_size = false, + const WriteSettings & write_settings = {}); Block getHeader() const { return metadata_snapshot->getSampleBlock(); } @@ -55,7 +57,8 @@ public: MergeTreeData::MutableDataPartPtr & new_part, bool sync, const NamesAndTypesList * total_columns_list = nullptr, - MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr); + MergeTreeData::DataPart::Checksums * additional_column_checksums = nullptr, + const WriteSettings & settings = {}); void finalizePart( MergeTreeData::MutableDataPartPtr & new_part, @@ -72,7 +75,8 @@ private: using WrittenFiles = std::vector>; WrittenFiles finalizePartOnDisk( const MergeTreeData::DataPartPtr & new_part, - MergeTreeData::DataPart::Checksums & checksums); + MergeTreeData::DataPart::Checksums & checksums, + const WriteSettings & write_settings); NamesAndTypesList columns_list; IMergeTreeDataPart::MinMaxIndex minmax_idx; diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 5a706165000..4fb993bfcc7 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -26,6 +27,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( MergeTreeWriterSettings writer_settings( global_settings, + data_part->storage.getContext()->getWriteSettings(), storage_settings, index_granularity_info ? 
index_granularity_info->is_adaptive : data_part->storage.canUseAdaptiveGranularity(), /* rewrite_primary_key = */false); diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index f4d5ab93a3f..b9bebc665b2 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -420,14 +420,15 @@ void finalizeMutatedPart( const MergeTreeDataPartPtr & source_part, MergeTreeData::MutableDataPartPtr new_data_part, ExecuteTTLType execute_ttl_type, - const CompressionCodecPtr & codec) + const CompressionCodecPtr & codec, + ContextPtr context) { auto disk = new_data_part->volume->getDisk(); auto part_path = fs::path(new_data_part->getFullRelativePath()); if (new_data_part->uuid != UUIDHelpers::Nil) { - auto out = disk->writeFile(part_path / IMergeTreeDataPart::UUID_FILE_NAME, 4096); + auto out = disk->writeFile(part_path / IMergeTreeDataPart::UUID_FILE_NAME, 4096, WriteMode::Rewrite, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); writeUUIDText(new_data_part->uuid, out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count(); @@ -437,7 +438,7 @@ void finalizeMutatedPart( if (execute_ttl_type != ExecuteTTLType::NONE) { /// Write a file with ttl infos in json format. - auto out_ttl = disk->writeFile(part_path / "ttl.txt", 4096); + auto out_ttl = disk->writeFile(part_path / "ttl.txt", 4096, WriteMode::Rewrite, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out_ttl); new_data_part->ttl_infos.write(out_hashing); new_data_part->checksums.files["ttl.txt"].file_size = out_hashing.count(); @@ -446,7 +447,7 @@ void finalizeMutatedPart( if (!new_data_part->getSerializationInfos().empty()) { - auto out = disk->writeFile(part_path / IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096); + auto out = disk->writeFile(part_path / IMergeTreeDataPart::SERIALIZATION_FILE_NAME, 4096, WriteMode::Rewrite, context->getWriteSettings()); HashingWriteBuffer out_hashing(*out); new_data_part->getSerializationInfos().writeJSON(out_hashing); new_data_part->checksums.files[IMergeTreeDataPart::SERIALIZATION_FILE_NAME].file_size = out_hashing.count(); @@ -455,18 +456,18 @@ void finalizeMutatedPart( { /// Write file with checksums. - auto out_checksums = disk->writeFile(part_path / "checksums.txt", 4096); + auto out_checksums = disk->writeFile(part_path / "checksums.txt", 4096, WriteMode::Rewrite, context->getWriteSettings()); new_data_part->checksums.write(*out_checksums); } /// close fd { - auto out = disk->writeFile(part_path / IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096); + auto out = disk->writeFile(part_path / IMergeTreeDataPart::DEFAULT_COMPRESSION_CODEC_FILE_NAME, 4096, WriteMode::Rewrite, context->getWriteSettings()); DB::writeText(queryToString(codec->getFullCodecDesc()), *out); } { /// Write a file with a description of columns. 
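The columns.txt rewritten here with the query's WriteSettings is a small human-readable inventory of the part's columns. An illustrative writer for that file; the exact format string is an assumption based on NamesAndTypesList::writeText, and only the write-with-settings plumbing is what the diff actually changes:

#include <fstream>
#include <string>
#include <utility>
#include <vector>

// Assumed layout:
//   columns format version: 1
//   2 columns:
//   `id` UInt64
//   `s` String
void writeColumnsTxt(const std::string & path,
                     const std::vector<std::pair<std::string, std::string>> & columns)
{
    std::ofstream out(path);
    out << "columns format version: 1\n" << columns.size() << " columns:\n";
    for (const auto & [name, type] : columns)
        out << '`' << name << "` " << type << '\n';
}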
- auto out_columns = disk->writeFile(part_path / "columns.txt", 4096); + auto out_columns = disk->writeFile(part_path / "columns.txt", 4096, WriteMode::Rewrite, context->getWriteSettings()); new_data_part->getColumns().writeText(*out_columns); } /// close fd @@ -1174,7 +1175,7 @@ private: } } - MutationHelpers::finalizeMutatedPart(ctx->source_part, ctx->new_data_part, ctx->execute_ttl_type, ctx->compression_codec); + MutationHelpers::finalizeMutatedPart(ctx->source_part, ctx->new_data_part, ctx->execute_ttl_type, ctx->compression_codec, ctx->context); } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index a4cd99ab7f9..b25b47ac772 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -451,7 +451,7 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, Context { std::lock_guard lock(currently_processing_in_background_mutex); - MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid); + MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get(), current_tid, getContext()->getWriteSettings()); version = increment.get(); entry.commit(version); String mutation_id = entry.file_name; @@ -1752,7 +1752,7 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_ { auto calculated_checksums = checkDataPart(part, false); calculated_checksums.checkEqual(part->checksums, true); - auto out = disk->writeFile(tmp_checksums_path, 4096); + auto out = disk->writeFile(tmp_checksums_path, 4096, WriteMode::Rewrite, local_context->getWriteSettings()); part->checksums.write(*out); disk->moveFile(tmp_checksums_path, checksums_path); diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index 3841abc2f2d..fb6a055c6e5 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -22,6 +23,7 @@ StorageSystemDisks::StorageSystemDisks(const StorageID & table_id_) {"total_space", std::make_shared()}, {"keep_free_space", std::make_shared()}, {"type", std::make_shared()}, + {"cache_path", std::make_shared()}, })); setInMemoryMetadata(storage_metadata); } @@ -43,6 +45,7 @@ Pipe StorageSystemDisks::read( MutableColumnPtr col_total = ColumnUInt64::create(); MutableColumnPtr col_keep = ColumnUInt64::create(); MutableColumnPtr col_type = ColumnString::create(); + MutableColumnPtr col_cache_path = ColumnString::create(); for (const auto & [disk_name, disk_ptr] : context->getDisksMap()) { @@ -52,6 +55,12 @@ Pipe StorageSystemDisks::read( col_total->insert(disk_ptr->getTotalSpace()); col_keep->insert(disk_ptr->getKeepingFreeSpace()); col_type->insert(toString(disk_ptr->getType())); + + String cache_path; + if (disk_ptr->isRemote()) + cache_path = disk_ptr->getCacheBasePath(); + + col_cache_path->insert(cache_path); } Columns res_columns; @@ -61,6 +70,7 @@ Pipe StorageSystemDisks::read( res_columns.emplace_back(std::move(col_total)); res_columns.emplace_back(std::move(col_keep)); res_columns.emplace_back(std::move(col_type)); + res_columns.emplace_back(std::move(col_cache_path)); UInt64 num_rows = res_columns.at(0)->size(); Chunk chunk(std::move(res_columns), num_rows); diff --git a/src/Storages/System/StorageSystemFilesystemCache.cpp b/src/Storages/System/StorageSystemFilesystemCache.cpp new file mode 100644 index 00000000000..f3ead8a95f0 --- /dev/null +++ 
b/src/Storages/System/StorageSystemFilesystemCache.cpp @@ -0,0 +1,60 @@ +#include "StorageSystemFilesystemCache.h" +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +NamesAndTypesList StorageSystemFilesystemCache::getNamesAndTypes() +{ + return { + {"cache_base_path", std::make_shared<DataTypeString>()}, + {"cache_path", std::make_shared<DataTypeString>()}, + {"file_segment_range_begin", std::make_shared<DataTypeUInt64>()}, + {"file_segment_range_end", std::make_shared<DataTypeUInt64>()}, + {"size", std::make_shared<DataTypeUInt64>()}, + {"state", std::make_shared<DataTypeString>()}, + {"cache_hits", std::make_shared<DataTypeUInt64>()}, + {"references", std::make_shared<DataTypeUInt64>()}, + {"downloaded_size", std::make_shared<DataTypeUInt64>()}, + }; +} + +StorageSystemFilesystemCache::StorageSystemFilesystemCache(const StorageID & table_id_) + : IStorageSystemOneBlock(table_id_) +{ +} + +void StorageSystemFilesystemCache::fillData(MutableColumns & res_columns, ContextPtr, const SelectQueryInfo &) const +{ + auto caches = FileCacheFactory::instance().getAll(); + + for (const auto & [cache_base_path, cache_data] : caches) + { + const auto & cache = cache_data.cache; + auto file_segments = cache->getSnapshot(); + + for (const auto & file_segment : file_segments) + { + res_columns[0]->insert(cache_base_path); + res_columns[1]->insert(cache->getPathInLocalCache(file_segment->key(), file_segment->offset())); + + const auto & range = file_segment->range(); + res_columns[2]->insert(range.left); + res_columns[3]->insert(range.right); + res_columns[4]->insert(range.size()); + res_columns[5]->insert(FileSegment::stateToString(file_segment->state())); + res_columns[6]->insert(file_segment->getHitsCount()); + res_columns[7]->insert(file_segment->getRefCount()); + res_columns[8]->insert(file_segment->getDownloadedSize()); + } + } +} + +} diff --git a/src/Storages/System/StorageSystemFilesystemCache.h b/src/Storages/System/StorageSystemFilesystemCache.h new file mode 100644 index 00000000000..1d9d28d7b50 --- /dev/null +++ b/src/Storages/System/StorageSystemFilesystemCache.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/** + * Usage example.
How to get mapping from local paths to remote paths: + * SELECT + * cache_path, + * cache_hits, + * remote_path, + * local_path, + * file_segment_range_begin, + * file_segment_range_end, + * size, + * state + * FROM + * ( + * SELECT + * arrayJoin(cache_paths) AS cache_path, + * local_path, + * remote_path + * FROM system.remote_data_paths + * ) AS data_paths + * INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path + * FORMAT Vertical + */ + +class StorageSystemFilesystemCache final : public shared_ptr_helper, + public IStorageSystemOneBlock +{ + friend struct shared_ptr_helper; +public: + std::string getName() const override { return "SystemFilesystemCache"; } + + static NamesAndTypesList getNamesAndTypes(); + +protected: + explicit StorageSystemFilesystemCache(const StorageID & table_id_); + + void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const override; +}; + +} diff --git a/src/Storages/System/StorageSystemRemoteDataPaths.cpp b/src/Storages/System/StorageSystemRemoteDataPaths.cpp new file mode 100644 index 00000000000..410d1ae6dd4 --- /dev/null +++ b/src/Storages/System/StorageSystemRemoteDataPaths.cpp @@ -0,0 +1,100 @@ +#include "StorageSystemRemoteDataPaths.h" +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +StorageSystemRemoteDataPaths::StorageSystemRemoteDataPaths(const StorageID & table_id_) + : IStorage(table_id_) +{ + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(ColumnsDescription( + { + {"disk_name", std::make_shared()}, + {"path", std::make_shared()}, + {"cache_base_path", std::make_shared()}, + {"local_path", std::make_shared()}, + {"remote_path", std::make_shared()}, + {"cache_paths", std::make_shared(std::make_shared())}, + })); + setInMemoryMetadata(storage_metadata); +} + +Pipe StorageSystemRemoteDataPaths::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & /*query_info*/, + ContextPtr context, + QueryProcessingStage::Enum /*processed_stage*/, + const size_t /*max_block_size*/, + const unsigned /*num_streams*/) +{ + storage_snapshot->check(column_names); + + MutableColumnPtr col_disk_name = ColumnString::create(); + MutableColumnPtr col_base_path = ColumnString::create(); + MutableColumnPtr col_cache_base_path = ColumnString::create(); + MutableColumnPtr col_local_path = ColumnString::create(); + MutableColumnPtr col_remote_path = ColumnString::create(); + MutableColumnPtr col_cache_paths = ColumnArray::create(ColumnString::create()); + + auto disks = context->getDisksMap(); + for (const auto & [disk_name, disk] : disks) + { + if (disk->isRemote()) + { + std::vector remote_paths_by_local_path; + disk->getRemotePathsRecursive("store", remote_paths_by_local_path); + + FileCachePtr cache; + auto cache_base_path = disk->getCacheBasePath(); + if (!cache_base_path.empty()) + cache = FileCacheFactory::instance().get(cache_base_path); + + for (const auto & [local_path, remote_paths] : remote_paths_by_local_path) + { + for (const auto & remote_path : remote_paths) + { + col_disk_name->insert(disk_name); + col_base_path->insert(disk->getPath()); + col_cache_base_path->insert(cache_base_path); + col_local_path->insert(local_path); + col_remote_path->insert(remote_path); + + if (cache) + { + auto cache_paths = cache->tryGetCachePaths(cache->hash(remote_path)); + col_cache_paths->insert(Array(cache_paths.begin(), cache_paths.end())); + } + else + { + 
+                        col_cache_paths->insertDefault();
+                    }
+                }
+            }
+        }
+    }
+
+    Columns res_columns;
+    res_columns.emplace_back(std::move(col_disk_name));
+    res_columns.emplace_back(std::move(col_base_path));
+    res_columns.emplace_back(std::move(col_cache_base_path));
+    res_columns.emplace_back(std::move(col_local_path));
+    res_columns.emplace_back(std::move(col_remote_path));
+    res_columns.emplace_back(std::move(col_cache_paths));
+
+    UInt64 num_rows = res_columns.at(0)->size();
+    Chunk chunk(std::move(res_columns), num_rows);
+
+    return Pipe(std::make_shared<SourceFromSingleChunk>(storage_snapshot->metadata->getSampleBlock(), std::move(chunk)));
+}
+
+}
diff --git a/src/Storages/System/StorageSystemRemoteDataPaths.h b/src/Storages/System/StorageSystemRemoteDataPaths.h
new file mode 100644
index 00000000000..0057dcf12f2
--- /dev/null
+++ b/src/Storages/System/StorageSystemRemoteDataPaths.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include <Storages/IStorage.h>
+#include <base/shared_ptr_helper.h>
+
+namespace DB
+{
+
+class StorageSystemRemoteDataPaths : public shared_ptr_helper<StorageSystemRemoteDataPaths>, public IStorage
+{
+    friend struct shared_ptr_helper<StorageSystemRemoteDataPaths>;
+public:
+    std::string getName() const override { return "SystemRemoteDataPaths"; }
+
+    bool isSystemStorage() const override { return true; }
+
+    Pipe read(
+        const Names & column_names,
+        const StorageSnapshotPtr & storage_snapshot,
+        SelectQueryInfo & query_info,
+        ContextPtr context,
+        QueryProcessingStage::Enum processed_stage,
+        size_t max_block_size,
+        unsigned num_streams) override;
+
+protected:
+    explicit StorageSystemRemoteDataPaths(const StorageID & table_id_);
+};
+
+}
diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp
index 151301cca07..6558890b8c4 100644
--- a/src/Storages/System/attachSystemTables.cpp
+++ b/src/Storages/System/attachSystemTables.cpp
@@ -69,6 +69,8 @@
 #include
 #include
 #include
+#include <Storages/System/StorageSystemFilesystemCache.h>
+#include <Storages/System/StorageSystemRemoteDataPaths.h>
 
 #ifdef OS_LINUX
 #include <Storages/System/StorageSystemStackTrace.h>
@@ -162,6 +164,8 @@ void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, bool has_zookeeper)
     attach<StorageSystemReplicatedFetches>(context, system_database, "replicated_fetches");
     attach<StorageSystemPartMovesBetweenShards>(context, system_database, "part_moves_between_shards");
     attach<StorageSystemAsynchronousInserts>(context, system_database, "asynchronous_inserts");
+    attach<StorageSystemFilesystemCache>(context, system_database, "filesystem_cache");
+    attach<StorageSystemRemoteDataPaths>(context, system_database, "remote_data_paths");
 
     if (has_zookeeper)
         attach<StorageSystemZooKeeper>(context, system_database, "zookeeper");
diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py
index 74ebebf1e6f..e9d660156e1 100644
--- a/tests/ci/build_check.py
+++ b/tests/ci/build_check.py
@@ -55,7 +55,6 @@ def get_packager_cmd(
     image_version: str,
     ccache_path: str,
     official: bool,
-    cov_token: str,
 ) -> str:
     package_type = build_config["package_type"]
     comp = build_config["compiler"]
@@ -88,8 +87,6 @@ def get_packager_cmd(
     if official:
         cmd += " --official"
 
-    if cov_token:
-        cmd += " --cov-token={}".format(cov_token)
 
     return cmd
@@ -206,9 +203,6 @@ def main():
     build_check_name = sys.argv[1]
     build_name = sys.argv[2]
-    cov_token = ""
-    if len(sys.argv) > 3:
-        cov_token = sys.argv[3]
 
     build_config = get_build_config(build_check_name, build_name)
@@ -303,7 +297,6 @@ def main():
         image_version,
         ccache_path,
         official_flag,
-        cov_token,
     )
 
     logging.info("Going to run packager with %s", packager_cmd)
diff --git a/tests/ci/env_helper.py b/tests/ci/env_helper.py
index 242d6bf5e40..6462baad729 100644
--- a/tests/ci/env_helper.py
+++ b/tests/ci/env_helper.py
@@ -15,7 +15,7 @@ GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
 GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
 GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", git_root)
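+# The getenv defaults in this module let the CI helpers run outside CI, where GitHub's variables are unset.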
 GITHUB_RUN_URL = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}"
-IMAGES_PATH = os.getenv("IMAGES_PATH")
+IMAGES_PATH = os.getenv("IMAGES_PATH", TEMP_PATH)
 REPORTS_PATH = os.getenv("REPORTS_PATH", p.abspath(p.join(module_dir, "./reports")))
 REPO_COPY = os.getenv("REPO_COPY", git_root)
 RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp")))
diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py
index 6f00232be77..bda3cf40bba 100644
--- a/tests/ci/run_check.py
+++ b/tests/ci/run_check.py
@@ -255,6 +255,7 @@ if __name__ == "__main__":
     elif SUBMODULE_CHANGED_LABEL in pr_info.labels:
         pr_labels_to_remove.append(SUBMODULE_CHANGED_LABEL)
 
+    print("change labels: add {}, remove {}".format(pr_labels_to_add, pr_labels_to_remove))
     if pr_labels_to_add:
         post_labels(gh, pr_info, pr_labels_to_add)
diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml
index 2e43f735605..3dd4811b1bf 100644
--- a/tests/config/config.d/storage_conf.xml
+++ b/tests/config/config.d/storage_conf.xml
@@ -7,7 +7,9 @@
             <access_key_id>clickhouse</access_key_id>
             <secret_access_key>clickhouse</secret_access_key>
             <data_cache_enabled>1</data_cache_enabled>
+            <cache_enabled>0</cache_enabled>
             <data_cache_max_size>22548578304</data_cache_max_size>
+            <cache_on_write_operations>1</cache_on_write_operations>
diff --git a/tests/config/install.sh b/tests/config/install.sh
index 4e8252f32e0..8f28fb386ff 100755
--- a/tests/config/install.sh
+++ b/tests/config/install.sh
@@ -86,6 +86,7 @@ fi
 
 if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then
     ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/
+    ln -sf $SRC_PATH/users.d/s3_cache.xml $DEST_SERVER_PATH/users.d/
 fi
 
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
diff --git a/tests/config/users.d/s3_cache.xml b/tests/config/users.d/s3_cache.xml
new file mode 100644
index 00000000000..69b24ecbbc4
--- /dev/null
+++ b/tests/config/users.d/s3_cache.xml
@@ -0,0 +1,8 @@
+<clickhouse>
+    <profiles>
+        <default>
+            <enable_filesystem_cache>1</enable_filesystem_cache>
+            <enable_filesystem_cache_on_write_operations>1</enable_filesystem_cache_on_write_operations>
+        </default>
+    </profiles>
+</clickhouse>
diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql
index 750809da338..7654be4eb29 100644
--- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql
+++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql
@@ -22,7 +22,7 @@ OPTIMIZE TABLE adaptive_table FINAL;
 SELECT marks FROM system.parts WHERE table = 'adaptive_table' and database=currentDatabase() and active;
 
-SET remote_fs_enable_cache = 0;
+SET enable_filesystem_cache = 0;
 
 -- If we have computed granularity incorrectly then we will exceed this limit.
 SET max_memory_usage='30M';
diff --git a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql
index 7ec3153886c..36b6c97460c 100644
--- a/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql
+++ b/tests/queries/0_stateless/01641_memory_tracking_insert_optimize.sql
@@ -3,7 +3,7 @@ drop table if exists data_01641;
 
 -- Disable cache for s3 storage tests because it increases memory usage.
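+-- (The setting below was renamed: remote_fs_enable_cache is now enable_filesystem_cache.)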
-set remote_fs_enable_cache=0; +set enable_filesystem_cache=0; set remote_filesystem_read_method='read'; create table data_01641 (key Int, value String) engine=MergeTree order by (key, repeat(value, 40)) settings old_parts_lifetime=0, min_bytes_for_wide_part=0; diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 23df052a8d6..a29d0661621 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # Tags: long, zookeeper, no-parallel +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01926_order_by_desc_limit.sql b/tests/queries/0_stateless/01926_order_by_desc_limit.sql index 9f65cf73252..86468b4fcd6 100644 --- a/tests/queries/0_stateless/01926_order_by_desc_limit.sql +++ b/tests/queries/0_stateless/01926_order_by_desc_limit.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS order_by_desc; -SET remote_fs_enable_cache=0; +SET enable_filesystem_cache=0; CREATE TABLE order_by_desc (u UInt32, s String) ENGINE MergeTree ORDER BY u PARTITION BY u % 100 diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 5129ea52e40..246b8ef6d3b 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -12,7 +12,7 @@ CREATE TABLE system.data_type_families\n(\n `name` String,\n `case_insensi CREATE TABLE system.databases\n(\n `name` String,\n `engine` String,\n `data_path` String,\n `metadata_path` String,\n `uuid` UUID,\n `comment` String,\n `database` String\n)\nENGINE = SystemDatabases()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.detached_parts\n(\n `database` String,\n `table` String,\n `partition_id` Nullable(String),\n `name` String,\n `disk` String,\n `reason` Nullable(String),\n `min_block_number` Nullable(Int64),\n `max_block_number` Nullable(Int64),\n `level` Nullable(UInt32)\n)\nENGINE = SystemDetachedParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.dictionaries\n(\n `database` String,\n `name` String,\n `uuid` UUID,\n `status` Enum8(\'NOT_LOADED\' = 0, \'LOADED\' = 1, \'FAILED\' = 2, \'LOADING\' = 3, \'FAILED_AND_RELOADING\' = 4, \'LOADED_AND_RELOADING\' = 5, \'NOT_EXIST\' = 6),\n `origin` String,\n `type` String,\n `key.names` Array(String),\n `key.types` Array(String),\n `attribute.names` Array(String),\n `attribute.types` Array(String),\n `bytes_allocated` UInt64,\n `query_count` UInt64,\n `hit_rate` Float64,\n `found_rate` Float64,\n `element_count` UInt64,\n `load_factor` Float64,\n `source` String,\n `lifetime_min` UInt64,\n `lifetime_max` UInt64,\n `loading_start_time` DateTime,\n `last_successful_update_time` DateTime,\n `loading_duration` Float32,\n `last_exception` String,\n `comment` String\n)\nENGINE = SystemDictionaries()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.disks\n(\n `name` String,\n `path` String,\n `free_space` UInt64,\n `total_space` UInt64,\n `keep_free_space` UInt64,\n `type` String\n)\nENGINE = SystemDisks()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE 
system.disks\n(\n `name` String,\n `path` String,\n `free_space` UInt64,\n `total_space` UInt64,\n `keep_free_space` UInt64,\n `type` String,\n `cache_path` String\n)\nENGINE = SystemDisks()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.distributed_ddl_queue\n(\n `entry` String,\n `entry_version` Nullable(UInt8),\n `initiator_host` Nullable(String),\n `initiator_port` Nullable(UInt16),\n `cluster` String,\n `query` String,\n `settings` Map(String, String),\n `query_create_time` DateTime,\n `host` Nullable(String),\n `port` Nullable(UInt16),\n `status` Nullable(Enum8(\'Inactive\' = 0, \'Active\' = 1, \'Finished\' = 2, \'Removing\' = 3, \'Unknown\' = 4)),\n `exception_code` Nullable(UInt16),\n `exception_text` Nullable(String),\n `query_finish_time` Nullable(DateTime),\n `query_duration_ms` Nullable(UInt64)\n)\nENGINE = SystemDDLWorkerQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.distribution_queue\n(\n `database` String,\n `table` String,\n `data_path` String,\n `is_blocked` UInt8,\n `error_count` UInt64,\n `data_files` UInt64,\n `data_compressed_bytes` UInt64,\n `broken_data_files` UInt64,\n `broken_data_compressed_bytes` UInt64,\n `last_exception` String\n)\nENGINE = SystemDistributionQueue()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.enabled_roles\n(\n `role_name` String,\n `with_admin_option` UInt8,\n `is_current` UInt8,\n `is_default` UInt8\n)\nENGINE = SystemEnabledRoles()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' diff --git a/tests/queries/0_stateless/02226_s3_with_cache.reference b/tests/queries/0_stateless/02226_s3_with_cache.reference index 214addac2d6..4041f51b3f9 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.reference +++ b/tests/queries/0_stateless/02226_s3_with_cache.reference @@ -1,2 +1,4 @@ SELECT 1, * FROM test LIMIT 10 FORMAT Null; 1 0 1 SELECT 2, * FROM test LIMIT 10 FORMAT Null; 0 1 0 +0 +SELECT 3, * FROM test LIMIT 10 FORMAT Null; 1 1 0 diff --git a/tests/queries/0_stateless/02226_s3_with_cache.sql b/tests/queries/0_stateless/02226_s3_with_cache.sql index b3126a419df..d470f2ef140 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.sql +++ b/tests/queries/0_stateless/02226_s3_with_cache.sql @@ -1,7 +1,9 @@ -- Tags: no-parallel, no-fasttest, long SET max_memory_usage='20G'; +SET enable_filesystem_cache_on_write_operations = 0; +DROP TABLE IF EXISTS test; CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache'; INSERT INTO test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000; @@ -41,4 +43,27 @@ SET remote_filesystem_read_method='threadpool'; SELECT * FROM test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10 FORMAT Null; +SET enable_filesystem_cache_on_write_operations = 1; + +TRUNCATE TABLE test; +SELECT count() FROM test; + +SYSTEM DROP FILESYSTEM CACHE; + +INSERT INTO test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000; + +SELECT 3, * FROM test LIMIT 10 FORMAT Null; + +SYSTEM FLUSH LOGS; +SELECT query, + ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read, + ProfileEvents['RemoteFSCacheReadBytes'] > 0 as remote_fs_cache_read, + ProfileEvents['RemoteFSCacheDownloadBytes'] > 0 as remote_fs_read_and_download +FROM system.query_log +WHERE query LIKE 'SELECT 3, * FROM test LIMIT%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC +LIMIT 1; + DROP TABLE test; diff --git 
a/tests/queries/0_stateless/02240_system_remote_filesystem_cache.reference b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.reference new file mode 100644 index 00000000000..8bcb7e1dd42 --- /dev/null +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.reference @@ -0,0 +1,19 @@ +-- { echo } + +SYSTEM DROP FILESYSTEM CACHE; +SET enable_filesystem_cache_on_write_operations=0; +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size; +0 0 1 +0 79 80 +0 745 746 +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +0 745 746 +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql new file mode 100644 index 00000000000..aa469779130 --- /dev/null +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel, no-fasttest, no-s3-storage + +-- { echo } + +SYSTEM DROP FILESYSTEM CACHE; +SET enable_filesystem_cache_on_write_operations=0; +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; +INSERT INTO test SELECT number, toString(number) FROM numbers(100); + +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache; diff --git a/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.reference b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.reference new file mode 100644 index 00000000000..b2269c16264 --- /dev/null +++ b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.reference @@ -0,0 +1,75 @@ +-- { echo } + +SET enable_filesystem_cache_on_write_operations=1; +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT 
arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +0 +SELECT count() FROM system.filesystem_cache; +0 +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +Row 1: +────── +file_segment_range_begin: 0 +file_segment_range_end: 745 +size: 746 +state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +7 +SELECT count() FROM system.filesystem_cache; +7 +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; +0 +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; +2 +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; +2 +SELECT count() size FROM system.filesystem_cache; +7 +SYSTEM DROP FILESYSTEM CACHE; +INSERT INTO test SELECT number, toString(number) FROM numbers(100, 200); +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +Row 1: +────── +file_segment_range_begin: 0 +file_segment_range_end: 1659 +size: 1660 +state: DOWNLOADED +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +7 +SELECT count() FROM system.filesystem_cache; +7 +SELECT count() FROM system.filesystem_cache; +7 +INSERT INTO test SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0; +SELECT count() FROM system.filesystem_cache; +7 +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +INSERT INTO test SELECT number, toString(number) FROM numbers(300, 10000); +SELECT count() FROM system.filesystem_cache; +21 +OPTIMIZE TABLE test FINAL; +SELECT count() FROM system.filesystem_cache; +27 +SET mutations_sync=2; +ALTER TABLE test UPDATE value = 'kek' WHERE key = 100; +SELECT count() FROM system.filesystem_cache; +28 +INSERT INTO test SELECT number, toString(number) FROM numbers(5000000); +SYSTEM FLUSH LOGS; +SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read +FROM system.query_log +WHERE query LIKE 'SELECT number, toString(number) FROM numbers(5000000)%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC +LIMIT 1; +SELECT count() FROM test; +5010500 +SELECT count() FROM test WHERE value LIKE '%010%'; +18816 diff --git a/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.sql 
b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.sql new file mode 100644 index 00000000000..c3ab1de3693 --- /dev/null +++ b/tests/queries/0_stateless/02241_remote_filesystem_cache_on_insert.sql @@ -0,0 +1,64 @@ +-- Tags: no-parallel, no-fasttest, no-s3-storage + +-- { echo } + +SET enable_filesystem_cache_on_write_operations=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760; + +SYSTEM DROP FILESYSTEM CACHE; + +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +SELECT count() FROM system.filesystem_cache; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100); + +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +SELECT count() FROM system.filesystem_cache; + +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; + +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; + +SELECT * FROM test FORMAT Null; +SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0; + +SELECT count() size FROM system.filesystem_cache; + +SYSTEM DROP FILESYSTEM CACHE; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100, 200); + +SELECT file_segment_range_begin, file_segment_range_end, size, state FROM (SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path) WHERE endsWith(local_path, 'data.bin') FORMAT Vertical; +SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path; +SELECT count() FROM system.filesystem_cache; + +SELECT count() FROM system.filesystem_cache; +INSERT INTO test SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0; +SELECT count() FROM system.filesystem_cache; + +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +INSERT INTO test SELECT number, toString(number) FROM numbers(300, 10000); +SELECT count() 
FROM system.filesystem_cache; +OPTIMIZE TABLE test FINAL; +SELECT count() FROM system.filesystem_cache; + +SET mutations_sync=2; +ALTER TABLE test UPDATE value = 'kek' WHERE key = 100; +SELECT count() FROM system.filesystem_cache; + +INSERT INTO test SELECT number, toString(number) FROM numbers(5000000); +SYSTEM FLUSH LOGS; +SELECT query, ProfileEvents['RemoteFSReadBytes'] > 0 as remote_fs_read +FROM system.query_log +WHERE query LIKE 'SELECT number, toString(number) FROM numbers(5000000)%' +AND type = 'QueryFinish' +AND current_database = currentDatabase() +ORDER BY query_start_time DESC +LIMIT 1; +SELECT count() FROM test; +SELECT count() FROM test WHERE value LIKE '%010%'; diff --git a/tests/queries/0_stateless/02265_cross_join_empty_list.reference b/tests/queries/0_stateless/02265_cross_join_empty_list.reference new file mode 100644 index 00000000000..fef5e889a1e --- /dev/null +++ b/tests/queries/0_stateless/02265_cross_join_empty_list.reference @@ -0,0 +1,52 @@ +24 +24 +24 +24 24 24 +0 0 0 +0 0 1 +0 0 2 +0 0 3 +0 1 0 +0 1 1 +0 1 2 +0 1 3 +0 2 0 +0 2 1 +0 2 2 +0 2 3 +1 0 0 +1 0 1 +1 0 2 +1 0 3 +1 1 0 +1 1 1 +1 1 2 +1 1 3 +1 2 0 +1 2 1 +1 2 2 +1 2 3 +0 0 0 +0 0 1 +0 0 2 +0 0 3 +0 1 0 +0 1 1 +0 1 2 +0 1 3 +0 2 0 +0 2 1 +0 2 2 +0 2 3 +1 0 0 +1 0 1 +1 0 2 +1 0 3 +1 1 0 +1 1 1 +1 1 2 +1 1 3 +1 2 0 +1 2 1 +1 2 2 +1 2 3 diff --git a/tests/queries/0_stateless/02265_cross_join_empty_list.sql b/tests/queries/0_stateless/02265_cross_join_empty_list.sql new file mode 100644 index 00000000000..346a047351d --- /dev/null +++ b/tests/queries/0_stateless/02265_cross_join_empty_list.sql @@ -0,0 +1,6 @@ +SELECT count(1) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count() FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count(n1.number), count(n2.number), count(n3.number) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT * FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 ORDER BY n1.number, n2.number, n3.number; +SELECT n1.number, n2.number, n3.number FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 ORDER BY n1.number, n2.number, n3.number;
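For a quick gauge of how full each cache is, the new table can simply be aggregated. This is a hedged sketch, not part of the patch; it relies only on the columns declared in StorageSystemFilesystemCache::getNamesAndTypes() above and on the DOWNLOADED state shown in the test references:

-- Summarise cache usage per cache instance.
SELECT
    cache_base_path,
    count() AS file_segments,
    sum(downloaded_size) AS downloaded_bytes,
    countIf(state = 'DOWNLOADED') AS fully_downloaded_segments
FROM system.filesystem_cache
GROUP BY cache_base_path;

The same pattern extends to per-table breakdowns by joining against system.remote_data_paths on cache_path, as in the header comment of StorageSystemFilesystemCache.h.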