Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 08:32:02 +00:00)

Commit 1fb9440d47: Merge master
.github/workflows/master.yml (vendored), 10 changes
@@ -149,7 +149,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH"
 SplitBuildSmokeTest:
 needs: [BuilderDebSplitted]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, style-checker]
 steps:
 - name: Set envs
@@ -316,7 +315,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinRelease:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -362,7 +360,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinGCC:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -636,7 +633,6 @@ jobs:
 ##########################################################################################
 BuilderDebSplitted:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -682,7 +678,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinTidy:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -728,7 +723,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinDarwin:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -774,7 +768,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinAarch64:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -820,7 +813,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinFreeBSD:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -866,7 +858,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinDarwinAarch64:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -912,7 +903,6 @@ jobs:
 sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
 BuilderBinPPC64:
 needs: [DockerHubPush]
-if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
 runs-on: [self-hosted, builder]
 steps:
 - name: Set envs
@@ -1,9 +1,12 @@
-if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
+# During cross-compilation in our CI we have to use llvm-tblgen and other building tools
+# tools to be build for host architecture and everything else for target architecture (e.g. AArch64)
+# Possible workaround is to use llvm-tblgen from some package...
+# But lets just enable LLVM for native builds
+if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
 set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
 else()
 set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
 endif()

 option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

 if (NOT ENABLE_EMBEDDED_COMPILER)
@@ -1378,7 +1378,7 @@ $REF_SHA $SHA_TO_TEST $(numactl --hardware | sed -n 's/^available:[[:space:]]\+/
 EOF

 # Also insert some data about the check into the CI checks table.
-"${client[@]}" --query "INSERT INTO "'"'"gh-data"'"'".checks FORMAT TSVWithNamesAndTypes" \
+"${client[@]}" --query "INSERT INTO "'"'"default"'"'".checks FORMAT TSVWithNamesAndTypes" \
 < ci-checks.tsv

 set -x
@@ -8,7 +8,7 @@ toc_title: "版本折叠MergeTree"
 这个引擎:

 - 允许快速写入不断变化的对象状态。
-- 删除后台中的旧对象状态。 这显着降低了存储体积。
+- 删除后台中的旧对象状态。 这显著降低了存储体积。

 请参阅部分 [崩溃](#table_engines_versionedcollapsingmergetree) 有关详细信息。
@@ -184,6 +184,11 @@ void LocalServer::tryInitPath()
 if (path.back() != '/')
 path += '/';

+fs::create_directories(fs::path(path) / "user_defined/");
+fs::create_directories(fs::path(path) / "data/");
+fs::create_directories(fs::path(path) / "metadata/");
+fs::create_directories(fs::path(path) / "metadata_dropped/");
+
 global_context->setPath(path);

 global_context->setTemporaryStorage(path + "tmp");
@@ -563,7 +568,6 @@ void LocalServer::processConfig()
 /// Lock path directory before read
 status.emplace(fs::path(path) / "status", StatusFile::write_full_info);

-fs::create_directories(fs::path(path) / "user_defined/");
 LOG_DEBUG(log, "Loading user defined objects from {}", path);
 Poco::File(path + "user_defined/").createDirectories();
 UserDefinedSQLObjectsLoader::instance().loadObjects(global_context);
@@ -571,9 +575,6 @@ void LocalServer::processConfig()
 LOG_DEBUG(log, "Loaded user defined objects.");

 LOG_DEBUG(log, "Loading metadata from {}", path);
-fs::create_directories(fs::path(path) / "data/");
-fs::create_directories(fs::path(path) / "metadata/");
-
 loadMetadataSystem(global_context);
 attachSystemTablesLocal(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE));
 attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA));
@@ -829,6 +829,36 @@ if (ThreadFuzzer::instance().isEffective())
 fs::create_directories(path / "metadata_dropped/");
 }

+#if USE_ROCKSDB
+/// Initialize merge tree metadata cache
+if (config().has("merge_tree_metadata_cache"))
+{
+fs::create_directories(path / "rocksdb/");
+size_t size = config().getUInt64("merge_tree_metadata_cache.lru_cache_size", 256 << 20);
+bool continue_if_corrupted = config().getBool("merge_tree_metadata_cache.continue_if_corrupted", false);
+try
+{
+LOG_DEBUG(
+log, "Initiailizing merge tree metadata cache lru_cache_size:{} continue_if_corrupted:{}", size, continue_if_corrupted);
+global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);
+}
+catch (...)
+{
+if (continue_if_corrupted)
+{
+/// Rename rocksdb directory and reinitialize merge tree metadata cache
+time_t now = time(nullptr);
+fs::rename(path / "rocksdb", path / ("rocksdb.old." + std::to_string(now)));
+global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);
+}
+else
+{
+throw;
+}
+}
+}
+#endif
+
 if (config().has("interserver_http_port") && config().has("interserver_https_port"))
 throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
@@ -1294,4 +1294,10 @@
 </tables>
 </rocksdb>
 -->

+<!-- Uncomment if enable merge tree metadata cache -->
+<merge_tree_metadata_cache>
+<lru_cache_size>268435456</lru_cache_size>
+<continue_if_corrupted>true</continue_if_corrupted>
+</merge_tree_metadata_cache>
 </clickhouse>
@@ -494,6 +494,11 @@ endif()

 target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::fast_float)

+if (USE_ORC)
+dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
+dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} "${CMAKE_BINARY_DIR}/contrib/orc/c++/include")
+endif ()
+
 if (TARGET ch_contrib::rocksdb)
 dbms_target_link_libraries(PUBLIC ch_contrib::rocksdb)
 endif()
@@ -35,10 +35,10 @@ public:
 {}

 // Format message with fmt::format, like the logging functions.
-template <typename ...Args>
-Exception(int code, const std::string & fmt, Args&&... args)
-: Exception(fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...), code)
-{}
+template <typename... Args>
+Exception(int code, fmt::format_string<Args...> fmt, Args &&... args) : Exception(fmt::format(fmt, std::forward<Args>(args)...), code)
+{
+}

 struct CreateFromPocoTag {};
 struct CreateFromSTDTag {};
@@ -52,10 +52,10 @@ public:
 const char * what() const throw() override { return message().data(); }

 /// Add something to the existing message.
-template <typename ...Args>
-void addMessage(const std::string& format, Args&&... args)
+template <typename... Args>
+void addMessage(fmt::format_string<Args...> format, Args &&... args)
 {
-extendedMessage(fmt::format(fmt::runtime(format), std::forward<Args>(args)...));
+extendedMessage(fmt::format(format, std::forward<Args>(args)...));
 }

 void addMessage(const std::string& message)
@@ -117,10 +117,10 @@ public:
 ParsingException(int code, const std::string & message);

 // Format message with fmt::format, like the logging functions.
-template <typename ...Args>
-ParsingException(int code, const std::string & fmt, Args&&... args)
-: Exception(fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...), code)
-{}
+template <typename... Args>
+ParsingException(int code, fmt::format_string<Args...> fmt, Args &&... args) : Exception(code, fmt, std::forward<Args>(args)...)
+{
+}


 std::string displayText() const
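The change above swaps the runtime-parsed std::string format argument for fmt::format_string<Args...>, which lets fmt validate placeholders against the argument list while the caller is compiled. A minimal standalone sketch of the difference (illustrative only, not part of this commit; it assumes fmt 8 or newer):

#include <fmt/format.h>
#include <string>

// Runtime-checked: a bad pattern such as "code: {]" only fails when this line executes.
std::string format_runtime(const std::string & pattern)
{
    return fmt::format(fmt::runtime(pattern), 42);
}

// Compile-time-checked: with fmt::format_string the literal pattern is validated
// at the call site, so a pattern like "code: {]" would be a build error here.
std::string format_checked()
{
    return fmt::format("code: {}", 42);
}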
@@ -285,6 +285,13 @@
 \
 M(MainConfigLoads, "Number of times the main configuration was reloaded.") \
 \
+M(MergeTreeMetadataCacheGet, "Number of rocksdb reads(used for merge tree metadata cache)") \
+M(MergeTreeMetadataCachePut, "Number of rocksdb puts(used for merge tree metadata cache)") \
+M(MergeTreeMetadataCacheDelete, "Number of rocksdb deletes(used for merge tree metadata cache)") \
+M(MergeTreeMetadataCacheSeek, "Number of rocksdb seeks(used for merge tree metadata cache)") \
+M(MergeTreeMetadataCacheHit, "Number of times the read of meta file was done from MergeTree metadata cache") \
+M(MergeTreeMetadataCacheMiss, "Number of times the read of meta file was not done from MergeTree metadata cache") \
+\
 M(ScalarSubqueriesGlobalCacheHit, "Number of times a read from a scalar subquery was done using the global cache") \
 M(ScalarSubqueriesLocalCacheHit, "Number of times a read from a scalar subquery was done using the local cache") \
 M(ScalarSubqueriesCacheMiss, "Number of times a read from a scalar subquery was not cached and had to be calculated completely")
src/Common/RangeGenerator.h (new file, 46 lines)
@@ -0,0 +1,46 @@
+#pragma once
+
+#include <optional>
+#include <cmath>
+
+namespace DB
+{
+
+class RangeGenerator
+{
+public:
+    explicit RangeGenerator(size_t total_size_, size_t range_step_, size_t range_start = 0)
+        : from(range_start), range_step(range_step_), total_size(total_size_)
+    {
+    }
+
+    size_t totalRanges() const { return static_cast<size_t>(round(static_cast<float>(total_size - from) / range_step)); }
+
+    using Range = std::pair<size_t, size_t>;
+
+    // return upper exclusive range of values, i.e. [from_range, to_range>
+    std::optional<Range> nextRange()
+    {
+        if (from >= total_size)
+        {
+            return std::nullopt;
+        }
+
+        auto to = from + range_step;
+        if (to >= total_size)
+        {
+            to = total_size;
+        }
+
+        Range range{from, to};
+        from = to;
+        return range;
+    }
+
+private:
+    size_t from;
+    size_t range_step;
+    size_t total_size;
+};
+
+}
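A quick usage sketch for the new RangeGenerator (illustrative only, not part of the commit; it assumes the header above is reachable as <Common/RangeGenerator.h>):

#include <Common/RangeGenerator.h>
#include <cstdio>

int main()
{
    DB::RangeGenerator gen(/* total_size */ 10, /* range_step */ 4);
    // Walks the half-open ranges [0, 4), [4, 8), [8, 10) and then stops.
    while (auto range = gen.nextRange())
        std::printf("[%zu, %zu)\n", range->first, range->second);
    return 0;
}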
src/Common/format.h (new file, 178 lines)
@@ -0,0 +1,178 @@
+#pragma once
+
+#include <base/types.h>
+#include <Common/Exception.h>
+#include <Common/PODArray.h>
+#include <Common/StringUtils/StringUtils.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int BAD_ARGUMENTS;
+}
+
+namespace Format
+{
+    using IndexPositions = PODArrayWithStackMemory<UInt64, 64>;
+
+    static inline void parseNumber(const String & description, UInt64 l, UInt64 r, UInt64 & res, UInt64 argument_number)
+    {
+        res = 0;
+        for (UInt64 pos = l; pos < r; ++pos)
+        {
+            if (!isNumericASCII(description[pos]))
+                throw Exception("Not a number in curly braces at position " + std::to_string(pos), ErrorCodes::BAD_ARGUMENTS);
+            res = res * 10 + description[pos] - '0';
+            if (res >= argument_number)
+                throw Exception(
+                    "Too big number for arguments, must be at most " + std::to_string(argument_number - 1), ErrorCodes::BAD_ARGUMENTS);
+        }
+    }
+
+    static inline void init(
+        const String & pattern,
+        size_t argument_number,
+        const std::vector<std::optional<String>> & constant_strings,
+        IndexPositions & index_positions,
+        std::vector<String> & substrings)
+    {
+        /// Is current position after open curly brace.
+        bool is_open_curly = false;
+        /// The position of last open token.
+        size_t last_open = -1;
+
+        /// Is formatting in a plain {} token.
+        std::optional<bool> is_plain_numbering;
+        UInt64 index_if_plain = 0;
+
+        /// Left position of adding substrings, just to the closed brace position or the start of the string.
+        /// Invariant --- the start of substring is in this position.
+        size_t start_pos = 0;
+
+        /// A flag to decide whether we should glue the constant strings.
+        bool glue_to_next = false;
+
+        /// Handling double braces (escaping).
+        auto double_brace_removal = [](String & str)
+        {
+            size_t i = 0;
+            bool should_delete = true;
+            str.erase(
+                std::remove_if(
+                    str.begin(),
+                    str.end(),
+                    [&i, &should_delete, &str](char)
+                    {
+                        bool is_double_brace = (str[i] == '{' && str[i + 1] == '{') || (str[i] == '}' && str[i + 1] == '}');
+                        ++i;
+                        if (is_double_brace && should_delete)
+                        {
+                            should_delete = false;
+                            return true;
+                        }
+                        should_delete = true;
+                        return false;
+                    }),
+                str.end());
+        };
+
+        index_positions.emplace_back();
+
+        for (size_t i = 0; i < pattern.size(); ++i)
+        {
+            if (pattern[i] == '{')
+            {
+                /// Escaping handling
+                /// It is safe to access because of null termination
+                if (pattern[i + 1] == '{')
+                {
+                    ++i;
+                    continue;
+                }
+
+                if (is_open_curly)
+                    throw Exception("Two open curly braces without close one at position " + std::to_string(i), ErrorCodes::BAD_ARGUMENTS);
+
+                String to_add = String(pattern.data() + start_pos, i - start_pos);
+                double_brace_removal(to_add);
+                if (!glue_to_next)
+                    substrings.emplace_back(to_add);
+                else
+                    substrings.back() += to_add;
+
+                glue_to_next = false;
+
+                is_open_curly = true;
+                last_open = i + 1;
+            }
+            else if (pattern[i] == '}')
+            {
+                if (pattern[i + 1] == '}')
+                {
+                    ++i;
+                    continue;
+                }
+
+                if (!is_open_curly)
+                    throw Exception("Closed curly brace without open one at position " + std::to_string(i), ErrorCodes::BAD_ARGUMENTS);
+
+                is_open_curly = false;
+
+                if (last_open == i)
+                {
+                    if (is_plain_numbering && !*is_plain_numbering)
+                        throw Exception(
+                            "Cannot switch from automatic field numbering to manual field specification", ErrorCodes::BAD_ARGUMENTS);
+                    is_plain_numbering = true;
+                    if (index_if_plain >= argument_number)
+                        throw Exception("Argument is too big for formatting", ErrorCodes::BAD_ARGUMENTS);
+                    index_positions.back() = index_if_plain++;
+                }
+                else
+                {
+                    if (is_plain_numbering && *is_plain_numbering)
+                        throw Exception(
+                            "Cannot switch from automatic field numbering to manual field specification", ErrorCodes::BAD_ARGUMENTS);
+                    is_plain_numbering = false;
+
+                    UInt64 arg;
+                    parseNumber(pattern, last_open, i, arg, argument_number);
+
+                    if (arg >= argument_number)
+                        throw Exception(
+                            "Argument is too big for formatting. Note that indexing starts from zero", ErrorCodes::BAD_ARGUMENTS);
+
+                    index_positions.back() = arg;
+                }
+
+                if (!constant_strings.empty() && constant_strings[index_positions.back()])
+                {
+                    /// The next string should be glued to last `A {} C`.format('B') -> `A B C`.
+                    glue_to_next = true;
+                    substrings.back() += *constant_strings[index_positions.back()];
+                }
+                else
+                    index_positions.emplace_back(); /// Otherwise we commit arg number and proceed.
+
+                start_pos = i + 1;
+            }
+        }
+
+        if (is_open_curly)
+            throw Exception("Last open curly brace is not closed", ErrorCodes::BAD_ARGUMENTS);
+
+        String to_add = String(pattern.data() + start_pos, pattern.size() - start_pos);
+        double_brace_removal(to_add);
+
+        if (!glue_to_next)
+            substrings.emplace_back(to_add);
+        else
+            substrings.back() += to_add;
+
+        index_positions.pop_back();
+    }
+}
+
+}
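To make the new Common/format.h helper concrete, here is an illustrative call (not part of the commit); it shows how Format::init splits a pattern into constant substrings and argument positions. String here is the std::string alias from base/types.h:

#include <Common/format.h>
#include <optional>
#include <vector>

void example()
{
    DB::Format::IndexPositions index_positions;
    std::vector<String> substrings;
    std::vector<std::optional<String>> constant_strings(2); // two non-constant arguments

    /// For the pattern "{} sees {}" this produces substrings = {"", " sees ", ""}
    /// and index_positions = {0, 1}: argument i is rendered between
    /// substrings[i] and substrings[i + 1].
    DB::Format::init("{} sees {}", /* argument_number */ 2, constant_strings, index_positions, substrings);
}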
@@ -1,7 +1,18 @@
 #include "gtest_global_context.h"

 const ContextHolder & getContext()
+{
+return getMutableContext();
+}
+
+ContextHolder & getMutableContext()
 {
 static ContextHolder holder;
 return holder;
 }
+
+void destroyContext()
+{
+auto & holder = getMutableContext();
+return holder.destroy();
+}

@@ -16,6 +16,17 @@ struct ContextHolder
 }

 ContextHolder(ContextHolder &&) = default;
+
+void destroy()
+{
+context->shutdown();
+context.reset();
+shared_context.reset();
+}
 };

 const ContextHolder & getContext();
+
+ContextHolder & getMutableContext();
+
+void destroyContext();
|
|||||||
DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule)
|
DECLARE_SETTING_ENUM_WITH_RENAME(EscapingRule, FormatSettings::EscapingRule)
|
||||||
|
|
||||||
DECLARE_SETTING_ENUM_WITH_RENAME(MsgPackUUIDRepresentation, FormatSettings::MsgPackUUIDRepresentation)
|
DECLARE_SETTING_ENUM_WITH_RENAME(MsgPackUUIDRepresentation, FormatSettings::MsgPackUUIDRepresentation)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -6,6 +6,7 @@
|
|||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Interpreters/InterpreterCreateQuery.h>
|
#include <Interpreters/InterpreterCreateQuery.h>
|
||||||
|
#include <Interpreters/ApplyWithSubqueryVisitor.h>
|
||||||
#include <Parsers/ASTCreateQuery.h>
|
#include <Parsers/ASTCreateQuery.h>
|
||||||
#include <Parsers/ASTFunction.h>
|
#include <Parsers/ASTFunction.h>
|
||||||
#include <Parsers/ParserCreateQuery.h>
|
#include <Parsers/ParserCreateQuery.h>
|
||||||
@ -55,6 +56,9 @@ std::pair<String, StoragePtr> createTableFromAST(
|
|||||||
ast_create_query.attach = true;
|
ast_create_query.attach = true;
|
||||||
ast_create_query.setDatabase(database_name);
|
ast_create_query.setDatabase(database_name);
|
||||||
|
|
||||||
|
if (ast_create_query.select && ast_create_query.isView())
|
||||||
|
ApplyWithSubqueryVisitor().visit(*ast_create_query.select);
|
||||||
|
|
||||||
if (ast_create_query.as_table_function)
|
if (ast_create_query.as_table_function)
|
||||||
{
|
{
|
||||||
const auto & factory = TableFunctionFactory::instance();
|
const auto & factory = TableFunctionFactory::instance();
|
||||||
|
@ -179,8 +179,12 @@ String DatabaseReplicatedDDLWorker::tryEnqueueAndExecuteEntry(DDLLogEntry & entr
|
|||||||
|
|
||||||
if (!task->was_executed)
|
if (!task->was_executed)
|
||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Entry {} was executed, but was not committed: code {}: {}",
|
throw Exception(
|
||||||
task->execution_status.code, task->execution_status.message);
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
|
"Entry {} was executed, but was not committed: code {}: {}",
|
||||||
|
task->entry_name,
|
||||||
|
task->execution_status.code,
|
||||||
|
task->execution_status.message);
|
||||||
}
|
}
|
||||||
|
|
||||||
try_node->setAlreadyRemoved();
|
try_node->setAlreadyRemoved();
|
||||||
|
@ -50,7 +50,7 @@ namespace
|
|||||||
{
|
{
|
||||||
if (!qualified_name.database.empty())
|
if (!qualified_name.database.empty())
|
||||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||||
"Dictionary source of type {} specifies a schema but schema is not supported by {}-driver",
|
"Dictionary source specifies a schema but schema is not supported by {}-driver",
|
||||||
bridge_.getName());
|
bridge_.getName());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -392,8 +392,13 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
|
|||||||
if (bytes_to_predownload)
|
if (bytes_to_predownload)
|
||||||
throw Exception(
|
throw Exception(
|
||||||
ErrorCodes::LOGICAL_ERROR,
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
"Failed to predownload remaining {} bytes. Current file segment: {}, current download offset: {}, expected: {}, eof: {}",
|
"Failed to predownload remaining {} bytes. Current file segment: {}, current download offset: {}, expected: {}, "
|
||||||
file_segment->range().toString(), file_segment->getDownloadOffset(), file_offset_of_buffer_end, implementation_buffer->eof());
|
"eof: {}",
|
||||||
|
bytes_to_predownload,
|
||||||
|
file_segment->range().toString(),
|
||||||
|
file_segment->getDownloadOffset(),
|
||||||
|
file_offset_of_buffer_end,
|
||||||
|
implementation_buffer->eof());
|
||||||
|
|
||||||
auto result = implementation_buffer->hasPendingData();
|
auto result = implementation_buffer->hasPendingData();
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S
|
|||||||
{
|
{
|
||||||
return std::make_unique<ReadBufferFromS3>(
|
return std::make_unique<ReadBufferFromS3>(
|
||||||
client_ptr, bucket, fs::path(metadata.remote_fs_root_path) / path, max_single_read_retries,
|
client_ptr, bucket, fs::path(metadata.remote_fs_root_path) / path, max_single_read_retries,
|
||||||
settings, /* use_external_buffer */true, read_until_position, /* restricted_seek */true);
|
settings, /* use_external_buffer */true, /* offset */ 0, read_until_position, /* restricted_seek */true);
|
||||||
};
|
};
|
||||||
|
|
||||||
if (with_cache)
|
if (with_cache)
|
||||||
|
@ -85,9 +85,12 @@ FormatSchemaInfo::FormatSchemaInfo(const String & format_schema, const String &
|
|||||||
else if (path.has_parent_path() && !fs::weakly_canonical(default_schema_directory_path / path).string().starts_with(fs::weakly_canonical(default_schema_directory_path).string()))
|
else if (path.has_parent_path() && !fs::weakly_canonical(default_schema_directory_path / path).string().starts_with(fs::weakly_canonical(default_schema_directory_path).string()))
|
||||||
{
|
{
|
||||||
if (is_server)
|
if (is_server)
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
throw Exception(
|
||||||
|
ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Path in the 'format_schema' setting shouldn't go outside the 'format_schema_path' directory: {} ({} not in {})",
|
"Path in the 'format_schema' setting shouldn't go outside the 'format_schema_path' directory: {} ({} not in {})",
|
||||||
path.string());
|
default_schema_directory(),
|
||||||
|
path.string(),
|
||||||
|
default_schema_directory());
|
||||||
path = default_schema_directory_path / path;
|
path = default_schema_directory_path / path;
|
||||||
schema_path = path.filename();
|
schema_path = path.filename();
|
||||||
schema_directory = path.parent_path() / "";
|
schema_directory = path.parent_path() / "";
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
#include <DataTypes/DataTypeArray.h>
|
#include <DataTypes/DataTypeArray.h>
|
||||||
#include <DataTypes/DataTypeTuple.h>
|
#include <DataTypes/DataTypeTuple.h>
|
||||||
#include <DataTypes/DataTypeMap.h>
|
#include <DataTypes/DataTypeMap.h>
|
||||||
|
#include <DataTypes/DataTypeObject.h>
|
||||||
#include <Common/JSONParsers/SimdJSONParser.h>
|
#include <Common/JSONParsers/SimdJSONParser.h>
|
||||||
#include <Common/JSONParsers/RapidJSONParser.h>
|
#include <Common/JSONParsers/RapidJSONParser.h>
|
||||||
#include <Common/JSONParsers/DummyJSONParser.h>
|
#include <Common/JSONParsers/DummyJSONParser.h>
|
||||||
@ -158,22 +159,37 @@ DataTypePtr getDataTypeFromJSONFieldImpl(const Element & field)
|
|||||||
{
|
{
|
||||||
auto object = field.getObject();
|
auto object = field.getObject();
|
||||||
DataTypePtr value_type;
|
DataTypePtr value_type;
|
||||||
|
bool is_object = false;
|
||||||
for (const auto key_value_pair : object)
|
for (const auto key_value_pair : object)
|
||||||
{
|
{
|
||||||
auto type = getDataTypeFromJSONFieldImpl(key_value_pair.second);
|
auto type = getDataTypeFromJSONFieldImpl(key_value_pair.second);
|
||||||
if (!type)
|
if (!type)
|
||||||
return nullptr;
|
continue;
|
||||||
|
|
||||||
if (value_type && value_type->getName() != type->getName())
|
if (isObject(type))
|
||||||
return nullptr;
|
{
|
||||||
|
is_object = true;
|
||||||
value_type = type;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!value_type)
|
if (!value_type)
|
||||||
return nullptr;
|
{
|
||||||
|
value_type = type;
|
||||||
|
}
|
||||||
|
else if (!value_type->equals(*type))
|
||||||
|
{
|
||||||
|
is_object = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (is_object)
|
||||||
|
return std::make_shared<DataTypeObject>("json", false);
|
||||||
|
|
||||||
|
if (value_type)
|
||||||
return std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), value_type);
|
return std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), value_type);
|
||||||
|
|
||||||
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
throw Exception{ErrorCodes::INCORRECT_DATA, "Unexpected JSON type"};
|
throw Exception{ErrorCodes::INCORRECT_DATA, "Unexpected JSON type"};
|
||||||
|
@ -7,6 +7,8 @@
|
|||||||
#include <Formats/ReadSchemaUtils.h>
|
#include <Formats/ReadSchemaUtils.h>
|
||||||
#include <Processors/Formats/ISchemaReader.h>
|
#include <Processors/Formats/ISchemaReader.h>
|
||||||
#include <Common/assert_cast.h>
|
#include <Common/assert_cast.h>
|
||||||
|
#include <Interpreters/Context.h>
|
||||||
|
#include <Storages/IStorage.h>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -17,6 +19,28 @@ namespace ErrorCodes
|
|||||||
extern const int BAD_ARGUMENTS;
|
extern const int BAD_ARGUMENTS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static std::optional<NamesAndTypesList> getOrderedColumnsList(
|
||||||
|
const NamesAndTypesList & columns_list, const Names & columns_order_hint)
|
||||||
|
{
|
||||||
|
if (columns_list.size() != columns_order_hint.size())
|
||||||
|
return {};
|
||||||
|
|
||||||
|
std::unordered_map<String, DataTypePtr> available_columns;
|
||||||
|
for (const auto & [name, type] : columns_list)
|
||||||
|
available_columns.emplace(name, type);
|
||||||
|
|
||||||
|
NamesAndTypesList res;
|
||||||
|
for (const auto & name : columns_order_hint)
|
||||||
|
{
|
||||||
|
auto it = available_columns.find(name);
|
||||||
|
if (it == available_columns.end())
|
||||||
|
return {};
|
||||||
|
|
||||||
|
res.emplace_back(name, it->second);
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
ColumnsDescription readSchemaFromFormat(
|
ColumnsDescription readSchemaFromFormat(
|
||||||
const String & format_name,
|
const String & format_name,
|
||||||
const std::optional<FormatSettings> & format_settings,
|
const std::optional<FormatSettings> & format_settings,
|
||||||
@ -52,6 +76,22 @@ ColumnsDescription readSchemaFromFormat(
|
|||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file. Error: {}", format_name, e.message());
|
throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file. Error: {}", format_name, e.message());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// If we have "INSERT SELECT" query then try to order
|
||||||
|
/// columns as they are ordered in table schema for formats
|
||||||
|
/// without strict column order (like JSON and TSKV).
|
||||||
|
/// It will allow to execute simple data loading with query
|
||||||
|
/// "INSERT INTO table SELECT * FROM ..."
|
||||||
|
const auto & insertion_table = context->getInsertionTable();
|
||||||
|
if (!schema_reader->hasStrictOrderOfColumns() && !insertion_table.empty())
|
||||||
|
{
|
||||||
|
auto storage = DatabaseCatalog::instance().getTable(insertion_table, context);
|
||||||
|
auto metadata = storage->getInMemoryMetadataPtr();
|
||||||
|
auto names_in_storage = metadata->getColumns().getNamesOfPhysical();
|
||||||
|
auto ordered_list = getOrderedColumnsList(names_and_types, names_in_storage);
|
||||||
|
if (ordered_list)
|
||||||
|
names_and_types = *ordered_list;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "{} file format doesn't support schema inference", format_name);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "{} file format doesn't support schema inference", format_name);
|
||||||
|
@ -53,6 +53,7 @@
|
|||||||
#include <DataTypes/DataTypeLowCardinality.h>
|
#include <DataTypes/DataTypeLowCardinality.h>
|
||||||
#include <Columns/ColumnLowCardinality.h>
|
#include <Columns/ColumnLowCardinality.h>
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
|
#include <Common/HashTable/HashMap.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -3140,31 +3141,30 @@ private:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_type) const
|
WrapperType createTupleToObjectWrapper(const DataTypeTuple & from_tuple, bool has_nullable_subcolumns) const
|
||||||
{
|
{
|
||||||
if (const auto * from_tuple = checkAndGetDataType<DataTypeTuple>(from_type.get()))
|
if (!from_tuple.haveExplicitNames())
|
||||||
{
|
|
||||||
if (!from_tuple->haveExplicitNames())
|
|
||||||
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
||||||
"Cast to Object can be performed only from flatten Named Tuple. Got: {}", from_type->getName());
|
"Cast to Object can be performed only from flatten Named Tuple. Got: {}", from_tuple.getName());
|
||||||
|
|
||||||
PathsInData paths;
|
PathsInData paths;
|
||||||
DataTypes from_types;
|
DataTypes from_types;
|
||||||
|
|
||||||
std::tie(paths, from_types) = flattenTuple(from_type);
|
std::tie(paths, from_types) = flattenTuple(from_tuple.getPtr());
|
||||||
auto to_types = from_types;
|
auto to_types = from_types;
|
||||||
|
|
||||||
for (auto & type : to_types)
|
for (auto & type : to_types)
|
||||||
{
|
{
|
||||||
if (isTuple(type) || isNested(type))
|
if (isTuple(type) || isNested(type))
|
||||||
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
||||||
"Cast to Object can be performed only from flatten Named Tuple. Got: {}", from_type->getName());
|
"Cast to Object can be performed only from flatten Named Tuple. Got: {}",
|
||||||
|
from_tuple.getName());
|
||||||
|
|
||||||
type = recursiveRemoveLowCardinality(type);
|
type = recursiveRemoveLowCardinality(type);
|
||||||
}
|
}
|
||||||
|
|
||||||
return [element_wrappers = getElementWrappers(from_types, to_types),
|
return [element_wrappers = getElementWrappers(from_types, to_types),
|
||||||
has_nullable_subcolumns = to_type->hasNullableSubcolumns(), from_types, to_types, paths]
|
has_nullable_subcolumns, from_types, to_types, paths]
|
||||||
(ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t input_rows_count)
|
(ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t input_rows_count)
|
||||||
{
|
{
|
||||||
size_t tuple_size = to_types.size();
|
size_t tuple_size = to_types.size();
|
||||||
@ -3187,6 +3187,93 @@ private:
|
|||||||
return res;
|
return res;
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
WrapperType createMapToObjectWrapper(const DataTypeMap & from_map, bool has_nullable_subcolumns) const
|
||||||
|
{
|
||||||
|
auto key_value_types = from_map.getKeyValueTypes();
|
||||||
|
|
||||||
|
if (!isStringOrFixedString(key_value_types[0]))
|
||||||
|
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
||||||
|
"Cast to Object from Map can be performed only from Map "
|
||||||
|
"with String or FixedString key. Got: {}", from_map.getName());
|
||||||
|
|
||||||
|
const auto & value_type = key_value_types[1];
|
||||||
|
auto to_value_type = value_type;
|
||||||
|
|
||||||
|
if (!has_nullable_subcolumns && value_type->isNullable())
|
||||||
|
to_value_type = removeNullable(value_type);
|
||||||
|
|
||||||
|
if (has_nullable_subcolumns && !value_type->isNullable())
|
||||||
|
to_value_type = makeNullable(value_type);
|
||||||
|
|
||||||
|
DataTypes to_key_value_types{std::make_shared<DataTypeString>(), std::move(to_value_type)};
|
||||||
|
auto element_wrappers = getElementWrappers(key_value_types, to_key_value_types);
|
||||||
|
|
||||||
|
return [has_nullable_subcolumns, element_wrappers, key_value_types, to_key_value_types]
|
||||||
|
(ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * nullable_source, size_t) -> ColumnPtr
|
||||||
|
{
|
||||||
|
const auto & column_map = assert_cast<const ColumnMap &>(*arguments.front().column);
|
||||||
|
const auto & offsets = column_map.getNestedColumn().getOffsets();
|
||||||
|
auto key_value_columns = column_map.getNestedData().getColumnsCopy();
|
||||||
|
|
||||||
|
for (size_t i = 0; i < 2; ++i)
|
||||||
|
{
|
||||||
|
ColumnsWithTypeAndName element{{key_value_columns[i], key_value_types[i], ""}};
|
||||||
|
key_value_columns[i] = element_wrappers[i](element, to_key_value_types[i], nullable_source, key_value_columns[i]->size());
|
||||||
|
}
|
||||||
|
|
||||||
|
const auto & key_column_str = assert_cast<const ColumnString &>(*key_value_columns[0]);
|
||||||
|
const auto & value_column = *key_value_columns[1];
|
||||||
|
|
||||||
|
using SubcolumnsMap = HashMap<StringRef, MutableColumnPtr, StringRefHash>;
|
||||||
|
SubcolumnsMap subcolumns;
|
||||||
|
|
||||||
|
for (size_t row = 0; row < offsets.size(); ++row)
|
||||||
|
{
|
||||||
|
for (size_t i = offsets[static_cast<ssize_t>(row) - 1]; i < offsets[row]; ++i)
|
||||||
|
{
|
||||||
|
auto ref = key_column_str.getDataAt(i);
|
||||||
|
|
||||||
|
bool inserted;
|
||||||
|
SubcolumnsMap::LookupResult it;
|
||||||
|
subcolumns.emplace(ref, it, inserted);
|
||||||
|
auto & subcolumn = it->getMapped();
|
||||||
|
|
||||||
|
if (inserted)
|
||||||
|
subcolumn = value_column.cloneEmpty()->cloneResized(row);
|
||||||
|
|
||||||
|
/// Map can have duplicated keys. We insert only first one.
|
||||||
|
if (subcolumn->size() == row)
|
||||||
|
subcolumn->insertFrom(value_column, i);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Insert default values for keys missed in current row.
|
||||||
|
for (const auto & [_, subcolumn] : subcolumns)
|
||||||
|
if (subcolumn->size() == row)
|
||||||
|
subcolumn->insertDefault();
|
||||||
|
}
|
||||||
|
|
||||||
|
auto column_object = ColumnObject::create(has_nullable_subcolumns);
|
||||||
|
for (auto && [key, subcolumn] : subcolumns)
|
||||||
|
{
|
||||||
|
PathInData path(key.toView());
|
||||||
|
column_object->addSubcolumn(path, std::move(subcolumn));
|
||||||
|
}
|
||||||
|
|
||||||
|
return column_object;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_type) const
|
||||||
|
{
|
||||||
|
if (const auto * from_tuple = checkAndGetDataType<DataTypeTuple>(from_type.get()))
|
||||||
|
{
|
||||||
|
return createTupleToObjectWrapper(*from_tuple, to_type->hasNullableSubcolumns());
|
||||||
|
}
|
||||||
|
else if (const auto * from_map = checkAndGetDataType<DataTypeMap>(from_type.get()))
|
||||||
|
{
|
||||||
|
return createMapToObjectWrapper(*from_map, to_type->hasNullableSubcolumns());
|
||||||
|
}
|
||||||
else if (checkAndGetDataType<DataTypeString>(from_type.get()))
|
else if (checkAndGetDataType<DataTypeString>(from_type.get()))
|
||||||
{
|
{
|
||||||
return [] (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count)
|
return [] (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count)
|
||||||
@ -3199,7 +3286,7 @@ private:
|
|||||||
}
|
}
|
||||||
|
|
||||||
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
throw Exception(ErrorCodes::TYPE_MISMATCH,
|
||||||
"Cast to Object can be performed only from flatten named tuple or string. Got: {}", from_type->getName());
|
"Cast to Object can be performed only from flatten named Tuple, Map or String. Got: {}", from_type->getName());
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename FieldType>
|
template <typename FieldType>
|
||||||
|
@ -259,7 +259,7 @@ public:
|
|||||||
throw Exception(
|
throw Exception(
|
||||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||||
"Function '{}' needs at least 2 arguments, at most 3 arguments; passed {}.",
|
"Function '{}' needs at least 2 arguments, at most 3 arguments; passed {}.",
|
||||||
arguments.size());
|
name, arguments.size());
|
||||||
|
|
||||||
if (!isString(arguments[0]))
|
if (!isString(arguments[0]))
|
||||||
throw Exception("Illegal type " + arguments[0]->getName() + " of first argument of function " + getName() + ". Must be String.",
|
throw Exception("Illegal type " + arguments[0]->getName() + " of first argument of function " + getName() + ". Must be String.",
|
||||||
|
@ -181,9 +181,12 @@ ColumnPtr IExecutableFunction::defaultImplementationForNulls(
|
|||||||
// Default implementation for nulls returns null result for null arguments,
|
// Default implementation for nulls returns null result for null arguments,
|
||||||
// so the result type must be nullable.
|
// so the result type must be nullable.
|
||||||
if (!result_type->isNullable())
|
if (!result_type->isNullable())
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
throw Exception(
|
||||||
|
ErrorCodes::LOGICAL_ERROR,
|
||||||
"Function {} with Null argument and default implementation for Nulls "
|
"Function {} with Null argument and default implementation for Nulls "
|
||||||
"is expected to return Nullable result, got {}", result_type->getName());
|
"is expected to return Nullable result, got {}",
|
||||||
|
getName(),
|
||||||
|
result_type->getName());
|
||||||
|
|
||||||
return result_type->createColumnConstWithDefaultValue(input_rows_count);
|
return result_type->createColumnConstWithDefaultValue(input_rows_count);
|
||||||
}
|
}
|
||||||
|
@ -231,7 +231,7 @@ private:
|
|||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Function {} decimal scale should have native UInt type. Actual {}",
|
"Function {} decimal scale should have native UInt type. Actual {}",
|
||||||
scale_argument.type->getName());
|
getName(), scale_argument.type->getName());
|
||||||
}
|
}
|
||||||
|
|
||||||
scale = arguments[additional_argument_index].column->getUInt(0);
|
scale = arguments[additional_argument_index].column->getUInt(0);
|
||||||
|
@ -52,23 +52,21 @@ public:
|
|||||||
{
|
{
|
||||||
if (arguments.size() < 2)
|
if (arguments.size() < 2)
|
||||||
throw Exception(
|
throw Exception(
|
||||||
"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
|
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||||
+ ", should be at least 2.",
|
"Number of arguments for function {} doesn't match: passed {}, should be at least 2",
|
||||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
getName(),
|
||||||
|
arguments.size());
|
||||||
if (arguments.size() > FormatImpl::argument_threshold)
|
|
||||||
throw Exception(
|
|
||||||
"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
|
|
||||||
+ ", should be at most " + std::to_string(FormatImpl::argument_threshold),
|
|
||||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
|
||||||
|
|
||||||
for (const auto arg_idx : collections::range(0, arguments.size()))
|
for (const auto arg_idx : collections::range(0, arguments.size()))
|
||||||
{
|
{
|
||||||
const auto * arg = arguments[arg_idx].get();
|
const auto * arg = arguments[arg_idx].get();
|
||||||
if (!isStringOrFixedString(arg))
|
if (!isStringOrFixedString(arg))
|
||||||
throw Exception{"Illegal type " + arg->getName() + " of argument " + std::to_string(arg_idx + 1) + " of function "
|
throw Exception(
|
||||||
+ getName(),
|
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
|
"Illegal type {} of argument {} of function {}",
|
||||||
|
arg->getName(),
|
||||||
|
arg_idx + 1,
|
||||||
|
getName());
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<DataTypeString>();
|
return std::make_shared<DataTypeString>();
|
||||||
@ -125,7 +123,7 @@ private:
|
|||||||
std::vector<const ColumnString::Chars *> data(num_arguments);
|
std::vector<const ColumnString::Chars *> data(num_arguments);
|
||||||
std::vector<const ColumnString::Offsets *> offsets(num_arguments);
|
std::vector<const ColumnString::Offsets *> offsets(num_arguments);
|
||||||
std::vector<size_t> fixed_string_sizes(num_arguments);
|
std::vector<size_t> fixed_string_sizes(num_arguments);
|
||||||
std::vector<String> constant_strings(num_arguments);
|
std::vector<std::optional<String>> constant_strings(num_arguments);
|
||||||
bool has_column_string = false;
|
bool has_column_string = false;
|
||||||
bool has_column_fixed_string = false;
|
bool has_column_fixed_string = false;
|
||||||
for (size_t i = 0; i < num_arguments; ++i)
|
for (size_t i = 0; i < num_arguments; ++i)
|
||||||
|
@ -112,7 +112,7 @@ public:
|
|||||||
|| (res = executeType<DataTypeDateTime64>(arguments, result_type))))
|
|| (res = executeType<DataTypeDateTime64>(arguments, result_type))))
|
||||||
throw Exception(
|
throw Exception(
|
||||||
ErrorCodes::ILLEGAL_COLUMN,
|
ErrorCodes::ILLEGAL_COLUMN,
|
||||||
"Illegal column {} of function {], must be Date or DateTime.",
|
"Illegal column {} of function {}, must be Date or DateTime.",
|
||||||
arguments[1].column->getName(),
|
arguments[1].column->getName(),
|
||||||
getName());
|
getName());
|
||||||
|
|
||||||
|
src/Functions/flattenTuple.cpp (new file, 68 lines)
@@ -0,0 +1,68 @@
+#include <Functions/IFunction.h>
+#include <Functions/FunctionFactory.h>
+#include <Functions/FunctionHelpers.h>
+#include <DataTypes/DataTypeTuple.h>
+#include <DataTypes/ObjectUtils.h>
+#include <Columns/ColumnTuple.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int ILLEGAL_COLUMN;
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+}
+
+namespace
+{
+
+class FunctionFlattenTuple : public IFunction
+{
+public:
+    static constexpr auto name = "flattenTuple";
+    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionFlattenTuple>(); }
+
+    String getName() const override { return name; }
+    size_t getNumberOfArguments() const override { return 1; }
+    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo &) const override { return true; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+
+    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
+    {
+        const auto & type = arguments[0];
+        const auto * type_tuple = checkAndGetDataType<DataTypeTuple>(type.get());
+        if (!type_tuple || !type_tuple->haveExplicitNames())
+            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+                "Argument for function '{}' must be Named Tuple. Got '{}'",
+                getName(), type->getName());
+
+        auto [paths, types] = flattenTuple(type);
+        Names names;
+        names.reserve(paths.size());
+        for (const auto & path : paths)
+            names.push_back(path.getPath());
+
+        return std::make_shared<DataTypeTuple>(types, names);
+    }
+
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
+    {
+        auto column = arguments.at(0).column;
+        if (!checkAndGetColumn<ColumnTuple>(column.get()))
+            throw Exception(ErrorCodes::ILLEGAL_COLUMN,
+                "Illegal column {} of first argument of function {}. Expected ColumnTuple",
+                column->getName(), getName());
+
+        return flattenTuple(column);
+    }
+};
+
+}
+
+void registerFunctionFlattenTuple(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionFlattenTuple>();
+}
+
+}
@@ -45,25 +45,23 @@ public:

 DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
 {
-if (arguments.empty())
+if (arguments.size() < 2)
 throw Exception(
-"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
-+ ", should be at least 1",
-ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
-
-if (arguments.size() > FormatImpl::argument_threshold)
-throw Exception(
-"Number of arguments for function " + getName() + " doesn't match: passed " + toString(arguments.size())
-+ ", should be at most " + std::to_string(FormatImpl::argument_threshold),
-ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
+ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
+"Number of arguments for function {} doesn't match: passed {}, should be at least 2",
+getName(),
+arguments.size());

 for (const auto arg_idx : collections::range(0, arguments.size()))
 {
 const auto * arg = arguments[arg_idx].get();
 if (!isStringOrFixedString(arg))
 throw Exception(
-"Illegal type " + arg->getName() + " of argument " + std::to_string(arg_idx + 1) + " of function " + getName(),
-ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
+ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+"Illegal type {} of argument {} of function {}",
+arg->getName(),
+arg_idx + 1,
+getName());
 }

 return std::make_shared<DataTypeString>();
@@ -84,7 +82,7 @@ public:
 std::vector<const ColumnString::Chars *> data(arguments.size() - 1);
 std::vector<const ColumnString::Offsets *> offsets(arguments.size() - 1);
 std::vector<size_t> fixed_string_sizes(arguments.size() - 1);
-std::vector<String> constant_strings(arguments.size() - 1);
+std::vector<std::optional<String>> constant_strings(arguments.size() - 1);

 bool has_column_string = false;
 bool has_column_fixed_string = false;
|
|||||||
#include <base/types.h>
|
#include <base/types.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Common/StringUtils/StringUtils.h>
|
#include <Common/StringUtils/StringUtils.h>
|
||||||
|
#include <Common/format.h>
|
||||||
#include <Common/memcpySmall.h>
|
#include <Common/memcpySmall.h>
|
||||||
|
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <optional>
|
#include <optional>
|
||||||
#include <string>
|
#include <string>
|
||||||
@@ -15,15 +17,9 @@

 namespace DB
 {
-namespace ErrorCodes
-{
-    extern const int BAD_ARGUMENTS;
-}

 struct FormatImpl
 {
-    static constexpr size_t small_argument_threshold = 1024;
-    static constexpr size_t argument_threshold = std::numeric_limits<UInt32>::max();
     static constexpr size_t right_padding = 15;

     template <typename... Args>
@@ -39,165 +35,10 @@ struct FormatImpl
         format<false, false>(std::forward<Args>(args)...);
     }

-    static void parseNumber(const String & description, UInt64 l, UInt64 r, UInt64 & res)
-    {
-        res = 0;
-        for (UInt64 pos = l; pos < r; ++pos)
-        {
-            if (!isNumericASCII(description[pos]))
-                throw Exception("Not a number in curly braces at position " + std::to_string(pos), ErrorCodes::BAD_ARGUMENTS);
-            res = res * 10 + description[pos] - '0';
-            if (res >= argument_threshold)
-                throw Exception(
-                    "Too big number for arguments, must be at most " + std::to_string(argument_threshold), ErrorCodes::BAD_ARGUMENTS);
-        }
-    }
-
-    static inline void init(
-        const String & pattern,
-        const std::vector<const ColumnString::Chars *> & data,
-        size_t argument_number,
-        const std::vector<String> & constant_strings,
-        UInt64 * index_positions_ptr,
-        std::vector<String> & substrings)
-    {
-        /// Is current position after open curly brace.
-        bool is_open_curly = false;
-        /// The position of last open token.
-        size_t last_open = -1;
-
-        /// Is formatting in a plain {} token.
-        std::optional<bool> is_plain_numbering;
-        UInt64 index_if_plain = 0;
-
-        /// Left position of adding substrings, just to the closed brace position or the start of the string.
-        /// Invariant --- the start of substring is in this position.
-        size_t start_pos = 0;
-
-        /// A flag to decide whether we should glue the constant strings.
-        bool glue_to_next = false;
-
-        /// Handling double braces (escaping).
-        auto double_brace_removal = [](String & str)
-        {
-            size_t i = 0;
-            bool should_delete = true;
-            str.erase(
-                std::remove_if(
-                    str.begin(),
-                    str.end(),
-                    [&i, &should_delete, &str](char)
-                    {
-                        bool is_double_brace = (str[i] == '{' && str[i + 1] == '{') || (str[i] == '}' && str[i + 1] == '}');
-                        ++i;
-                        if (is_double_brace && should_delete)
-                        {
-                            should_delete = false;
-                            return true;
-                        }
-                        should_delete = true;
-                        return false;
-                    }),
-                str.end());
-        };
-
-        for (size_t i = 0; i < pattern.size(); ++i)
-        {
-            if (pattern[i] == '{')
-            {
-                /// Escaping handling
-                /// It is safe to access because of null termination
-                if (pattern[i + 1] == '{')
-                {
-                    ++i;
-                    continue;
-                }
-
-                if (is_open_curly)
-                    throw Exception("Two open curly braces without close one at position " + std::to_string(i), ErrorCodes::BAD_ARGUMENTS);
-
-                String to_add = String(pattern.data() + start_pos, i - start_pos);
-                double_brace_removal(to_add);
-                if (!glue_to_next)
-                    substrings.emplace_back(to_add);
-                else
-                    substrings.back() += to_add;
-
-                glue_to_next = false;
-
-                is_open_curly = true;
-                last_open = i + 1;
-            }
-            else if (pattern[i] == '}')
-            {
-                if (pattern[i + 1] == '}')
-                {
-                    ++i;
-                    continue;
-                }
-
-                if (!is_open_curly)
-                    throw Exception("Closed curly brace without open one at position " + std::to_string(i), ErrorCodes::BAD_ARGUMENTS);
-
-                is_open_curly = false;
-
-                if (last_open == i)
-                {
-                    if (is_plain_numbering && !*is_plain_numbering)
-                        throw Exception(
-                            "Cannot switch from automatic field numbering to manual field specification", ErrorCodes::BAD_ARGUMENTS);
-                    is_plain_numbering = true;
-                    if (index_if_plain >= argument_number)
-                        throw Exception("Argument is too big for formatting", ErrorCodes::BAD_ARGUMENTS);
-                    *index_positions_ptr = index_if_plain++;
-                }
-                else
-                {
-                    if (is_plain_numbering && *is_plain_numbering)
-                        throw Exception(
-                            "Cannot switch from automatic field numbering to manual field specification", ErrorCodes::BAD_ARGUMENTS);
-                    is_plain_numbering = false;
-
-                    UInt64 arg;
-                    parseNumber(pattern, last_open, i, arg);
-
-                    if (arg >= argument_number)
-                        throw Exception(
-                            "Argument is too big for formatting. Note that indexing starts from zero", ErrorCodes::BAD_ARGUMENTS);
-
-                    *index_positions_ptr = arg;
-                }
-
-                /// Constant string.
-                if (!data[*index_positions_ptr])
-                {
-                    /// The next string should be glued to last `A {} C`.format('B') -> `A B C`.
-                    glue_to_next = true;
-                    substrings.back() += constant_strings[*index_positions_ptr];
-                }
-                else
-                    ++index_positions_ptr; /// Otherwise we commit arg number and proceed.
-
-                start_pos = i + 1;
-            }
-        }
-
-        if (is_open_curly)
-            throw Exception("Last open curly brace is not closed", ErrorCodes::BAD_ARGUMENTS);
-
-        String to_add = String(pattern.data() + start_pos, pattern.size() - start_pos);
-        double_brace_removal(to_add);
-
-        if (!glue_to_next)
-            substrings.emplace_back(to_add);
-        else
-            substrings.back() += to_add;
-    }
-
     /// data for ColumnString and ColumnFixed. Nullptr means no data, it is const string.
     /// offsets for ColumnString, nullptr is an indicator that there is a fixed string rather than ColumnString.
     /// fixed_string_N for savings N to fixed strings.
-    /// constant_strings for constant strings. If data[i] is nullptr, than it is constant string.
+    /// constant_strings for constant strings. If data[i] is nullptr, it is constant string.
     /// res_data is result_data, res_offsets is offset result.
     /// input_rows_count is the number of rows processed.
     /// Precondition: data.size() == offsets.size() == fixed_string_N.size() == constant_strings.size().
@@ -207,29 +48,22 @@ struct FormatImpl
         const std::vector<const ColumnString::Chars *> & data,
         const std::vector<const ColumnString::Offsets *> & offsets,
         [[maybe_unused]] /* Because sometimes !has_column_fixed_string */ const std::vector<size_t> & fixed_string_N,
-        const std::vector<String> & constant_strings,
+        const std::vector<std::optional<String>> & constant_strings,
         ColumnString::Chars & res_data,
         ColumnString::Offsets & res_offsets,
         size_t input_rows_count)
     {
         const size_t argument_number = offsets.size();

-        UInt64 small_index_positions_buffer[small_argument_threshold];
-        /// The subsequent indexes of strings we should use. e.g `Hello world {1} {3} {1} {0}` this array will be filled with [1, 3, 1, 0, ... (garbage)] but without constant string indices.
-        UInt64 * index_positions = small_index_positions_buffer;
-
-        std::unique_ptr<UInt64[]> big_index_positions_buffer;
-        if (argument_number > small_argument_threshold)
-        {
-            big_index_positions_buffer.reset(new UInt64[argument_number]);
-            index_positions = big_index_positions_buffer.get();
-        }
+        /// The subsequent indexes of strings we should use. e.g `Hello world {1} {3} {1} {0}` this
+        /// array will be filled with [1, 3, 1, 0] but without constant string indices.
+        Format::IndexPositions index_positions;

         /// Vector of substrings of pattern that will be copied to the answer, not string view because of escaping and iterators invalidation.
         /// These are exactly what is between {} tokens, for `Hello {} world {}` we will have [`Hello `, ` world `, ``].
         std::vector<String> substrings;

-        init(pattern, data, argument_number, constant_strings, index_positions, substrings);
+        Format::init(pattern, argument_number, constant_strings, index_positions, substrings);

         UInt64 final_size = 0;

@@ -271,7 +105,7 @@ struct FormatImpl
         for (size_t j = 1; j < substrings.size(); ++j)
         {
             UInt64 arg = index_positions[j - 1];
-            auto offset_ptr = offsets[arg];
+            const auto * offset_ptr = offsets[arg];
             UInt64 arg_offset = 0;
             UInt64 size = 0;

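
Note (illustrative, not part of the commit): the placeholder parsing that used to live in FormatImpl::init now sits behind Format::init in Common/format.h. The rule it enforces is that a pattern may use either automatic fields ({}) or manual fields ({N}), never both, and every index must be below the argument count. A minimal standalone sketch of that rule, ignoring the {{ and }} escaping handled by the real parser:

#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

// Returns the argument index used by each placeholder, in order of appearance.
std::vector<size_t> parsePlaceholders(const std::string & pattern, size_t argument_number)
{
    std::vector<size_t> positions;
    std::optional<bool> manual;   // unset until the first placeholder is seen
    size_t next_auto = 0;

    for (size_t i = 0; i < pattern.size(); ++i)
    {
        if (pattern[i] != '{')
            continue;
        size_t close = pattern.find('}', i);
        if (close == std::string::npos)
            throw std::runtime_error("unclosed '{'");
        std::string index_text = pattern.substr(i + 1, close - i - 1);

        bool is_manual = !index_text.empty();
        if (manual && *manual != is_manual)
            throw std::runtime_error("cannot mix {} and {N} in one pattern");
        manual = is_manual;

        size_t arg = is_manual ? std::stoull(index_text) : next_auto++;
        if (arg >= argument_number)
            throw std::runtime_error("argument index out of range");
        positions.push_back(arg);
        i = close;
    }
    return positions;
}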
src/Functions/makeDate.cpp (new file, 155 lines)
@@ -0,0 +1,155 @@
+#include <Functions/IFunction.h>
+#include <Functions/FunctionFactory.h>
+#include <DataTypes/DataTypeDate.h>
+#include <DataTypes/DataTypeDate32.h>
+#include <DataTypes/DataTypesNumber.h>
+#include <Columns/ColumnsNumber.h>
+#include <Interpreters/castColumn.h>
+
+#include <Common/DateLUT.h>
+#include <Common/typeid_cast.h>
+
+#include <array>
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
+}
+
+namespace
+{
+
+// A helper function to simplify comparisons of valid YYYY-MM-DD values for <,>,=
+inline constexpr Int64 YearMonthDayToSingleInt(Int64 year, Int64 month, Int64 day)
+{
+    return year * 512 + month * 32 + day;
+}
+
+// Common implementation for makeDate, makeDate32
+template <typename Traits>
+class FunctionMakeDate : public IFunction
+{
+private:
+    static constexpr std::array<const char*, 3> argument_names = {"year", "month", "day"};
+
+public:
+    static constexpr auto name = Traits::name;
+
+    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionMakeDate>(); }
+
+    String getName() const override { return name; }
+
+    bool isVariadic() const override { return false; }
+
+    size_t getNumberOfArguments() const override { return argument_names.size(); }
+
+    bool isInjective(const ColumnsWithTypeAndName &) const override
+    {
+        return false; // {year,month,day} that are out of supported range are converted into a default value
+    }
+
+    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
+
+    bool useDefaultImplementationForNulls() const override { return true; }
+
+    bool useDefaultImplementationForConstants() const override { return true; }
+
+    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
+    {
+        if (arguments.size() != argument_names.size())
+            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
+                "Function {} requires 3 arguments, but {} given", getName(), arguments.size());
+
+        for (size_t i = 0; i < argument_names.size(); ++i)
+        {
+            DataTypePtr argument_type = arguments[i];
+            if (!isNumber(argument_type))
+                throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+                    "Argument '{}' for function {} must be number", std::string(argument_names[i]), getName());
+        }
+
+        return std::make_shared<typename Traits::ReturnDataType>();
+    }
+
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
+    {
+        const DataTypePtr converted_argument_type = std::make_shared<DataTypeFloat32>();
+        Columns converted_arguments;
+        converted_arguments.reserve(arguments.size());
+        for (const auto & argument : arguments)
+        {
+            ColumnPtr argument_column = castColumn(argument, converted_argument_type);
+            argument_column = argument_column->convertToFullColumnIfConst();
+            converted_arguments.push_back(argument_column);
+        }
+
+        auto res_column = Traits::ReturnColumnType::create(input_rows_count);
+        auto & result_data = res_column->getData();
+
+        const auto & year_data = typeid_cast<const ColumnFloat32 &>(*converted_arguments[0]).getData();
+        const auto & month_data = typeid_cast<const ColumnFloat32 &>(*converted_arguments[1]).getData();
+        const auto & day_data = typeid_cast<const ColumnFloat32 &>(*converted_arguments[2]).getData();
+
+        const auto & date_lut = DateLUT::instance();
+
+        for (size_t i = 0; i < input_rows_count; ++i)
+        {
+            const auto year = year_data[i];
+            const auto month = month_data[i];
+            const auto day = day_data[i];
+
+            Int32 day_num = 0;
+
+            if (year >= Traits::MIN_YEAR &&
+                year <= Traits::MAX_YEAR &&
+                month >= 1 && month <= 12 &&
+                day >= 1 && day <= 31 &&
+                YearMonthDayToSingleInt(year, month, day) <= Traits::MAX_DATE)
+            {
+                day_num = date_lut.makeDayNum(year, month, day);
+            }
+
+            result_data[i] = day_num;
+        }
+
+        return res_column;
+    }
+};
+
+// makeDate(year, month, day)
+struct MakeDateTraits
+{
+    static constexpr auto name = "makeDate";
+    using ReturnDataType = DataTypeDate;
+    using ReturnColumnType = ColumnUInt16;
+
+    static constexpr auto MIN_YEAR = 1970;
+    static constexpr auto MAX_YEAR = 2149;
+    // This date has the maximum day number that fits in 16-bit uint
+    static constexpr auto MAX_DATE = YearMonthDayToSingleInt(MAX_YEAR, 6, 6);
+};
+
+// makeDate32(year, month, day)
+struct MakeDate32Traits
+{
+    static constexpr auto name = "makeDate32";
+    using ReturnDataType = DataTypeDate32;
+    using ReturnColumnType = ColumnInt32;
+
+    static constexpr auto MIN_YEAR = 1925;
+    static constexpr auto MAX_YEAR = 2283;
+    static constexpr auto MAX_DATE = YearMonthDayToSingleInt(MAX_YEAR, 11, 11);
+};
+
+}
+
+void registerFunctionsMakeDate(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionMakeDate<MakeDateTraits>>();
+    factory.registerFunction<FunctionMakeDate<MakeDate32Traits>>();
+}
+
+}
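
Note (illustrative, not part of the commit): the YearMonthDayToSingleInt packing above is order-preserving for valid dates because day <= 31 < 32 and month * 32 <= 12 * 32 = 384 < 512, so comparing the packed integers compares (year, month, day) lexicographically. A tiny self-contained check of that property:

#include <cassert>

constexpr long long pack(long long y, long long m, long long d) { return y * 512 + m * 32 + d; }

int main()
{
    assert(pack(2149, 6, 6) > pack(2149, 6, 5));    // later day
    assert(pack(2149, 6, 6) > pack(2149, 5, 31));   // later month beats any day
    assert(pack(2149, 6, 6) > pack(2148, 12, 31));  // later year beats any month/day
    return 0;
}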
@@ -8,6 +8,7 @@ namespace DB
 void registerFunctionsArithmetic(FunctionFactory &);
 void registerFunctionsArray(FunctionFactory &);
 void registerFunctionsTuple(FunctionFactory &);
+void registerFunctionsMakeDate(FunctionFactory &);
 void registerFunctionsMap(FunctionFactory &);
 void registerFunctionsBitmap(FunctionFactory &);
 void registerFunctionsBinaryRepr(FunctionFactory &);
@@ -73,6 +74,7 @@ void registerFunctions()
     registerFunctionsArithmetic(factory);
     registerFunctionsArray(factory);
     registerFunctionsTuple(factory);
+    registerFunctionsMakeDate(factory);
     registerFunctionsMap(factory);
     registerFunctionsBitmap(factory);
     registerFunctionsBinaryRepr(factory);
@@ -80,6 +80,7 @@ void registerFunctionInitialQueryID(FunctionFactory & factory);
 void registerFunctionServerUUID(FunctionFactory &);
 void registerFunctionZooKeeperSessionUptime(FunctionFactory &);
 void registerFunctionGetOSKernelVersion(FunctionFactory &);
+void registerFunctionFlattenTuple(FunctionFactory &);

 #if USE_ICU
 void registerFunctionConvertCharset(FunctionFactory &);
@@ -166,6 +167,7 @@ void registerFunctionsMiscellaneous(FunctionFactory & factory)
     registerFunctionServerUUID(factory);
     registerFunctionZooKeeperSessionUptime(factory);
     registerFunctionGetOSKernelVersion(factory);
+    registerFunctionFlattenTuple(factory);

 #if USE_ICU
     registerFunctionConvertCharset(factory);
@@ -237,7 +237,7 @@ void ParallelReadBuffer::readerThreadFunction(ReadWorkerPtr read_worker)
         while (!emergency_stop && !read_worker->cancel)
         {
             if (!read_worker->reader->next())
-                throw Exception("Failed to read all the data from the reader", ErrorCodes::LOGICAL_ERROR);
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to read all the data from the reader, missing {} bytes", read_worker->bytes_left);

             if (emergency_stop || read_worker->cancel)
                 break;
@@ -82,8 +82,8 @@ public:
         std::unique_ptr<ReadBufferFactory> reader_factory_,
         ThreadPool * pool,
         size_t max_working_readers,
-        WorkerSetup worker_setup = {},
-        WorkerCleanup worker_cleanup = {});
+        WorkerSetup worker_setup = [](ThreadStatus &){},
+        WorkerCleanup worker_cleanup = [](ThreadStatus &){});

     ~ParallelReadBuffer() override { finishAndWait(); }

@@ -1,4 +1,5 @@
 #include <Common/config.h>
+#include "IO/S3Common.h"

 #if USE_AWS_S3

@@ -42,6 +43,7 @@ ReadBufferFromS3::ReadBufferFromS3(
     UInt64 max_single_read_retries_,
     const ReadSettings & settings_,
     bool use_external_buffer_,
+    size_t offset_,
     size_t read_until_position_,
     bool restricted_seek_)
     : SeekableReadBufferWithSize(nullptr, 0)
@@ -49,9 +51,10 @@ ReadBufferFromS3::ReadBufferFromS3(
     , bucket(bucket_)
     , key(key_)
     , max_single_read_retries(max_single_read_retries_)
+    , offset(offset_)
+    , read_until_position(read_until_position_)
     , read_settings(settings_)
     , use_external_buffer(use_external_buffer_)
-    , read_until_position(read_until_position_)
     , restricted_seek(restricted_seek_)
 {
 }
@@ -210,13 +213,14 @@ std::optional<size_t> ReadBufferFromS3::getTotalSize()
     if (file_size)
         return file_size;

-    Aws::S3::Model::HeadObjectRequest request;
-    request.SetBucket(bucket);
-    request.SetKey(key);
-
-    auto outcome = client_ptr->HeadObject(request);
-    auto head_result = outcome.GetResultWithOwnership();
-    file_size = head_result.GetContentLength();
+    auto object_size = S3::getObjectSize(client_ptr, bucket, key, false);
+
+    if (!object_size)
+    {
+        return std::nullopt;
+    }
+
+    file_size = object_size;
     return file_size;
 }

@@ -234,6 +238,11 @@ void ReadBufferFromS3::setReadUntilPosition(size_t position)
     }
 }

+SeekableReadBuffer::Range ReadBufferFromS3::getRemainingReadRange() const
+{
+    return Range{.left = static_cast<size_t>(offset), .right = read_until_position ? std::optional{read_until_position - 1} : std::nullopt};
+}
+
 std::unique_ptr<ReadBuffer> ReadBufferFromS3::initialize()
 {
     Aws::S3::Model::GetObjectRequest req;
@@ -272,6 +281,36 @@ std::unique_ptr<ReadBuffer> ReadBufferFromS3::initialize()
         throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
 }

+SeekableReadBufferPtr ReadBufferS3Factory::getReader()
+{
+    const auto next_range = range_generator.nextRange();
+    if (!next_range)
+    {
+        return nullptr;
+    }
+
+    auto reader = std::make_shared<ReadBufferFromS3>(
+        client_ptr,
+        bucket,
+        key,
+        s3_max_single_read_retries,
+        read_settings,
+        false /*use_external_buffer*/,
+        next_range->first,
+        next_range->second);
+    return reader;
+}
+
+off_t ReadBufferS3Factory::seek(off_t off, [[maybe_unused]] int whence)
+{
+    range_generator = RangeGenerator{object_size, range_step, static_cast<size_t>(off)};
+    return off;
+}
+
+std::optional<size_t> ReadBufferS3Factory::getTotalSize()
+{
+    return object_size;
+}
 }

 #endif
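
Note (illustrative, not part of the commit): getReader() above leans on RangeGenerator, which this commit moves into Common/RangeGenerator.h, handing out half-open [from, to) ranges of range_step bytes with the last range clipped to the object size. A simplified standalone stand-in (not the ClickHouse class) showing that behaviour:

#include <algorithm>
#include <cstdio>
#include <optional>
#include <utility>

struct SimpleRangeGenerator
{
    size_t from, step, total;

    // Returns the next [from, to) range, or nothing once the object is exhausted.
    std::optional<std::pair<size_t, size_t>> nextRange()
    {
        if (from >= total)
            return std::nullopt;
        size_t to = std::min(from + step, total);
        auto range = std::make_pair(from, to);
        from = to;
        return range;
    }
};

int main()
{
    SimpleRangeGenerator gen{0, 4 * 1024 * 1024, 10 * 1024 * 1024}; // 10 MiB object, 4 MiB step
    while (auto r = gen.nextRange())
        std::printf("[%zu, %zu)\n", r->first, r->second);          // three ranges: 4 MiB, 4 MiB, 2 MiB
    return 0;
}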
@@ -1,5 +1,6 @@
 #pragma once

+#include <Common/RangeGenerator.h>
 #include <Common/config.h>

 #if USE_AWS_S3
@@ -7,6 +8,7 @@
 #include <memory>

 #include <IO/HTTPCommon.h>
+#include <IO/ParallelReadBuffer.h>
 #include <IO/ReadBuffer.h>
 #include <IO/ReadSettings.h>
 #include <IO/SeekableReadBuffer.h>
@@ -30,7 +32,9 @@ private:
     String bucket;
     String key;
     UInt64 max_single_read_retries;

     off_t offset = 0;
+    off_t read_until_position = 0;

     Aws::S3::Model::GetObjectResult read_result;
     std::unique_ptr<ReadBuffer> impl;
@@ -45,6 +49,7 @@ public:
     UInt64 max_single_read_retries_,
     const ReadSettings & settings_,
     bool use_external_buffer = false,
+    size_t offset_ = 0,
     size_t read_until_position_ = 0,
     bool restricted_seek_ = false);

@@ -58,7 +63,7 @@ public:

     void setReadUntilPosition(size_t position) override;

-    Range getRemainingReadRange() const override { return Range{ .left = static_cast<size_t>(offset), .right = read_until_position }; }
+    Range getRemainingReadRange() const override;

     size_t getFileOffsetOfBufferEnd() const override { return offset; }

@@ -69,13 +74,55 @@ private:

     bool use_external_buffer;

-    off_t read_until_position = 0;
-
     /// There is different seek policy for disk seek and for non-disk seek
     /// (non-disk seek is applied for seekable input formats: orc, arrow, parquet).
     bool restricted_seek;
 };

+/// Creates separate ReadBufferFromS3 for sequence of ranges of particular object
+class ReadBufferS3Factory : public ParallelReadBuffer::ReadBufferFactory
+{
+public:
+    explicit ReadBufferS3Factory(
+        std::shared_ptr<Aws::S3::S3Client> client_ptr_,
+        const String & bucket_,
+        const String & key_,
+        size_t range_step_,
+        size_t object_size_,
+        UInt64 s3_max_single_read_retries_,
+        const ReadSettings & read_settings_)
+        : client_ptr(client_ptr_)
+        , bucket(bucket_)
+        , key(key_)
+        , read_settings(read_settings_)
+        , range_generator(object_size_, range_step_)
+        , range_step(range_step_)
+        , object_size(object_size_)
+        , s3_max_single_read_retries(s3_max_single_read_retries_)
+    {
+        assert(range_step > 0);
+        assert(range_step < object_size);
+    }
+
+    SeekableReadBufferPtr getReader() override;
+
+    off_t seek(off_t off, [[maybe_unused]] int whence) override;
+
+    std::optional<size_t> getTotalSize() override;
+
+private:
+    std::shared_ptr<Aws::S3::S3Client> client_ptr;
+    const String bucket;
+    const String key;
+    ReadSettings read_settings;
+
+    RangeGenerator range_generator;
+    size_t range_step;
+    size_t object_size;
+
+    UInt64 s3_max_single_read_retries;
+};
+
 }

 #endif
@@ -15,4 +15,11 @@ public:

     explicit ReadBufferFromString(std::string_view s) : ReadBufferFromMemory(s.data(), s.size()) {}
 };

+
+class ReadBufferFromOwnString : public String, public ReadBufferFromString
+{
+public:
+    explicit ReadBufferFromOwnString(const String & s_): String(s_), ReadBufferFromString(*this) {}
+};
+
 }
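
Note (illustrative, not part of the commit): ReadBufferFromOwnString stores a copy of the text by inheriting from String, so the buffer stays valid even after the original string goes out of scope, which is exactly what the new read_buffer test below exercises. A simplified sketch with standard-library types only:

#include <memory>
#include <string>

struct OwnStringBuffer : std::string
{
    const char * pos;
    // The base-class copy is fully constructed before pos is initialized, so data() is valid here.
    explicit OwnStringBuffer(const std::string & s) : std::string(s), pos(data()) {}
};

int main()
{
    std::shared_ptr<OwnStringBuffer> in;
    {
        std::string s = "-123456 123.456";
        in = std::make_shared<OwnStringBuffer>(s);
    } // s is destroyed here, but `in` still owns its own copy of the bytes
    return *in->pos == '-' ? 0 : 1;
}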
@@ -1,6 +1,7 @@
 #pragma once

 #include <functional>
+#include <Common/RangeGenerator.h>
 #include <IO/ConnectionTimeouts.h>
 #include <IO/HTTPCommon.h>
 #include <IO/ParallelReadBuffer.h>
@@ -635,43 +636,6 @@ public:
     void buildNewSession(const Poco::URI & uri) override { session = makeHTTPSession(uri, timeouts); }
 };

-class RangeGenerator
-{
-public:
-    explicit RangeGenerator(size_t total_size_, size_t range_step_, size_t range_start = 0)
-        : from(range_start), range_step(range_step_), total_size(total_size_)
-    {
-    }
-
-    size_t totalRanges() const { return static_cast<size_t>(round(static_cast<float>(total_size - from) / range_step)); }
-
-    using Range = std::pair<size_t, size_t>;
-
-    // return upper exclusive range of values, i.e. [from_range, to_range>
-    std::optional<Range> nextRange()
-    {
-        if (from >= total_size)
-        {
-            return std::nullopt;
-        }
-
-        auto to = from + range_step;
-        if (to >= total_size)
-        {
-            to = total_size;
-        }
-
-        Range range{from, to};
-        from = to;
-        return range;
-    }
-
-private:
-    size_t from;
-    size_t range_step;
-    size_t total_size;
-};
-
 class ReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase<std::shared_ptr<UpdatableSession>>
 {
     using Parent = detail::ReadWriteBufferFromHTTPBase<std::shared_ptr<UpdatableSession>>;
@@ -24,6 +24,7 @@
 # include <aws/core/utils/UUID.h>
 # include <aws/core/http/HttpClientFactory.h>
 # include <aws/s3/S3Client.h>
+# include <aws/s3/model/HeadObjectRequest.h> // Y_IGNORE

 # include <IO/S3/PocoHTTPClientFactory.h>
 # include <IO/S3/PocoHTTPClient.h>
@@ -682,6 +683,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int BAD_ARGUMENTS;
+    extern const int S3_ERROR;
 }

 namespace S3
@@ -839,6 +841,26 @@ namespace S3
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket name length is out of bounds in virtual hosted style S3 URI: {}{}",
                 quoteString(bucket), !uri.empty() ? " (" + uri.toString() + ")" : "");
     }
+
+    size_t getObjectSize(std::shared_ptr<Aws::S3::S3Client> client_ptr, const String & bucket, const String & key, bool throw_on_error)
+    {
+        Aws::S3::Model::HeadObjectRequest req;
+        req.SetBucket(bucket);
+        req.SetKey(key);
+
+        Aws::S3::Model::HeadObjectOutcome outcome = client_ptr->HeadObject(req);
+
+        if (outcome.IsSuccess())
+        {
+            auto read_result = outcome.GetResultWithOwnership();
+            return static_cast<size_t>(read_result.GetContentLength());
+        }
+        else if (throw_on_error)
+        {
+            throw DB::Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
+        }
+        return 0;
+    }
 }

 }
@@ -75,6 +75,8 @@ struct URI
     static void validateBucket(const String & bucket, const Poco::URI & uri);
 };

+size_t getObjectSize(std::shared_ptr<Aws::S3::S3Client> client_ptr, const String & bucket, const String & key, bool throw_on_error = true);
+
 }

 #endif
@@ -2,18 +2,15 @@

 #include <iostream>

-#include <base/types.h>
-#include <IO/ReadHelpers.h>
+#include <IO/ReadBuffer.h>
 #include <IO/ReadBufferFromString.h>
+#include <IO/ReadHelpers.h>
+#include <base/types.h>

-int main(int, char **)
+int readAndPrint(DB::ReadBuffer & in)
 {
     try
     {
-        std::string s = "-123456 123.456 вася пе\\tтя\t'\\'xyz\\\\'";
-        DB::ReadBufferFromString in(s);
-
         DB::Int64 a;
         DB::Float64 b;
         DB::String c, d;
@@ -31,12 +28,32 @@ int main(int, char **)

         std::cout << a << ' ' << b << ' ' << c << '\t' << '\'' << d << '\'' << std::endl;
         std::cout << in.count() << std::endl;
+        return 0;
     }
     catch (const DB::Exception & e)
     {
         std::cerr << e.what() << ", " << e.displayText() << std::endl;
         return 1;
     }
+}
+
+int main(int, char **)
+{
+    {
+        std::string s = "-123456 123.456 вася пе\\tтя\t'\\'xyz\\\\'";
+        DB::ReadBufferFromString in(s);
+        if (readAndPrint(in))
+            std::cout << "readAndPrint from ReadBufferFromString failed" << std::endl;
+    }
+
+
+    std::shared_ptr<DB::ReadBufferFromOwnString> in;
+    {
+        std::string s = "-123456 123.456 вася пе\\tтя\t'\\'xyz\\\\'";
+        in = std::make_shared<DB::ReadBufferFromOwnString>(s);
+    }
+    if (readAndPrint(*in))
+        std::cout << "readAndPrint from ReadBufferFromOwnString failed" << std::endl;
+
     return 0;
 }
@@ -12,6 +12,7 @@
 #include <Storages/MarkCache.h>
 #include <Storages/StorageMergeTree.h>
 #include <Storages/StorageReplicatedMergeTree.h>
+#include <Storages/MergeTree/MergeTreeMetadataCache.h>
 #include <IO/UncompressedCache.h>
 #include <IO/MMappedFileCache.h>
 #include <IO/ReadHelpers.h>
@@ -607,6 +608,15 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_time)
         }
     }

+#if USE_ROCKSDB
+    {
+        if (auto metadata_cache = getContext()->tryGetMergeTreeMetadataCache())
+        {
+            new_values["MergeTreeMetadataCacheSize"] = metadata_cache->getEstimateNumKeys();
+        }
+    }
+#endif
+
 #if USE_EMBEDDED_COMPILER
     {
         if (auto * compiled_expression_cache = CompiledExpressionCacheFactory::instance().tryGetCache())
@@ -617,6 +627,7 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_time)
     }
 #endif

+
     new_values["Uptime"] = getContext()->getUptimeSeconds();

     /// Process process memory usage according to OS
@@ -169,6 +169,7 @@ public:
         if (columns.size() != float_features_count + cat_features_count)
             throw Exception(ErrorCodes::BAD_ARGUMENTS,
                 "Number of columns is different with number of features: columns size {} float features size {} + cat features size {}",
+                columns.size(),
                 float_features_count,
                 cat_features_count);

@@ -85,11 +85,15 @@
 #include <Storages/MergeTree/BackgroundJobsAssignee.h>
 #include <Storages/MergeTree/MergeTreeBackgroundExecutor.h>
 #include <Storages/MergeTree/MergeTreeDataPartUUID.h>
+#include <Storages/MergeTree/MergeTreeMetadataCache.h>
 #include <Interpreters/SynonymsExtensions.h>
 #include <Interpreters/Lemmatizers.h>
 #include <Interpreters/ClusterDiscovery.h>
 #include <filesystem>

+#if USE_ROCKSDB
+#include <rocksdb/table.h>
+#endif
+
 namespace fs = std::filesystem;

@@ -276,6 +280,11 @@ struct ContextSharedPart

     Context::ConfigReloadCallback config_reload_callback;

+#if USE_ROCKSDB
+    /// Global merge tree metadata cache, stored in rocksdb.
+    MergeTreeMetadataCachePtr merge_tree_metadata_cache;
+#endif
+
     ContextSharedPart()
         : access_control(std::make_unique<AccessControl>())
         , global_overcommit_tracker(&process_list)
@@ -410,6 +419,15 @@ struct ContextSharedPart
         trace_collector.reset();
         /// Stop zookeeper connection
         zookeeper.reset();
+
+#if USE_ROCKSDB
+        /// Shutdown merge tree metadata cache
+        if (merge_tree_metadata_cache)
+        {
+            merge_tree_metadata_cache->shutdown();
+            merge_tree_metadata_cache.reset();
+        }
+#endif
     }

     /// Can be removed w/o context lock
@@ -2048,6 +2066,23 @@ zkutil::ZooKeeperPtr Context::getAuxiliaryZooKeeper(const String & name) const
     return zookeeper->second;
 }

+#if USE_ROCKSDB
+MergeTreeMetadataCachePtr Context::getMergeTreeMetadataCache() const
+{
+    auto cache = tryGetMergeTreeMetadataCache();
+    if (!cache)
+        throw Exception(
+            ErrorCodes::LOGICAL_ERROR,
+            "Merge tree metadata cache is not initialized, please add config merge_tree_metadata_cache in config.xml and restart");
+    return cache;
+}
+
+MergeTreeMetadataCachePtr Context::tryGetMergeTreeMetadataCache() const
+{
+    return shared->merge_tree_metadata_cache;
+}
+#endif
+
 void Context::resetZooKeeper() const
 {
     std::lock_guard lock(shared->zookeeper_mutex);
@@ -2291,6 +2326,13 @@ void Context::initializeTraceCollector()
     shared->initializeTraceCollector(getTraceLog());
 }

+#if USE_ROCKSDB
+void Context::initializeMergeTreeMetadataCache(const String & dir, size_t size)
+{
+    shared->merge_tree_metadata_cache = MergeTreeMetadataCache::create(dir, size);
+}
+#endif
+
 bool Context::hasTraceCollector() const
 {
     return shared->hasTraceCollector();
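
Note (illustrative, not part of the commit): the three new Context methods are meant to be used together, the cache has to be created from configuration before the throwing getter may be called. A rough usage sketch against the declarations added in this commit; the path and size are hypothetical example values and this only compiles inside the ClickHouse tree:

#include <Interpreters/Context.h>

#if USE_ROCKSDB
static void initMetadataCacheSketch(DB::ContextMutablePtr context)
{
    /// Directory and size below are made-up illustration values, not defaults from the commit.
    context->initializeMergeTreeMetadataCache("/var/lib/clickhouse/rocksdb", 256 * 1024 * 1024);

    auto cache = context->getMergeTreeMetadataCache();       /// throws LOGICAL_ERROR if not initialized
    auto maybe = context->tryGetMergeTreeMetadataCache();     /// returns nullptr instead of throwing
    (void) cache;
    (void) maybe;
}
#endif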
@@ -16,6 +16,7 @@
 #include <base/types.h>
 #include <Storages/MergeTree/ParallelReplicasReadingCoordinator.h>

+
 #include "config_core.h"

 #include <boost/container/flat_set.hpp>
@@ -152,6 +153,12 @@ using ReadTaskCallback = std::function<String()>;

 using MergeTreeReadTaskCallback = std::function<std::optional<PartitionReadResponse>(PartitionReadRequest)>;

+
+#if USE_ROCKSDB
+class MergeTreeMetadataCache;
+using MergeTreeMetadataCachePtr = std::shared_ptr<MergeTreeMetadataCache>;
+#endif
+
 /// An empty interface for an arbitrary object that may be attached by a shared pointer
 /// to query context, when using ClickHouse as a library.
 struct IHostContext
@@ -179,6 +186,7 @@ private:
     std::unique_ptr<ContextSharedPart> shared;
 };

+
 /** A set of known objects that can be used in the query.
   * Consists of a shared part (always common to all sessions and queries)
   * and copied part (which can be its own for each session or query).
@@ -680,6 +688,11 @@ public:

     UInt32 getZooKeeperSessionUptime() const;

+#if USE_ROCKSDB
+    MergeTreeMetadataCachePtr getMergeTreeMetadataCache() const;
+    MergeTreeMetadataCachePtr tryGetMergeTreeMetadataCache() const;
+#endif
+
 #if USE_NURAFT
     std::shared_ptr<KeeperDispatcher> & getKeeperDispatcher() const;
 #endif
@@ -769,6 +782,10 @@ public:
     /// Call after initialization before using trace collector.
     void initializeTraceCollector();

+#if USE_ROCKSDB
+    void initializeMergeTreeMetadataCache(const String & dir, size_t size);
+#endif
+
     bool hasTraceCollector() const;

     /// Nullptr if the query log is not ready for this moment.
@@ -233,7 +233,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
     {
         assert(!db_and_table.first && !db_and_table.second);
         if (exception)
-            exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs());
+            exception->emplace(fmt::format("Table {} doesn't exist", table_id.getNameForLogs()), ErrorCodes::UNKNOWN_TABLE);
         return {};
     }

@@ -263,7 +263,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
         /// If table_id has no UUID, then the name of database was specified by user and table_id was not resolved through context.
         /// Do not allow access to TEMPORARY_DATABASE because it contains all temporary tables of all contexts and users.
         if (exception)
-            exception->emplace(ErrorCodes::DATABASE_ACCESS_DENIED, "Direct access to `{}` database is not allowed", String(TEMPORARY_DATABASE));
+            exception->emplace(fmt::format("Direct access to `{}` database is not allowed", TEMPORARY_DATABASE), ErrorCodes::DATABASE_ACCESS_DENIED);
         return {};
     }

@@ -274,7 +274,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
         if (databases.end() == it)
         {
             if (exception)
-                exception->emplace(ErrorCodes::UNKNOWN_DATABASE, "Database {} doesn't exist", backQuoteIfNeed(table_id.getDatabaseName()));
+                exception->emplace(fmt::format("Database {} doesn't exist", backQuoteIfNeed(table_id.getDatabaseName())), ErrorCodes::UNKNOWN_DATABASE);
             return {};
         }
         database = it->second;
@@ -282,7 +282,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(

     auto table = database->tryGetTable(table_id.table_name, context_);
     if (!table && exception)
-        exception->emplace(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs());
+        exception->emplace(fmt::format("Table {} doesn't exist", table_id.getNameForLogs()), ErrorCodes::UNKNOWN_TABLE);
     if (!table)
         database = nullptr;

@@ -358,6 +358,7 @@ BlockIO InterpreterInsertQuery::execute()

         auto new_context = Context::createCopy(context);
         new_context->setSettings(new_settings);
+        new_context->setInsertionTable(getContext()->getInsertionTable());

         InterpreterSelectWithUnionQuery interpreter_select{
             query.select, new_context, SelectQueryOptions(QueryProcessingStage::Complete, 1)};
@@ -320,12 +320,13 @@ Chunk DDLQueryStatusSource::generate()
                 if (throw_on_timeout)
                 {
                     if (!first_exception)
-                        first_exception = std::make_unique<Exception>(ErrorCodes::TIMEOUT_EXCEEDED, msg_format,
-                            node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts);
+                        first_exception = std::make_unique<Exception>(
+                            fmt::format(msg_format, node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts),
+                            ErrorCodes::TIMEOUT_EXCEEDED);
                     return {};
                 }

-                LOG_INFO(log, fmt::runtime(msg_format), node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts);
+                LOG_INFO(log, msg_format, node_path, timeout_seconds, num_unfinished_hosts, num_active_hosts);

                 NameSet unfinished_hosts = waiting_hosts;
                 for (const auto & host_id : finished_hosts)
@@ -358,9 +359,12 @@ Chunk DDLQueryStatusSource::generate()
                 /// Paradoxically, this exception will be throw even in case of "never_throw" mode.

                 if (!first_exception)
-                    first_exception = std::make_unique<Exception>(ErrorCodes::UNFINISHED,
-                        "Cannot provide query execution status. The query's node {} has been deleted by the cleaner"
-                        " since it was finished (or its lifetime is expired)", node_path);
+                    first_exception = std::make_unique<Exception>(
+                        fmt::format(
+                            "Cannot provide query execution status. The query's node {} has been deleted by the cleaner"
+                            " since it was finished (or its lifetime is expired)",
+                            node_path),
+                        ErrorCodes::UNFINISHED);
                 return {};
             }

@@ -386,7 +390,8 @@ Chunk DDLQueryStatusSource::generate()
             if (status.code != 0 && !first_exception
                 && context->getSettingsRef().distributed_ddl_output_mode != DistributedDDLOutputMode::NEVER_THROW)
             {
-                first_exception = std::make_unique<Exception>(status.code, "There was an error on [{}:{}]: {}", host, port, status.message);
+                first_exception = std::make_unique<Exception>(
+                    fmt::format("There was an error on [{}:{}]: {}", host, port, status.message), status.code);
             }

             ++num_hosts_finished;
@@ -657,6 +657,14 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
             limits.size_limits = SizeLimits(settings.max_result_rows, settings.max_result_bytes, settings.result_overflow_mode);
         }

+        if (const auto * insert_interpreter = typeid_cast<const InterpreterInsertQuery *>(&*interpreter))
+        {
+            /// Save insertion table (not table function). TODO: support remote() table function.
+            auto table_id = insert_interpreter->getDatabaseTable();
+            if (!table_id.empty())
+                context->setInsertionTable(std::move(table_id));
+        }
+
         {
             std::unique_ptr<OpenTelemetrySpanHolder> span;
             if (context->query_trace_context.trace_id != UUID())
@@ -667,14 +675,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
             }
             res = interpreter->execute();
         }
-
-        if (const auto * insert_interpreter = typeid_cast<const InterpreterInsertQuery *>(&*interpreter))
-        {
-            /// Save insertion table (not table function). TODO: support remote() table function.
-            auto table_id = insert_interpreter->getDatabaseTable();
-            if (!table_id.empty())
-                context->setInsertionTable(std::move(table_id));
-        }
         }

         if (process_list_entry)
@@ -18,6 +18,10 @@ public:

     virtual NamesAndTypesList readSchema() = 0;

+    /// True if order of columns is important in format.
+    /// Exceptions: JSON, TSKV.
+    virtual bool hasStrictOrderOfColumns() const { return true; }
+
     virtual ~ISchemaReader() = default;

 protected:
@@ -60,6 +64,7 @@ class IRowWithNamesSchemaReader : public ISchemaReader
 public:
     IRowWithNamesSchemaReader(ReadBuffer & in_, size_t max_rows_to_read_, DataTypePtr default_type_ = nullptr);
     NamesAndTypesList readSchema() override;
+    bool hasStrictOrderOfColumns() const override { return false; }

 protected:
     /// Read one row and determine types of columns in it.
@@ -359,7 +359,7 @@ bool MsgPackVisitor::visit_ext(const char * value, uint32_t size)
         return true;
     }

-    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported MsgPack extension type: {%x}", type);
+    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported MsgPack extension type: {:x}", type);
 }

 void MsgPackVisitor::parse_error(size_t, size_t) // NOLINT
@@ -498,7 +498,7 @@ DataTypePtr MsgPackSchemaReader::getDataType(const msgpack::object & object)
             msgpack::object_ext object_ext = object.via.ext;
             if (object_ext.type() == int8_t(MsgPackExtensionTypes::UUIDType))
                 return std::make_shared<DataTypeUUID>();
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Msgpack extension type {%x} is not supported", object_ext.type());
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Msgpack extension type {:x} is not supported", object_ext.type());
         }
     }
     __builtin_unreachable();
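
Note (illustrative, not part of the commit): the two fixes above correct the replacement-field syntax; fmt-style format strings put presentation specs after a colon, so {:x} prints the value as hex, while {%x} is not a valid field and is rejected by fmt. A tiny standalone check against the fmt library:

#include <fmt/core.h>
#include <cstdio>
#include <string>

int main()
{
    std::string s = fmt::format("type: {:x}", 255); // "type: ff"
    std::printf("%s\n", s.c_str());
    return 0;
}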
@@ -45,7 +45,8 @@ namespace ErrorCodes
    extern const int LOGICAL_ERROR;
}

-static MergeTreeReaderSettings getMergeTreeReaderSettings(const ContextPtr & context)
+static MergeTreeReaderSettings getMergeTreeReaderSettings(
+    const ContextPtr & context, const SelectQueryInfo & query_info)
{
    const auto & settings = context->getSettingsRef();
    return
@@ -53,6 +54,7 @@ static MergeTreeReaderSettings getMergeTreeReaderSettings(const ContextPtr & con
        .read_settings = context->getReadSettings(),
        .save_marks_in_cache = true,
        .checksum_on_read = settings.checksum_on_read,
+        .read_in_order = query_info.input_order_info != nullptr,
    };
}

@@ -82,7 +84,7 @@ ReadFromMergeTree::ReadFromMergeTree(
        getPrewhereInfo(query_info_),
        data_.getPartitionValueType(),
        virt_column_names_)})
-    , reader_settings(getMergeTreeReaderSettings(context_))
+    , reader_settings(getMergeTreeReaderSettings(context_, query_info_))
    , prepared_parts(std::move(parts_))
    , real_column_names(std::move(real_column_names_))
    , virt_column_names(std::move(virt_column_names_))
@@ -206,6 +208,7 @@ ProcessorPtr ReadFromMergeTree::createSource(
            .colums_to_read = required_columns
        };
    }

    return std::make_shared<TSource>(
            data, storage_snapshot, part.data_part, max_block_size, preferred_block_size_bytes,
            preferred_max_column_in_block_size_bytes, required_columns, part.ranges, use_uncompressed_cache, prewhere_info,
@@ -921,7 +924,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
            total_marks_pk += part->index_granularity.getMarksCountWithoutFinal();
        parts_before_pk = parts.size();

-        auto reader_settings = getMergeTreeReaderSettings(context);
+        auto reader_settings = getMergeTreeReaderSettings(context, query_info);

        bool use_skip_indexes = settings.use_skip_indexes;
        if (select.final() && !settings.use_skip_indexes_if_final)

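Note (illustration, not part of the commit): with this change the reader settings carry whether the query reads in the order of the sorting key, derived from query_info.input_order_info. A hedged sketch of the designated-initializer pattern used above; every name except read_in_order is a placeholder, not the real MergeTreeReaderSettings definition:

    // Sketch only, assuming a plain aggregate.
    struct ReaderSettingsSketch
    {
        bool save_marks_in_cache = false;
        bool checksum_on_read = true;
        bool read_in_order = false;
    };

    ReaderSettingsSketch makeReaderSettings(bool has_input_order_info)
    {
        return
        {
            .save_marks_in_cache = true,
            .checksum_on_read = true,
            .read_in_order = has_input_order_info, // mirrors query_info.input_order_info != nullptr
        };
    }
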
@@ -1,10 +1,13 @@
+// Needs to go first because its partial specialization of fmt::formatter
+// should be defined before any instantiation
+#include <fmt/ostream.h>
+
#include <Storages/Kafka/ReadBufferFromKafkaConsumer.h>

#include <base/logger_useful.h>

#include <cppkafka/cppkafka.h>
#include <boost/algorithm/string/join.hpp>
-#include <fmt/ostream.h>
#include <algorithm>

namespace DB

@@ -1,9 +1,11 @@
#include "IMergeTreeDataPart.h"

#include <optional>
+#include <boost/algorithm/string/join.hpp>
#include <string_view>
#include <Core/Defines.h>
#include <IO/HashingWriteBuffer.h>
+#include <IO/HashingReadBuffer.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
@@ -11,6 +13,8 @@
#include <Storages/MergeTree/localBackup.h>
#include <Storages/MergeTree/checkDataPart.h>
#include <Storages/StorageReplicatedMergeTree.h>
+#include <Storages/MergeTree/PartMetadataManagerOrdinary.h>
+#include <Storages/MergeTree/PartMetadataManagerWithCache.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/escapeForFileName.h>
#include <Common/ZooKeeper/ZooKeeper.h>
@@ -61,13 +65,7 @@ namespace ErrorCodes
    extern const int NOT_IMPLEMENTED;
}

-static std::unique_ptr<ReadBufferFromFileBase> openForReading(const DiskPtr & disk, const String & path)
-{
-    size_t file_size = disk->getFileSize(path);
-    return disk->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size);
-}
-
-void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const DiskPtr & disk_, const String & part_path)
+void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const PartMetadataManagerPtr & manager)
{
    auto metadata_snapshot = data.getInMemoryMetadataPtr();
    const auto & partition_key = metadata_snapshot->getPartitionKey();
@@ -79,8 +77,8 @@ void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const Dis
    hyperrectangle.reserve(minmax_idx_size);
    for (size_t i = 0; i < minmax_idx_size; ++i)
    {
-        String file_name = fs::path(part_path) / ("minmax_" + escapeForFileName(minmax_column_names[i]) + ".idx");
-        auto file = openForReading(disk_, file_name);
+        String file_name = "minmax_" + escapeForFileName(minmax_column_names[i]) + ".idx";
+        auto file = manager->read(file_name);
        auto serialization = minmax_column_types[i]->getDefaultSerialization();

        Field min_val;
@@ -192,6 +190,19 @@ void IMergeTreeDataPart::MinMaxIndex::merge(const MinMaxIndex & other)
    }
}

+void IMergeTreeDataPart::MinMaxIndex::appendFiles(const MergeTreeData & data, Strings & files)
+{
+    auto metadata_snapshot = data.getInMemoryMetadataPtr();
+    const auto & partition_key = metadata_snapshot->getPartitionKey();
+    auto minmax_column_names = data.getMinMaxColumnsNames(partition_key);
+    size_t minmax_idx_size = minmax_column_names.size();
+    for (size_t i = 0; i < minmax_idx_size; ++i)
+    {
+        String file_name = "minmax_" + escapeForFileName(minmax_column_names[i]) + ".idx";
+        files.push_back(file_name);
+    }
+}
+

static void incrementStateMetric(IMergeTreeDataPart::State state)
{
@@ -299,6 +310,7 @@ IMergeTreeDataPart::IMergeTreeDataPart(
    , index_granularity_info(storage_, part_type_)
    , part_type(part_type_)
    , parent_part(parent_part_)
+    , use_metadata_cache(storage.use_metadata_cache)
{
    if (parent_part)
        state = State::Active;
@@ -306,6 +318,8 @@ IMergeTreeDataPart::IMergeTreeDataPart(
    incrementTypeMetric(part_type);

    minmax_idx = std::make_shared<MinMaxIndex>();
+
+    initializePartMetadataManager();
}

IMergeTreeDataPart::IMergeTreeDataPart(
@@ -324,6 +338,7 @@ IMergeTreeDataPart::IMergeTreeDataPart(
    , index_granularity_info(storage_, part_type_)
    , part_type(part_type_)
    , parent_part(parent_part_)
+    , use_metadata_cache(storage.use_metadata_cache)
{
    if (parent_part)
        state = State::Active;
@@ -331,6 +346,8 @@ IMergeTreeDataPart::IMergeTreeDataPart(
    incrementTypeMetric(part_type);

    minmax_idx = std::make_shared<MinMaxIndex>();
+
+    initializePartMetadataManager();
}

IMergeTreeDataPart::~IMergeTreeDataPart()
@@ -558,9 +575,10 @@ size_t IMergeTreeDataPart::getFileSizeOrZero(const String & file_name) const
    return checksum->second.file_size;
}

-String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize(const StorageMetadataPtr & metadata_snapshot) const
+String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize(const StorageSnapshotPtr & storage_snapshot) const
{
-    const auto & storage_columns = metadata_snapshot->getColumns().getAllPhysical();
+    auto options = GetColumnsOptions(GetColumnsOptions::AllPhysical).withExtendedObjects().withSubcolumns();
+    auto storage_columns = storage_snapshot->getColumns(options);
    MergeTreeData::AlterConversions alter_conversions;
    if (!parent_part)
        alter_conversions = storage.getAlterConversionsForPart(shared_from_this());
@@ -637,6 +655,33 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks
        loadDefaultCompressionCodec();
}

+void IMergeTreeDataPart::appendFilesOfColumnsChecksumsIndexes(Strings & files, bool include_projection) const
+{
+    if (isStoredOnDisk())
+    {
+        appendFilesOfUUID(files);
+        appendFilesOfColumns(files);
+        appendFilesOfChecksums(files);
+        appendFilesOfIndexGranularity(files);
+        appendFilesOfIndex(files);
+        appendFilesOfRowsCount(files);
+        appendFilesOfPartitionAndMinMaxIndex(files);
+        appendFilesOfTTLInfos(files);
+        appendFilesOfDefaultCompressionCodec(files);
+    }
+
+    if (!parent_part && include_projection)
+    {
+        for (const auto & [projection_name, projection_part] : projection_parts)
+        {
+            Strings projection_files;
+            projection_part->appendFilesOfColumnsChecksumsIndexes(projection_files, true);
+            for (const auto & projection_file : projection_files)
+                files.push_back(fs::path(projection_part->relative_path) / projection_file);
+        }
+    }
+}
+
void IMergeTreeDataPart::loadProjections(bool require_columns_checksums, bool check_consistency)
{
    auto metadata_snapshot = storage.getInMemoryMetadataPtr();
@@ -657,6 +702,11 @@ void IMergeTreeDataPart::loadIndexGranularity()
    throw Exception("Method 'loadIndexGranularity' is not implemented for part with type " + getType().toString(), ErrorCodes::NOT_IMPLEMENTED);
}

+/// Currently we don't cache mark files of part, because cache other meta files is enough to speed up loading.
+void IMergeTreeDataPart::appendFilesOfIndexGranularity(Strings & /* files */) const
+{
+}
+
void IMergeTreeDataPart::loadIndex()
{
    /// It can be empty in case of mutations
@@ -680,9 +730,9 @@ void IMergeTreeDataPart::loadIndex()
            loaded_index[i]->reserve(index_granularity.getMarksCount());
        }

-        String index_path = fs::path(getFullRelativePath()) / "primary.idx";
-        auto index_file = openForReading(volume->getDisk(), index_path);
+        String index_name = "primary.idx";
+        String index_path = fs::path(getFullRelativePath()) / index_name;
+        auto index_file = metadata_manager->read(index_name);
        size_t marks_count = index_granularity.getMarksCount();

        Serializations key_serializations(key_size);
@@ -709,6 +759,19 @@ void IMergeTreeDataPart::loadIndex()
    }
}

+void IMergeTreeDataPart::appendFilesOfIndex(Strings & files) const
+{
+    auto metadata_snapshot = storage.getInMemoryMetadataPtr();
+    if (parent_part)
+        metadata_snapshot = metadata_snapshot->projections.has(name) ? metadata_snapshot->projections.get(name).metadata : nullptr;
+
+    if (!metadata_snapshot)
+        return;
+
+    if (metadata_snapshot->hasPrimaryKey())
+        files.push_back("primary.idx");
+}
+
NameSet IMergeTreeDataPart::getFileNamesWithoutChecksums() const
{
    if (!isStoredOnDisk())
@@ -733,14 +796,14 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec()
    }

    String path = fs::path(getFullRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME;
-    if (!volume->getDisk()->exists(path))
+    bool exists = metadata_manager->exists(DEFAULT_COMPRESSION_CODEC_FILE_NAME);
+    if (!exists)
    {
        default_codec = detectDefaultCompressionCodec();
    }
    else
    {
-        auto file_buf = openForReading(volume->getDisk(), path);
+        auto file_buf = metadata_manager->read(DEFAULT_COMPRESSION_CODEC_FILE_NAME);
        String codec_line;
        readEscapedStringUntilEOL(codec_line, *file_buf);

@@ -748,7 +811,13 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec()

        if (!checkString("CODEC", buf))
        {
-            LOG_WARNING(storage.log, "Cannot parse default codec for part {} from file {}, content '{}'. Default compression codec will be deduced automatically, from data on disk", name, path, codec_line);
+            LOG_WARNING(
+                storage.log,
+                "Cannot parse default codec for part {} from file {}, content '{}'. Default compression codec will be deduced "
+                "automatically, from data on disk",
+                name,
+                path,
+                codec_line);
            default_codec = detectDefaultCompressionCodec();
        }

@@ -766,6 +835,11 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec()
    }
}

+void IMergeTreeDataPart::appendFilesOfDefaultCompressionCodec(Strings & files)
+{
+    files.push_back(DEFAULT_COMPRESSION_CODEC_FILE_NAME);
+}
+
CompressionCodecPtr IMergeTreeDataPart::detectDefaultCompressionCodec() const
{
    /// In memory parts doesn't have any compression
@@ -828,7 +902,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex()
{
    String path = getFullRelativePath();
    if (!parent_part)
-        partition.load(storage, volume->getDisk(), path);
+        partition.load(storage, metadata_manager);

    if (!isEmpty())
    {
@@ -836,7 +910,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex()
            // projection parts don't have minmax_idx, and it's always initialized
            minmax_idx->initialized = true;
        else
-            minmax_idx->load(storage, volume->getDisk(), path);
+            minmax_idx->load(storage, metadata_manager);
    }
    if (parent_part)
        return;
@@ -851,13 +925,26 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex()
            ErrorCodes::CORRUPTED_DATA);
}

+void IMergeTreeDataPart::appendFilesOfPartitionAndMinMaxIndex(Strings & files) const
+{
+    if (storage.format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING && !parent_part)
+        return;
+
+    if (!parent_part)
+        partition.appendFiles(storage, files);
+
+    if (!isEmpty())
+        if (!parent_part)
+            minmax_idx->appendFiles(storage, files);
+}
+
void IMergeTreeDataPart::loadChecksums(bool require)
{
    const String path = fs::path(getFullRelativePath()) / "checksums.txt";
-    if (volume->getDisk()->exists(path))
+    bool exists = metadata_manager->exists("checksums.txt");
+    if (exists)
    {
-        auto buf = openForReading(volume->getDisk(), path);
+        auto buf = metadata_manager->read("checksums.txt");
        if (checksums.read(*buf))
        {
            assertEOF(*buf);
@@ -888,13 +975,18 @@ void IMergeTreeDataPart::loadChecksums(bool require)
    }
}

+void IMergeTreeDataPart::appendFilesOfChecksums(Strings & files)
+{
+    files.push_back("checksums.txt");
+}
+
void IMergeTreeDataPart::loadRowsCount()
{
    String path = fs::path(getFullRelativePath()) / "count.txt";

    auto read_rows_count = [&]()
    {
-        auto buf = openForReading(volume->getDisk(), path);
+        auto buf = metadata_manager->read("count.txt");
        readIntText(rows_count, *buf);
        assertEOF(*buf);
    };
@@ -905,7 +997,8 @@ void IMergeTreeDataPart::loadRowsCount()
    }
    else if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING || part_type == Type::COMPACT || parent_part)
    {
-        if (!volume->getDisk()->exists(path))
+        bool exists = metadata_manager->exists("count.txt");
+        if (!exists)
            throw Exception("No count.txt in part " + name, ErrorCodes::NO_FILE_IN_DATA_PART);

        read_rows_count();
@@ -1003,12 +1096,17 @@ void IMergeTreeDataPart::loadRowsCount()
        }
    }
}

+void IMergeTreeDataPart::appendFilesOfRowsCount(Strings & files)
+{
+    files.push_back("count.txt");
+}
+
void IMergeTreeDataPart::loadTTLInfos()
{
-    String path = fs::path(getFullRelativePath()) / "ttl.txt";
-    if (volume->getDisk()->exists(path))
+    bool exists = metadata_manager->exists("ttl.txt");
+    if (exists)
    {
-        auto in = openForReading(volume->getDisk(), path);
+        auto in = metadata_manager->read("ttl.txt");
        assertString("ttl format version: ", *in);
        size_t format_version;
        readText(format_version, *in);
@@ -1030,19 +1128,29 @@ void IMergeTreeDataPart::loadTTLInfos()
        }
    }
}

+void IMergeTreeDataPart::appendFilesOfTTLInfos(Strings & files)
+{
+    files.push_back("ttl.txt");
+}
+
void IMergeTreeDataPart::loadUUID()
{
-    String path = fs::path(getFullRelativePath()) / UUID_FILE_NAME;
-    if (volume->getDisk()->exists(path))
+    bool exists = metadata_manager->exists(UUID_FILE_NAME);
+    if (exists)
    {
-        auto in = openForReading(volume->getDisk(), path);
+        auto in = metadata_manager->read(UUID_FILE_NAME);
        readText(uuid, *in);
        if (uuid == UUIDHelpers::Nil)
            throw Exception("Unexpected empty " + String(UUID_FILE_NAME) + " in part: " + name, ErrorCodes::LOGICAL_ERROR);
    }
}

+void IMergeTreeDataPart::appendFilesOfUUID(Strings & files)
+{
+    files.push_back(UUID_FILE_NAME);
+}
+
void IMergeTreeDataPart::loadColumns(bool require)
{
    String path = fs::path(getFullRelativePath()) / "columns.txt";
@@ -1051,7 +1159,8 @@ void IMergeTreeDataPart::loadColumns(bool require)
        metadata_snapshot = metadata_snapshot->projections.get(name).metadata;
    NamesAndTypesList loaded_columns;

-    if (!volume->getDisk()->exists(path))
+    bool exists = metadata_manager->exists("columns.txt");
+    if (!exists)
    {
        /// We can get list of columns only from columns.txt in compact parts.
        if (require || part_type == Type::COMPACT)
@@ -1074,7 +1183,8 @@ void IMergeTreeDataPart::loadColumns(bool require)
    }
    else
    {
-        loaded_columns.readText(*volume->getDisk()->readFile(path));
+        auto in = metadata_manager->read("columns.txt");
+        loaded_columns.readText(*in);

        for (const auto & column : loaded_columns)
        {
@@ -1091,14 +1201,23 @@ void IMergeTreeDataPart::loadColumns(bool require)
    };

    SerializationInfoByName infos(loaded_columns, settings);
-    path = getFullRelativePath() + SERIALIZATION_FILE_NAME;
-    if (volume->getDisk()->exists(path))
-        infos.readJSON(*volume->getDisk()->readFile(path));
+    exists = metadata_manager->exists(SERIALIZATION_FILE_NAME);
+    if (exists)
+    {
+        auto in = metadata_manager->read(SERIALIZATION_FILE_NAME);
+        infos.readJSON(*in);
+    }

    setColumns(loaded_columns);
    setSerializationInfos(infos);
}

+void IMergeTreeDataPart::appendFilesOfColumns(Strings & files)
+{
+    files.push_back("columns.txt");
+    files.push_back(SERIALIZATION_FILE_NAME);
+}
+
bool IMergeTreeDataPart::shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const
{
    /// `IMergeTreeDataPart::volume` describes space where current part belongs, and holds
@@ -1150,9 +1269,12 @@ try
        }
    }

+    metadata_manager->deleteAll(true);
+    metadata_manager->assertAllDeleted(true);
    volume->getDisk()->setLastModified(from, Poco::Timestamp::fromEpochTime(time(nullptr)));
    volume->getDisk()->moveDirectory(from, to);
    relative_path = new_relative_path;
+    metadata_manager->updateAll(true);

    SyncGuardPtr sync_guard;
    if (storage.getSettings()->fsync_part_directory)
@@ -1190,6 +1312,18 @@ std::optional<bool> IMergeTreeDataPart::keepSharedDataInDecoupledStorage() const
    return !storage.unlockSharedData(*this);
}

+void IMergeTreeDataPart::initializePartMetadataManager()
+{
+#if USE_ROCKSDB
+    if (use_metadata_cache)
+        metadata_manager = std::make_shared<PartMetadataManagerWithCache>(this, storage.getContext()->getMergeTreeMetadataCache());
+    else
+        metadata_manager = std::make_shared<PartMetadataManagerOrdinary>(this);
+#else
+    metadata_manager = std::make_shared<PartMetadataManagerOrdinary>(this);
+#endif
+}
+
void IMergeTreeDataPart::remove() const
{
    std::optional<bool> keep_shared_data = keepSharedDataInDecoupledStorage();
@@ -1209,6 +1343,9 @@ void IMergeTreeDataPart::remove() const
        return;
    }

+    metadata_manager->deleteAll(false);
+    metadata_manager->assertAllDeleted(false);
+
    /** Atomic directory removal:
     * - rename directory to temporary name;
     * - remove it recursive.
@@ -1314,6 +1451,9 @@ void IMergeTreeDataPart::remove() const

void IMergeTreeDataPart::projectionRemove(const String & parent_to, bool keep_shared_data) const
{
+    metadata_manager->deleteAll(false);
+    metadata_manager->assertAllDeleted(false);
+
    String to = fs::path(parent_to) / relative_path;
    auto disk = volume->getDisk();
    if (checksums.empty())
@@ -1661,6 +1801,35 @@ String IMergeTreeDataPart::getZeroLevelPartBlockID(std::string_view token) const
    return info.partition_id + "_" + toString(hash_value.words[0]) + "_" + toString(hash_value.words[1]);
}

+IMergeTreeDataPart::uint128 IMergeTreeDataPart::getActualChecksumByFile(const String & file_path) const
+{
+    assert(use_metadata_cache);
+
+    String file_name = std::filesystem::path(file_path).filename();
+    const auto filenames_without_checksums = getFileNamesWithoutChecksums();
+    auto it = checksums.files.find(file_name);
+    if (filenames_without_checksums.count(file_name) == 0 && it != checksums.files.end())
+    {
+        return it->second.file_hash;
+    }
+
+    if (!volume->getDisk()->exists(file_path))
+    {
+        return {};
+    }
+    std::unique_ptr<ReadBufferFromFileBase> in_file = volume->getDisk()->readFile(file_path);
+    HashingReadBuffer in_hash(*in_file);
+
+    String value;
+    readStringUntilEOF(value, in_hash);
+    return in_hash.getHash();
+}
+
+std::unordered_map<String, IMergeTreeDataPart::uint128> IMergeTreeDataPart::checkMetadata() const
+{
+    return metadata_manager->check();
+}
+
bool isCompactPart(const MergeTreeDataPartPtr & data_part)
{
    return (data_part && data_part->getType() == MergeTreeDataPartType::COMPACT);

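Note (illustration, not part of the commit): checkMetadata() simply delegates to the part's metadata manager, and getActualChecksumByFile() falls back to hashing the file on disk when its checksum is not already tracked. A hedged sketch of a hypothetical caller; the helper name and logging are assumptions, while checkMetadata(), the public member name and LOG_WARNING are existing ClickHouse identifiers:

    // Hypothetical helper: logs whatever the metadata check reports for a part.
    void logMetadataCheck(const IMergeTreeDataPart & part, Poco::Logger * log)
    {
        for (const auto & entry : part.checkMetadata())
            LOG_WARNING(log, "Metadata check entry for file {} in part {}", entry.first, part.name);
    }
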
@@ -14,6 +14,7 @@
#include <Storages/MergeTree/MergeTreeIOSettings.h>
#include <Storages/MergeTree/KeyCondition.h>
#include <DataTypes/Serializations/SerializationInfo.h>
+#include <Storages/MergeTree/IPartMetadataManager.h>

#include <shared_mutex>

@@ -60,6 +61,8 @@ public:

    using Type = MergeTreeDataPartType;

+    using uint128 = IPartMetadataManager::uint128;
+
    IMergeTreeDataPart(
        const MergeTreeData & storage_,
@@ -148,6 +151,7 @@ public:
    /// Initialize columns (from columns.txt if exists, or create from column files if not).
    /// Load checksums from checksums.txt if exists. Load index if required.
    void loadColumnsChecksumsIndexes(bool require_columns_checksums, bool check_consistency);
+    void appendFilesOfColumnsChecksumsIndexes(Strings & files, bool include_projection = false) const;

    String getMarksFileExtension() const { return index_granularity_info.marks_file_extension; }

@@ -164,7 +168,7 @@ public:

    /// Returns the name of a column with minimum compressed size (as returned by getColumnSize()).
    /// If no checksums are present returns the name of the first physically existing column.
-    String getColumnNameWithMinimumCompressedSize(const StorageMetadataPtr & metadata_snapshot) const;
+    String getColumnNameWithMinimumCompressedSize(const StorageSnapshotPtr & storage_snapshot) const;

    bool contains(const IMergeTreeDataPart & other) const { return info.contains(other.info); }

@@ -243,7 +247,7 @@ public:
    using TTLInfo = MergeTreeDataPartTTLInfo;
    using TTLInfos = MergeTreeDataPartTTLInfos;

-    TTLInfos ttl_infos;
+    mutable TTLInfos ttl_infos;

    /// Current state of the part. If the part is in working set already, it should be accessed via data_parts mutex
    void setState(State new_state) const;
@@ -300,14 +304,16 @@ public:
        {
        }

+        void load(const MergeTreeData & data, const PartMetadataManagerPtr & manager);
+
        using WrittenFiles = std::vector<std::unique_ptr<WriteBufferFromFileBase>>;

-        void load(const MergeTreeData & data, const DiskPtr & disk_, const String & part_path);
        [[nodiscard]] WrittenFiles store(const MergeTreeData & data, const DiskPtr & disk_, const String & part_path, Checksums & checksums) const;
        [[nodiscard]] WrittenFiles store(const Names & column_names, const DataTypes & data_types, const DiskPtr & disk_, const String & part_path, Checksums & checksums) const;

        void update(const Block & block, const Names & column_names);
        void merge(const MinMaxIndex & other);
+        static void appendFiles(const MergeTreeData & data, Strings & files);
    };

    using MinMaxIndexPtr = std::shared_ptr<MinMaxIndex>;
@@ -429,6 +435,12 @@ public:
    /// Required for distinguish different copies of the same part on remote FS.
    String getUniqueId() const;

+    /// Get checksums of metadata file in part directory
+    IMergeTreeDataPart::uint128 getActualChecksumByFile(const String & file_path) const;
+
+    /// Check metadata in cache is consistent with actual metadata on disk(if use_metadata_cache is true)
+    std::unordered_map<String, uint128> checkMetadata() const;
+
protected:

    /// Total size of all columns, calculated once in calcuateColumnSizesOnDisk
@@ -455,6 +467,11 @@ protected:

    std::map<String, std::shared_ptr<IMergeTreeDataPart>> projection_parts;

+    /// Disabled when USE_ROCKSDB is OFF or use_metadata_cache is set to false in merge tree settings
+    bool use_metadata_cache = false;
+
+    mutable PartMetadataManagerPtr metadata_manager;
+
    void removeIfNeeded();

    virtual void checkConsistency(bool require_part_metadata) const;
@@ -468,6 +485,9 @@ protected:

    std::optional<bool> keepSharedDataInDecoupledStorage() const;

+    void initializePartMetadataManager();
+
+
private:
    /// In compact parts order of columns is necessary
    NameToNumber column_name_to_position;
@@ -478,36 +498,54 @@ private:
    /// Reads part unique identifier (if exists) from uuid.txt
    void loadUUID();

+    static void appendFilesOfUUID(Strings & files);
+
    /// Reads columns names and types from columns.txt
    void loadColumns(bool require);

+    static void appendFilesOfColumns(Strings & files);
+
    /// If checksums.txt exists, reads file's checksums (and sizes) from it
    void loadChecksums(bool require);

+    static void appendFilesOfChecksums(Strings & files);
+
    /// Loads marks index granularity into memory
    virtual void loadIndexGranularity();

+    virtual void appendFilesOfIndexGranularity(Strings & files) const;
+
    /// Loads index file.
    void loadIndex();

+    void appendFilesOfIndex(Strings & files) const;
+
    /// Load rows count for this part from disk (for the newer storage format version).
    /// For the older format version calculates rows count from the size of a column with a fixed size.
    void loadRowsCount();

+    static void appendFilesOfRowsCount(Strings & files);
+
    /// Loads ttl infos in json format from file ttl.txt. If file doesn't exists assigns ttl infos with all zeros
    void loadTTLInfos();

+    static void appendFilesOfTTLInfos(Strings & files);
+
    void loadPartitionAndMinMaxIndex();

    void calculateColumnsSizesOnDisk();

    void calculateSecondaryIndicesSizesOnDisk();

+    void appendFilesOfPartitionAndMinMaxIndex(Strings & files) const;
+
    /// Load default compression codec from file default_compression_codec.txt
    /// if it not exists tries to deduce codec from compressed column without
    /// any specifial compression.
    void loadDefaultCompressionCodec();

+    static void appendFilesOfDefaultCompressionCodec(Strings & files);
+
    /// Found column without specific compression and return codec
    /// for this column with default parameters.
    CompressionCodecPtr detectDefaultCompressionCodec() const;

src/Storages/MergeTree/IPartMetadataManager.cpp (new file, 11 lines)
@@ -0,0 +1,11 @@
+#include "IPartMetadataManager.h"
+
+#include <Disks/IVolume.h>
+#include <Storages/MergeTree/IMergeTreeDataPart.h>
+
+namespace DB
+{
+IPartMetadataManager::IPartMetadataManager(const IMergeTreeDataPart * part_) : part(part_), disk(part->volume->getDisk())
+{
+}
+}

src/Storages/MergeTree/IPartMetadataManager.h (new file, 59 lines)
@@ -0,0 +1,59 @@
+#pragma once
+
+#include <unordered_map>
+#include <city.h>
+#include <base/types.h>
+
+namespace DB
+{
+
+class IMergeTreeDataPart;
+
+class SeekableReadBuffer;
+
+class IDisk;
+using DiskPtr = std::shared_ptr<IDisk>;
+
+/// Interface for managing metadata of merge tree part.
+/// IPartMetadataManager has two implementations:
+/// - PartMetadataManagerOrdinary: manage metadata from disk directly. deleteAll/assertAllDeleted/updateAll/check
+///   are all empty implementations because they are not needed for PartMetadataManagerOrdinary(those operations
+///   are done implicitly when removing or renaming part directory).
+/// - PartMetadataManagerWithCache: manage metadata from RocksDB cache and disk.
+class IPartMetadataManager
+{
+public:
+    using uint128 = CityHash_v1_0_2::uint128;
+
+    explicit IPartMetadataManager(const IMergeTreeDataPart * part_);
+
+    virtual ~IPartMetadataManager() = default;
+
+    /// Read metadata content and return SeekableReadBuffer object.
+    virtual std::unique_ptr<SeekableReadBuffer> read(const String & file_name) const = 0;
+
+    /// Return true if metadata exists in part.
+    virtual bool exists(const String & file_name) const = 0;
+
+    /// Delete all metadatas in part.
+    /// If include_projection is true, also delete metadatas in projection parts.
+    virtual void deleteAll(bool include_projection) = 0;
+
+    /// Assert that all metadatas in part are deleted.
+    /// If include_projection is true, also assert that all metadatas in projection parts are deleted.
+    virtual void assertAllDeleted(bool include_projection) const = 0;
+
+    /// Update all metadatas in part.
+    /// If include_projection is true, also update metadatas in projection parts.
+    virtual void updateAll(bool include_projection) = 0;
+
+    /// Check all metadatas in part.
+    virtual std::unordered_map<String, uint128> check() const = 0;
+
+protected:
+    const IMergeTreeDataPart * part;
+    const DiskPtr disk;
+};
+
+using PartMetadataManagerPtr = std::shared_ptr<IPartMetadataManager>;
+}

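Note (illustration, not part of the commit): the interface above is what IMergeTreeDataPart now goes through for its small metadata files (checksums.txt, count.txt, primary.idx and so on). A hedged sketch of a consumer that uses only the virtual methods declared here; readMetadataFileOrEmpty is a made-up name, while readStringUntilEOF is the existing helper from <IO/ReadHelpers.h>:

    // Hypothetical helper: read one metadata file through the manager, or return an empty string.
    String readMetadataFileOrEmpty(const PartMetadataManagerPtr & manager, const String & file_name)
    {
        if (!manager->exists(file_name))
            return {};

        auto buf = manager->read(file_name); /// std::unique_ptr<SeekableReadBuffer>
        String content;
        readStringUntilEOF(content, *buf);
        return content;
    }
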
@@ -53,7 +53,7 @@ String Range::toString() const


/// Example: for `Hello\_World% ...` string it returns `Hello_World`, and for `%test%` returns an empty string.
-static String extractFixedPrefixFromLikePattern(const String & like_pattern)
+String extractFixedPrefixFromLikePattern(const String & like_pattern)
{
    String fixed_prefix;

@@ -442,4 +442,6 @@ private:
    bool strict;
};

+String extractFixedPrefixFromLikePattern(const String & like_pattern);
+
}

@@ -24,7 +24,7 @@ namespace
/// least one existing (physical) column in part.
bool injectRequiredColumnsRecursively(
    const String & column_name,
-    const ColumnsDescription & storage_columns,
+    const StorageSnapshotPtr & storage_snapshot,
    const MergeTreeData::AlterConversions & alter_conversions,
    const MergeTreeData::DataPartPtr & part,
    Names & columns,
@@ -36,7 +36,8 @@ bool injectRequiredColumnsRecursively(
    /// stages.
    checkStackSize();

-    auto column_in_storage = storage_columns.tryGetColumnOrSubcolumn(GetColumnsOptions::AllPhysical, column_name);
+    auto options = GetColumnsOptions(GetColumnsOptions::AllPhysical).withSubcolumns().withExtendedObjects();
+    auto column_in_storage = storage_snapshot->tryGetColumn(options, column_name);
    if (column_in_storage)
    {
        auto column_name_in_part = column_in_storage->getNameInStorage();
@@ -63,7 +64,8 @@ bool injectRequiredColumnsRecursively(

    /// Column doesn't have default value and don't exist in part
    /// don't need to add to required set.
-    const auto column_default = storage_columns.getDefault(column_name);
+    auto metadata_snapshot = storage_snapshot->getMetadataForQuery();
+    const auto column_default = metadata_snapshot->getColumns().getDefault(column_name);
    if (!column_default)
        return false;

@@ -73,39 +75,36 @@ bool injectRequiredColumnsRecursively(

    bool result = false;
    for (const auto & identifier : identifiers)
-        result |= injectRequiredColumnsRecursively(identifier, storage_columns, alter_conversions, part, columns, required_columns, injected_columns);
+        result |= injectRequiredColumnsRecursively(identifier, storage_snapshot, alter_conversions, part, columns, required_columns, injected_columns);

    return result;
}

}

-NameSet injectRequiredColumns(const MergeTreeData & storage, const StorageMetadataPtr & metadata_snapshot, const MergeTreeData::DataPartPtr & part, Names & columns)
+NameSet injectRequiredColumns(
+    const MergeTreeData & storage,
+    const StorageSnapshotPtr & storage_snapshot,
+    const MergeTreeData::DataPartPtr & part,
+    Names & columns)
{
    NameSet required_columns{std::begin(columns), std::end(columns)};
    NameSet injected_columns;

    bool have_at_least_one_physical_column = false;

-    const auto & storage_columns = metadata_snapshot->getColumns();
    MergeTreeData::AlterConversions alter_conversions;
    if (!part->isProjectionPart())
        alter_conversions = storage.getAlterConversionsForPart(part);

    for (size_t i = 0; i < columns.size(); ++i)
    {
-        auto name_in_storage = Nested::extractTableName(columns[i]);
-        if (storage_columns.has(name_in_storage) && isObject(storage_columns.get(name_in_storage).type))
-        {
-            have_at_least_one_physical_column = true;
-            continue;
-        }
-
        /// We are going to fetch only physical columns
-        if (!storage_columns.hasColumnOrSubcolumn(GetColumnsOptions::AllPhysical, columns[i]))
-            throw Exception("There is no physical column or subcolumn " + columns[i] + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
+        auto options = GetColumnsOptions(GetColumnsOptions::AllPhysical).withSubcolumns().withExtendedObjects();
+        if (!storage_snapshot->tryGetColumn(options, columns[i]))
+            throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no physical column or subcolumn {} in table", columns[i]);

        have_at_least_one_physical_column |= injectRequiredColumnsRecursively(
-            columns[i], storage_columns, alter_conversions,
+            columns[i], storage_snapshot, alter_conversions,
            part, columns, required_columns, injected_columns);
    }

@@ -115,7 +114,7 @@ NameSet injectRequiredColumns(const MergeTreeData & storage, const StorageMetada
    */
    if (!have_at_least_one_physical_column)
    {
-        const auto minimum_size_column_name = part->getColumnNameWithMinimumCompressedSize(metadata_snapshot);
+        const auto minimum_size_column_name = part->getColumnNameWithMinimumCompressedSize(storage_snapshot);
        columns.push_back(minimum_size_column_name);
        /// correctly report added column
        injected_columns.insert(columns.back());
@@ -271,7 +270,7 @@ MergeTreeReadTaskColumns getReadTaskColumns(
    Names pre_column_names;

    /// inject columns required for defaults evaluation
-    bool should_reorder = !injectRequiredColumns(storage, storage_snapshot->getMetadataForQuery(), data_part, column_names).empty();
+    bool should_reorder = !injectRequiredColumns(storage, storage_snapshot, data_part, column_names).empty();

    if (prewhere_info)
    {
@@ -296,7 +295,7 @@ MergeTreeReadTaskColumns getReadTaskColumns(
    if (pre_column_names.empty())
        pre_column_names.push_back(column_names[0]);

-    const auto injected_pre_columns = injectRequiredColumns(storage, storage_snapshot->getMetadataForQuery(), data_part, pre_column_names);
+    const auto injected_pre_columns = injectRequiredColumns(storage, storage_snapshot, data_part, pre_column_names);
    if (!injected_pre_columns.empty())
        should_reorder = true;

@@ -22,7 +22,7 @@ using MergeTreeBlockSizePredictorPtr = std::shared_ptr<MergeTreeBlockSizePredict
  * so that you can calculate the DEFAULT expression for these columns.
  * Adds them to the `columns`.
  */
-NameSet injectRequiredColumns(const MergeTreeData & storage, const StorageMetadataPtr & metadata_snapshot, const MergeTreeData::DataPartPtr & part, Names & columns);
+NameSet injectRequiredColumns(const MergeTreeData & storage, const StorageSnapshotPtr & storage_snapshot, const MergeTreeData::DataPartPtr & part, Names & columns);


/// A batch of work for MergeTreeThreadSelectBlockInputStream

@@ -214,6 +214,7 @@ MergeTreeData::MergeTreeData(
    , parts_mover(this)
    , background_operations_assignee(*this, BackgroundJobsAssignee::Type::DataProcessing, getContext())
    , background_moves_assignee(*this, BackgroundJobsAssignee::Type::Moving, getContext())
+    , use_metadata_cache(getSettings()->use_metadata_cache)
{
    context_->getGlobalContext()->initializeBackgroundExecutorsIfNeeded();

@@ -333,6 +334,11 @@ MergeTreeData::MergeTreeData(
        LOG_WARNING(log, "{} Settings 'min_rows_for_wide_part', 'min_bytes_for_wide_part', "
            "'min_rows_for_compact_part' and 'min_bytes_for_compact_part' will be ignored.", reason);

+#if !USE_ROCKSDB
+    if (use_metadata_cache)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't use merge tree metadata cache if clickhouse was compiled without rocksdb");
+#endif
+
    common_assignee_trigger = [this] (bool delay) noexcept
    {
        if (delay)
@@ -1372,7 +1378,6 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
    LOG_DEBUG(log, "Loaded data parts ({} items)", data_parts_indexes.size());
}

-
/// Is the part directory old.
/// True if its modification time and the modification time of all files inside it is less then threshold.
/// (Only files on the first level of nesting are considered).

@@ -946,6 +946,7 @@ protected:
    friend class StorageReplicatedMergeTree;
    friend class MergeTreeDataWriter;
    friend class MergeTask;
+    friend class IPartMetadataManager;

    bool require_part_metadata;

@@ -1028,6 +1029,7 @@ protected:
    /// And for ReplicatedMergeTree we don't have LogEntry type for this operation.
    BackgroundJobsAssignee background_operations_assignee;
    BackgroundJobsAssignee background_moves_assignee;
+    bool use_metadata_cache;

    /// Strongly connected with two fields above.
    /// Every task that is finished will ask to assign a new one into an executor.

@@ -69,6 +69,7 @@ private:
    ColumnSize getColumnSizeImpl(const NameAndTypePair & column, std::unordered_set<String> * processed_substreams) const;

    void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const override;

};

}

@@ -877,12 +877,22 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd
 {
     std::atomic<size_t> total_rows{0};
 
+    /// Do not check number of read rows if we have reading
+    /// in order of sorting key with limit.
+    /// In general case, when there exists WHERE clause
+    /// it's impossible to estimate number of rows precisely,
+    /// because we can stop reading at any time.
+
     SizeLimits limits;
-    if (settings.read_overflow_mode == OverflowMode::THROW && settings.max_rows_to_read)
+    if (settings.read_overflow_mode == OverflowMode::THROW
+        && settings.max_rows_to_read
+        && !query_info.input_order_info)
         limits = SizeLimits(settings.max_rows_to_read, 0, settings.read_overflow_mode);
 
     SizeLimits leaf_limits;
-    if (settings.read_overflow_mode_leaf == OverflowMode::THROW && settings.max_rows_to_read_leaf)
+    if (settings.read_overflow_mode_leaf == OverflowMode::THROW
+        && settings.max_rows_to_read_leaf
+        && !query_info.input_order_info)
         leaf_limits = SizeLimits(settings.max_rows_to_read_leaf, 0, settings.read_overflow_mode_leaf);
 
     auto mark_cache = context->getIndexMarkCache();
@@ -20,6 +20,8 @@ struct MergeTreeReaderSettings
     bool save_marks_in_cache = false;
     /// Validate checksums on reading (should be always enabled in production).
     bool checksum_on_read = true;
+    /// True if we read in order of sorting key.
+    bool read_in_order = false;
 };
 
 struct MergeTreeWriterSettings
src/Storages/MergeTree/MergeTreeMetadataCache.cpp (new file)
@@ -0,0 +1,107 @@
#include "MergeTreeMetadataCache.h"

#if USE_ROCKSDB
#include <Common/ProfileEvents.h>
#include <base/logger_useful.h>

namespace ProfileEvents
{
    extern const Event MergeTreeMetadataCachePut;
    extern const Event MergeTreeMetadataCacheGet;
    extern const Event MergeTreeMetadataCacheDelete;
    extern const Event MergeTreeMetadataCacheSeek;
}

namespace DB
{
namespace ErrorCodes
{
    extern const int SYSTEM_ERROR;
}


std::unique_ptr<MergeTreeMetadataCache> MergeTreeMetadataCache::create(const String & dir, size_t size)
{
    assert(size != 0);
    rocksdb::Options options;
    rocksdb::BlockBasedTableOptions table_options;
    rocksdb::DB * db;

    options.create_if_missing = true;
    auto cache = rocksdb::NewLRUCache(size);
    table_options.block_cache = cache;
    options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));
    rocksdb::Status status = rocksdb::DB::Open(options, dir, &db);
    if (status != rocksdb::Status::OK())
        throw Exception(
            ErrorCodes::SYSTEM_ERROR,
            "Fail to open rocksdb path at: {} status:{}. You can try to remove the cache (this will not affect any table data).",
            dir,
            status.ToString());
    return std::make_unique<MergeTreeMetadataCache>(db);
}

MergeTreeMetadataCache::Status MergeTreeMetadataCache::put(const String & key, const String & value)
{
    auto options = rocksdb::WriteOptions();
    options.sync = true;
    options.disableWAL = false;
    auto status = rocksdb->Put(options, key, value);
    ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCachePut);
    return status;
}

MergeTreeMetadataCache::Status MergeTreeMetadataCache::del(const String & key)
{
    auto options = rocksdb::WriteOptions();
    options.sync = true;
    options.disableWAL = false;
    auto status = rocksdb->Delete(options, key);
    ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheDelete);
    LOG_TRACE(log, "Delete key:{} from MergeTreeMetadataCache status:{}", key, status.ToString());
    return status;
}

MergeTreeMetadataCache::Status MergeTreeMetadataCache::get(const String & key, String & value)
{
    auto status = rocksdb->Get(rocksdb::ReadOptions(), key, &value);
    ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheGet);
    LOG_TRACE(log, "Get key:{} from MergeTreeMetadataCache status:{}", key, status.ToString());
    return status;
}

void MergeTreeMetadataCache::getByPrefix(const String & prefix, Strings & keys, Strings & values)
{
    auto * it = rocksdb->NewIterator(rocksdb::ReadOptions());
    rocksdb::Slice target(prefix);
    for (it->Seek(target); it->Valid(); it->Next())
    {
        const auto key = it->key();
        if (!key.starts_with(target))
            break;

        const auto value = it->value();
        keys.emplace_back(key.data(), key.size());
        values.emplace_back(value.data(), value.size());
    }
    LOG_TRACE(log, "Seek with prefix:{} from MergeTreeMetadataCache items:{}", prefix, keys.size());
    ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheSeek);
    delete it;
}

uint64_t MergeTreeMetadataCache::getEstimateNumKeys() const
{
    uint64_t keys = 0;
    rocksdb->GetAggregatedIntProperty("rocksdb.estimate-num-keys", &keys);
    return keys;
}

void MergeTreeMetadataCache::shutdown()
{
    rocksdb->Close();
    rocksdb.reset();
}

}

#endif
src/Storages/MergeTree/MergeTreeMetadataCache.h (new file)
@@ -0,0 +1,45 @@
#pragma once

#include "config_core.h"

#if USE_ROCKSDB
#include <base/types.h>
#include <Core/Types.h>
#include <Poco/Logger.h>
#include <rocksdb/table.h>
#include <rocksdb/db.h>

namespace DB
{
class MergeTreeMetadataCache
{
public:
    using Status = rocksdb::Status;

    static std::unique_ptr<MergeTreeMetadataCache> create(const String & dir, size_t size);

    explicit MergeTreeMetadataCache(rocksdb::DB * rocksdb_) : rocksdb{rocksdb_}
    {
        assert(rocksdb);
    }

    MergeTreeMetadataCache(const MergeTreeMetadataCache &) = delete;

    MergeTreeMetadataCache & operator=(const MergeTreeMetadataCache &) = delete;

    Status put(const String & key, const String & value);
    Status del(const String & key);
    Status get(const String & key, String & value);
    void getByPrefix(const String & prefix, Strings & keys, Strings & values);
    uint64_t getEstimateNumKeys() const;

    void shutdown();

private:
    std::unique_ptr<rocksdb::DB> rocksdb;
    Poco::Logger * log = &Poco::Logger::get("MergeTreeMetadataCache");
};

using MergeTreeMetadataCachePtr = std::shared_ptr<MergeTreeMetadataCache>;
}

#endif
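Not part of the commit: a minimal usage sketch of the MergeTreeMetadataCache API declared above, to show how the pieces fit together. The directory, cache size, disk name and key used here are illustrative assumptions.

#if USE_ROCKSDB
#include <Storages/MergeTree/MergeTreeMetadataCache.h>

using namespace DB;

static void metadataCacheSketch()
{
    /// Open (or create) the RocksDB-backed cache; path and LRU block-cache size are arbitrary here.
    auto cache = MergeTreeMetadataCache::create("./tmp_metadata_cache/", 256 * 1024 * 1024);

    /// Keys follow the "<disk_name>:<relative path>" convention used by PartMetadataManagerWithCache.
    String key = "default:data/db/table/all_1_1_0/count.txt";
    cache->put(key, "42");

    String value;
    if (cache->get(key, value).ok())
    {
        /// value == "42"
    }

    /// Enumerate everything cached under one part directory.
    Strings keys, values;
    cache->getByPrefix("default:data/db/table/all_1_1_0/", keys, values);

    cache->del(key);
    cache->shutdown();
}
#endif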
@@ -178,12 +178,6 @@ namespace
     };
 }
 
-static std::unique_ptr<ReadBufferFromFileBase> openForReading(const DiskPtr & disk, const String & path)
-{
-    size_t file_size = disk->getFileSize(path);
-    return disk->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size);
-}
-
 String MergeTreePartition::getID(const MergeTreeData & storage) const
 {
     return getID(storage.getInMemoryMetadataPtr()->getPartitionKey().sample_block);
@@ -373,15 +367,15 @@ void MergeTreePartition::serializeText(const MergeTreeData & storage, WriteBuffe
     }
 }
 
-void MergeTreePartition::load(const MergeTreeData & storage, const DiskPtr & disk, const String & part_path)
+void MergeTreePartition::load(const MergeTreeData & storage, const PartMetadataManagerPtr & manager)
 {
     auto metadata_snapshot = storage.getInMemoryMetadataPtr();
     if (!metadata_snapshot->hasPartitionKey())
         return;
 
     const auto & partition_key_sample = adjustPartitionKey(metadata_snapshot, storage.getContext()).sample_block;
-    auto partition_file_path = part_path + "partition.dat";
-    auto file = openForReading(disk, partition_file_path);
+    auto file = manager->read("partition.dat");
     value.resize(partition_key_sample.columns());
     for (size_t i = 0; i < partition_key_sample.columns(); ++i)
         partition_key_sample.getByPosition(i).type->getDefaultSerialization()->deserializeBinary(value[i], *file);
@@ -402,7 +396,9 @@ std::unique_ptr<WriteBufferFromFileBase> MergeTreePartition::store(const Block &
     auto out = disk->writeFile(part_path + "partition.dat");
     HashingWriteBuffer out_hashing(*out);
     for (size_t i = 0; i < value.size(); ++i)
+    {
         partition_key_sample.getByPosition(i).type->getDefaultSerialization()->serializeBinary(value[i], out_hashing);
+    }
 
     out_hashing.next();
     checksums.files["partition.dat"].file_size = out_hashing.count();
@@ -462,4 +458,14 @@ KeyDescription MergeTreePartition::adjustPartitionKey(const StorageMetadataPtr &
     return partition_key;
 }
 
+void MergeTreePartition::appendFiles(const MergeTreeData & storage, Strings& files)
+{
+    auto metadata_snapshot = storage.getInMemoryMetadataPtr();
+    if (!metadata_snapshot->hasPartitionKey())
+        return;
+
+    files.push_back("partition.dat");
+}
+
 }
@@ -4,6 +4,7 @@
 #include <Disks/IDisk.h>
 #include <IO/WriteBuffer.h>
 #include <Storages/KeyDescription.h>
+#include <Storages/MergeTree/IPartMetadataManager.h>
 #include <Core/Field.h>
 
 namespace DB
@@ -37,7 +38,8 @@ public:
 
     void serializeText(const MergeTreeData & storage, WriteBuffer & out, const FormatSettings & format_settings) const;
 
-    void load(const MergeTreeData & storage, const DiskPtr & disk, const String & part_path);
+    void load(const MergeTreeData & storage, const PartMetadataManagerPtr & manager);
 
     /// Store functions return write buffer with written but not finalized data.
     /// User must call finish() for returned object.
     [[nodiscard]] std::unique_ptr<WriteBufferFromFileBase> store(const MergeTreeData & storage, const DiskPtr & disk, const String & part_path, MergeTreeDataPartChecksums & checksums) const;
@@ -47,6 +49,8 @@ public:
 
     void create(const StorageMetadataPtr & metadata_snapshot, Block block, size_t row, ContextPtr context);
 
+    static void appendFiles(const MergeTreeData & storage, Strings & files);
+
     /// Adjust partition key and execute its expression on block. Return sample block according to used expression.
     static NamesAndTypesList executePartitionByExpression(const StorageMetadataPtr & metadata_snapshot, Block & block, ContextPtr context);
 
@@ -39,9 +39,12 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
 {
     /// Actually it means that parallel reading from replicas enabled
     /// and we have to collaborate with initiator.
-    /// In this case we won't set approximate rows, because it will be accounted multiple times
-    if (!extension_.has_value())
+    /// In this case we won't set approximate rows, because it will be accounted multiple times.
+    /// Also do not count amount of read rows if we read in order of sorting key,
+    /// because we don't know actual amount of read rows in case when limit is set.
+    if (!extension_.has_value() && !reader_settings.read_in_order)
         addTotalRowsApprox(total_rows);
 
     ordered_names = header_without_virtual_columns.getNames();
 }
 
@@ -41,7 +41,7 @@ MergeTreeSequentialSource::MergeTreeSequentialSource(
     addTotalRowsApprox(data_part->rows_count);
 
     /// Add columns because we don't want to read empty blocks
-    injectRequiredColumns(storage, storage_snapshot->metadata, data_part, columns_to_read);
+    injectRequiredColumns(storage, storage_snapshot, data_part, columns_to_read);
     NamesAndTypesList columns_for_reader;
     if (take_column_types_from_storage)
     {
@@ -139,6 +139,7 @@ struct Settings;
     /** Experimental/work in progress feature. Unsafe for production. */ \
     M(UInt64, part_moves_between_shards_enable, 0, "Experimental/Incomplete feature to move parts between shards. Does not take into account sharding expressions.", 0) \
     M(UInt64, part_moves_between_shards_delay_seconds, 30, "Time to wait before/after moving parts between shards.", 0) \
+    M(Bool, use_metadata_cache, false, "Experimental feature to speed up parts loading process by using MergeTree metadata cache", 0) \
     \
     /** Obsolete settings. Kept for backward compatibility only. */ \
     M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \
src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp (new file)
@@ -0,0 +1,33 @@
#include "PartMetadataManagerOrdinary.h"

#include <IO/ReadBufferFromFileBase.h>
#include <Disks/IDisk.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h>

namespace DB
{

static std::unique_ptr<ReadBufferFromFileBase> openForReading(const DiskPtr & disk, const String & path)
{
    size_t file_size = disk->getFileSize(path);
    return disk->readFile(path, ReadSettings().adjustBufferSize(file_size), file_size);
}

PartMetadataManagerOrdinary::PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_) : IPartMetadataManager(part_)
{
}


std::unique_ptr<SeekableReadBuffer> PartMetadataManagerOrdinary::read(const String & file_name) const
{
    String file_path = fs::path(part->getFullRelativePath()) / file_name;
    return openForReading(disk, file_path);
}

bool PartMetadataManagerOrdinary::exists(const String & file_name) const
{
    return disk->exists(fs::path(part->getFullRelativePath()) / file_name);
}


}
src/Storages/MergeTree/PartMetadataManagerOrdinary.h (new file)
@@ -0,0 +1,29 @@
#pragma once

#include <Storages/MergeTree/IPartMetadataManager.h>

namespace DB
{

class PartMetadataManagerOrdinary : public IPartMetadataManager
{
public:
    explicit PartMetadataManagerOrdinary(const IMergeTreeDataPart * part_);

    ~PartMetadataManagerOrdinary() override = default;

    std::unique_ptr<SeekableReadBuffer> read(const String & file_name) const override;

    bool exists(const String & file_name) const override;

    void deleteAll(bool /*include_projection*/) override {}

    void assertAllDeleted(bool /*include_projection*/) const override {}

    void updateAll(bool /*include_projection*/) override {}

    std::unordered_map<String, uint128> check() const override { return {}; }
};


}
src/Storages/MergeTree/PartMetadataManagerWithCache.cpp (new file)
@@ -0,0 +1,273 @@
#include "PartMetadataManagerWithCache.h"

#if USE_ROCKSDB
#include <Common/hex.h>
#include <Common/ErrorCodes.h>
#include <IO/HashingReadBuffer.h>
#include <IO/ReadBufferFromString.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h>

namespace ProfileEvents
{
    extern const Event MergeTreeMetadataCacheHit;
    extern const Event MergeTreeMetadataCacheMiss;
}

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int CORRUPTED_DATA;
    extern const int NO_SUCH_PROJECTION_IN_TABLE;
}

PartMetadataManagerWithCache::PartMetadataManagerWithCache(const IMergeTreeDataPart * part_, const MergeTreeMetadataCachePtr & cache_)
    : IPartMetadataManager(part_), cache(cache_)
{
}

String PartMetadataManagerWithCache::getKeyFromFilePath(const String & file_path) const
{
    return disk->getName() + ":" + file_path;
}

String PartMetadataManagerWithCache::getFilePathFromKey(const String & key) const
{
    return key.substr(disk->getName().size() + 1);
}

std::unique_ptr<SeekableReadBuffer> PartMetadataManagerWithCache::read(const String & file_name) const
{
    String file_path = fs::path(part->getFullRelativePath()) / file_name;
    String key = getKeyFromFilePath(file_path);
    String value;
    auto status = cache->get(key, value);
    if (!status.ok())
    {
        ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheMiss);
        auto in = disk->readFile(file_path);
        readStringUntilEOF(value, *in);
        cache->put(key, value);
    }
    else
    {
        ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheHit);
    }
    return std::make_unique<ReadBufferFromOwnString>(value);
}

bool PartMetadataManagerWithCache::exists(const String & file_name) const
{
    String file_path = fs::path(part->getFullRelativePath()) / file_name;
    String key = getKeyFromFilePath(file_path);
    String value;
    auto status = cache->get(key, value);
    if (status.ok())
    {
        ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheHit);
        return true;
    }
    else
    {
        ProfileEvents::increment(ProfileEvents::MergeTreeMetadataCacheMiss);
        return disk->exists(fs::path(part->getFullRelativePath()) / file_name);
    }
}

void PartMetadataManagerWithCache::deleteAll(bool include_projection)
{
    Strings file_names;
    part->appendFilesOfColumnsChecksumsIndexes(file_names, include_projection);

    String value;
    for (const auto & file_name : file_names)
    {
        String file_path = fs::path(part->getFullRelativePath()) / file_name;
        String key = getKeyFromFilePath(file_path);
        auto status = cache->del(key);
        if (!status.ok())
        {
            status = cache->get(key, value);
            if (status.IsNotFound())
                continue;

            throw Exception(
                ErrorCodes::LOGICAL_ERROR,
                "deleteAll failed include_projection:{} status:{}, file_path:{}",
                include_projection,
                status.ToString(),
                file_path);
        }
    }
}

void PartMetadataManagerWithCache::updateAll(bool include_projection)
{
    Strings file_names;
    part->appendFilesOfColumnsChecksumsIndexes(file_names, include_projection);

    String value;
    String read_value;
    for (const auto & file_name : file_names)
    {
        String file_path = fs::path(part->getFullRelativePath()) / file_name;
        if (!disk->exists(file_path))
            continue;
        auto in = disk->readFile(file_path);
        readStringUntilEOF(value, *in);

        String key = getKeyFromFilePath(file_path);
        auto status = cache->put(key, value);
        if (!status.ok())
        {
            status = cache->get(key, read_value);
            if (status.IsNotFound() || read_value == value)
                continue;

            throw Exception(
                ErrorCodes::LOGICAL_ERROR,
                "updateAll failed include_projection:{} status:{}, file_path:{}",
                include_projection,
                status.ToString(),
                file_path);
        }
    }
}

void PartMetadataManagerWithCache::assertAllDeleted(bool include_projection) const
{
    Strings keys;
    std::vector<uint128> _;
    getKeysAndCheckSums(keys, _);
    if (keys.empty())
        return;

    String file_path;
    String file_name;
    for (const auto & key : keys)
    {
        file_path = getFilePathFromKey(key);
        file_name = fs::path(file_path).filename();

        /// Metadata file belongs to current part
        if (fs::path(part->getFullRelativePath()) / file_name == file_path)
            throw Exception(
                ErrorCodes::LOGICAL_ERROR,
                "Data part {} with type {} with meta file {} still in cache",
                part->name,
                part->getType().toString(),
                file_path);

        /// File belongs to projection part of current part
        if (!part->isProjectionPart() && include_projection)
        {
            const auto & projection_parts = part->getProjectionParts();
            for (const auto & [projection_name, projection_part] : projection_parts)
            {
                if (fs::path(projection_part->getFullRelativePath()) / file_name == file_path)
                {
                    throw Exception(
                        ErrorCodes::LOGICAL_ERROR,
                        "Data part {} with type {} with meta file {} with projection name still in cache",
                        part->name,
                        part->getType().toString(),
                        file_path,
                        projection_name);
                }
            }
        }
    }
}

void PartMetadataManagerWithCache::getKeysAndCheckSums(Strings & keys, std::vector<uint128> & checksums) const
{
    String prefix = getKeyFromFilePath(fs::path(part->getFullRelativePath()) / "");
    Strings values;
    cache->getByPrefix(prefix, keys, values);
    size_t size = keys.size();
    for (size_t i = 0; i < size; ++i)
    {
        ReadBufferFromString rbuf(values[i]);
        HashingReadBuffer hbuf(rbuf);
        checksums.push_back(hbuf.getHash());
    }
}

std::unordered_map<String, IPartMetadataManager::uint128> PartMetadataManagerWithCache::check() const
{
    /// Only applies for normal part stored on disk
    if (part->isProjectionPart() || !part->isStoredOnDisk())
        return {};

    /// The directory of projection part is under the directory of its parent part
    const auto filenames_without_checksums = part->getFileNamesWithoutChecksums();

    std::unordered_map<String, uint128> results;
    Strings keys;
    std::vector<uint128> cache_checksums;
    std::vector<uint128> disk_checksums;
    getKeysAndCheckSums(keys, cache_checksums);
    for (size_t i = 0; i < keys.size(); ++i)
    {
        const auto & key = keys[i];
        String file_path = getFilePathFromKey(key);
        String file_name = fs::path(file_path).filename();
        results.emplace(file_name, cache_checksums[i]);

        /// File belongs to normal part
        if (fs::path(part->getFullRelativePath()) / file_name == file_path)
        {
            auto disk_checksum = part->getActualChecksumByFile(file_path);
            if (disk_checksum != cache_checksums[i])
                throw Exception(
                    ErrorCodes::CORRUPTED_DATA,
                    "Checksums doesn't match in part {}. Expected: {}. Found {}.",
                    part->name,
                    getHexUIntUppercase(disk_checksum.first) + getHexUIntUppercase(disk_checksum.second),
                    getHexUIntUppercase(cache_checksums[i].first) + getHexUIntUppercase(cache_checksums[i].second));

            disk_checksums.push_back(disk_checksum);
            continue;
        }

        /// File belongs to projection part
        String proj_dir_name = fs::path(file_path).parent_path().filename();
        auto pos = proj_dir_name.find_last_of('.');
        if (pos == String::npos)
        {
            throw Exception(
                ErrorCodes::NO_SUCH_PROJECTION_IN_TABLE,
                "There is no projection in part: {} contains file: {} with directory name: {}",
                part->name,
                file_path,
                proj_dir_name);
        }

        String proj_name = proj_dir_name.substr(0, pos);
        const auto & projection_parts = part->getProjectionParts();
        auto it = projection_parts.find(proj_name);
        if (it == projection_parts.end())
        {
            throw Exception(
                ErrorCodes::NO_SUCH_PROJECTION_IN_TABLE,
                "There is no projection {} in part: {} contains file: {}",
                proj_name, part->name, file_path);
        }

        auto disk_checksum = it->second->getActualChecksumByFile(file_path);
        if (disk_checksum != cache_checksums[i])
            throw Exception(
                ErrorCodes::CORRUPTED_DATA,
                "Checksums doesn't match in projection part {} {}. Expected: {}. Found {}.",
                part->name, proj_name,
                getHexUIntUppercase(disk_checksum.first) + getHexUIntUppercase(disk_checksum.second),
                getHexUIntUppercase(cache_checksums[i].first) + getHexUIntUppercase(cache_checksums[i].second));
        disk_checksums.push_back(disk_checksum);
    }
    return results;
}

}
#endif
src/Storages/MergeTree/PartMetadataManagerWithCache.h (new file)
@@ -0,0 +1,56 @@
#pragma once

#include "config_core.h"

#if USE_ROCKSDB
#include <Storages/MergeTree/IPartMetadataManager.h>
#include <Storages/MergeTree/MergeTreeMetadataCache.h>

namespace DB
{

/// PartMetadataManagerWithCache stores metadatas of part in RocksDB as cache layer to speed up
/// loading process of merge tree table.
class PartMetadataManagerWithCache : public IPartMetadataManager
{
public:
    PartMetadataManagerWithCache(const IMergeTreeDataPart * part_, const MergeTreeMetadataCachePtr & cache_);

    ~PartMetadataManagerWithCache() override = default;

    /// First read the metadata from RocksDB cache, then from disk.
    std::unique_ptr<SeekableReadBuffer> read(const String & file_name) const override;

    /// First judge existence of the metadata in RocksDB cache, then in disk.
    bool exists(const String & file_name) const override;

    /// Delete all metadatas in part from RocksDB cache.
    void deleteAll(bool include_projection) override;

    /// Assert all metadatas in part from RocksDB cache are deleted.
    void assertAllDeleted(bool include_projection) const override;

    /// Update all metadatas in part from RocksDB cache.
    /// Need to be called after part directory is renamed.
    void updateAll(bool include_projection) override;

    /// Check if all metadatas in part from RocksDB cache are up to date.
    std::unordered_map<String, uint128> check() const override;

private:
    /// Get cache key from path of metadata file.
    /// Format: <disk_name>:relative/full/path/of/metadata/file
    String getKeyFromFilePath(const String & file_path) const;

    /// Get metadata file path from cache key.
    String getFilePathFromKey(const String & key) const;

    /// Get cache keys and checksums of corresponding metadata in a part(including projection parts)
    void getKeysAndCheckSums(Strings & keys, std::vector<uint128> & checksums) const;


    MergeTreeMetadataCachePtr cache;
};

}
#endif
@@ -399,6 +399,7 @@ CheckResult ReplicatedMergeTreePartCheckThread::checkPart(const String & part_na
             LOG_WARNING(log, "We have part {} covering part {}", part->name, part_name);
         }
 
+        part->checkMetadata();
         return {part_name, true, ""};
     }
 
(new file)
@@ -0,0 +1,83 @@
#include "config_core.h"

#if USE_ROCKSDB
#include <gtest/gtest.h>
#include <rocksdb/table.h>
#include <rocksdb/db.h>
#include <Interpreters/Context.h>
#include <Storages/MergeTree/MergeTreeMetadataCache.h>

using namespace DB;

class MergeTreeMetadataCacheTest : public ::testing::Test
{
public:
    void SetUp() override
    {
        cache = MergeTreeMetadataCache::create("./db/", 268435456);
    }

    void TearDown() override
    {
        cache->shutdown();
        cache.reset();
    }

    MergeTreeMetadataCachePtr cache;
};

TEST_F(MergeTreeMetadataCacheTest, testCommon)
{
    std::vector<String> files
        = {"columns.txt", "checksums.txt", "primary.idx", "count.txt", "partition.dat", "minmax_p.idx", "default_compression_codec.txt"};
    String prefix = "data/test_metadata_cache/check_part_metadata_cache/201806_1_1_0_4/";

    for (const auto & file : files)
    {
        auto status = cache->put(prefix + file, prefix + file);
        ASSERT_EQ(status.code(), rocksdb::Status::Code::kOk);
    }

    for (const auto & file : files)
    {
        String value;
        auto status = cache->get(prefix + file, value);
        ASSERT_EQ(status.code(), rocksdb::Status::Code::kOk);
        ASSERT_EQ(value, prefix + file);
    }

    {
        Strings keys;
        Strings values;
        cache->getByPrefix(prefix, keys, values);
        ASSERT_EQ(keys.size(), files.size());
        ASSERT_EQ(values.size(), files.size());
        for (size_t i = 0; i < files.size(); ++i)
        {
            ASSERT_EQ(values[i], keys[i]);
        }
    }

    for (const auto & file : files)
    {
        auto status = cache->del(prefix + file);
        ASSERT_EQ(status.code(), rocksdb::Status::Code::kOk);
    }

    for (const auto & file : files)
    {
        String value;
        auto status = cache->get(prefix + file, value);
        ASSERT_EQ(status.code(), rocksdb::Status::Code::kNotFound);
    }

    {
        Strings keys;
        Strings values;
        cache->getByPrefix(prefix, keys, values);
        ASSERT_EQ(keys.size(), 0);
        ASSERT_EQ(values.size(), 0);
    }
}

#endif
@@ -1616,6 +1616,8 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_
                 auto out = disk->writeFile(tmp_checksums_path, 4096);
                 part->checksums.write(*out);
                 disk->moveFile(tmp_checksums_path, checksums_path);
+
+                part->checkMetadata();
                 results.emplace_back(part->name, true, "Checksums recounted and written to disk.");
             }
             catch (const Exception & ex)
@@ -1632,6 +1634,7 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_
             try
             {
                 checkDataPart(part, true);
+                part->checkMetadata();
                 results.emplace_back(part->name, true, "");
             }
             catch (const Exception & ex)
@@ -1,4 +1,6 @@
 #include <Common/config.h>
+#include "IO/ParallelReadBuffer.h"
+#include "IO/IOThreadPool.h"
 #include "Parsers/ASTCreateQuery.h"
 
 #if USE_AWS_S3
@@ -238,7 +240,8 @@ StorageS3Source::StorageS3Source(
     String compression_hint_,
     const std::shared_ptr<Aws::S3::S3Client> & client_,
     const String & bucket_,
-    std::shared_ptr<IteratorWrapper> file_iterator_)
+    std::shared_ptr<IteratorWrapper> file_iterator_,
+    const size_t download_thread_num_)
     : SourceWithProgress(getHeader(sample_block_, need_path, need_file))
     , WithContext(context_)
     , name(std::move(name_))
@@ -254,6 +257,7 @@ StorageS3Source::StorageS3Source(
     , with_file_column(need_file)
     , with_path_column(need_path)
     , file_iterator(file_iterator_)
+    , download_thread_num(download_thread_num_)
 {
     initialize();
 }
@@ -275,28 +279,79 @@ bool StorageS3Source::initialize()
 
     file_path = fs::path(bucket) / current_key;
 
-    read_buf = wrapReadBufferWithCompressionMethod(
-        std::make_unique<ReadBufferFromS3>(client, bucket, current_key, max_single_read_retries, getContext()->getReadSettings()),
-        chooseCompressionMethod(current_key, compression_hint));
+    read_buf = wrapReadBufferWithCompressionMethod(createS3ReadBuffer(current_key), chooseCompressionMethod(current_key, compression_hint));
     auto input_format = getContext()->getInputFormat(format, *read_buf, sample_block, max_block_size, format_settings);
     QueryPipelineBuilder builder;
     builder.init(Pipe(input_format));
 
     if (columns_desc.hasDefaults())
     {
-        builder.addSimpleTransform([&](const Block & header)
-        {
-            return std::make_shared<AddingDefaultsTransform>(header, columns_desc, *input_format, getContext());
-        });
+        builder.addSimpleTransform(
+            [&](const Block & header)
+            { return std::make_shared<AddingDefaultsTransform>(header, columns_desc, *input_format, getContext()); });
     }
 
     pipeline = std::make_unique<QueryPipeline>(QueryPipelineBuilder::getPipeline(std::move(builder)));
     reader = std::make_unique<PullingPipelineExecutor>(*pipeline);
 
-    initialized = false;
     return true;
 }
 
+std::unique_ptr<ReadBuffer> StorageS3Source::createS3ReadBuffer(const String & key)
+{
+    const size_t object_size = DB::S3::getObjectSize(client, bucket, key, false);
+
+    auto download_buffer_size = getContext()->getSettings().max_download_buffer_size;
+    const bool use_parallel_download = download_buffer_size > 0 && download_thread_num > 1;
+    const bool object_too_small = object_size < download_thread_num * download_buffer_size;
+    if (!use_parallel_download || object_too_small)
+    {
+        LOG_TRACE(log, "Downloading object of size {} from S3 in single thread", object_size);
+        return std::make_unique<ReadBufferFromS3>(client, bucket, key, max_single_read_retries, getContext()->getReadSettings());
+    }
+
+    assert(object_size > 0);
+
+    if (download_buffer_size < DBMS_DEFAULT_BUFFER_SIZE)
+    {
+        LOG_WARNING(log, "Downloading buffer {} bytes too small, set at least {} bytes", download_buffer_size, DBMS_DEFAULT_BUFFER_SIZE);
+        download_buffer_size = DBMS_DEFAULT_BUFFER_SIZE;
+    }
+
+    auto factory = std::make_unique<ReadBufferS3Factory>(
+        client, bucket, key, download_buffer_size, object_size, max_single_read_retries, getContext()->getReadSettings());
+    LOG_TRACE(
+        log, "Downloading from S3 in {} threads. Object size: {}, Range size: {}.", download_thread_num, object_size, download_buffer_size);
+
+    ThreadGroupStatusPtr running_group = CurrentThread::isInitialized() && CurrentThread::get().getThreadGroup()
+        ? CurrentThread::get().getThreadGroup()
+        : MainThreadStatus::getInstance().getThreadGroup();
+
+    ContextPtr query_context = CurrentThread::isInitialized() ? CurrentThread::get().getQueryContext() : nullptr;
+
+    auto worker_cleanup = [has_running_group = running_group == nullptr](ThreadStatus & thread_status)
+    {
+        if (has_running_group)
+            thread_status.detachQuery(false);
+    };
+
+    auto worker_setup = [query_context = std::move(query_context),
+                         running_group = std::move(running_group)](ThreadStatus & thread_status)
+    {
+        /// Save query context if any, because cache implementation needs it.
+        if (query_context)
+            thread_status.attachQueryContext(query_context);
+
+        /// To be able to pass ProfileEvents.
+        if (running_group)
+            thread_status.attachQuery(running_group);
+    };
+
+    return std::make_unique<ParallelReadBuffer>(
+        std::move(factory), &IOThreadPool::get(), download_thread_num, std::move(worker_setup), std::move(worker_cleanup));
+}
+
 String StorageS3Source::getName() const
 {
     return name;
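Not part of the commit: a small worked example of the sizing rule in createS3ReadBuffer() above. The numbers are made up; only the comparison itself is taken from the code.

/// With 4 download threads and a 10 MiB range size, an object must be at least
/// 40 MiB before the parallel ReadBufferS3Factory path is taken; a 25 MiB object
/// falls back to the single-threaded ReadBufferFromS3 branch.
size_t download_thread_num = 4;
size_t download_buffer_size = 10 * 1024 * 1024;
size_t object_size = 25 * 1024 * 1024;

bool use_parallel_download = download_buffer_size > 0 && download_thread_num > 1;   /// true
bool object_too_small = object_size < download_thread_num * download_buffer_size;   /// true (25 MiB < 40 MiB)
bool parallel = use_parallel_download && !object_too_small;                         /// false -> single thread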
@@ -670,6 +725,7 @@ Pipe StorageS3::read(
         block_for_format = storage_snapshot->metadata->getSampleBlock();
     }
 
+    const size_t max_download_threads = local_context->getSettingsRef().max_download_threads;
     for (size_t i = 0; i < num_streams; ++i)
     {
         pipes.emplace_back(std::make_shared<StorageS3Source>(
@@ -686,7 +742,8 @@ Pipe StorageS3::read(
             compression_method,
             client_auth.client,
             client_auth.uri.bucket,
-            iterator_wrapper));
+            iterator_wrapper,
+            max_download_threads));
     }
     auto pipe = Pipe::unitePipes(std::move(pipes));
 
@@ -74,7 +74,8 @@ public:
         String compression_hint_,
         const std::shared_ptr<Aws::S3::S3Client> & client_,
         const String & bucket,
-        std::shared_ptr<IteratorWrapper> file_iterator_);
+        std::shared_ptr<IteratorWrapper> file_iterator_,
+        size_t download_thread_num);
 
     String getName() const override;
 
@@ -101,13 +102,17 @@ private:
     std::unique_ptr<PullingPipelineExecutor> reader;
     /// onCancel and generate can be called concurrently
     std::mutex reader_mutex;
-    bool initialized = false;
     bool with_file_column = false;
     bool with_path_column = false;
     std::shared_ptr<IteratorWrapper> file_iterator;
+    size_t download_thread_num = 1;
+
+    Poco::Logger * log = &Poco::Logger::get("StorageS3Source");
 
     /// Recreate ReadBuffer and BlockInputStream for each file.
     bool initialize();
+
+    std::unique_ptr<ReadBuffer> createS3ReadBuffer(const String & key);
 };
 
 /**
@@ -51,40 +51,42 @@ NamesAndTypesList StorageSnapshot::getColumns(const GetColumnsOptions & options)
 NamesAndTypesList StorageSnapshot::getColumnsByNames(const GetColumnsOptions & options, const Names & names) const
 {
     NamesAndTypesList res;
-    const auto & columns = getMetadataForQuery()->getColumns();
     for (const auto & name : names)
-    {
-        auto column = columns.tryGetColumn(options, name);
-        if (column && !isObject(column->type))
-        {
-            res.emplace_back(std::move(*column));
-            continue;
-        }
+        res.push_back(getColumn(options, name));
+    return res;
+}
 
-        if (options.with_extended_objects)
-        {
-            auto object_column = object_columns.tryGetColumn(options, name);
-            if (object_column)
-            {
-                res.emplace_back(std::move(*object_column));
-                continue;
-            }
-        }
+std::optional<NameAndTypePair> StorageSnapshot::tryGetColumn(const GetColumnsOptions & options, const String & column_name) const
+{
+    const auto & columns = getMetadataForQuery()->getColumns();
+    auto column = columns.tryGetColumn(options, column_name);
+    if (column && (!isObject(column->type) || !options.with_extended_objects))
+        return column;
 
-        if (options.with_virtuals)
-        {
-            auto it = virtual_columns.find(name);
-            if (it != virtual_columns.end())
-            {
-                res.emplace_back(name, it->second);
-                continue;
-            }
-        }
+    if (options.with_extended_objects)
+    {
+        auto object_column = object_columns.tryGetColumn(options, column_name);
+        if (object_column)
+            return object_column;
+    }
 
-        throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no column {} in table", name);
-    }
+    if (options.with_virtuals)
+    {
+        auto it = virtual_columns.find(column_name);
+        if (it != virtual_columns.end())
+            return NameAndTypePair(column_name, it->second);
+    }
 
-    return res;
+    return {};
+}
+
+NameAndTypePair StorageSnapshot::getColumn(const GetColumnsOptions & options, const String & column_name) const
+{
+    auto column = tryGetColumn(options, column_name);
+    if (!column)
+        throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no column {} in table", column_name);
+
+    return *column;
 }
 
 Block StorageSnapshot::getSampleBlockForColumns(const Names & column_names) const
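Not part of the commit: a sketch of how the new tryGetColumn()/getColumn() pair above differs for a caller. The options object and the column name "x" are illustrative.

/// tryGetColumn() returns an empty optional for an unknown name instead of throwing.
if (auto column = storage_snapshot->tryGetColumn(options, "x"))
{
    /// Found: *column is a NameAndTypePair (name and data type).
}
else
{
    /// Absent: fall back or report the missing column here.
}

/// getColumn() wraps tryGetColumn() and throws NO_SUCH_COLUMN_IN_TABLE when the name is unknown.
NameAndTypePair required = storage_snapshot->getColumn(options, "x");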
@@ -61,6 +61,10 @@ struct StorageSnapshot
     /// Get columns with types according to options only for requested names.
     NamesAndTypesList getColumnsByNames(const GetColumnsOptions & options, const Names & names) const;
 
+    /// Get column with type according to options for requested name.
+    std::optional<NameAndTypePair> tryGetColumn(const GetColumnsOptions & options, const String & column_name) const;
+    NameAndTypePair getColumn(const GetColumnsOptions & options, const String & column_name) const;
+
     /// Block with ordinary + materialized + aliases + virtuals + subcolumns.
     Block getSampleBlockForColumns(const Names & column_names) const;
 
@@ -560,6 +560,8 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData(
     const std::optional<FormatSettings> & format_settings,
     ContextPtr context)
 {
+    context->getRemoteHostFilter().checkURL(Poco::URI(uri));
+
     Poco::Net::HTTPBasicCredentials credentials;
 
     std::vector<String> urls_to_check;
src/Storages/System/StorageSystemMergeTreeMetadataCache.cpp (new file)
@@ -0,0 +1,143 @@
#include <Storages/System/StorageSystemMergeTreeMetadataCache.h>

#if USE_ROCKSDB
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTSelectQuery.h>
#include <Storages/MergeTree/KeyCondition.h>
#include <Storages/MergeTree/MergeTreeMetadataCache.h>
#include <Common/typeid_cast.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}

NamesAndTypesList StorageSystemMergeTreeMetadataCache::getNamesAndTypes()
{
    return {
        {"key", std::make_shared<DataTypeString>()},
        {"value", std::make_shared<DataTypeString>()},
    };
}

static bool extractKeyImpl(const IAST & elem, String & res, bool & precise)
{
    const auto * function = elem.as<ASTFunction>();
    if (!function)
        return false;

    if (function->name == "and")
    {
        for (const auto & child : function->arguments->children)
        {
            bool tmp_precise = false;
            if (extractKeyImpl(*child, res, tmp_precise))
            {
                precise = tmp_precise;
                return true;
            }
        }
        return false;
    }

    if (function->name == "equals" || function->name == "like")
    {
        const auto & args = function->arguments->as<ASTExpressionList &>();
        const IAST * value;

        if (args.children.size() != 2)
            return false;

        const ASTIdentifier * ident;
        if ((ident = args.children.at(0)->as<ASTIdentifier>()))
            value = args.children.at(1).get();
        else if ((ident = args.children.at(1)->as<ASTIdentifier>()))
            value = args.children.at(0).get();
        else
            return false;

        if (ident->name() != "key")
            return false;

        const auto * literal = value->as<ASTLiteral>();
        if (!literal)
            return false;

        if (literal->value.getType() != Field::Types::String)
            return false;

        res = literal->value.safeGet<String>();
        precise = function->name == "equals";
        return true;
    }
    return false;
}


/// Retrieve from the query a condition of the form `key= 'key'`, from conjunctions in the WHERE clause.
static String extractKey(const ASTPtr & query, bool& precise)
{
    const auto & select = query->as<ASTSelectQuery &>();
    if (!select.where())
        return "";

    String res;
    return extractKeyImpl(*select.where(), res, precise) ? res : "";
}


void StorageSystemMergeTreeMetadataCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const
{
    bool precise = false;
    String key = extractKey(query_info.query, precise);
    if (key.empty())
        throw Exception(
            "SELECT from system.merge_tree_metadata_cache table must contain condition like key = 'key' or key LIKE 'prefix%' in WHERE clause.", ErrorCodes::BAD_ARGUMENTS);

    auto cache = context->getMergeTreeMetadataCache();
    if (precise)
    {
        String value;
        if (cache->get(key, value) != MergeTreeMetadataCache::Status::OK())
            return;

        size_t col_num = 0;
        res_columns[col_num++]->insert(key);
        res_columns[col_num++]->insert(value);
    }
    else
    {
        String target = extractFixedPrefixFromLikePattern(key);
        if (target.empty())
            throw Exception(
                "SELECT from system.merge_tree_metadata_cache table must contain condition like key = 'key' or key LIKE 'prefix%' in WHERE clause.", ErrorCodes::BAD_ARGUMENTS);

        Strings keys;
        Strings values;
        keys.reserve(4096);
        values.reserve(4096);
        cache->getByPrefix(target, keys, values);
        if (keys.empty())
            return;

        assert(keys.size() == values.size());
        for (size_t i = 0; i < keys.size(); ++i)
        {
            size_t col_num = 0;
            res_columns[col_num++]->insert(keys[i]);
            res_columns[col_num++]->insert(values[i]);
        }
    }
}

}
#endif
32
src/Storages/System/StorageSystemMergeTreeMetadataCache.h
Normal file
@ -0,0 +1,32 @@
#pragma once

#include "config_core.h"

#if USE_ROCKSDB
#include <base/shared_ptr_helper.h>
#include <Storages/System/IStorageSystemOneBlock.h>


namespace DB
{
class Context;


/// Implements `merge_tree_metadata_cache` system table, which allows you to view the metadata cache data in rocksdb for testing purposes.
class StorageSystemMergeTreeMetadataCache : public shared_ptr_helper<StorageSystemMergeTreeMetadataCache>, public IStorageSystemOneBlock<StorageSystemMergeTreeMetadataCache>
{
    friend struct shared_ptr_helper<StorageSystemMergeTreeMetadataCache>;

public:
    std::string getName() const override { return "SystemMergeTreeMetadataCache"; }

    static NamesAndTypesList getNamesAndTypes();

protected:
    using IStorageSystemOneBlock::IStorageSystemOneBlock;

    void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const override;
};

}
#endif

@ -75,6 +75,7 @@
#if USE_ROCKSDB
#include <Storages/RocksDB/StorageSystemRocksDB.h>
+#include <Storages/System/StorageSystemMergeTreeMetadataCache.h>
#endif


@ -129,6 +130,7 @@ void attachSystemTablesLocal(ContextPtr context, IDatabase & system_database)
#endif
#if USE_ROCKSDB
    attach<StorageSystemRocksDB>(context, system_database, "rocksdb");
+    attach<StorageSystemMergeTreeMetadataCache>(context, system_database, "merge_tree_metadata_cache");
#endif
}

@ -20,8 +20,6 @@ class Description:

    def __init__(self, pull_request):
        self.label_name = str()
-        self.legal = False
-
        self._parse(pull_request["bodyText"])

    def _parse(self, text):

@ -39,12 +37,6 @@ class Description:
                category = stripped
                next_category = False

-            if (
-                stripped
-                == "I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla/?lang=en"
-            ):
-                self.legal = True
-
            category_headers = (
                "Category (leave one):",
                "Changelog category (leave one):",

@ -10,12 +10,12 @@ from get_robot_token import get_parameter_from_ssm

class ClickHouseHelper:
    def __init__(self, url=None):
        if url is None:
-            self.url = get_parameter_from_ssm("clickhouse-test-stat-url2")
+            url = get_parameter_from_ssm("clickhouse-test-stat-url")

+        self.url = url
        self.auth = {
-            "X-ClickHouse-User": get_parameter_from_ssm(
-                "clickhouse-test-stat-login2"
-            ),
-            "X-ClickHouse-Key": "",
+            "X-ClickHouse-User": get_parameter_from_ssm("clickhouse-test-stat-login"),
+            "X-ClickHouse-Key": get_parameter_from_ssm("clickhouse-test-stat-password"),
        }

    @staticmethod
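
With the constructor now taking an optional url, call sites can keep relying on the SSM-configured endpoint or inject one explicitly. A hypothetical call site, as a sketch only (the localhost URL and the import path are assumptions, not code from this commit):

# Hypothetical usage of the updated constructor.
from clickhouse_helper import ClickHouseHelper  # assumed tests/ci module patched above

# Default: endpoint and credentials are resolved from the SSM parameters
# "clickhouse-test-stat-url", "clickhouse-test-stat-login" and
# "clickhouse-test-stat-password", as in the diff above.
ch_helper = ClickHouseHelper()

# Explicit endpoint (e.g. a local server); credentials still come from SSM.
local_helper = ClickHouseHelper(url="http://localhost:8123")
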
@ -179,7 +179,7 @@ def mark_flaky_tests(clickhouse_helper, check_name, test_results):
|
|||||||
check_name=check_name
|
check_name=check_name
|
||||||
)
|
)
|
||||||
|
|
||||||
tests_data = clickhouse_helper.select_json_each_row("gh-data", query)
|
tests_data = clickhouse_helper.select_json_each_row("default", query)
|
||||||
master_failed_tests = {row["test_name"] for row in tests_data}
|
master_failed_tests = {row["test_name"] for row in tests_data}
|
||||||
logging.info("Found flaky tests: %s", ", ".join(master_failed_tests))
|
logging.info("Found flaky tests: %s", ", ".join(master_failed_tests))
|
||||||
|
|
||||||
|

@ -59,3 +59,17 @@ def post_commit_status_to_file(file_path, description, state, report_url):
    with open(file_path, "w", encoding="utf-8") as f:
        out = csv.writer(f, delimiter="\t")
        out.writerow([state, report_url, description])
+
+
+def remove_labels(gh, pr_info, labels_names):
+    repo = gh.get_repo(GITHUB_REPOSITORY)
+    pull_request = repo.get_pull(pr_info.number)
+    for label in labels_names:
+        pull_request.remove_from_labels(label)
+
+
+def post_labels(gh, pr_info, labels_names):
+    repo = gh.get_repo(GITHUB_REPOSITORY)
+    pull_request = repo.get_pull(pr_info.number)
+    for label in labels_names:
+        pull_request.add_to_labels(label)
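
The two helpers above are thin PyGithub wrappers around add_to_labels/remove_from_labels. A hypothetical call site, as a sketch only (the module name, token handling, label names, and the pr_info stand-in are assumptions; only the helper signatures and pr_info.number come from this diff):

# Hypothetical call site for the new label helpers.
from github import Github  # PyGithub, which the helpers above rely on

from commit_status_helper import post_labels, remove_labels  # assumed module name


class FakePRInfo:
    """Stand-in for the pr_info object used by these CI scripts; only .number is needed."""

    def __init__(self, number):
        self.number = number


gh = Github("<github-token>")       # token normally comes from the CI environment
pr_info = FakePRInfo(number=12345)  # hypothetical pull request number

post_labels(gh, pr_info, ["pr-feature"])    # attach labels decided by the check
remove_labels(gh, pr_info, ["pr-doc-fix"])  # drop labels that no longer apply
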

@ -197,4 +197,4 @@ if __name__ == "__main__":
        report_url,
        CHECK_NAME,
    )
-    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)

@ -459,7 +459,7 @@ def main():
        NAME,
    )
    ch_helper = ClickHouseHelper()
-    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)


if __name__ == "__main__":

@ -234,7 +234,7 @@ def main():
        NAME,
    )
    ch_helper = ClickHouseHelper()
-    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)


if __name__ == "__main__":

@ -114,4 +114,4 @@ if __name__ == "__main__":
        report_url,
        NAME,
    )
-    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)

@ -204,7 +204,7 @@ if __name__ == "__main__":
        report_url,
        NAME,
    )
-    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)

    # Refuse other checks to run if fast test failed
    if state != "success":

@ -356,7 +356,7 @@ if __name__ == "__main__":
        report_url,
        check_name_with_group,
    )
-    ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events)
+    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)

    if state != "success":
        if "force-tests" in pr_info.labels:
Some files were not shown because too many files have changed in this diff.