Add a lot of minor things from the private repository

Alexey Milovidov 2024-10-25 20:00:24 +02:00
parent aaca3b6156
commit 7d6beb5587
54 changed files with 135 additions and 18 deletions

View File

@@ -9,6 +9,8 @@
#include <memory>
#include "config.h"
namespace Poco
{

View File

@@ -12,6 +12,7 @@
#include "config.h"
namespace DB
{

View File

@@ -193,6 +193,7 @@ enum class AccessType : uint8_t
M(SYSTEM_SENDS, "SYSTEM STOP SENDS, SYSTEM START SENDS, STOP SENDS, START SENDS", GROUP, SYSTEM) \
M(SYSTEM_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES, STOP REPLICATION QUEUES, START REPLICATION QUEUES", TABLE, SYSTEM) \
M(SYSTEM_VIRTUAL_PARTS_UPDATE, "SYSTEM STOP VIRTUAL PARTS UPDATE, SYSTEM START VIRTUAL PARTS UPDATE, STOP VIRTUAL PARTS UPDATE, START VIRTUAL PARTS UPDATE", TABLE, SYSTEM) \
M(SYSTEM_REDUCE_BLOCKING_PARTS, "SYSTEM STOP REDUCE BLOCKING PARTS, SYSTEM START REDUCE BLOCKING PARTS, STOP REDUCE BLOCKING PARTS, START REDUCE BLOCKING PARTS", TABLE, SYSTEM) \
M(SYSTEM_DROP_REPLICA, "DROP REPLICA", TABLE, SYSTEM) \
M(SYSTEM_SYNC_REPLICA, "SYNC REPLICA", TABLE, SYSTEM) \
M(SYSTEM_REPLICA_READINESS, "SYSTEM REPLICA READY, SYSTEM REPLICA UNREADY", GLOBAL, SYSTEM) \

View File

@@ -22,6 +22,10 @@ public:
const std::vector<UUID> & current_roles,
const std::vector<UUID> & current_roles_with_admin_option);
std::shared_ptr<const EnabledRoles> getEnabledRoles(
boost::container::flat_set<UUID> current_roles,
boost::container::flat_set<UUID> current_roles_with_admin_option);
private:
using SubscriptionsOnRoles = std::vector<std::shared_ptr<scope_guard>>;

View File

@@ -284,7 +284,8 @@ TEST(AccessRights, Union)
"CREATE DICTIONARY, DROP DATABASE, DROP TABLE, DROP VIEW, DROP DICTIONARY, UNDROP TABLE, "
"TRUNCATE, OPTIMIZE, BACKUP, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, "
"SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM VIRTUAL PARTS UPDATE, "
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, "
"SYSTEM REPLICATION QUEUES, SYSTEM VIRTUAL PARTS UPDATE, SYSTEM REDUCE BLOCKING PARTS, "
"SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, "
"SYSTEM UNLOAD PRIMARY KEY, dictGet ON db1.*, GRANT TABLE ENGINE ON db1, "

View File

@@ -68,6 +68,11 @@ UUID loadServerUUID(const fs::path & server_uuid_file, Poco::Logger * log)
}
}
void ServerUUID::set(UUID & uuid)
{
server_uuid = uuid;
}
void ServerUUID::setRandomForUnitTests()
{
server_uuid = UUIDHelpers::generateV4();

View File

@@ -20,6 +20,9 @@ public:
/// Loads server UUID from file or creates new one. Should be called on daemon startup.
static void load(const fs::path & server_uuid_file, Poco::Logger * log);
/// Sets specific server UUID.
static void set(UUID & uuid);
static void setRandomForUnitTests();
};

View File

@@ -64,6 +64,9 @@ namespace UUIDHelpers
/// Generate random UUID.
UUID generateV4();
/// Generate UUID from hash of a string.
UUID makeUUIDv4FromHash(const String & string);
constexpr size_t HighBytes = (std::endian::native == std::endian::little) ? 0 : 1;
constexpr size_t LowBytes = (std::endian::native == std::endian::little) ? 1 : 0;
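These indices select which 64-bit word of the two-word UUID representation carries the high and low bytes, depending on host endianness. A minimal self-contained sketch of the idea, using illustrative types rather than the real ClickHouse UUID:

#include <bit>
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for the real UUID storage: two 64-bit words.
// On a little-endian host the canonical high bytes live in items[0];
// on a big-endian host they live in items[1], matching the constants above.
struct UInt128Storage
{
    uint64_t items[2];
};

constexpr size_t kHighBytes = (std::endian::native == std::endian::little) ? 0 : 1;
constexpr size_t kLowBytes = (std::endian::native == std::endian::little) ? 1 : 0;

inline uint64_t getHighBytes(const UInt128Storage & uuid) { return uuid.items[kHighBytes]; }
inline uint64_t getLowBytes(const UInt128Storage & uuid) { return uuid.items[kLowBytes]; }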

View File

@@ -43,6 +43,8 @@ void enableAllExperimentalSettings(ContextMutablePtr context)
context->setSetting("enable_zstd_qat_codec", 1);
context->setSetting("allow_create_index_without_type", 1);
context->setSetting("allow_experimental_s3queue", 1);
/// clickhouse-private settings
context->setSetting("allow_experimental_shared_set_join", 1);
}

View File

@@ -26,6 +26,9 @@ namespace DB
namespace Setting
{
extern const SettingsSeconds max_execution_time;
/// Cloud only
extern const SettingsBool cloud_mode;
}
namespace ErrorCodes
@@ -33,6 +36,7 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int DICTIONARY_ACCESS_DENIED;
extern const int UNSUPPORTED_METHOD;
extern const int SUPPORT_IS_DISABLED;
}
ExecutablePoolDictionarySource::ExecutablePoolDictionarySource(
@@ -192,6 +196,9 @@ void registerDictionarySourceExecutablePool(DictionarySourceFactory & factory)
const std::string & /* default_database */,
bool created_from_ddl) -> DictionarySourcePtr
{
if (global_context->getSettingsRef()[Setting::cloud_mode])
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Dictionary source of type `executable pool` is disabled");
if (dict_struct.has_expressions)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Dictionary source of type `executable_pool` does not support attribute expressions");

View File

@@ -29,7 +29,6 @@ namespace DB
ContextPtr global_context,
const std::string & /* default_database */,
bool /* created_from_ddl */) -> DictionarySourcePtr {
auto redis_config_prefix = config_prefix + ".redis";
auto host = config.getString(redis_config_prefix + ".host");

View File

@@ -28,6 +28,9 @@ namespace Setting
{
extern const SettingsSeconds http_receive_timeout;
extern const SettingsBool odbc_bridge_use_connection_pooling;
/// Cloud only
extern const SettingsBool cloud_mode;
}
namespace ErrorCodes
@@ -242,6 +245,9 @@ void registerDictionarySourceXDBC(DictionarySourceFactory & factory)
ContextPtr global_context,
const std::string & /* default_database */,
bool /* check_config */) -> DictionarySourcePtr {
if (global_context->getSettingsRef()[Setting::cloud_mode])
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Dictionary source of type `odbc` is disabled");
#if USE_ODBC
BridgeHelperPtr bridge = std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(
global_context,

View File

@@ -313,6 +313,8 @@ public:
return std::make_shared<FakeDiskTransaction>(*this);
}
/// Need to overwrite explicitly because this disk changes
/// a lot of "delegate" methods.
return createEncryptedTransaction();
}

View File

@@ -1,6 +1,5 @@
#include <Disks/DiskEncryptedTransaction.h>
#if USE_SSL
#include <IO/FileEncryptionCommon.h>
#include <Common/Exception.h>

View File

@@ -27,9 +27,11 @@ enum class MetadataStorageType : uint8_t
{
None,
Local,
Keeper,
Plain,
PlainRewritable,
StaticWeb,
Memory,
};
MetadataStorageType metadataTypeFromString(const String & type);

View File

@@ -497,7 +497,7 @@ public:
protected:
friend class DiskDecorator;
friend class DiskReadOnlyWrapper;
const String name;
@@ -580,6 +580,7 @@ inline String directoryPath(const String & path)
return fs::path(path).parent_path() / "";
}
}
template <>

View File

@@ -21,7 +21,7 @@ namespace ErrorCodes
size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size)
{
/// Only when the cache is used can we download bigger portions of FileSegments than we are actually going to read within a particular task.
if (!settings.enable_filesystem_cache)
if (!settings.enable_filesystem_cache && !settings.read_through_distributed_cache)
return settings.remote_fs_buffer_size;
/// Buffers used for prefetch and pre-download should be large enough, but not bigger than the whole file.
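The hunk above ends mid-function; a plausible continuation, sketched from that comment rather than taken verbatim from the source (the ReadSettings field names are assumptions), would clamp the larger prefetch buffer to the file size:

/// (sketch) plausible remainder of chooseBufferSizeForRemoteReading:
return std::min(std::max(settings.remote_fs_buffer_size, settings.prefetch_buffer_size), file_size);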

View File

@@ -56,6 +56,8 @@ public:
void deserialize(ReadBuffer & buf);
void deserializeFromString(const std::string & data);
/// This method was recently deleted from the public fork by Azat
void createFromSingleObject(ObjectStorageKey object_key, size_t bytes_size, size_t ref_count_, bool is_read_only_);
void serialize(WriteBuffer & buf, bool sync) const;
std::string serializeToString() const;

View File

@@ -71,6 +71,8 @@ public:
DiskObjectStorageMetadataPtr readMetadataUnlocked(const std::string & path, std::unique_lock<SharedMutex> & lock) const;
DiskObjectStorageMetadataPtr readMetadataUnlocked(const std::string & path, std::shared_lock<SharedMutex> & lock) const;
bool isReadOnly() const override { return disk->isReadOnly(); }
};
class MetadataStorageFromDiskTransaction final : public IMetadataTransaction, private MetadataOperationsHolder

View File

@@ -9,6 +9,7 @@
#include <Disks/IO/ReadBufferFromRemoteFSGather.h>
#include <Disks/IO/AsynchronousBoundedReadBuffer.h>
#include <Disks/IO/ThreadPoolRemoteFSReader.h>
#include <Disks/IO/getThreadPoolReader.h>
#include <IO/WriteBufferFromS3.h>
#include <IO/ReadBufferFromS3.h>
#include <IO/S3/getObjectInfo.h>

View File

@@ -146,4 +146,9 @@ bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const
return res;
}
void ReadBufferFromPocoSocketBase::setReceiveTimeout(size_t receive_timeout_microseconds)
{
socket.setReceiveTimeout(Poco::Timespan(0, receive_timeout_microseconds));
}
}
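Worth noting for the new setReceiveTimeout: Poco::Timespan's two-argument constructor takes (seconds, microseconds), while its single-argument constructor takes a microsecond count directly, so a microseconds value must not land in the seconds slot. A small illustrative helper (not part of the commit):

#include <Poco/Timespan.h>

// Converts a microsecond count to a Poco::Timespan; the single-argument
// constructor interprets its argument as microseconds (TimeDiff is Int64).
inline Poco::Timespan timespanFromMicroseconds(size_t microseconds)
{
    return Poco::Timespan(static_cast<Poco::Timespan::TimeDiff>(microseconds));
}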

View File

@@ -34,6 +34,8 @@ public:
ssize_t socketReceiveBytesImpl(char * ptr, size_t size);
void setReceiveTimeout(size_t receive_timeout_microseconds);
private:
AsyncCallback async_callback;
std::string socket_description;

View File

@@ -49,7 +49,6 @@ bool S3Exception::isRetryableError() const
}
}
namespace DB::ErrorCodes
{
extern const int S3_ERROR;

View File

@@ -20,6 +20,8 @@ namespace ActionLocks
extern const StorageActionBlockType PullReplicationLog = 8;
extern const StorageActionBlockType Cleanup = 9;
extern const StorageActionBlockType ViewRefresh = 10;
extern const StorageActionBlockType VirtualPartsUpdate = 11;
extern const StorageActionBlockType ReduceBlockingParts = 12;
}

View File

@@ -96,6 +96,7 @@ void BlobStorageLog::prepareTable()
std::unique_lock lock{prepare_mutex};
const auto & relative_data_path = merge_tree_table->getRelativeDataPath();
prefix_to_ignore = normalizePath(relative_data_path);
LOG_DEBUG(log, "Will ignore blobs with prefix {}", prefix_to_ignore);
}
}

View File

@@ -12,7 +12,7 @@ namespace DB
/// Based on the LRU algorithm implementation, the record with the lowest priority is stored at
/// the head of the queue, and the record with the highest priority is stored at the tail.
class LRUFileCachePriority final : public IFileCachePriority
class LRUFileCachePriority : public IFileCachePriority
{
protected:
struct State
@@ -85,6 +85,8 @@ public:
bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) override;
FileCachePriorityPtr copy() const { return std::make_unique<LRUFileCachePriority>(max_size, max_elements, state); }
private:
class LRUIterator;
using LRUQueue = std::list<EntryPtr>;
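The comment above describes the queue discipline; a compact sketch of that LRU ordering with simplified types (not the real IFileCachePriority interface):

#include <cstddef>
#include <list>
#include <string>

// Least-recently-used entry at the head (evicted first); a touched entry
// is spliced to the tail, making it the highest-priority record.
struct Entry
{
    std::string key;
    size_t size = 0;
};

class SimpleLRUQueue
{
public:
    using Iterator = std::list<Entry>::iterator;

    Iterator add(Entry entry) { return queue.insert(queue.end(), std::move(entry)); }
    void touch(Iterator it) { queue.splice(queue.end(), queue, it); }
    const Entry & evictionCandidate() const { return queue.front(); }

private:
    std::list<Entry> queue;
};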

View File

@@ -72,7 +72,12 @@ public:
bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) override;
FileCachePriorityPtr copy() const { return std::make_unique<SLRUFileCachePriority>(max_size, max_elements, size_ratio, probationary_queue.state, protected_queue.state); }
private:
using LRUIterator = LRUFileCachePriority::LRUIterator;
using LRUQueue = std::list<Entry>;
double size_ratio;
LRUFileCachePriority protected_queue;
LRUFileCachePriority probationary_queue;

View File

@@ -266,6 +266,10 @@ public:
void triggerReloadDisksTask(const Strings & new_added_disks);
void stopReplicatedDDLQueries();
void startReplicatedDDLQueries();
bool canPerformReplicatedDDLQueries() const;
private:
// The global instance of database catalog. unique_ptr is to allow
// deferred initialization. Thought I'd use std::optional, but I can't
@@ -361,6 +365,8 @@ private:
std::mutex reload_disks_mutex;
std::set<String> disks_to_reload;
static constexpr time_t DBMS_DEFAULT_DISK_RELOAD_PERIOD_SEC = 5;
std::atomic<bool> replicated_ddl_queries_enabled = false;
};
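A minimal sketch of how the new toggle plausibly fits together, assuming only the member names visible in the diff: an atomic flag on the catalog, flipped by the new stopReplicatedDDLQueries/startReplicatedDDLQueries handlers and consulted by executeDDLQueryOnCluster later in this commit, which exempts SYSTEM queries so the flag can be turned back on.

#include <atomic>

// Sketch only: the flag defaults to false (replicated DDL disabled) and is
// toggled by the system-query handlers shown in the diff.
class DatabaseCatalogSketch
{
public:
    void stopReplicatedDDLQueries() { replicated_ddl_queries_enabled = false; }
    void startReplicatedDDLQueries() { replicated_ddl_queries_enabled = true; }
    bool canPerformReplicatedDDLQueries() const { return replicated_ddl_queries_enabled; }

private:
    std::atomic<bool> replicated_ddl_queries_enabled = false;
};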

View File

@@ -82,6 +82,9 @@ private:
AccessRightsElements getRequiredAccessForDDLOnCluster() const;
void startStopAction(StorageActionBlockType action_type, bool start);
void stopReplicatedDDLQueries();
void startReplicatedDDLQueries();
};

View File

@@ -40,7 +40,6 @@ class MutationsInterpreter
{
private:
struct Stage;
public:
struct Settings
{
@@ -112,6 +111,10 @@ public:
MutationKind::MutationKindEnum getMutationKind() const { return mutation_kind.mutation_kind; }
/// Returns a chain of actions that can be
/// applied to a block to execute mutation commands.
std::vector<MutationActions> getMutationActions() const;
/// Internal class which represents a data part for MergeTree
/// or just storage for other storages.
/// The main idea is to create a dedicated reading from MergeTree part.

View File

@@ -98,7 +98,6 @@ public:
/// Closes and removes session
void closeSession(const String & session_id);
private:
std::shared_ptr<SessionLog> getSessionLog() const;
ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;

View File

@@ -19,6 +19,7 @@ Squashing::Squashing(Block header_, size_t min_block_size_rows_, size_t min_bloc
, min_block_size_bytes(min_block_size_bytes_)
, header(header_)
{
LOG_TEST(getLogger("Squashing"), "header columns {}", header.columns());
}
Chunk Squashing::flush()

View File

@@ -14,6 +14,7 @@
#include <Core/Settings.h>
#include <Common/Macros.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include "Parsers/ASTSystemQuery.h"
#include <Databases/DatabaseReplicated.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
@@ -93,6 +94,12 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context,
if (!context->getSettingsRef()[Setting::allow_distributed_ddl])
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Distributed DDL queries are prohibited for the user");
bool is_system_query = dynamic_cast<ASTSystemQuery *>(query_ptr.get()) != nullptr;
bool replicated_ddl_queries_enabled = DatabaseCatalog::instance().canPerformReplicatedDDLQueries();
if (!is_system_query && !replicated_ddl_queries_enabled)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Replicated DDL queries are disabled");
if (const auto * query_alter = query_ptr->as<ASTAlterQuery>())
{
for (const auto & command : query_alter->command_list->children)

View File

@@ -99,6 +99,7 @@ namespace DB
MR_MACROS(COMPRESSION, "COMPRESSION") \
MR_MACROS(CONST, "CONST") \
MR_MACROS(CONSTRAINT, "CONSTRAINT") \
MR_MACROS(CONNECTIONS, "CONNECTIONS") \
MR_MACROS(CREATE_POLICY, "CREATE POLICY") \
MR_MACROS(CREATE_PROFILE, "CREATE PROFILE") \
MR_MACROS(CREATE_QUOTA, "CREATE QUOTA") \

View File

@@ -9,6 +9,7 @@
#include <Common/SensitiveDataMasker.h>
#include <Common/SipHash.h>
#include <Common/StringUtils.h>
#include <algorithm>
namespace DB

View File

@@ -53,6 +53,9 @@ PlacementInfo & PlacementInfo::instance()
void PlacementInfo::initialize(const Poco::Util::AbstractConfiguration & config)
try
{
if (initialized)
return;
if (!config.has(DB::PlacementInfo::PLACEMENT_CONFIG_PREFIX))
{
availability_zone = "";

View File

@@ -22,6 +22,7 @@ struct FutureMergedMutatedPart
MergeTreeDataPartFormat part_format;
MergeTreePartInfo part_info;
MergeTreeData::DataPartsVector parts;
std::vector<MergeTreePartInfo> blocking_parts_to_remove;
MergeType merge_type = MergeType::Regular;
const MergeTreePartition & getPartition() const { return parts.front()->partition; }

View File

@@ -18,6 +18,7 @@ public:
using ValueSizeMap = std::map<std::string, double>;
using VirtualFields = std::unordered_map<String, Field>;
using DeserializeBinaryBulkStateMap = std::map<std::string, ISerialization::DeserializeBinaryBulkStatePtr>;
using FileStreams = std::map<std::string, std::unique_ptr<MergeTreeReaderStream>>;
IMergeTreeReader(
MergeTreeDataPartInfoForReaderPtr data_part_info_for_read_,

View File

@@ -372,6 +372,8 @@ bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrite
{
part = merge_task->getFuture().get();
part->is_prewarmed = true;
storage.merger_mutator.renameMergedTemporaryPart(part, parts, NO_TRANSACTION_PTR, *transaction_ptr);
/// Why do we reset the task here? Because it holds a shared pointer to the part, and tryRemovePartImmediately will
/// not be able to remove the part and will throw an exception (because someone holds the pointer).

View File

@@ -83,6 +83,9 @@ bool MergeProjectionPartsTask::executeStep()
".tmp_proj");
next_level_parts.push_back(executeHere(tmp_part_merge_task));
/// FIXME (alesapin) we should use some temporary storage for this,
/// not commit each subprojection part
next_level_parts.back()->getDataPartStorage().commitTransaction();
next_level_parts.back()->is_temp = true;
}

View File

@@ -8,7 +8,7 @@ namespace DB
STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion)
const MergeTreeDataFormatVersion MERGE_TREE_DATA_OLD_FORMAT_VERSION {0};
const MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1};
static constexpr MergeTreeDataFormatVersion MERGE_TREE_DATA_OLD_FORMAT_VERSION {0};
static constexpr MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1};
}
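A likely motivation for the `const` to `static constexpr` change (an assumption; the commit does not say): for class-type constants defined in a header, only constexpr guarantees they can be read in constant expressions. A tiny illustration with a simplified stand-in for STRONG_TYPEDEF:

#include <cstdint>

// Simplified stand-in for STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion).
struct FormatVersion
{
    uint32_t value;
};

// With constexpr the constant can be read in a constant expression;
// a plain namespace-scope `const` object of class type cannot.
static constexpr FormatVersion OLD_FORMAT_VERSION {0};
static_assert(OLD_FORMAT_VERSION.value == 0);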

View File

@@ -106,9 +106,11 @@ public:
PreformattedMessage & out_disable_reason,
bool dry_run = false);
/// Actually the freshest partition, the one with the biggest modification_time
String getBestPartitionToOptimizeEntire(const PartitionsInfo & partitions_info) const;
/// Useful to quickly get a list of partitions that contain parts that we may want to merge
/// The result is limited by top_number_of_partitions_to_consider_for_merge
PartitionIdsHint getPartitionsThatMayBeMerged(
size_t max_total_size_to_merge,
const AllowedMergingPredicate & can_merge_callback,

View File

@@ -45,6 +45,7 @@ public:
enum Value
{
Full,
Packed,
Unknown,
};

View File

@@ -179,8 +179,8 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk(
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Can't take information about index granularity from blocks, when non empty index_granularity array specified");
if (!getDataPartStorage().exists())
getDataPartStorage().createDirectories();
/// We don't need to check if it exists or not, createDirectories doesn't throw
getDataPartStorage().createDirectories();
if (settings.rewrite_primary_key)
initPrimaryIndex();

View File

@@ -108,6 +108,14 @@ std::optional<MarkType> MergeTreeIndexGranularityInfo::getMarksTypeFromFilesyste
return {};
}
MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(
MarkType mark_type_, size_t index_granularity_, size_t index_granularity_bytes_)
: mark_type(mark_type_)
, fixed_index_granularity(index_granularity_)
, index_granularity_bytes(index_granularity_bytes_)
{
}
MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(const MergeTreeData & storage, MergeTreeDataPartType type_)
: MergeTreeIndexGranularityInfo(storage, {storage.canUseAdaptiveGranularity(), (*storage.getSettings())[MergeTreeSetting::compress_marks], type_.getValue()})
{

View File

@@ -49,6 +49,7 @@ public:
MergeTreeIndexGranularityInfo(const MergeTreeData & storage, MarkType mark_type_);
MergeTreeIndexGranularityInfo(MergeTreeDataPartType type_, bool is_adaptive_, size_t index_granularity_, size_t index_granularity_bytes_);
MergeTreeIndexGranularityInfo(MarkType mark_type_, size_t index_granularity_, size_t index_granularity_bytes_);
void changeGranularityIfRequired(const IDataPartStorage & data_part_storage);

View File

@@ -26,11 +26,11 @@ void checkMutationStatus(std::optional<MergeTreeMutationStatus> & status, const
throw Exception(
ErrorCodes::UNFINISHED,
"Exception happened during execution of mutation{} '{}' with part '{}' reason: '{}'. This error maybe retryable or not. "
"In case of unretryable error, mutation can be killed with KILL MUTATION query",
"In case of unretryable error, mutation can be killed with KILL MUTATION query \n\n{}\n",
mutation_ids.size() > 1 ? "s" : "",
boost::algorithm::join(mutation_ids, ", "),
status->latest_failed_part,
status->latest_fail_reason);
status->latest_fail_reason, StackTrace().toString());
}
}

View File

@@ -46,6 +46,13 @@ struct MergeTreePartInfo
< std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation);
}
bool operator>(const MergeTreePartInfo & rhs) const
{
return std::forward_as_tuple(partition_id, min_block, max_block, level, mutation)
> std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation);
}
bool operator==(const MergeTreePartInfo & rhs) const
{
return !(*this != rhs);
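The new operator> uses the same tuple idiom as the existing comparisons; in isolation (illustrative struct, not the real MergeTreePartInfo):

#include <tuple>

// std::forward_as_tuple builds a tuple of references, so the fields are
// compared lexicographically without being copied.
struct PartKey
{
    int min_block;
    int max_block;
    int level;

    bool operator>(const PartKey & rhs) const
    {
        return std::forward_as_tuple(min_block, max_block, level)
             > std::forward_as_tuple(rhs.min_block, rhs.max_block, rhs.level);
    }
};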

View File

@@ -35,7 +35,7 @@ struct PrewhereExprStep
bool remove_filter_column = false;
bool need_filter = false;
/// Some PREWHERE steps should be executed without conversions.
/// Some PREWHERE steps should be executed without conversions (e.g. early mutation steps)
/// A step without alter conversion cannot be executed after step with alter conversions.
bool perform_alter_conversions = false;
};

View File

@@ -3,8 +3,8 @@
#include <Storages/MergeTree/ReplicatedMergeTreeSink.h>
#include <Storages/MergeTree/InsertBlockInfo.h>
#include <Interpreters/PartLog.h>
#include <Common/Exception.h>
#include <Processors/Transforms/DeduplicationTokenTransforms.h>
#include <Common/Exception.h>
#include <Common/FailPoint.h>
#include <Common/ProfileEventsScope.h>
#include <Common/SipHash.h>
@@ -690,6 +690,8 @@ std::pair<std::vector<String>, bool> ReplicatedMergeTreeSinkImpl<async_insert>::
///
/// metadata_snapshot->check(part->getColumns());
part->is_prewarmed = true;
auto block_id_path = getBlockIdPath(storage.zookeeper_path, block_id);
CommitRetryContext retry_context;

View File

@@ -135,7 +135,6 @@ bool isRetryableException(std::exception_ptr exception_ptr)
}
}
static IMergeTreeDataPart::Checksums checkDataPart(
MergeTreeData::DataPartPtr data_part,
const IDataPartStorage & data_part_storage,
@@ -422,6 +421,7 @@ IMergeTreeDataPart::Checksums checkDataPart(
}
ReadSettings read_settings;
read_settings.read_through_distributed_cache = false;
read_settings.enable_filesystem_cache = false;
read_settings.enable_filesystem_cache_log = false;
read_settings.enable_filesystem_read_prefetches_log = false;

View File

@@ -5,6 +5,7 @@
#if USE_AWS_S3
#include <IO/S3Settings.h>
#include <Storages/ObjectStorage/StorageObjectStorage.h>
#include <Parsers/IAST_fwd.h>
namespace DB
{

View File

@@ -150,6 +150,7 @@ size_t estimateValueSize(
}
}
}
ColumnPtr fillColumnWithRandomData(
const DataTypePtr type,
@@ -539,6 +540,8 @@ ColumnPtr fillColumnWithRandomData(
}
}
namespace
{
class GenerateSource : public ISource
{

View File

@@ -17,6 +17,8 @@ struct StorageID;
class ASTCreateQuery;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
class IDatabase;
using DatabasePtr = std::shared_ptr<IDatabase>;
/// Helper for replicated tables that use zookeeper for coordination among replicas.
/// Handles things like: