Revert "Miscellaneous"

Alexey Milovidov authored on 2024-10-26 20:59:26 +02:00; committed by GitHub
parent c99b77a1c7
commit d6b38a9eae
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
56 changed files with 21 additions and 165 deletions

View File

@@ -2267,21 +2267,6 @@ try
throw;
}
bool found_stop_flag = false;
if (has_zookeeper && global_context->getMacros()->getMacroMap().contains("replica"))
{
auto zookeeper = global_context->getZooKeeper();
String stop_flag_path = "/clickhouse/stop_replicated_ddl_queries/{replica}";
stop_flag_path = global_context->getMacros()->expand(stop_flag_path);
found_stop_flag = zookeeper->exists(stop_flag_path);
}
if (found_stop_flag)
LOG_INFO(log, "Found a stop flag for replicated DDL queries. They will be disabled");
else
DatabaseCatalog::instance().startReplicatedDDLQueries();
LOG_DEBUG(log, "Loaded metadata.");
if (has_trace_collector)
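
For reference, the flag checked by the reverted startup code above is nothing more than a ZooKeeper node whose presence disables replicated DDL queries. A minimal sketch of how such a flag could be toggled with ClickHouse's zkutil wrapper; the path layout is taken from the hunk, while the helper itself is purely illustrative and not part of the reverted code:

#include <Common/ZooKeeper/ZooKeeper.h>

/// Illustrative helper: the node's presence is the flag, its content is irrelevant.
void setReplicatedDDLStopFlag(const zkutil::ZooKeeperPtr & zookeeper, const std::string & replica, bool stop)
{
    const std::string path = "/clickhouse/stop_replicated_ddl_queries/" + replica;
    if (stop)
    {
        zookeeper->createAncestors(path);
        zookeeper->createIfNotExists(path, "");
    }
    else
        zookeeper->tryRemove(path);
}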

View File

@@ -9,8 +9,6 @@
#include <memory>
#include "config.h"
namespace Poco
{

View File

@@ -12,7 +12,6 @@
#include "config.h"
namespace DB
{

View File

@@ -193,7 +193,6 @@ enum class AccessType : uint8_t
M(SYSTEM_SENDS, "SYSTEM STOP SENDS, SYSTEM START SENDS, STOP SENDS, START SENDS", GROUP, SYSTEM) \
M(SYSTEM_REPLICATION_QUEUES, "SYSTEM STOP REPLICATION QUEUES, SYSTEM START REPLICATION QUEUES, STOP REPLICATION QUEUES, START REPLICATION QUEUES", TABLE, SYSTEM) \
M(SYSTEM_VIRTUAL_PARTS_UPDATE, "SYSTEM STOP VIRTUAL PARTS UPDATE, SYSTEM START VIRTUAL PARTS UPDATE, STOP VIRTUAL PARTS UPDATE, START VIRTUAL PARTS UPDATE", TABLE, SYSTEM) \
M(SYSTEM_REDUCE_BLOCKING_PARTS, "SYSTEM STOP REDUCE BLOCKING PARTS, SYSTEM START REDUCE BLOCKING PARTS, STOP REDUCE BLOCKING PARTS, START REDUCE BLOCKING PARTS", TABLE, SYSTEM) \
M(SYSTEM_DROP_REPLICA, "DROP REPLICA", TABLE, SYSTEM) \
M(SYSTEM_SYNC_REPLICA, "SYNC REPLICA", TABLE, SYSTEM) \
M(SYSTEM_REPLICA_READINESS, "SYSTEM REPLICA READY, SYSTEM REPLICA UNREADY", GLOBAL, SYSTEM) \

View File

@@ -22,10 +22,6 @@ public:
const std::vector<UUID> & current_roles,
const std::vector<UUID> & current_roles_with_admin_option);
std::shared_ptr<const EnabledRoles> getEnabledRoles(
boost::container::flat_set<UUID> current_roles,
boost::container::flat_set<UUID> current_roles_with_admin_option);
private:
using SubscriptionsOnRoles = std::vector<std::shared_ptr<scope_guard>>;

View File

@@ -284,8 +284,7 @@ TEST(AccessRights, Union)
"CREATE DICTIONARY, DROP DATABASE, DROP TABLE, DROP VIEW, DROP DICTIONARY, UNDROP TABLE, "
"TRUNCATE, OPTIMIZE, BACKUP, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, "
"SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, "
"SYSTEM REPLICATION QUEUES, SYSTEM VIRTUAL PARTS UPDATE, SYSTEM REDUCE BLOCKING PARTS, "
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM VIRTUAL PARTS UPDATE, "
"SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, "
"SYSTEM UNLOAD PRIMARY KEY, dictGet ON db1.*, GRANT TABLE ENGINE ON db1, "

View File

@@ -68,11 +68,6 @@ UUID loadServerUUID(const fs::path & server_uuid_file, Poco::Logger * log)
}
}
void ServerUUID::set(UUID & uuid)
{
server_uuid = uuid;
}
void ServerUUID::setRandomForUnitTests()
{
server_uuid = UUIDHelpers::generateV4();

View File

@@ -20,9 +20,6 @@ public:
/// Loads server UUID from file or creates new one. Should be called on daemon startup.
static void load(const fs::path & server_uuid_file, Poco::Logger * log);
/// Sets specific server UUID.
static void set(UUID & uuid);
static void setRandomForUnitTests();
};

View File

@@ -64,9 +64,6 @@ namespace UUIDHelpers
/// Generate random UUID.
UUID generateV4();
/// Generate UUID from hash of a string.
UUID makeUUIDv4FromHash(const String & string);
constexpr size_t HighBytes = (std::endian::native == std::endian::little) ? 0 : 1;
constexpr size_t LowBytes = (std::endian::native == std::endian::little) ? 1 : 0;
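
The two constants above select which of the UUID's two 64-bit words is the most significant half on the host, so byte-order-sensitive code can index them uniformly. A hedged sketch of accessors built on top of them, assuming UUID wraps a UInt128 with a two-element items array (the accessor names here are illustrative):

inline UInt64 & getHighBytes(UUID & uuid)
{
    return uuid.toUnderType().items[UUIDHelpers::HighBytes];
}

inline const UInt64 & getLowBytes(const UUID & uuid)
{
    return uuid.toUnderType().items[UUIDHelpers::LowBytes];
}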

View File

@@ -85,7 +85,6 @@ namespace ErrorCodes
extern const int NO_ACTIVE_REPLICAS;
extern const int CANNOT_GET_REPLICATED_DATABASE_SNAPSHOT;
extern const int CANNOT_RESTORE_TABLE;
extern const int QUERY_IS_PROHIBITED;
extern const int SUPPORT_IS_DISABLED;
}
@@ -1058,9 +1057,6 @@ BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, Contex
{
waitDatabaseStarted();
if (!DatabaseCatalog::instance().canPerformReplicatedDDLQueries())
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Replicated DDL queries are disabled");
if (query_context->getCurrentTransaction() && query_context->getSettingsRef()[Setting::throw_on_unsupported_query_inside_transaction])
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Distributed DDL queries inside transactions are not supported");
@@ -1241,16 +1237,14 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
String query = fmt::format("CREATE DATABASE IF NOT EXISTS {} ENGINE=Ordinary", backQuoteIfNeed(to_db_name));
auto query_context = Context::createCopy(getContext());
query_context->setSetting("allow_deprecated_database_ordinary", 1);
query_context->setSetting("cloud_mode", false);
executeQuery(query, query_context, QueryFlags{ .internal = true });
executeQuery(query, query_context, QueryFlags{.internal = true});
/// But we want to avoid discarding UUID of ReplicatedMergeTree tables, because it will not work
/// if zookeeper_path contains {uuid} macro. Replicated databases do not recreate replicated tables on recovery,
/// so it's ok to save UUID of replicated table.
query = fmt::format("CREATE DATABASE IF NOT EXISTS {} ENGINE=Atomic", backQuoteIfNeed(to_db_name_replicated));
query_context = Context::createCopy(getContext());
query_context->setSetting("cloud_mode", false);
executeQuery(query, query_context, QueryFlags{ .internal = true });
executeQuery(query, query_context, QueryFlags{.internal = true});
}
size_t moved_tables = 0;
@@ -1640,7 +1634,7 @@ void DatabaseReplicated::dropTable(ContextPtr local_context, const String & tabl
auto table = tryGetTable(table_name, getContext());
if (!table)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} doesn't exist", table_name);
if (table->getName() == "MaterializedView" || table->getName() == "WindowView" || table->getName() == "SharedSet" || table->getName() == "SharedJoin")
if (table->getName() == "MaterializedView" || table->getName() == "WindowView")
{
/// Avoid recursive locking of metadata_mutex
table->dropInnerTableIfAny(sync, local_context);

View File

@@ -43,8 +43,6 @@ void enableAllExperimentalSettings(ContextMutablePtr context)
context->setSetting("enable_zstd_qat_codec", 1);
context->setSetting("allow_create_index_without_type", 1);
context->setSetting("allow_experimental_s3queue", 1);
/// clickhouse-private settings
context->setSetting("allow_experimental_shared_set_join", 1);
}

View File

@@ -26,9 +26,6 @@ namespace DB
namespace Setting
{
extern const SettingsSeconds max_execution_time;
/// Cloud only
extern const SettingsBool cloud_mode;
}
namespace ErrorCodes
@@ -36,7 +33,6 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int DICTIONARY_ACCESS_DENIED;
extern const int UNSUPPORTED_METHOD;
extern const int SUPPORT_IS_DISABLED;
}
ExecutablePoolDictionarySource::ExecutablePoolDictionarySource(
@@ -196,9 +192,6 @@ void registerDictionarySourceExecutablePool(DictionarySourceFactory & factory)
const std::string & /* default_database */,
bool created_from_ddl) -> DictionarySourcePtr
{
if (global_context->getSettingsRef()[Setting::cloud_mode])
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Dictionary source of type `executable pool` is disabled");
if (dict_struct.has_expressions)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Dictionary source of type `executable_pool` does not support attribute expressions");

View File

@@ -29,6 +29,7 @@ namespace DB
ContextPtr global_context,
const std::string & /* default_database */,
bool /* created_from_ddl */) -> DictionarySourcePtr {
auto redis_config_prefix = config_prefix + ".redis";
auto host = config.getString(redis_config_prefix + ".host");

View File

@@ -28,9 +28,6 @@ namespace Setting
{
extern const SettingsSeconds http_receive_timeout;
extern const SettingsBool odbc_bridge_use_connection_pooling;
/// Cloud only
extern const SettingsBool cloud_mode;
}
namespace ErrorCodes
@@ -245,9 +242,6 @@ void registerDictionarySourceXDBC(DictionarySourceFactory & factory)
ContextPtr global_context,
const std::string & /* default_database */,
bool /* check_config */) -> DictionarySourcePtr {
if (global_context->getSettingsRef()[Setting::cloud_mode])
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Dictionary source of type `odbc` is disabled");
#if USE_ODBC
BridgeHelperPtr bridge = std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(
global_context,

View File

@@ -313,8 +313,6 @@ public:
return std::make_shared<FakeDiskTransaction>(*this);
}
/// Need to overwrite explicitly because this disk changes
/// a lot of "delegate" methods.
return createEncryptedTransaction();
}

View File

@@ -1,5 +1,6 @@
#include <Disks/DiskEncryptedTransaction.h>
#if USE_SSL
#include <IO/FileEncryptionCommon.h>
#include <Common/Exception.h>

View File

@@ -27,11 +27,9 @@ enum class MetadataStorageType : uint8_t
{
None,
Local,
Keeper,
Plain,
PlainRewritable,
StaticWeb,
Memory,
};
MetadataStorageType metadataTypeFromString(const String & type);
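
A plausible shape for metadataTypeFromString over the trimmed enum above; the accepted spellings are assumptions inferred from the enumerator names, not taken from the diff, and the real function likely throws a DB::Exception rather than a standard exception:

#include <stdexcept>
#include <string>

MetadataStorageType metadataTypeFromString(const std::string & type)
{
    if (type == "local")
        return MetadataStorageType::Local;
    if (type == "plain")
        return MetadataStorageType::Plain;
    if (type == "plain_rewritable")
        return MetadataStorageType::PlainRewritable;
    if (type == "web")
        return MetadataStorageType::StaticWeb;
    if (type == "memory")
        return MetadataStorageType::Memory;
    throw std::invalid_argument("Unknown metadata storage type: " + type);
}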

View File

@@ -497,7 +497,7 @@ public:
protected:
friend class DiskReadOnlyWrapper;
friend class DiskDecorator;
const String name;
@@ -580,7 +580,6 @@ inline String directoryPath(const String & path)
return fs::path(path).parent_path() / "";
}
}
template <>

View File

@@ -21,7 +21,7 @@ namespace ErrorCodes
size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size)
{
/// Only when the cache is used could we download bigger portions of FileSegments than what we are actually going to read within a particular task.
if (!settings.enable_filesystem_cache && !settings.read_through_distributed_cache)
if (!settings.enable_filesystem_cache)
return settings.remote_fs_buffer_size;
/// Buffers used for prefetch and pre-download better to have enough size, but not bigger than the whole file.

View File

@@ -56,8 +56,6 @@ public:
void deserialize(ReadBuffer & buf);
void deserializeFromString(const std::string & data);
/// This method was deleted from public fork recently by Azat
void createFromSingleObject(ObjectStorageKey object_key, size_t bytes_size, size_t ref_count_, bool is_read_only_);
void serialize(WriteBuffer & buf, bool sync) const;
std::string serializeToString() const;

View File

@@ -9,7 +9,6 @@
#include <Disks/IO/ReadBufferFromRemoteFSGather.h>
#include <Disks/IO/AsynchronousBoundedReadBuffer.h>
#include <Disks/IO/ThreadPoolRemoteFSReader.h>
#include <Disks/IO/getThreadPoolReader.h>
#include <IO/WriteBufferFromS3.h>
#include <IO/ReadBufferFromS3.h>
#include <IO/S3/getObjectInfo.h>

View File

@@ -146,9 +146,4 @@ bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const
return res;
}
void ReadBufferFromPocoSocketBase::setReceiveTimeout(size_t receive_timeout_microseconds)
{
socket.setReceiveTimeout(Poco::Timespan(receive_timeout_microseconds, 0));
}
}
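
One detail of the removed setter worth knowing: Poco::Timespan has a one-argument constructor taking microseconds and a two-argument constructor taking (seconds, microseconds). An illustrative variant of the setter with unambiguous units, using the single-argument form:

#include <Poco/Net/StreamSocket.h>
#include <Poco/Timespan.h>

/// Illustrative only: Timespan(TimeDiff) interprets its argument as microseconds.
void setReceiveTimeoutMicroseconds(Poco::Net::StreamSocket & socket, size_t receive_timeout_microseconds)
{
    socket.setReceiveTimeout(Poco::Timespan(static_cast<Poco::Timespan::TimeDiff>(receive_timeout_microseconds)));
}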

View File

@@ -34,8 +34,6 @@ public:
ssize_t socketReceiveBytesImpl(char * ptr, size_t size);
void setReceiveTimeout(size_t receive_timeout_microseconds);
private:
AsyncCallback async_callback;
std::string socket_description;

View File

@@ -48,6 +48,7 @@ bool S3Exception::isRetryableError() const
}
}
namespace DB::ErrorCodes
{
extern const int S3_ERROR;

View File

@@ -20,8 +20,6 @@ namespace ActionLocks
extern const StorageActionBlockType PullReplicationLog = 8;
extern const StorageActionBlockType Cleanup = 9;
extern const StorageActionBlockType ViewRefresh = 10;
extern const StorageActionBlockType VirtualPartsUpdate = 11;
extern const StorageActionBlockType ReduceBlockingParts = 12;
}

View File

@@ -96,7 +96,6 @@ void BlobStorageLog::prepareTable()
std::unique_lock lock{prepare_mutex};
const auto & relative_data_path = merge_tree_table->getRelativeDataPath();
prefix_to_ignore = normalizePath(relative_data_path);
LOG_DEBUG(log, "Will ignore blobs with prefix {}", prefix_to_ignore);
}
}

View File

@@ -12,7 +12,7 @@ namespace DB
/// Based on the LRU algorithm implementation, the record with the lowest priority is stored at
/// the head of the queue, and the record with the highest priority is stored at the tail.
class LRUFileCachePriority : public IFileCachePriority
class LRUFileCachePriority final : public IFileCachePriority
{
protected:
struct State
@@ -85,8 +85,6 @@ public:
bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) override;
FileCachePriorityPtr copy() const { return std::make_unique<LRUFileCachePriority>(max_size, max_elements, state); }
private:
class LRUIterator;
using LRUQueue = std::list<EntryPtr>;

View File

@@ -72,12 +72,7 @@ public:
bool modifySizeLimits(size_t max_size_, size_t max_elements_, double size_ratio_, const CachePriorityGuard::Lock &) override;
FileCachePriorityPtr copy() const { return std::make_unique<SLRUFileCachePriority>(max_size, max_elements, size_ratio, probationary_queue.state, protected_queue.state); }
private:
using LRUIterator = LRUFileCachePriority::LRUIterator;
using LRUQueue = std::list<Entry>;
double size_ratio;
LRUFileCachePriority protected_queue;
LRUFileCachePriority probationary_queue;

View File

@@ -1817,21 +1817,6 @@ void DatabaseCatalog::triggerReloadDisksTask(const Strings & new_added_disks)
(*reload_disks_task)->schedule();
}
void DatabaseCatalog::stopReplicatedDDLQueries()
{
replicated_ddl_queries_enabled = false;
}
void DatabaseCatalog::startReplicatedDDLQueries()
{
replicated_ddl_queries_enabled = true;
}
bool DatabaseCatalog::canPerformReplicatedDDLQueries() const
{
return replicated_ddl_queries_enabled;
}
static void maybeUnlockUUID(UUID uuid)
{
if (uuid == UUIDHelpers::Nil)
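
The three methods removed above form a simple atomic gate around replicated DDL, matching the std::atomic<bool> replicated_ddl_queries_enabled member removed from the header below. A self-contained sketch of the same pattern, with names changed to mark it as an illustration:

#include <atomic>

/// Stand-in for the gate removed from DatabaseCatalog: a boolean toggled by
/// SYSTEM queries and consulted before enqueueing replicated DDL.
class ReplicatedDDLGate
{
public:
    void stop() { enabled = false; }
    void start() { enabled = true; }
    bool canPerform() const { return enabled; }

private:
    std::atomic<bool> enabled{false};
};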

View File

@@ -266,10 +266,6 @@ public:
void triggerReloadDisksTask(const Strings & new_added_disks);
void stopReplicatedDDLQueries();
void startReplicatedDDLQueries();
bool canPerformReplicatedDDLQueries() const;
private:
// The global instance of database catalog. unique_ptr is to allow
// deferred initialization. Thought I'd use std::optional, but I can't
@@ -365,8 +361,6 @@ private:
std::mutex reload_disks_mutex;
std::set<String> disks_to_reload;
static constexpr time_t DBMS_DEFAULT_DISK_RELOAD_PERIOD_SEC = 5;
std::atomic<bool> replicated_ddl_queries_enabled = false;
};

View File

@@ -82,9 +82,6 @@ private:
AccessRightsElements getRequiredAccessForDDLOnCluster() const;
void startStopAction(StorageActionBlockType action_type, bool start);
void stopReplicatedDDLQueries();
void startReplicatedDDLQueries();
};

View File

@@ -40,6 +40,7 @@ class MutationsInterpreter
{
private:
struct Stage;
public:
struct Settings
{

View File

@@ -98,6 +98,7 @@ public:
/// Closes and removes session
void closeSession(const String & session_id);
private:
std::shared_ptr<SessionLog> getSessionLog() const;
ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;

View File

@@ -19,7 +19,6 @@ Squashing::Squashing(Block header_, size_t min_block_size_rows_, size_t min_bloc
, min_block_size_bytes(min_block_size_bytes_)
, header(header_)
{
LOG_TEST(getLogger("Squashing"), "header columns {}", header.columns());
}
Chunk Squashing::flush()

View File

@@ -14,7 +14,6 @@
#include <Core/Settings.h>
#include <Common/Macros.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include "Parsers/ASTSystemQuery.h"
#include <Databases/DatabaseReplicated.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
@@ -94,12 +93,6 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context,
if (!context->getSettingsRef()[Setting::allow_distributed_ddl])
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Distributed DDL queries are prohibited for the user");
bool is_system_query = dynamic_cast<ASTSystemQuery *>(query_ptr.get()) != nullptr;
bool replicated_ddl_queries_enabled = DatabaseCatalog::instance().canPerformReplicatedDDLQueries();
if (!is_system_query && !replicated_ddl_queries_enabled)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Replicated DDL queries are disabled");
if (const auto * query_alter = query_ptr->as<ASTAlterQuery>())
{
for (const auto & command : query_alter->command_list->children)

View File

@@ -99,7 +99,6 @@ namespace DB
MR_MACROS(COMPRESSION, "COMPRESSION") \
MR_MACROS(CONST, "CONST") \
MR_MACROS(CONSTRAINT, "CONSTRAINT") \
MR_MACROS(CONNECTIONS, "CONNECTIONS") \
MR_MACROS(CREATE_POLICY, "CREATE POLICY") \
MR_MACROS(CREATE_PROFILE, "CREATE PROFILE") \
MR_MACROS(CREATE_QUOTA, "CREATE QUOTA") \

View File

@@ -9,7 +9,6 @@
#include <Common/SensitiveDataMasker.h>
#include <Common/SipHash.h>
#include <Common/StringUtils.h>
#include <algorithm>
namespace DB

View File

@@ -53,9 +53,6 @@ PlacementInfo & PlacementInfo::instance()
void PlacementInfo::initialize(const Poco::Util::AbstractConfiguration & config)
try
{
if (initialized)
return;
if (!config.has(DB::PlacementInfo::PLACEMENT_CONFIG_PREFIX))
{
availability_zone = "";

View File

@@ -22,7 +22,6 @@ struct FutureMergedMutatedPart
MergeTreeDataPartFormat part_format;
MergeTreePartInfo part_info;
MergeTreeData::DataPartsVector parts;
std::vector<MergeTreePartInfo> blocking_parts_to_remove;
MergeType merge_type = MergeType::Regular;
const MergeTreePartition & getPartition() const { return parts.front()->partition; }

View File

@@ -18,7 +18,6 @@ public:
using ValueSizeMap = std::map<std::string, double>;
using VirtualFields = std::unordered_map<String, Field>;
using DeserializeBinaryBulkStateMap = std::map<std::string, ISerialization::DeserializeBinaryBulkStatePtr>;
using FileStreams = std::map<std::string, std::unique_ptr<MergeTreeReaderStream>>;
IMergeTreeReader(
MergeTreeDataPartInfoForReaderPtr data_part_info_for_read_,

View File

@@ -83,9 +83,6 @@ bool MergeProjectionPartsTask::executeStep()
".tmp_proj");
next_level_parts.push_back(executeHere(tmp_part_merge_task));
/// FIXME (alesapin) we should use some temporary storage for this,
/// not commit each subprojection part
next_level_parts.back()->getDataPartStorage().commitTransaction();
next_level_parts.back()->is_temp = true;
}

View File

@@ -8,7 +8,7 @@ namespace DB
STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion)
static constexpr MergeTreeDataFormatVersion MERGE_TREE_DATA_OLD_FORMAT_VERSION {0};
static constexpr MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1};
const MergeTreeDataFormatVersion MERGE_TREE_DATA_OLD_FORMAT_VERSION {0};
const MergeTreeDataFormatVersion MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING {1};
}
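
The practical difference between the two spellings above: a static constexpr object is usable in constant expressions and has internal linkage, while a plain const object of class type is not a constant expression. A hedged illustration with a stand-in wrapper (the real STRONG_TYPEDEF macro generates a richer type):

struct FormatVersion { unsigned value; };   /// stand-in for STRONG_TYPEDEF(UInt32, MergeTreeDataFormatVersion)

static constexpr FormatVersion VERSION_CONSTEXPR {1};
const FormatVersion VERSION_CONST {1};

static_assert(VERSION_CONSTEXPR.value == 1);   /// OK: constant expression
/// static_assert(VERSION_CONST.value == 1);   /// would not compile: a const class object is not a constant expression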

View File

@@ -106,11 +106,9 @@ public:
PreformattedMessage & out_disable_reason,
bool dry_run = false);
/// Actually the freshest partition with the biggest modification_time
String getBestPartitionToOptimizeEntire(const PartitionsInfo & partitions_info) const;
/// Useful to quickly get a list of partitions that contain parts that we may want to merge
/// The result is limited by top_number_of_partitions_to_consider_for_merge
PartitionIdsHint getPartitionsThatMayBeMerged(
size_t max_total_size_to_merge,
const AllowedMergingPredicate & can_merge_callback,

View File

@@ -45,7 +45,6 @@ public:
enum Value
{
Full,
Packed,
Unknown,
};

View File

@@ -179,8 +179,8 @@ MergeTreeDataPartWriterOnDisk::MergeTreeDataPartWriterOnDisk(
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Can't take information about index granularity from blocks, when non empty index_granularity array specified");
/// We don't need to check if it exists or not, createDirectories doesn't throw
getDataPartStorage().createDirectories();
if (!getDataPartStorage().exists())
getDataPartStorage().createDirectories();
if (settings.rewrite_primary_key)
initPrimaryIndex();

View File

@@ -108,14 +108,6 @@ std::optional<MarkType> MergeTreeIndexGranularityInfo::getMarksTypeFromFilesyste
return {};
}
MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(
MarkType mark_type_, size_t index_granularity_, size_t index_granularity_bytes_)
: mark_type(mark_type_)
, fixed_index_granularity(index_granularity_)
, index_granularity_bytes(index_granularity_bytes_)
{
}
MergeTreeIndexGranularityInfo::MergeTreeIndexGranularityInfo(const MergeTreeData & storage, MergeTreeDataPartType type_)
: MergeTreeIndexGranularityInfo(storage, {storage.canUseAdaptiveGranularity(), (*storage.getSettings())[MergeTreeSetting::compress_marks], type_.getValue()})
{

View File

@@ -49,7 +49,6 @@ public:
MergeTreeIndexGranularityInfo(const MergeTreeData & storage, MarkType mark_type_);
MergeTreeIndexGranularityInfo(MergeTreeDataPartType type_, bool is_adaptive_, size_t index_granularity_, size_t index_granularity_bytes_);
MergeTreeIndexGranularityInfo(MarkType mark_type_, size_t index_granularity_, size_t index_granularity_bytes_);
void changeGranularityIfRequired(const IDataPartStorage & data_part_storage);

View File

@@ -26,11 +26,11 @@ void checkMutationStatus(std::optional<MergeTreeMutationStatus> & status, const
throw Exception(
ErrorCodes::UNFINISHED,
"Exception happened during execution of mutation{} '{}' with part '{}' reason: '{}'. This error maybe retryable or not. "
"In case of unretryable error, mutation can be killed with KILL MUTATION query \n\n{}\n",
"In case of unretryable error, mutation can be killed with KILL MUTATION query",
mutation_ids.size() > 1 ? "s" : "",
boost::algorithm::join(mutation_ids, ", "),
status->latest_failed_part,
status->latest_fail_reason, StackTrace().toString());
status->latest_fail_reason);
}
}

View File

@@ -46,13 +46,6 @@ struct MergeTreePartInfo
< std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation);
}
bool operator>(const MergeTreePartInfo & rhs) const
{
return std::forward_as_tuple(partition_id, min_block, max_block, level, mutation)
> std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation);
}
bool operator==(const MergeTreePartInfo & rhs) const
{
return !(*this != rhs);
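
Both the kept operator< and the removed operator> compare the same identity fields lexicographically through std::forward_as_tuple. A self-contained sketch of the pattern; the field types here are assumptions, not the real MergeTreePartInfo declarations:

#include <string>
#include <tuple>

struct PartKey
{
    std::string partition_id;
    long min_block = 0;
    long max_block = 0;
    unsigned level = 0;
    long mutation = 0;

    bool operator<(const PartKey & rhs) const
    {
        /// Lexicographic: partition first, then block range, level, and mutation.
        return std::forward_as_tuple(partition_id, min_block, max_block, level, mutation)
             < std::forward_as_tuple(rhs.partition_id, rhs.min_block, rhs.max_block, rhs.level, rhs.mutation);
    }
};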

View File

@@ -35,7 +35,7 @@ struct PrewhereExprStep
bool remove_filter_column = false;
bool need_filter = false;
/// Some PREWHERE steps should be executed without conversions (e.g. early mutation steps)
/// Some PREWHERE steps should be executed without conversions.
/// A step without alter conversion cannot be executed after step with alter conversions.
bool perform_alter_conversions = false;
};

View File

@@ -3,8 +3,8 @@
#include <Storages/MergeTree/ReplicatedMergeTreeSink.h>
#include <Storages/MergeTree/InsertBlockInfo.h>
#include <Interpreters/PartLog.h>
#include <Processors/Transforms/DeduplicationTokenTransforms.h>
#include <Common/Exception.h>
#include <Processors/Transforms/DeduplicationTokenTransforms.h>
#include <Common/FailPoint.h>
#include <Common/ProfileEventsScope.h>
#include <Common/SipHash.h>

View File

@@ -135,6 +135,7 @@ bool isRetryableException(std::exception_ptr exception_ptr)
}
}
static IMergeTreeDataPart::Checksums checkDataPart(
MergeTreeData::DataPartPtr data_part,
const IDataPartStorage & data_part_storage,
@@ -421,7 +422,6 @@ IMergeTreeDataPart::Checksums checkDataPart(
}
ReadSettings read_settings;
read_settings.read_through_distributed_cache = false;
read_settings.enable_filesystem_cache = false;
read_settings.enable_filesystem_cache_log = false;
read_settings.enable_filesystem_read_prefetches_log = false;

View File

@@ -5,7 +5,6 @@
#if USE_AWS_S3
#include <IO/S3Settings.h>
#include <Storages/ObjectStorage/StorageObjectStorage.h>
#include <Parsers/IAST_fwd.h>
namespace DB
{

View File

@@ -150,7 +150,6 @@ size_t estimateValueSize(
}
}
}
ColumnPtr fillColumnWithRandomData(
const DataTypePtr type,
@@ -540,8 +539,6 @@ ColumnPtr fillColumnWithRandomData(
}
}
namespace
{
class GenerateSource : public ISource
{

View File

@@ -17,8 +17,6 @@ struct StorageID;
class ASTCreateQuery;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
class IDatabase;
using DatabasePtr = std::shared_ptr<IDatabase>;
/// Helper for replicated tables that use zookeeper for coordination among replicas.
/// Handles things like:

View File

@@ -142,7 +142,6 @@ SYSTEM REPLICATED SENDS ['SYSTEM STOP REPLICATED SENDS','SYSTEM START REPLICATED
SYSTEM SENDS ['SYSTEM STOP SENDS','SYSTEM START SENDS','STOP SENDS','START SENDS'] \N SYSTEM
SYSTEM REPLICATION QUEUES ['SYSTEM STOP REPLICATION QUEUES','SYSTEM START REPLICATION QUEUES','STOP REPLICATION QUEUES','START REPLICATION QUEUES'] TABLE SYSTEM
SYSTEM VIRTUAL PARTS UPDATE ['SYSTEM STOP VIRTUAL PARTS UPDATE','SYSTEM START VIRTUAL PARTS UPDATE','STOP VIRTUAL PARTS UPDATE','START VIRTUAL PARTS UPDATE'] TABLE SYSTEM
SYSTEM REDUCE BLOCKING PARTS ['SYSTEM STOP REDUCE BLOCKING PARTS','SYSTEM START REDUCE BLOCKING PARTS','STOP REDUCE BLOCKING PARTS','START REDUCE BLOCKING PARTS'] TABLE SYSTEM
SYSTEM DROP REPLICA ['DROP REPLICA'] TABLE SYSTEM
SYSTEM SYNC REPLICA ['SYNC REPLICA'] TABLE SYSTEM
SYSTEM REPLICA READINESS ['SYSTEM REPLICA READY','SYSTEM REPLICA UNREADY'] GLOBAL SYSTEM