Compare commits

..

8 Commits

184 changed files with 4327 additions and 6115 deletions

View File

@ -40,8 +40,17 @@ Every month we get together with the community (users, contributors, customers,
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
Upcoming meetups
The following upcoming meetups feature the creator of ClickHouse & CTO, Alexey Milovidov:
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
Other upcoming meetups
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
@ -53,20 +62,13 @@ Upcoming meetups
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
Recently completed meetups
Recently completed events
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".

View File

@ -188,9 +188,8 @@ namespace Crypto
pFile = fopen(keyFile.c_str(), "r");
if (pFile)
{
pem_password_cb * pCB = &passCB;
static constexpr char * no_password = "";
void * pPassword = pass.empty() ? (void *)no_password : (void *)pass.c_str();
pem_password_cb * pCB = pass.empty() ? (pem_password_cb *)0 : &passCB;
void * pPassword = pass.empty() ? (void *)0 : (void *)pass.c_str();
if (readFunc(pFile, &pKey, pCB, pPassword))
{
fclose(pFile);
@ -226,13 +225,6 @@ namespace Crypto
error:
if (pFile)
fclose(pFile);
if (*ppKey)
{
if constexpr (std::is_same_v<K, EVP_PKEY>)
EVP_PKEY_free(*ppKey);
else
EC_KEY_free(*ppKey);
}
throw OpenSSLException("EVPKey::loadKey(string)");
}
@ -294,13 +286,6 @@ namespace Crypto
error:
if (pBIO)
BIO_free(pBIO);
if (*ppKey)
{
if constexpr (std::is_same_v<K, EVP_PKEY>)
EVP_PKEY_free(*ppKey);
else
EC_KEY_free(*ppKey);
}
throw OpenSSLException("EVPKey::loadKey(stream)");
}

View File

@ -248,9 +248,6 @@ namespace Net
SSL_CTX * sslContext() const;
/// Returns the underlying OpenSSL SSL Context object.
SSL_CTX * takeSslContext();
/// Takes ownership of the underlying OpenSSL SSL Context object.
Usage usage() const;
/// Returns whether the context is for use by a client or by a server
/// and whether TLSv1 is required.
@ -404,13 +401,6 @@ namespace Net
return _pSSLContext;
}
inline SSL_CTX * Context::takeSslContext()
{
auto * result = _pSSLContext;
_pSSLContext = nullptr;
return result;
}
inline bool Context::extendedCertificateVerificationEnabled() const
{

View File

@ -106,11 +106,6 @@ Context::Context(
Context::~Context()
{
if (_pSSLContext == nullptr)
{
return;
}
try
{
SSL_CTX_free(_pSSLContext);
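
The hunks above show the usual "take ownership" hand-off: `takeSslContext()` returns the raw `SSL_CTX *` and clears the member, so the destructor's `SSL_CTX_free` is skipped for a context that has been taken. A generic sketch of that pattern, with a hypothetical `Owner`/`Resource` pair rather than Poco's actual classes:

```cpp
#include <cassert>

// Generic sketch of the take-ownership hand-off used by Context::takeSslContext():
// after take(), the wrapper no longer frees the resource in its own destructor.
// Owner/Resource are hypothetical stand-ins, not Poco classes.
struct Resource { int value = 42; };

class Owner
{
public:
    Owner() : resource(new Resource) {}
    ~Owner() { delete resource; }      // deleting nullptr is a no-op, so this stays safe

    Resource * take()
    {
        Resource * result = resource;
        resource = nullptr;            // the caller now owns the resource
        return result;
    }

private:
    Resource * resource = nullptr;
};

int main()
{
    Owner owner;
    Resource * raw = owner.take();     // ownership transferred to the caller
    assert(raw && raw->value == 42);
    delete raw;                        // the caller is responsible for freeing it
    return 0;                          // ~Owner() finds nullptr and frees nothing
}
```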

2
contrib/libpqxx vendored

@ -1 +1 @@
Subproject commit 41e4c331564167cca97ad6eccbd5b8879c2ca044
Subproject commit c995193a3a14d71f4711f1f421f65a1a1db64640

View File

@ -1,9 +1,9 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libpqxx")
set (SRCS
"${LIBRARY_DIR}/src/strconv.cxx"
"${LIBRARY_DIR}/src/array.cxx"
"${LIBRARY_DIR}/src/binarystring.cxx"
"${LIBRARY_DIR}/src/blob.cxx"
"${LIBRARY_DIR}/src/connection.cxx"
"${LIBRARY_DIR}/src/cursor.cxx"
"${LIBRARY_DIR}/src/encodings.cxx"
@ -12,25 +12,59 @@ set (SRCS
"${LIBRARY_DIR}/src/field.cxx"
"${LIBRARY_DIR}/src/largeobject.cxx"
"${LIBRARY_DIR}/src/notification.cxx"
"${LIBRARY_DIR}/src/params.cxx"
"${LIBRARY_DIR}/src/pipeline.cxx"
"${LIBRARY_DIR}/src/result.cxx"
"${LIBRARY_DIR}/src/robusttransaction.cxx"
"${LIBRARY_DIR}/src/row.cxx"
"${LIBRARY_DIR}/src/sql_cursor.cxx"
"${LIBRARY_DIR}/src/strconv.cxx"
"${LIBRARY_DIR}/src/stream_from.cxx"
"${LIBRARY_DIR}/src/stream_to.cxx"
"${LIBRARY_DIR}/src/subtransaction.cxx"
"${LIBRARY_DIR}/src/time.cxx"
"${LIBRARY_DIR}/src/transaction.cxx"
"${LIBRARY_DIR}/src/transaction_base.cxx"
"${LIBRARY_DIR}/src/row.cxx"
"${LIBRARY_DIR}/src/params.cxx"
"${LIBRARY_DIR}/src/util.cxx"
"${LIBRARY_DIR}/src/version.cxx"
"${LIBRARY_DIR}/src/wait.cxx"
)
add_library(_libpqxx ${SRCS})
# Each header file needs to be listed explicitly, because the directory include/pqxx also contains
# extension-less files such as 'array'. Including the whole directory with `target_include_directories`
# would therefore conflict with every include of <array>.
set (HDRS
"${LIBRARY_DIR}/include/pqxx/array.hxx"
"${LIBRARY_DIR}/include/pqxx/params.hxx"
"${LIBRARY_DIR}/include/pqxx/binarystring.hxx"
"${LIBRARY_DIR}/include/pqxx/composite.hxx"
"${LIBRARY_DIR}/include/pqxx/connection.hxx"
"${LIBRARY_DIR}/include/pqxx/cursor.hxx"
"${LIBRARY_DIR}/include/pqxx/dbtransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/errorhandler.hxx"
"${LIBRARY_DIR}/include/pqxx/except.hxx"
"${LIBRARY_DIR}/include/pqxx/field.hxx"
"${LIBRARY_DIR}/include/pqxx/isolation.hxx"
"${LIBRARY_DIR}/include/pqxx/largeobject.hxx"
"${LIBRARY_DIR}/include/pqxx/nontransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/notification.hxx"
"${LIBRARY_DIR}/include/pqxx/pipeline.hxx"
"${LIBRARY_DIR}/include/pqxx/prepared_statement.hxx"
"${LIBRARY_DIR}/include/pqxx/result.hxx"
"${LIBRARY_DIR}/include/pqxx/robusttransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/row.hxx"
"${LIBRARY_DIR}/include/pqxx/separated_list.hxx"
"${LIBRARY_DIR}/include/pqxx/strconv.hxx"
"${LIBRARY_DIR}/include/pqxx/stream_from.hxx"
"${LIBRARY_DIR}/include/pqxx/stream_to.hxx"
"${LIBRARY_DIR}/include/pqxx/subtransaction.hxx"
"${LIBRARY_DIR}/include/pqxx/transaction.hxx"
"${LIBRARY_DIR}/include/pqxx/transaction_base.hxx"
"${LIBRARY_DIR}/include/pqxx/types.hxx"
"${LIBRARY_DIR}/include/pqxx/util.hxx"
"${LIBRARY_DIR}/include/pqxx/version.hxx"
"${LIBRARY_DIR}/include/pqxx/zview.hxx"
)
add_library(_libpqxx ${SRCS} ${HDRS})
target_link_libraries(_libpqxx PUBLIC ch_contrib::libpq)
target_include_directories (_libpqxx SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include")
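
The CMake comment above is about header-name shadowing: libpqxx ships extension-less headers such as `include/pqxx/array`, so exposing that inner directory directly could make `#include <array>` resolve to the wrong file. A minimal C++ sketch of the concern (the build setup in the comments is hypothetical; the program itself just relies on the real standard header):

```cpp
// Minimal illustration of the include clash described in the CMake comment above.
// libpqxx ships extension-less headers such as include/pqxx/array; if that inner
// directory itself were put on the include path, `#include <array>` could resolve
// to the pqxx file instead of the standard header on some setups. Exposing only
// ".../include" (so consumers write <pqxx/array>) keeps std::array working:
#include <array>
#include <cstdio>

int main()
{
    std::array<int, 3> sizes{1, 2, 3};             // relies on the real <array>
    std::printf("%zu elements\n", sizes.size());   // prints "3 elements"
    return 0;
}
```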

2
contrib/postgres vendored

@ -1 +1 @@
Subproject commit cfd77000af28469fcb650485bad65a35e7649e41
Subproject commit 665ff8c164d56d012e359735efe4d400c0564b44

View File

@ -1,6 +1,18 @@
/* src/include/pg_config.h. Generated from pg_config.h.in by configure. */
/* src/include/pg_config.h.in. Generated from configure.in by autoheader. */
/* Define to the type of arg 1 of 'accept' */
#define ACCEPT_TYPE_ARG1 int
/* Define to the type of arg 2 of 'accept' */
#define ACCEPT_TYPE_ARG2 struct sockaddr *
/* Define to the type of arg 3 of 'accept' */
#define ACCEPT_TYPE_ARG3 size_t
/* Define to the return type of 'accept' */
#define ACCEPT_TYPE_RETURN int
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
@ -37,9 +49,6 @@
/* Define to the default TCP port number as a string constant. */
#define DEF_PGPORT_STR "5432"
/* Define to the file name extension of dynamically-loadable modules. */
#define DLSUFFIX ".so"
/* Define to build with GSSAPI support. (--with-gssapi) */
//#define ENABLE_GSS 0
@ -113,9 +122,6 @@
don't. */
#define HAVE_DECL_SNPRINTF 1
/* Define to 1 if you have the declaration of `sigwait', and to 0 if you don't. */
#define HAVE_DECL_SIGWAIT 1
/* Define to 1 if you have the declaration of `strlcat', and to 0 if you
don't. */
#if OS_DARWIN
@ -251,9 +257,6 @@
/* Define to 1 if you have the `inet_aton' function. */
#define HAVE_INET_ATON 1
/* Define to 1 if you have the `inet_pton' function. */
#define HAVE_INET_PTON 1
/* Define to 1 if the system has the type `int64'. */
/* #undef HAVE_INT64 */
@ -320,9 +323,6 @@
/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1
/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */
/* Define to 1 if constants of type 'long long int' should have the suffix LL.
*/
#define HAVE_LL_CONSTANTS 1
@ -378,9 +378,6 @@
/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1
/* Define to 1 if you have a POSIX-conforming sigwait declaration. */
/* #undef HAVE_POSIX_DECL_SIGWAIT */
/* Define to 1 if you have the `posix_fadvise' function. */
#define HAVE_POSIX_FADVISE 1
@ -411,6 +408,9 @@
/* Define to 1 if you have the <pwd.h> header file. */
#define HAVE_PWD_H 1
/* Define to 1 if you have the `random' function. */
#define HAVE_RANDOM 1
/* Define to 1 if you have the <readline.h> header file. */
/* #undef HAVE_READLINE_H */
@ -426,6 +426,10 @@
/* Define to 1 if you have the `rint' function. */
#define HAVE_RINT 1
/* Define to 1 if you have the global variable
'rl_completion_append_character'. */
/* #undef HAVE_RL_COMPLETION_APPEND_CHARACTER */
/* Define to 1 if you have the `rl_completion_matches' function. */
#define HAVE_RL_COMPLETION_MATCHES 1
@ -435,9 +439,6 @@
/* Define to 1 if you have the `rl_reset_screen_size' function. */
/* #undef HAVE_RL_RESET_SCREEN_SIZE */
/* Define to 1 if you have the `rl_variable_bind' function. */
#define HAVE_RL_VARIABLE_BIND 1
/* Define to 1 if you have the <security/pam_appl.h> header file. */
#define HAVE_SECURITY_PAM_APPL_H 1
@ -450,9 +451,6 @@
/* Define to 1 if you have the `shm_open' function. */
#define HAVE_SHM_OPEN 1
/* Define to 1 if the system has the type `socklen_t'. */
#define HAVE_SOCKLEN_T 1
/* Define to 1 if you have the `sigprocmask' function. */
#define HAVE_SIGPROCMASK 1
@ -468,6 +466,9 @@
/* Define to 1 if you have spinlocks. */
#define HAVE_SPINLOCKS 1
/* Define to 1 if you have the `srandom' function. */
#define HAVE_SRANDOM 1
/* Define to 1 if you have the `SSL_CTX_set_num_tickets' function. */
/* #define HAVE_SSL_CTX_SET_NUM_TICKETS */
@ -884,9 +885,6 @@
/* Define to select Win32-style shared memory. */
/* #undef USE_WIN32_SHARED_MEMORY */
/* Define to 1 to build with ZSTD support. (--with-zstd) */
/* #undef USE_ZSTD */
/* Define to 1 if `wcstombs_l' requires <xlocale.h>. */
/* #undef WCSTOMBS_L_IN_XLOCALE */
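
The `HAVE_*` macros in this generated header are consumed through conditional compilation. A generic sketch (not taken from the postgres sources) of how a probe result such as `HAVE_POLL_H` typically gates an include and a call:

```cpp
// Hypothetical usage of an autoconf-style feature macro like the ones above; the macro
// name mirrors pg_config.h, but the program itself is illustrative only.
#include <cstdio>

#define HAVE_POLL_H 1   // normally supplied by the generated pg_config.h

#ifdef HAVE_POLL_H
#    include <poll.h>
#endif

int main()
{
#ifdef HAVE_POLL_H
    struct pollfd pfd{};
    pfd.fd = 0;                       // stdin
    pfd.events = POLLIN;
    int ready = poll(&pfd, 1, 0);     // non-blocking readiness check
    std::printf("poll() returned %d\n", ready);
#else
    std::printf("poll() not available; a select()-based fallback would go here\n");
#endif
    return 0;
}
```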

View File

@ -9,7 +9,7 @@ Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a
**Syntax**
``` sql
quantileDD(relative_accuracy, [level])(expr)
quantileDDsketch[relative_accuracy, (level)](expr)
```
**Arguments**

View File

@ -42,7 +42,6 @@ namespace
{
std::unordered_map<UUID, AccessEntityPtr> entities;
std::unordered_map<UUID, std::pair<String, AccessEntityType>> dependencies;
std::unordered_map<UUID, AccessEntityPtr> dependents;
BackupEntryPtr toBackupEntry() const
{
@ -74,24 +73,6 @@ namespace
}
}
if (!dependents.empty())
{
if (!dependencies.empty())
writeText("\n", buf);
writeText("DEPENDENTS\n", buf);
for (const auto & [id, entity] : dependents)
{
writeText(id, buf);
writeChar('\t', buf);
writeText(entity->getTypeInfo().name, buf);
writeChar('\t', buf);
writeText(entity->getName(), buf);
writeChar('\n', buf);
writeText(serializeAccessEntity(*entity), buf);
writeChar('\n', buf);
}
}
return std::make_shared<BackupEntryFromMemory>(buf.str());
}
@ -101,71 +82,59 @@ namespace
{
AccessEntitiesInBackup res;
bool reading_dependencies = false;
bool reading_dependents = false;
bool dependencies_found = false;
while (!buf->eof())
{
String line;
readStringUntilNewlineInto(line, *buf);
buf->ignore();
if (line == "DEPENDENCIES")
{
reading_dependencies = true;
reading_dependents = false;
continue;
}
else if (line == "DEPENDENTS")
{
reading_dependents = true;
reading_dependencies = false;
continue;
}
else if (line.empty())
{
continue;
dependencies_found = true;
break;
}
size_t separator1 = line.find('\t');
size_t separator2 = line.find('\t', separator1 + 1);
if ((separator1 == String::npos) || (separator2 == String::npos))
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Separators not found in line {}", line);
UUID id = parse<UUID>(line.substr(0, line.find('\t')));
line.clear();
UUID id = parse<UUID>(line.substr(0, separator1));
AccessEntityType type = AccessEntityTypeInfo::parseType(line.substr(separator1 + 1, separator2 - separator1 - 1));
String name = line.substr(separator2 + 1);
if (reading_dependencies)
String queries;
while (!buf->eof())
{
res.dependencies.emplace(id, std::pair{name, type});
String query;
readStringUntilNewlineInto(query, *buf);
buf->ignore();
if (query.empty())
break;
if (!queries.empty())
queries.append("\n");
queries.append(query);
}
else
AccessEntityPtr entity = deserializeAccessEntity(queries);
res.entities.emplace(id, entity);
}
if (dependencies_found)
{
while (!buf->eof())
{
String queries;
while (!buf->eof())
{
String query;
readStringUntilNewlineInto(query, *buf);
buf->ignore();
if (query.empty())
break;
if (!queries.empty())
queries.append("\n");
queries.append(query);
}
String id_as_string;
readStringInto(id_as_string, *buf);
buf->ignore();
UUID id = parse<UUID>(id_as_string);
AccessEntityPtr entity = deserializeAccessEntity(queries);
String type_as_string;
readStringInto(type_as_string, *buf);
buf->ignore();
AccessEntityType type = AccessEntityTypeInfo::parseType(type_as_string);
if (name != entity->getName())
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Unexpected name {} is specified for {}", name, entity->formatTypeWithName());
if (type != entity->getType())
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Unexpected type {} is specified for {}", AccessEntityTypeInfo::get(type).name, entity->formatTypeWithName());
String name;
readStringInto(name, *buf);
buf->ignore();
if (reading_dependents)
res.dependents.emplace(id, entity);
else
res.entities.emplace(id, entity);
if (!res.entities.contains(id))
res.dependencies.emplace(id, std::pair{name, type});
}
}
@ -178,59 +147,49 @@ namespace
}
}
};
std::vector<UUID> findDependencies(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities)
{
std::vector<UUID> res;
for (const auto & entity : entities | boost::adaptors::map_values)
insertAtEnd(res, entity->findDependencies());
/// Remove duplicates in the list of dependencies (some entities can refer to other entities).
::sort(res.begin(), res.end());
res.erase(std::unique(res.begin(), res.end()), res.end());
for (const auto & id : entities | boost::adaptors::map_keys)
{
auto it = std::lower_bound(res.begin(), res.end(), id);
if ((it != res.end()) && (*it == id))
res.erase(it);
}
return res;
}
std::unordered_map<UUID, std::pair<String, AccessEntityType>> readDependenciesNamesAndTypes(const std::vector<UUID> & dependencies, const AccessControl & access_control)
{
std::unordered_map<UUID, std::pair<String, AccessEntityType>> res;
for (const auto & id : dependencies)
{
if (auto name_and_type = access_control.tryReadNameWithType(id))
res.emplace(id, name_and_type.value());
}
return res;
}
}
std::pair<String, BackupEntryPtr> makeBackupEntryForAccessEntities(
const std::vector<UUID> & entities_ids,
const std::unordered_map<UUID, AccessEntityPtr> & all_entities,
bool write_dependents,
const String & data_path_in_backup)
std::pair<String, BackupEntryPtr> makeBackupEntryForAccess(
const std::vector<std::pair<UUID, AccessEntityPtr>> & access_entities,
const String & data_path_in_backup,
size_t counter,
const AccessControl & access_control)
{
auto dependencies = readDependenciesNamesAndTypes(findDependencies(access_entities), access_control);
AccessEntitiesInBackup ab;
std::unordered_set<UUID> entities_ids_set;
for (const auto & id : entities_ids)
entities_ids_set.emplace(id);
for (const auto & id : entities_ids)
{
auto it = all_entities.find(id);
if (it != all_entities.end())
{
AccessEntityPtr entity = it->second;
ab.entities.emplace(id, entity);
auto dependencies = entity->findDependencies();
for (const auto & dependency_id : dependencies)
{
if (!entities_ids_set.contains(dependency_id))
{
auto it_dependency = all_entities.find(dependency_id);
if (it_dependency != all_entities.end())
{
auto dependency_entity = it_dependency->second;
ab.dependencies.emplace(dependency_id, std::make_pair(dependency_entity->getName(), dependency_entity->getType()));
}
}
}
}
}
if (write_dependents)
{
for (const auto & [id, possible_dependent] : all_entities)
{
if (!entities_ids_set.contains(id) && possible_dependent->hasDependencies(entities_ids_set))
{
auto dependent = possible_dependent->clone();
dependent->clearAllExceptDependencies();
ab.dependents.emplace(id, dependent);
}
}
}
String filename = fmt::format("access-{}.txt", UUIDHelpers::generateV4());
boost::range::copy(access_entities, std::inserter(ab.entities, ab.entities.end()));
ab.dependencies = std::move(dependencies);
String filename = fmt::format("access{:02}.txt", counter + 1); /// access01.txt, access02.txt, ...
String file_path_in_backup = fs::path{data_path_in_backup} / filename;
return {file_path_in_backup, ab.toBackupEntry()};
}
@ -249,13 +208,24 @@ AccessRestorerFromBackup::AccessRestorerFromBackup(
AccessRestorerFromBackup::~AccessRestorerFromBackup() = default;
void AccessRestorerFromBackup::addDataPath(const String & data_path_in_backup)
void AccessRestorerFromBackup::addDataPath(const String & data_path_in_backup, bool dependents_only)
{
if (loaded)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Access entities already loaded");
if (std::find(data_paths_in_backup.begin(), data_paths_in_backup.end(), data_path_in_backup) == data_paths_in_backup.end())
data_paths_in_backup.emplace_back(data_path_in_backup);
if (dependents_only && !update_dependents)
return;
for (auto & stored_data_path : data_paths_in_backup)
{
if (stored_data_path.first == data_path_in_backup)
{
stored_data_path.second &= dependents_only;
return;
}
}
data_paths_in_backup.emplace_back(data_path_in_backup, dependents_only);
}
@ -267,7 +237,8 @@ void AccessRestorerFromBackup::loadFromBackup()
/// Parse files "access*.txt" found in the added data paths in the backup.
for (size_t data_path_index = 0; data_path_index != data_paths_in_backup.size(); ++data_path_index)
{
const String & data_path_in_backup = data_paths_in_backup[data_path_index];
const String & data_path_in_backup = data_paths_in_backup[data_path_index].first;
bool dependents_only = data_paths_in_backup[data_path_index].second;
fs::path data_path_in_backup_fs = data_path_in_backup;
Strings filenames = backup->listFiles(data_path_in_backup_fs, /*recursive*/ false);
@ -306,8 +277,9 @@ void AccessRestorerFromBackup::loadFromBackup()
}
EntityInfo & entity_info = it->second;
entity_info.entity = entity;
entity_info.restore = true;
entity_info.data_path_index = data_path_index;
if (!dependents_only)
entity_info.restore = true;
}
for (const auto & [id, name_and_type] : ab.dependencies)
@ -320,18 +292,6 @@ void AccessRestorerFromBackup::loadFromBackup()
EntityInfo & entity_info = it->second;
entity_info.is_dependency = true;
}
for (const auto & [id, entity] : ab.dependents)
{
auto it = entity_infos.find(id);
if (it == entity_infos.end())
{
it = entity_infos.emplace(id, EntityInfo{.id = id, .name = entity->getName(), .type = entity->getType()}).first;
}
EntityInfo & entity_info = it->second;
if (!entity_info.restore)
entity_info.entity = entity;
}
}
}
@ -470,19 +430,19 @@ void AccessRestorerFromBackup::generateRandomIDsAndResolveDependencies(const Acc
}
/// Prepare map from old UUIDs to new UUIDs.
std::unordered_set<UUID> ids_to_restore;
std::unordered_map<UUID, UUID> old_to_new_ids;
std::unordered_set<UUID> unresolved_ids;
std::unordered_set<UUID> old_ids_to_restore;
for (const auto & [id, entity_info] : entity_infos)
{
if (entity_info.restore)
ids_to_restore.insert(id);
if (entity_info.new_id)
old_to_new_ids[id] = *entity_info.new_id;
else
unresolved_ids.insert(id);
if (entity_info.restore)
old_ids_to_restore.insert(id);
}
/// Calculate `is_dependent` for each entity info.
@ -490,7 +450,7 @@ void AccessRestorerFromBackup::generateRandomIDsAndResolveDependencies(const Acc
{
for (auto & [id, entity_info] : entity_infos)
{
if (!entity_info.restore && entity_info.new_id && entity_info.entity && entity_info.entity->hasDependencies(ids_to_restore))
if (!entity_info.restore && entity_info.new_id && entity_info.entity && entity_info.entity->hasDependencies(old_ids_to_restore))
entity_info.is_dependent = true;
}
}
@ -498,7 +458,7 @@ void AccessRestorerFromBackup::generateRandomIDsAndResolveDependencies(const Acc
/// Remap the UUIDs of dependencies in the access entities we're going to restore.
for (auto & [id, entity_info] : entity_infos)
{
if (entity_info.restore || entity_info.is_dependent)
if (entity_info.entity && (entity_info.restore || entity_info.is_dependent))
{
auto new_entity = entity_info.entity->clone();
new_entity->replaceDependencies(old_to_new_ids);
@ -507,7 +467,7 @@ void AccessRestorerFromBackup::generateRandomIDsAndResolveDependencies(const Acc
}
if (entity_info.restore && data_path_with_entities_to_restore.empty())
data_path_with_entities_to_restore = data_paths_in_backup[entity_info.data_path_index];
data_path_with_entities_to_restore = data_paths_in_backup[entity_info.data_path_index].first;
}
ids_assigned = true;
@ -570,10 +530,8 @@ void restoreAccessEntitiesFromBackup(
bool throw_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kCreate);
bool update_dependents = restore_settings.update_access_entities_dependents;
std::unordered_set<UUID> restored_ids;
std::unordered_map<UUID, UUID> new_to_existing_ids;
std::vector<std::pair<UUID, AccessEntityPtr>> more_dependents;
more_dependents.reserve(entities.size());
for (const auto & [id, entity] : entities)
{
@ -585,7 +543,6 @@ void restoreAccessEntitiesFromBackup(
if (destination_access_storage.insert(id, entity, replace_if_exists, throw_if_exists, &existing_id))
{
LOG_TRACE(log, "{}: Added successfully", AccessEntityTypeInfo::get(type).formatEntityNameWithType(name));
restored_ids.emplace(id);
}
else
{
@ -597,15 +554,23 @@ void restoreAccessEntitiesFromBackup(
}
}
std::vector<UUID> ids_to_update;
if (!new_to_existing_ids.empty())
{
std::vector<UUID> ids_to_update;
ids_to_update.reserve(restored_ids.size());
boost::copy(restored_ids, std::inserter(ids_to_update, ids_to_update.end()));
std::unordered_set<UUID> new_ids;
boost::copy(new_to_existing_ids | boost::adaptors::map_keys, std::inserter(new_ids, new_ids.end()));
ids_to_update.clear();
ids_to_update.reserve(entities.size());
for (const auto & [id, entity] : entities)
{
bool is_restored = !new_to_existing_ids.contains(id);
if (is_restored)
ids_to_update.emplace_back(id);
}
/// If new entities restored from backup have dependencies on other entities from backup which were not restored because they existed,
/// then we should correct those dependencies.
auto update_func = [&](const AccessEntityPtr & entity, const UUID &) -> AccessEntityPtr
@ -622,23 +587,35 @@ void restoreAccessEntitiesFromBackup(
destination_access_storage.tryUpdate(ids_to_update, update_func);
}
std::unordered_set<UUID> restored_ids;
auto do_update_dependents = [&](const std::vector<std::pair<UUID, AccessEntityPtr>> & dependents_to_update)
{
if (dependents_to_update.empty())
return;
std::vector<UUID> ids_to_update;
ids_to_update.clear();
ids_to_update.reserve(dependents_to_update.size());
std::unordered_map<UUID, AccessEntityPtr> id_to_source;
std::unordered_map<UUID, AccessEntityPtr> id_to_dependent;
for (const auto & [id, source] : dependents_to_update)
for (const auto & [id, entity] : dependents_to_update)
{
if (!destination_access_storage.isReadOnly(id))
{
auto new_entity = entity->clone();
new_entity->replaceDependencies(new_to_existing_ids);
id_to_dependent[id] = new_entity;
ids_to_update.emplace_back(id);
auto new_source = source->clone();
new_source->replaceDependencies(new_to_existing_ids);
id_to_source[id] = new_source;
}
}
if (restored_ids.empty())
{
for (const auto & [id, entity] : entities)
{
bool is_restored = !new_to_existing_ids.contains(id);
if (is_restored)
restored_ids.insert(id);
}
}
@ -646,12 +623,12 @@ void restoreAccessEntitiesFromBackup(
/// then we should correct those dependencies.
auto update_func = [&](const AccessEntityPtr & entity, const UUID & id) -> AccessEntityPtr
{
const auto & source = *id_to_source.at(id);
if (!source.hasDependencies(restored_ids))
const auto & src_entity = *id_to_dependent.at(id);
if (!src_entity.hasDependencies(restored_ids))
return entity;
LOG_TRACE(log, "{}: Updating dependent", entity->formatTypeWithName());
auto res = entity->clone();
res->copyDependenciesFrom(source, restored_ids);
res->copyDependenciesFrom(src_entity, restored_ids);
return res;
};
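
A large part of the hunks above deals with redirecting dependencies of restored access entities to entities that already exist in the destination storage. A minimal, self-contained sketch of that old-to-new ID remapping, using a plain `unsigned` stand-in for UUID instead of the real types:

```cpp
#include <cassert>
#include <unordered_map>
#include <vector>

// Miniature of the dependency remapping performed via replaceDependencies():
// every dependency ID that has an entry in the map is rewritten, the rest are kept.
// `Id` is a plain stand-in for UUID; the real code works on access entities.
using Id = unsigned;

std::vector<Id> remapDependencies(std::vector<Id> deps, const std::unordered_map<Id, Id> & old_to_new_ids)
{
    for (Id & dep : deps)
    {
        auto it = old_to_new_ids.find(dep);
        if (it != old_to_new_ids.end())
            dep = it->second;
    }
    return deps;
}

int main()
{
    // Entity 7 from the backup was not inserted because an equivalent entity 70 already
    // existed, so references to 7 inside restored entities are redirected to 70.
    std::unordered_map<Id, Id> new_to_existing_ids{{7, 70}};
    auto fixed = remapDependencies({1, 7, 9}, new_to_existing_ids);
    assert((fixed == std::vector<Id>{1, 70, 9}));
    return 0;
}
```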

View File

@ -21,12 +21,12 @@ struct RestoreSettings;
enum class RestoreAccessCreationMode : uint8_t;
/// Makes a backup entry for a set of access entities.
std::pair<String, BackupEntryPtr> makeBackupEntryForAccessEntities(
const std::vector<UUID> & entities_ids,
const std::unordered_map<UUID, AccessEntityPtr> & all_entities,
bool write_dependents,
const String & data_path_in_backup);
/// Makes a backup of access entities of a specified type.
std::pair<String, BackupEntryPtr> makeBackupEntryForAccess(
const std::vector<std::pair<UUID, AccessEntityPtr>> & access_entities,
const String & data_path_in_backup,
size_t counter,
const AccessControl & access_control);
/// Restores access entities from a backup.
void restoreAccessEntitiesFromBackup(
@ -44,7 +44,7 @@ public:
~AccessRestorerFromBackup();
/// Adds a data path to load access entities from.
void addDataPath(const String & data_path_in_backup);
void addDataPath(const String & data_path_in_backup, bool dependents_only = false);
/// Loads access entities from the backup.
void loadFromBackup();
@ -81,7 +81,7 @@ private:
/// Whether generateRandomIDsAndResolveDependencies() finished.
bool ids_assigned = false;
Strings data_paths_in_backup;
std::vector<std::pair<String, bool /* dependents_only */>> data_paths_in_backup;
String data_path_with_entities_to_restore;
/// Information about an access entity loaded from the backup.

View File

@ -54,7 +54,6 @@ struct IAccessEntity
virtual void replaceDependencies(const std::unordered_map<UUID, UUID> & /* old_to_new_ids */) {}
virtual void copyDependenciesFrom(const IAccessEntity & /* src */, const std::unordered_set<UUID> & /* ids */) {}
virtual void removeDependencies(const std::unordered_set<UUID> & /* ids */) {}
virtual void clearAllExceptDependencies() {}
/// Whether this access entity should be written to a backup.
virtual bool isBackupAllowed() const { return false; }
@ -70,6 +69,8 @@ protected:
{
return std::make_shared<EntityClassT>(typeid_cast<const EntityClassT &>(*this));
}
virtual void doReplaceDependencies(const std::unordered_map<UUID, UUID> & /* old_to_new_ids */) {}
};
using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;

View File

@ -4,10 +4,8 @@
#include <Access/User.h>
#include <Access/AccessBackup.h>
#include <Backups/BackupEntriesCollector.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/IRestoreCoordination.h>
#include <Backups/RestoreSettings.h>
#include <Backups/RestorerFromBackup.h>
#include <Backups/RestoreSettings.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <Common/callOnce.h>
@ -16,7 +14,6 @@
#include <Poco/UUIDGenerator.h>
#include <Poco/Logger.h>
#include <base/FnTraits.h>
#include <base/range.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/range/adaptor/map.hpp>
@ -74,18 +71,6 @@ std::vector<UUID> IAccessStorage::find(AccessEntityType type, const Strings & na
}
std::vector<UUID> IAccessStorage::findAllImpl() const
{
std::vector<UUID> res;
for (auto type : collections::range(AccessEntityType::MAX))
{
auto ids = findAllImpl(type);
res.insert(res.end(), ids.begin(), ids.end());
}
return res;
}
UUID IAccessStorage::getID(AccessEntityType type, const String & name) const
{
auto id = findImpl(type, name);
@ -613,37 +598,19 @@ void IAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, c
if (!isBackupAllowed())
throwBackupNotAllowed();
auto entities_ids = findAll(type);
if (entities_ids.empty())
auto entities = readAllWithIDs(type);
std::erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
if (entities.empty())
return;
auto backup_entry_with_path = makeBackupEntryForAccessEntities(
entities_ids,
backup_entries_collector.getAllAccessEntities(),
backup_entries_collector.getBackupSettings().write_access_entities_dependents,
data_path_in_backup);
auto backup_entry = makeBackupEntryForAccess(
entities,
data_path_in_backup,
backup_entries_collector.getAccessCounter(type),
backup_entries_collector.getContext()->getAccessControl());
if (isReplicated())
{
auto backup_coordination = backup_entries_collector.getBackupCoordination();
auto replication_id = getReplicationID();
backup_coordination->addReplicatedAccessFilePath(replication_id, type, backup_entry_with_path.first);
backup_entries_collector.addPostTask(
[backup_entry = backup_entry_with_path.second,
replication_id,
type,
&backup_entries_collector,
backup_coordination]
{
for (const String & path : backup_coordination->getReplicatedAccessFilePaths(replication_id, type))
backup_entries_collector.addBackupEntry(path, backup_entry);
});
}
else
{
backup_entries_collector.addBackupEntry(backup_entry_with_path);
}
backup_entries_collector.addBackupEntry(backup_entry);
}
@ -652,12 +619,8 @@ void IAccessStorage::restoreFromBackup(RestorerFromBackup & restorer, const Stri
if (!isRestoreAllowed())
throwRestoreNotAllowed();
if (isReplicated())
{
auto restore_coordination = restorer.getRestoreCoordination();
if (!restore_coordination->acquireReplicatedAccessStorage(getReplicationID()))
return;
}
if (isReplicated() && !acquireReplicatedRestore(restorer))
return;
restorer.addDataRestoreTask(
[this, &restorer, data_path_in_backup]

View File

@ -66,7 +66,6 @@ public:
/// Returns true if this storage is replicated.
virtual bool isReplicated() const { return false; }
virtual String getReplicationID() const { return ""; }
/// Starts periodic reloading and updating of entities in this storage.
virtual void startPeriodicReloading() {}
@ -91,9 +90,8 @@ public:
/// Returns the identifiers of all the entities of a specified type contained in the storage.
std::vector<UUID> findAll(AccessEntityType type) const;
/// Returns the identifiers of all the entities in the storage.
template <typename EntityClassT = IAccessEntity>
std::vector<UUID> findAll() const;
template <typename EntityClassT>
std::vector<UUID> findAll() const { return findAll(EntityClassT::TYPE); }
/// Searches for an entity with specified type and name. Returns std::nullopt if not found.
std::optional<UUID> find(AccessEntityType type, const String & name) const;
@ -150,7 +148,7 @@ public:
std::optional<std::pair<String, AccessEntityType>> tryReadNameWithType(const UUID & id) const;
/// Reads all entities and returns them with their IDs.
template <typename EntityClassT = IAccessEntity>
template <typename EntityClassT>
std::vector<std::pair<UUID, std::shared_ptr<const EntityClassT>>> readAllWithIDs() const;
std::vector<std::pair<UUID, AccessEntityPtr>> readAllWithIDs(AccessEntityType type) const;
@ -221,7 +219,6 @@ public:
protected:
virtual std::optional<UUID> findImpl(AccessEntityType type, const String & name) const = 0;
virtual std::vector<UUID> findAllImpl(AccessEntityType type) const = 0;
virtual std::vector<UUID> findAllImpl() const;
virtual AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const = 0;
virtual std::optional<std::pair<String, AccessEntityType>> readNameWithTypeImpl(const UUID & id, bool throw_if_not_exists) const;
virtual bool insertImpl(const UUID & id, const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists, UUID * conflicting_id);
@ -270,16 +267,6 @@ private:
};
template <typename EntityClassT>
std::vector<UUID> IAccessStorage::findAll() const
{
if constexpr (std::is_same_v<EntityClassT, IAccessEntity>)
return findAllImpl();
else
return findAllImpl(EntityClassT::TYPE);
}
template <typename EntityClassT>
std::shared_ptr<const EntityClassT> IAccessStorage::read(const UUID & id, bool throw_if_not_exists) const
{

View File

@ -47,10 +47,4 @@ void Quota::removeDependencies(const std::unordered_set<UUID> & ids)
to_roles.removeDependencies(ids);
}
void Quota::clearAllExceptDependencies()
{
all_limits.clear();
key_type = QuotaKeyType::NONE;
}
}

View File

@ -51,7 +51,6 @@ struct Quota : public IAccessEntity
void replaceDependencies(const std::unordered_map<UUID, UUID> & old_to_new_ids) override;
void copyDependenciesFrom(const IAccessEntity & src, const std::unordered_set<UUID> & ids) override;
void removeDependencies(const std::unordered_set<UUID> & ids) override;
void clearAllExceptDependencies() override;
bool isBackupAllowed() const override { return true; }
};

View File

@ -4,6 +4,10 @@
#include <Access/ReplicatedAccessStorage.h>
#include <Access/AccessChangesNotifier.h>
#include <Access/AccessBackup.h>
#include <Backups/BackupEntriesCollector.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/IRestoreCoordination.h>
#include <Backups/RestorerFromBackup.h>
#include <IO/ReadHelpers.h>
#include <Interpreters/Context.h>
#include <Common/ZooKeeper/KeeperException.h>
@ -680,4 +684,44 @@ AccessEntityPtr ReplicatedAccessStorage::readImpl(const UUID & id, bool throw_if
return memory_storage.read(id, throw_if_not_exists);
}
void ReplicatedAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const
{
if (!isBackupAllowed())
throwBackupNotAllowed();
auto entities = readAllWithIDs(type);
std::erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
if (entities.empty())
return;
auto backup_entry_with_path = makeBackupEntryForAccess(
entities,
data_path_in_backup,
backup_entries_collector.getAccessCounter(type),
backup_entries_collector.getContext()->getAccessControl());
auto backup_coordination = backup_entries_collector.getBackupCoordination();
backup_coordination->addReplicatedAccessFilePath(zookeeper_path, type, backup_entry_with_path.first);
backup_entries_collector.addPostTask(
[backup_entry = backup_entry_with_path.second,
my_zookeeper_path = zookeeper_path,
type,
&backup_entries_collector,
backup_coordination]
{
for (const String & path : backup_coordination->getReplicatedAccessFilePaths(my_zookeeper_path, type))
backup_entries_collector.addBackupEntry(path, backup_entry);
});
}
bool ReplicatedAccessStorage::acquireReplicatedRestore(RestorerFromBackup & restorer) const
{
auto restore_coordination = restorer.getRestoreCoordination();
return restore_coordination->acquireReplicatedAccessStorage(zookeeper_path);
}
}

View File

@ -26,9 +26,7 @@ public:
void shutdown() override;
const char * getStorageType() const override { return STORAGE_TYPE; }
bool isReplicated() const override { return true; }
String getReplicationID() const override { return zookeeper_path; }
void startPeriodicReloading() override { startWatchingThread(); }
void stopPeriodicReloading() override { stopWatchingThread(); }
@ -37,6 +35,7 @@ public:
bool exists(const UUID & id) const override;
bool isBackupAllowed() const override { return backup_allowed; }
void backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const override;
private:
String zookeeper_path;
@ -81,6 +80,7 @@ private:
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
AccessEntityPtr readImpl(const UUID & id, bool throw_if_not_exists) const override;
bool acquireReplicatedRestore(RestorerFromBackup & restorer) const override;
mutable std::mutex mutex;
MemoryAccessStorage memory_storage TSA_GUARDED_BY(mutex);

View File

@ -47,10 +47,4 @@ void Role::removeDependencies(const std::unordered_set<UUID> & ids)
settings.removeDependencies(ids);
}
void Role::clearAllExceptDependencies()
{
access = {};
settings.removeSettingsKeepProfiles();
}
}

View File

@ -25,7 +25,6 @@ struct Role : public IAccessEntity
void replaceDependencies(const std::unordered_map<UUID, UUID> & old_to_new_ids) override;
void copyDependenciesFrom(const IAccessEntity & src, const std::unordered_set<UUID> & ids) override;
void removeDependencies(const std::unordered_set<UUID> & ids) override;
void clearAllExceptDependencies() override;
bool isBackupAllowed() const override { return settings.isBackupAllowed(); }
};

View File

@ -86,10 +86,4 @@ void RowPolicy::removeDependencies(const std::unordered_set<UUID> & ids)
to_roles.removeDependencies(ids);
}
void RowPolicy::clearAllExceptDependencies()
{
for (auto & filter : filters)
filter = {};
}
}

View File

@ -54,7 +54,6 @@ struct RowPolicy : public IAccessEntity
void replaceDependencies(const std::unordered_map<UUID, UUID> & old_to_new_ids) override;
void copyDependenciesFrom(const IAccessEntity & src, const std::unordered_set<UUID> & ids) override;
void removeDependencies(const std::unordered_set<UUID> & ids) override;
void clearAllExceptDependencies() override;
bool isBackupAllowed() const override { return true; }

View File

@ -47,9 +47,4 @@ void SettingsProfile::removeDependencies(const std::unordered_set<UUID> & ids)
to_roles.removeDependencies(ids);
}
void SettingsProfile::clearAllExceptDependencies()
{
elements.removeSettingsKeepProfiles();
}
}

View File

@ -26,7 +26,6 @@ struct SettingsProfile : public IAccessEntity
void replaceDependencies(const std::unordered_map<UUID, UUID> & old_to_new_ids) override;
void copyDependenciesFrom(const IAccessEntity & src, const std::unordered_set<UUID> & ids) override;
void removeDependencies(const std::unordered_set<UUID> & ids) override;
void clearAllExceptDependencies() override;
bool isBackupAllowed() const override { return elements.isBackupAllowed(); }
};

View File

@ -211,15 +211,6 @@ void SettingsProfileElements::removeDependencies(const std::unordered_set<UUID>
}
void SettingsProfileElements::removeSettingsKeepProfiles()
{
for (auto & element : *this)
element.setting_name.clear();
std::erase_if(*this, [&](const SettingsProfileElement & element) { return element.setting_name.empty() && !element.parent_profile; });
}
void SettingsProfileElements::merge(const SettingsProfileElements & other)
{
insert(end(), other.begin(), other.end());

View File

@ -68,8 +68,6 @@ public:
void copyDependenciesFrom(const SettingsProfileElements & src, const std::unordered_set<UUID> & ids);
void removeDependencies(const std::unordered_set<UUID> & ids);
void removeSettingsKeepProfiles();
void merge(const SettingsProfileElements & other);
Settings toSettings() const;

View File

@ -81,14 +81,4 @@ void User::removeDependencies(const std::unordered_set<UUID> & ids)
settings.removeDependencies(ids);
}
void User::clearAllExceptDependencies()
{
authentication_methods.clear();
allowed_client_hosts = AllowedClientHosts::AnyHostTag{};
access = {};
settings.removeSettingsKeepProfiles();
default_database = {};
valid_until = 0;
}
}

View File

@ -36,7 +36,6 @@ struct User : public IAccessEntity
void replaceDependencies(const std::unordered_map<UUID, UUID> & old_to_new_ids) override;
void copyDependenciesFrom(const IAccessEntity & src, const std::unordered_set<UUID> & ids) override;
void removeDependencies(const std::unordered_set<UUID> & ids) override;
void clearAllExceptDependencies() override;
bool isBackupAllowed() const override { return settings.isBackupAllowed(); }
};

View File

@ -3,89 +3,370 @@
#include <Parsers/FunctionSecretArgumentsFinder.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/IdentifierNode.h>
#include <Analyzer/ListNode.h>
#include <Common/KnownObjectNames.h>
#include <Core/QualifiedTableName.h>
#include <boost/algorithm/string/predicate.hpp>
namespace DB
{
class FunctionTreeNode : public AbstractFunction
{
public:
class ArgumentTreeNode : public Argument
{
public:
explicit ArgumentTreeNode(const IQueryTreeNode * argument_) : argument(argument_) {}
std::unique_ptr<AbstractFunction> getFunction() const override
{
if (const auto * f = argument->as<FunctionNode>())
return std::make_unique<FunctionTreeNode>(*f);
return nullptr;
}
bool isIdentifier() const override { return argument->as<IdentifierNode>(); }
bool tryGetString(String * res, bool allow_identifier) const override
{
if (const auto * literal = argument->as<ConstantNode>())
{
if (literal->getValue().getType() != Field::Types::String)
return false;
if (res)
*res = literal->getValue().safeGet<String>();
return true;
}
if (allow_identifier)
{
if (const auto * id = argument->as<IdentifierNode>())
{
if (res)
*res = id->getIdentifier().getFullName();
return true;
}
}
return false;
}
private:
const IQueryTreeNode * argument = nullptr;
};
class ArgumentsTreeNode : public Arguments
{
public:
explicit ArgumentsTreeNode(const QueryTreeNodes * arguments_) : arguments(arguments_) {}
size_t size() const override { return arguments ? arguments->size() : 0; }
std::unique_ptr<Argument> at(size_t n) const override { return std::make_unique<ArgumentTreeNode>(arguments->at(n).get()); }
private:
const QueryTreeNodes * arguments = nullptr;
};
explicit FunctionTreeNode(const FunctionNode & function_) : function(&function_)
{
if (const auto & nodes = function->getArguments().getNodes(); !nodes.empty())
arguments = std::make_unique<ArgumentsTreeNode>(&nodes);
}
String name() const override { return function->getFunctionName(); }
private:
const FunctionNode * function = nullptr;
};
/// Finds arguments of a specified function which should not be displayed for most users for security reasons.
/// That involves passwords and secret keys.
class FunctionSecretArgumentsFinderTreeNode : public FunctionSecretArgumentsFinder
class FunctionSecretArgumentsFinderTreeNode
{
public:
explicit FunctionSecretArgumentsFinderTreeNode(const FunctionNode & function_)
: FunctionSecretArgumentsFinder(std::make_unique<FunctionTreeNode>(function_))
explicit FunctionSecretArgumentsFinderTreeNode(const FunctionNode & function_) : function(function_), arguments(function.getArguments())
{
if (!function->hasArguments())
if (arguments.getNodes().empty())
return;
findOrdinaryFunctionSecretArguments();
findFunctionSecretArguments();
}
struct Result
{
/// Result constructed by default means no arguments will be hidden.
size_t start = static_cast<size_t>(-1);
size_t count = 0; /// Mostly it's either 0 or 1. There are only a few cases where `count` can be greater than 1 (e.g. see `encrypt`).
/// In all known cases secret arguments are consecutive
bool are_named = false; /// Arguments like `password = 'password'` are considered as named arguments.
/// E.g. "headers" in `url('..', headers('foo' = '[HIDDEN]'))`
std::vector<std::string> nested_maps;
bool hasSecrets() const
{
return count != 0 || !nested_maps.empty();
}
};
FunctionSecretArgumentsFinder::Result getResult() const { return result; }
private:
const FunctionNode & function;
const ListNode & arguments;
FunctionSecretArgumentsFinder::Result result;
void markSecretArgument(size_t index, bool argument_is_named = false)
{
if (index >= arguments.getNodes().size())
return;
if (!result.count)
{
result.start = index;
result.are_named = argument_is_named;
}
chassert(index >= result.start); /// We always check arguments consecutively
result.count = index + 1 - result.start;
if (!argument_is_named)
result.are_named = false;
}
void findFunctionSecretArguments()
{
const auto & name = function.getFunctionName();
if ((name == "mysql") || (name == "postgresql") || (name == "mongodb"))
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
/// postgresql('host:port', 'database', 'table', 'user', 'password', ...)
/// mongodb('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if ((name == "s3") || (name == "cosn") || (name == "oss") ||
(name == "deltaLake") || (name == "hudi") || (name == "iceberg"))
{
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
findS3FunctionSecretArguments(/* is_cluster_function= */ false);
}
else if (name == "s3Cluster")
{
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', ...)
findS3FunctionSecretArguments(/* is_cluster_function= */ true);
}
else if ((name == "remote") || (name == "remoteSecure"))
{
/// remote('addresses_expr', 'db', 'table', 'user', 'password', ...)
findRemoteFunctionSecretArguments();
}
else if ((name == "encrypt") || (name == "decrypt") ||
(name == "aes_encrypt_mysql") || (name == "aes_decrypt_mysql") ||
(name == "tryDecrypt"))
{
/// encrypt('mode', 'plaintext', 'key' [, iv, aad])
findEncryptionFunctionSecretArguments();
}
else if (name == "url")
{
findURLSecretArguments();
}
}
void findMySQLFunctionSecretArguments()
{
if (isNamedCollectionName(0))
{
/// mysql(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
}
else
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
markSecretArgument(4);
}
}
/// Returns the number of arguments excluding "headers" and "extra_credentials" (which should
/// always be at the end). Marks "headers" as secret, if found.
size_t excludeS3OrURLNestedMaps()
{
const auto & nodes = arguments.getNodes();
size_t count = nodes.size();
while (count > 0)
{
const FunctionNode * f = nodes.at(count - 1)->as<FunctionNode>();
if (!f)
break;
if (f->getFunctionName() == "headers")
result.nested_maps.push_back(f->getFunctionName());
else if (f->getFunctionName() != "extra_credentials")
break;
count -= 1;
}
return count;
}
void findS3FunctionSecretArguments(bool is_cluster_function)
{
/// s3Cluster('cluster_name', 'url', ...) has 'url' as its second argument.
size_t url_arg_idx = is_cluster_function ? 1 : 0;
if (!is_cluster_function && isNamedCollectionName(0))
{
/// s3(named_collection, ..., secret_access_key = 'secret_access_key', ...)
findSecretNamedArgument("secret_access_key", 1);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case of
/// s3('url', NOSIGN, 'format' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
/// s3('url', 'format', 'structure' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
size_t count = excludeS3OrURLNestedMaps();
if ((url_arg_idx + 3 <= count) && (count <= url_arg_idx + 4))
{
String second_arg;
if (tryGetStringFromArgument(url_arg_idx + 1, &second_arg))
{
if (boost::iequals(second_arg, "NOSIGN"))
return; /// The argument after 'url' is "NOSIGN".
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'url' is a format: s3('url', 'format', ...)
}
}
/// We're going to replace 'aws_secret_access_key' with '[HIDDEN]' for the following signatures:
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
if (url_arg_idx + 2 < count)
markSecretArgument(url_arg_idx + 2);
}
void findURLSecretArguments()
{
if (!isNamedCollectionName(0))
excludeS3OrURLNestedMaps();
}
bool tryGetStringFromArgument(size_t arg_idx, String * res, bool allow_identifier = true) const
{
if (arg_idx >= arguments.getNodes().size())
return false;
return tryGetStringFromArgument(arguments.getNodes()[arg_idx], res, allow_identifier);
}
static bool tryGetStringFromArgument(const QueryTreeNodePtr argument, String * res, bool allow_identifier = true)
{
if (const auto * literal = argument->as<ConstantNode>())
{
if (literal->getValue().getType() != Field::Types::String)
return false;
if (res)
*res = literal->getValue().safeGet<String>();
return true;
}
if (allow_identifier)
{
if (const auto * id = argument->as<IdentifierNode>())
{
if (res)
*res = id->getIdentifier().getFullName();
return true;
}
}
return false;
}
void findRemoteFunctionSecretArguments()
{
if (isNamedCollectionName(0))
{
/// remote(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
return;
}
/// We're going to replace 'password' with '[HIDDEN]' for the following signatures:
/// remote('addresses_expr', db.table, 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', 'db', 'table', 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', table_function(), 'user' [, 'password'] [, sharding_key])
/// But we should check the number of arguments first because we don't need to do any replacements in case of
/// remote('addresses_expr', db.table)
if (arguments.getNodes().size() < 3)
return;
size_t arg_num = 1;
/// Skip 1 or 2 arguments with table_function() or db.table or 'db', 'table'.
const auto * table_function = arguments.getNodes()[arg_num]->as<FunctionNode>();
if (table_function && KnownTableFunctionNames::instance().exists(table_function->getFunctionName()))
{
++arg_num;
}
else
{
std::optional<String> database;
std::optional<QualifiedTableName> qualified_table_name;
if (!tryGetDatabaseNameOrQualifiedTableName(arg_num, database, qualified_table_name))
{
/// We couldn't evaluate the argument so we don't know whether it is 'db.table' or just 'db'.
/// Hence we can't figure out whether we should skip one argument 'user' or two arguments 'table', 'user'
/// before the argument 'password'. So it's safer to wipe two arguments just in case.
/// The last argument can be also a `sharding_key`, so we need to check that argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
if (tryGetStringFromArgument(arg_num + 2, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `user`.
markSecretArgument(arg_num + 2);
}
if (tryGetStringFromArgument(arg_num + 3, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `sharding_key`.
markSecretArgument(arg_num + 3);
}
return;
}
/// Skip the current argument (which is either a database name or a qualified table name).
++arg_num;
if (database)
{
/// Skip the 'table' argument if the previous argument was a database name.
++arg_num;
}
}
/// Skip username.
++arg_num;
/// Do our replacement:
/// remote('addresses_expr', db.table, 'user', 'password', ...) -> remote('addresses_expr', db.table, 'user', '[HIDDEN]', ...)
/// The last argument can be also a `sharding_key`, so we need to check that argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
bool can_be_password = tryGetStringFromArgument(arg_num, nullptr, /* allow_identifier= */ false);
if (can_be_password)
markSecretArgument(arg_num);
}
/// Tries to get either a database name or a qualified table name from an argument.
/// Empty string is also allowed (it means the default database).
/// The function is used by findRemoteFunctionSecretArguments() to determine how many arguments to skip before a password.
bool tryGetDatabaseNameOrQualifiedTableName(
size_t arg_idx,
std::optional<String> & res_database,
std::optional<QualifiedTableName> & res_qualified_table_name) const
{
res_database.reset();
res_qualified_table_name.reset();
String str;
if (!tryGetStringFromArgument(arg_idx, &str, /* allow_identifier= */ true))
return false;
if (str.empty())
{
res_database = "";
return true;
}
auto qualified_table_name = QualifiedTableName::tryParseFromString(str);
if (!qualified_table_name)
return false;
if (qualified_table_name->database.empty())
res_database = std::move(qualified_table_name->table);
else
res_qualified_table_name = std::move(qualified_table_name);
return true;
}
void findEncryptionFunctionSecretArguments()
{
if (arguments.getNodes().empty())
return;
/// We replace all arguments after 'mode' with '[HIDDEN]':
/// encrypt('mode', 'plaintext', 'key' [, iv, aad]) -> encrypt('mode', '[HIDDEN]')
result.start = 1;
result.count = arguments.getNodes().size() - 1;
}
/// Checks whether a specified argument can be the name of a named collection.
bool isNamedCollectionName(size_t arg_idx) const
{
if (arguments.getNodes().size() <= arg_idx)
return false;
const auto * identifier = arguments.getNodes()[arg_idx]->as<IdentifierNode>();
return identifier != nullptr;
}
/// Looks for a secret argument with a specified name, i.e. a named argument of the form `key = value` with the given key.
void findSecretNamedArgument(const std::string_view & key, size_t start = 0)
{
for (size_t i = start; i < arguments.getNodes().size(); ++i)
{
const auto & argument = arguments.getNodes()[i];
const auto * equals_func = argument->as<FunctionNode>();
if (!equals_func || (equals_func->getFunctionName() != "equals"))
continue;
const auto * expr_list = equals_func->getArguments().as<ListNode>();
if (!expr_list)
continue;
const auto & equal_args = expr_list->getNodes();
if (equal_args.size() != 2)
continue;
String found_key;
if (!tryGetStringFromArgument(equal_args[0], &found_key))
continue;
if (found_key == key)
markSecretArgument(i, /* argument_is_named= */ true);
}
}
};
}
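
As a standalone illustration of the bookkeeping done by `markSecretArgument` and the `Result` struct above (secret arguments are assumed to be consecutive, so only a start index and a count are stored), here is a minimal sketch with a hypothetical `SecretRange` type, independent of the ClickHouse query tree:

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical stand-in for FunctionSecretArgumentsFinder::Result: secret arguments
// are assumed to be consecutive, so a start index and a count are enough.
struct SecretRange
{
    size_t start = static_cast<size_t>(-1);
    size_t count = 0;

    void markSecret(size_t index)
    {
        if (count == 0)
            start = index;
        assert(index >= start);        // arguments are always inspected consecutively
        count = index + 1 - start;
    }
};

int main()
{
    // mysql('host:port', 'database', 'table', 'user', 'password', ...):
    // only argument #4 (the password) is secret.
    SecretRange mysql_range;
    mysql_range.markSecret(4);
    assert(mysql_range.start == 4 && mysql_range.count == 1);

    // encrypt('mode', 'plaintext', 'key', iv, aad): everything after 'mode' is hidden.
    SecretRange encrypt_range;
    for (size_t i = 1; i < 5; ++i)
        encrypt_range.markSecret(i);
    assert(encrypt_range.start == 1 && encrypt_range.count == 4);
    return 0;
}
```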

View File

@ -2564,8 +2564,8 @@ void checkFunctionNodeHasEmptyNullsAction(FunctionNode const & node)
if (node.getNullsAction() != NullsAction::EMPTY)
throw Exception(
ErrorCodes::SYNTAX_ERROR,
"Function with name {} cannot use {} NULLS",
backQuote(node.getFunctionName()),
"Function with name '{}' cannot use {} NULLS",
node.getFunctionName(),
node.getNullsAction() == NullsAction::IGNORE_NULLS ? "IGNORE" : "RESPECT");
}
}
@ -3228,16 +3228,16 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
auto hints = NamePrompter<2>::getHints(function_name, possible_function_names);
throw Exception(ErrorCodes::UNKNOWN_FUNCTION,
"Function with name {} does not exist. In scope {}{}",
backQuote(function_name),
"Function with name '{}' does not exist. In scope {}{}",
function_name,
scope.scope_node->formatASTForErrorMessage(),
getHintsErrorMessageSuffix(hints));
}
if (!function_lambda_arguments_indexes.empty())
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
"Aggregate function {} does not support lambda arguments",
backQuote(function_name));
"Aggregate function '{}' does not support lambda arguments",
function_name);
auto action = function_node_ptr->getNullsAction();
std::string aggregate_function_name = rewriteAggregateFunctionNameIfNeeded(function_name, action, scope.context);
@ -3679,10 +3679,10 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(
auto hints = IdentifierResolver::collectIdentifierTypoHints(unresolved_identifier, valid_identifiers);
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown {}{} identifier {} in scope {}{}",
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown {}{} identifier '{}' in scope {}{}",
toStringLowercase(IdentifierLookupContext::EXPRESSION),
message_clarification,
backQuote(unresolved_identifier.getFullName()),
unresolved_identifier.getFullName(),
scope.scope_node->formatASTForErrorMessage(),
getHintsErrorMessageSuffix(hints));
}

View File

@ -570,7 +570,7 @@ void BackupCoordinationRemote::prepareReplicatedAccess() const
if (replicated_access)
return;
std::vector<BackupCoordinationReplicatedAccess::FilePathForAccessEntity> file_path_for_access_entities;
std::vector<BackupCoordinationReplicatedAccess::FilePathForAccessEntitry> file_path_for_access_entities;
auto holder = with_retries.createRetriesControlHolder("prepareReplicatedAccess");
holder.retries_ctl.retryLoop(
[&, &zk = holder.faulty_zookeeper]()

View File

@ -1,9 +1,5 @@
#include <Backups/BackupCoordinationReplicatedAccess.h>
#include <filesystem>
namespace fs = std::filesystem;
namespace DB
{
@ -11,7 +7,7 @@ namespace DB
BackupCoordinationReplicatedAccess::BackupCoordinationReplicatedAccess() = default;
BackupCoordinationReplicatedAccess::~BackupCoordinationReplicatedAccess() = default;
void BackupCoordinationReplicatedAccess::addFilePath(FilePathForAccessEntity && file_path_for_access_entity)
void BackupCoordinationReplicatedAccess::addFilePath(FilePathForAccessEntitry && file_path_for_access_entity)
{
const auto & access_zk_path = file_path_for_access_entity.access_zk_path;
const auto & access_entity_type = file_path_for_access_entity.access_entity_type;
@ -32,19 +28,10 @@ Strings BackupCoordinationReplicatedAccess::getFilePaths(const String & access_z
return {};
const auto & file_paths = it->second;
if ((file_paths.host_to_store_access != host_id) || file_paths.file_paths.empty())
if (file_paths.host_to_store_access != host_id)
return {};
/// Use the same filename for all the paths in backup.
/// Those filenames have format "access-<UUID>.txt", where UUID is random.
/// It's not strictly necessary; however, it looks better if those files have the same filename
/// for a backup of ReplicatedAccessStorage made on different hosts.
Strings res;
res.reserve(file_paths.file_paths.size());
String filename = fs::path{*file_paths.file_paths.begin()}.filename();
for (const auto & file_path : file_paths.file_paths)
res.emplace_back(fs::path{file_path}.replace_filename(filename));
Strings res{file_paths.file_paths.begin(), file_paths.file_paths.end()};
return res;
}
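
The removed block above unified filenames across hosts by taking the filename of the first collected path and substituting it into the others with `fs::path::replace_filename`. A minimal self-contained sketch of that trick, with made-up paths instead of ClickHouse's backup structures:

```cpp
#include <filesystem>
#include <iostream>
#include <set>
#include <string>
#include <vector>

namespace fs = std::filesystem;

int main()
{
    /// Hypothetical paths collected from different hosts; each host generated
    /// its own random "access-<UUID>.txt" name.
    std::set<std::string> file_paths{
        "shard1/replica1/access-1111.txt",
        "shard1/replica2/access-2222.txt",
        "shard1/replica3/access-3333.txt"};

    /// Use the same filename for all paths: take the filename of the first one
    /// and substitute it into the others, as the removed code did.
    std::vector<std::string> res;
    res.reserve(file_paths.size());
    std::string filename = fs::path{*file_paths.begin()}.filename();
    for (const auto & file_path : file_paths)
        res.emplace_back(fs::path{file_path}.replace_filename(filename));

    for (const auto & p : res)
        std::cout << p << '\n';   /// every path now ends in "access-1111.txt"
}
```

After the loop every path keeps its own directory but ends in the same `access-<UUID>.txt` name, which is exactly what the removed comment describes.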

View File

@ -2,7 +2,7 @@
#include <Core/Types.h>
#include <map>
#include <set>
#include <unordered_set>
namespace DB
@ -28,7 +28,7 @@ public:
BackupCoordinationReplicatedAccess();
~BackupCoordinationReplicatedAccess();
struct FilePathForAccessEntity
struct FilePathForAccessEntitry
{
String access_zk_path;
AccessEntityType access_entity_type;
@ -37,7 +37,7 @@ public:
};
/// Adds the path to an access*.txt file that keeps the access entities of a ReplicatedAccessStorage.
void addFilePath(FilePathForAccessEntity && file_path_for_access_entity);
void addFilePath(FilePathForAccessEntitry && file_path_for_access_entity);
/// Returns all paths added by addFilePath() if `host_id` is a host chosen to store access.
Strings getFilePaths(const String & access_zk_path, AccessEntityType access_entity_type, const String & host_id) const;
@ -47,7 +47,7 @@ private:
struct FilePathsAndHost
{
std::set<String> file_paths;
std::unordered_set<String> file_paths;
String host_to_store_access;
};

View File

@ -1,5 +1,4 @@
#include <Access/Common/AccessEntityType.h>
#include <Access/AccessControl.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupEntriesCollector.h>
#include <Backups/BackupEntryFromMemory.h>
@ -904,20 +903,11 @@ void BackupEntriesCollector::runPostTasks()
LOG_TRACE(log, "All post tasks successfully executed");
}
std::unordered_map<UUID, AccessEntityPtr> BackupEntriesCollector::getAllAccessEntities()
size_t BackupEntriesCollector::getAccessCounter(AccessEntityType type)
{
std::lock_guard lock(mutex);
if (!all_access_entities)
{
all_access_entities.emplace();
auto entities_with_ids = context->getAccessControl().readAllWithIDs();
for (const auto & [id, entity] : entities_with_ids)
{
if (entity->isBackupAllowed())
all_access_entities->emplace(id, entity);
}
}
return *all_access_entities;
access_counters.resize(static_cast<size_t>(AccessEntityType::MAX));
return access_counters[static_cast<size_t>(type)]++;
}
}
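
The restored `getAccessCounter()` lazily sizes a per-entity-type vector and hands out post-incremented ids. A tiny standalone sketch of that counter pattern (the enum values below are hypothetical):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

/// Hypothetical entity types; MAX marks the number of entries.
enum class AccessEntityType : size_t { USER, ROLE, QUOTA, MAX };

struct Counters
{
    std::vector<size_t> access_counters;

    /// Lazily size the vector, then hand out an incrementing id per type.
    size_t getAccessCounter(AccessEntityType type)
    {
        access_counters.resize(static_cast<size_t>(AccessEntityType::MAX));
        return access_counters[static_cast<size_t>(type)]++;
    }
};

int main()
{
    Counters c;
    std::cout << c.getAccessCounter(AccessEntityType::USER) << ' '   /// 0
              << c.getAccessCounter(AccessEntityType::USER) << ' '   /// 1
              << c.getAccessCounter(AccessEntityType::ROLE) << '\n'; /// 0
}
```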

View File

@ -21,8 +21,7 @@ class IBackupCoordination;
class IDatabase;
using DatabasePtr = std::shared_ptr<IDatabase>;
struct StorageID;
struct IAccessEntity;
using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;
enum class AccessEntityType : uint8_t;
class QueryStatus;
using QueryStatusPtr = std::shared_ptr<QueryStatus>;
@ -50,9 +49,6 @@ public:
ContextPtr getContext() const { return context; }
const ZooKeeperRetriesInfo & getZooKeeperRetriesInfo() const { return global_zookeeper_retries_info; }
/// Returns all access entities which can be put into a backup.
std::unordered_map<UUID, AccessEntityPtr> getAllAccessEntities();
/// Adds a backup entry which will be later returned by run().
/// These functions can be called by implementations of IStorage::backupData() in inherited storage classes.
void addBackupEntry(const String & file_name, BackupEntryPtr backup_entry);
@ -65,6 +61,9 @@ public:
/// 1) we need to join (in a backup) the data of replicated tables gathered on different hosts.
void addPostTask(std::function<void()> task);
/// Returns an incremental counter used to back up access control.
size_t getAccessCounter(AccessEntityType type);
private:
void calculateRootPathInBackup();
@ -178,10 +177,9 @@ private:
std::vector<std::pair<String, String>> previous_databases_metadata;
std::vector<std::pair<QualifiedTableName, String>> previous_tables_metadata;
std::optional<std::unordered_map<UUID, AccessEntityPtr>> all_access_entities;
BackupEntries backup_entries;
std::queue<std::function<void()>> post_tasks;
std::vector<size_t> access_counters;
ThreadPool & threadpool;
std::mutex mutex;

View File

@ -37,7 +37,6 @@ namespace ErrorCodes
M(Bool, check_parts) \
M(Bool, check_projection_parts) \
M(Bool, allow_backup_broken_projections) \
M(Bool, write_access_entities_dependents) \
M(Bool, internal) \
M(String, host_id) \
M(OptionalUUID, backup_uuid)

View File

@ -77,11 +77,6 @@ struct BackupSettings
/// Allow to create backup with broken projections.
bool allow_backup_broken_projections = false;
/// Whether dependents of access entities should be written along with the access entities.
/// For example, if a role is granted to a user and we're making a backup of system.roles (but not system.users)
/// this is whether the backup will contain information to grant the role to the corresponding user again.
bool write_access_entities_dependents = true;
/// Internal, should not be specified by user.
/// Whether this backup is a part of a distributed backup created by BACKUP ON CLUSTER.
bool internal = false;

View File

@ -72,6 +72,12 @@ namespace
|| (table_name.table == "row_policies") || (table_name.table == "quotas");
}
const Strings & getAllSystemAccessTableNames()
{
static const Strings all_system_access_table_names{"users", "roles", "quotas", "settings_profiles", "row_policies"};
return all_system_access_table_names;
}
/// Whether a specified name corresponds to one of the tables backing up ACLs.
bool isSystemFunctionsTableName(const QualifiedTableName & table_name)
{
@ -612,6 +618,8 @@ void RestorerFromBackup::loadSystemAccessTables()
if (restore_settings.structure_only)
return;
std::unordered_set<String> system_access_tables;
/// Special handling for ACL-related system tables.
std::lock_guard lock{mutex};
for (const auto & [table_name, table_info] : table_infos)
@ -620,12 +628,41 @@ void RestorerFromBackup::loadSystemAccessTables()
{
if (!access_restorer)
access_restorer = std::make_unique<AccessRestorerFromBackup>(backup, restore_settings);
access_restorer->addDataPath(table_info.data_path_in_backup);
access_restorer->addDataPath(table_info.data_path_in_backup, /* dependents_only = */ false);
system_access_tables.emplace(table_name.table);
}
}
if (access_restorer)
access_restorer->loadFromBackup();
if (!access_restorer)
return;
if (restore_settings.update_access_entities_dependents)
{
/// Collect dependents from all the system access tables, even from the ones we aren't restoring at the moment.
for (const auto & table_name : getAllSystemAccessTableNames())
{
if (!system_access_tables.contains(table_name))
{
std::optional<fs::path> root_path_in_use;
for (const auto & root_path_in_backup : root_paths_in_backup)
{
fs::path try_metadata_path = root_path_in_backup / "metadata" / DatabaseCatalog::SYSTEM_DATABASE / (escapeForFileName(table_name) + ".sql");
if (backup->fileExists(try_metadata_path))
{
root_path_in_use = root_path_in_backup;
break;
}
}
if (root_path_in_use)
{
String data_path_in_backup = *root_path_in_use / "data" / DatabaseCatalog::SYSTEM_DATABASE / escapeForFileName(table_name);
access_restorer->addDataPath(data_path_in_backup, /* dependents_only = */ true);
}
}
}
}
access_restorer->loadFromBackup();
}
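
The added loop probes every backup root path for `metadata/system/<table>.sql` and, when found, registers the matching data path for dependents-only loading. A rough self-contained sketch of the same probing pattern against a plain directory tree (the layout, helper, and paths are hypothetical stand-ins for the backup API):

```cpp
#include <filesystem>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

namespace fs = std::filesystem;

/// Stand-in for IBackup::fileExists(): here the "backup" is just a directory.
static bool backupFileExists(const fs::path & backup_root, const fs::path & relative)
{
    return fs::exists(backup_root / relative);
}

int main()
{
    const fs::path backup_root = "/tmp/example_backup";          /// hypothetical
    const std::vector<fs::path> root_paths_in_backup{"", "shard1/replica1"};
    const std::vector<std::string> all_access_tables{"users", "roles", "quotas"};

    for (const auto & table_name : all_access_tables)
    {
        std::optional<fs::path> root_path_in_use;
        for (const auto & root : root_paths_in_backup)
        {
            /// Look for metadata/system/<table>.sql under each candidate root.
            fs::path try_metadata_path = root / "metadata" / "system" / (table_name + ".sql");
            if (backupFileExists(backup_root, try_metadata_path))
            {
                root_path_in_use = root;
                break;
            }
        }
        if (root_path_in_use)
        {
            fs::path data_path = *root_path_in_use / "data" / "system" / table_name;
            std::cout << "would load dependents from " << data_path << '\n';
        }
    }
}
```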
void RestorerFromBackup::checkAccessForObjectsFoundInBackup() const

View File

@ -455,9 +455,6 @@ void Connection::sendAddendum()
writeStringBinary(proto_recv_chunked, *out);
}
if (server_revision >= DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCOL)
writeVarUInt(DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION, *out);
out->next();
}
@ -528,8 +525,6 @@ void Connection::receiveHello(const Poco::Timespan & handshake_timeout)
readVarUInt(server_version_major, *in);
readVarUInt(server_version_minor, *in);
readVarUInt(server_revision, *in);
if (server_revision >= DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCOL)
readVarUInt(server_parallel_replicas_protocol_version, *in);
if (server_revision >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE)
readStringBinary(server_timezone, *in);
if (server_revision >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME)
@ -964,7 +959,7 @@ void Connection::sendReadTaskResponse(const String & response)
void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & response)
{
writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out);
response.serialize(*out, server_parallel_replicas_protocol_version);
response.serialize(*out);
out->finishChunk();
out->next();
}
@ -1418,7 +1413,7 @@ ParallelReadRequest Connection::receiveParallelReadRequest() const
InitialAllRangesAnnouncement Connection::receiveInitialParallelReadAnnouncement() const
{
return InitialAllRangesAnnouncement::deserialize(*in, server_parallel_replicas_protocol_version);
return InitialAllRangesAnnouncement::deserialize(*in);
}

View File

@ -210,7 +210,6 @@ private:
UInt64 server_version_minor = 0;
UInt64 server_version_patch = 0;
UInt64 server_revision = 0;
UInt64 server_parallel_replicas_protocol_version = 0;
String server_timezone;
String server_display_name;

View File

@ -64,7 +64,6 @@ static struct InitFiu
REGULAR(lazy_pipe_fds_fail_close) \
PAUSEABLE(infinite_sleep) \
PAUSEABLE(stop_moving_part_before_swap_with_active) \
REGULAR(slowdown_index_analysis) \
namespace FailPoints

View File

@ -376,7 +376,6 @@ The server successfully detected this situation and will download merged part fr
M(ParallelReplicasReadAssignedMarks, "Sum across all replicas of how many of scheduled marks were assigned by consistent hash") \
M(ParallelReplicasReadUnassignedMarks, "Sum across all replicas of how many unassigned marks were scheduled") \
M(ParallelReplicasReadAssignedForStealingMarks, "Sum across all replicas of how many of scheduled marks were assigned for stealing by consistent hash") \
M(ParallelReplicasReadMarks, "How many marks were read by the given replica") \
\
M(ParallelReplicasStealingByHashMicroseconds, "Time spent collecting segments meant for stealing by hash") \
M(ParallelReplicasProcessingPartsMicroseconds, "Time spent processing data parts") \
@ -530,7 +529,6 @@ The server successfully detected this situation and will download merged part fr
M(CachedReadBufferReadFromCacheMicroseconds, "Time reading from filesystem cache") \
M(CachedReadBufferReadFromSourceBytes, "Bytes read from filesystem cache source (from remote fs, etc)") \
M(CachedReadBufferReadFromCacheBytes, "Bytes read from filesystem cache") \
M(CachedReadBufferPredownloadedBytes, "Bytes read from the filesystem cache source. Cache segments are read from left to right as a whole, so we may need to predownload a part of the segment that is irrelevant to the current task just to reach the needed data") \
M(CachedReadBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
M(CachedReadBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
M(CachedReadBufferCreateBufferMicroseconds, "Prepare buffer time") \

View File

@ -181,6 +181,12 @@ void SetACLRequest::addRootPath(const String & root_path) { Coordination::addRoo
void GetACLRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void SyncRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void MultiRequest::addRootPath(const String & root_path)
{
for (auto & request : requests)
request->addRootPath(root_path);
}
void CreateResponse::removeRootPath(const String & root_path) { Coordination::removeRootPath(path_created, root_path); }
void WatchResponse::removeRootPath(const String & root_path) { Coordination::removeRootPath(path, root_path); }

View File

@ -408,17 +408,11 @@ struct ReconfigResponse : virtual Response
size_t bytesSize() const override { return value.size() + sizeof(stat); }
};
template <typename T>
struct MultiRequest : virtual Request
{
std::vector<T> requests;
void addRootPath(const String & root_path) override
{
for (auto & request : requests)
request->addRootPath(root_path);
}
Requests requests;
void addRootPath(const String & root_path) override;
String getPath() const override { return {}; }
size_t bytesSize() const override

View File

@ -184,7 +184,7 @@ struct TestKeeperReconfigRequest final : ReconfigRequest, TestKeeperRequest
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
};
struct TestKeeperMultiRequest final : MultiRequest<RequestPtr>, TestKeeperRequest
struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
{
explicit TestKeeperMultiRequest(const Requests & generic_requests)
: TestKeeperMultiRequest(std::span(generic_requests))

View File

@ -18,16 +18,14 @@ using namespace DB;
void ZooKeeperResponse::write(WriteBuffer & out) const
{
auto response_size = Coordination::size(xid) + Coordination::size(zxid) + Coordination::size(error);
/// Excessive copy to calculate length.
WriteBufferFromOwnString buf;
Coordination::write(xid, buf);
Coordination::write(zxid, buf);
Coordination::write(error, buf);
if (error == Error::ZOK)
response_size += sizeImpl();
Coordination::write(static_cast<int32_t>(response_size), out);
Coordination::write(xid, out);
Coordination::write(zxid, out);
Coordination::write(error, out);
if (error == Error::ZOK)
writeImpl(out);
writeImpl(buf);
Coordination::write(buf.str(), out);
}
std::string ZooKeeperRequest::toString(bool short_format) const
@ -43,12 +41,12 @@ std::string ZooKeeperRequest::toString(bool short_format) const
void ZooKeeperRequest::write(WriteBuffer & out) const
{
auto request_size = Coordination::size(xid) + Coordination::size(getOpNum()) + sizeImpl();
Coordination::write(static_cast<int32_t>(request_size), out);
Coordination::write(xid, out);
Coordination::write(getOpNum(), out);
writeImpl(out);
/// Excessive copy to calculate length.
WriteBufferFromOwnString buf;
Coordination::write(xid, buf);
Coordination::write(getOpNum(), buf);
writeImpl(buf);
Coordination::write(buf.str(), out);
}
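
Both `write()` methods switch from precomputing the payload size via `sizeImpl()` to serializing the body into a temporary in-memory buffer; writing that buffer as a string then emits the length prefix followed by the payload (the "excessive copy" noted in the comments). A generic sketch of length-prefixed framing via a temporary buffer, using plain `std::string` instead of ClickHouse's `WriteBuffer`:

```cpp
#include <cstdint>
#include <iostream>
#include <string>

/// Append a 32-bit big-endian length prefix followed by the payload,
/// the way a ZooKeeper-style frame is laid out on the wire.
static void writeFrame(std::string & out, const std::string & payload)
{
    uint32_t n = static_cast<uint32_t>(payload.size());
    char len[4] = {char(n >> 24), char(n >> 16), char(n >> 8), char(n)};
    out.append(len, 4);
    out.append(payload);
}

int main()
{
    /// Strategy used here: serialize the body into a temporary buffer first,
    /// so the length is simply buffer.size().
    std::string body;
    body += "xid";      /// stand-ins for the serialized xid / opnum / fields
    body += "opnum";
    body += "path";

    std::string frame;
    writeFrame(frame, body);

    /// The alternative seen on the other side of this diff precomputes the size
    /// via per-field size() helpers and streams the fields directly, avoiding the copy.
    std::cout << "frame bytes: " << frame.size()
              << " (4-byte length + " << body.size() << " payload)\n";
}
```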
void ZooKeeperSyncRequest::writeImpl(WriteBuffer & out) const
@ -56,11 +54,6 @@ void ZooKeeperSyncRequest::writeImpl(WriteBuffer & out) const
Coordination::write(path, out);
}
size_t ZooKeeperSyncRequest::sizeImpl() const
{
return Coordination::size(path);
}
void ZooKeeperSyncRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -81,11 +74,6 @@ void ZooKeeperSyncResponse::writeImpl(WriteBuffer & out) const
Coordination::write(path, out);
}
size_t ZooKeeperSyncResponse::sizeImpl() const
{
return Coordination::size(path);
}
void ZooKeeperReconfigRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(joining, out);
@ -94,11 +82,6 @@ void ZooKeeperReconfigRequest::writeImpl(WriteBuffer & out) const
Coordination::write(version, out);
}
size_t ZooKeeperReconfigRequest::sizeImpl() const
{
return Coordination::size(joining) + Coordination::size(leaving) + Coordination::size(new_members) + Coordination::size(version);
}
void ZooKeeperReconfigRequest::readImpl(ReadBuffer & in)
{
Coordination::read(joining, in);
@ -126,11 +109,6 @@ void ZooKeeperReconfigResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperReconfigResponse::sizeImpl() const
{
return Coordination::size(value) + Coordination::size(stat);
}
void ZooKeeperWatchResponse::readImpl(ReadBuffer & in)
{
Coordination::read(type, in);
@ -145,11 +123,6 @@ void ZooKeeperWatchResponse::writeImpl(WriteBuffer & out) const
Coordination::write(path, out);
}
size_t ZooKeeperWatchResponse::sizeImpl() const
{
return Coordination::size(type) + Coordination::size(state) + Coordination::size(path);
}
void ZooKeeperWatchResponse::write(WriteBuffer & out) const
{
if (error == Error::ZOK)
@ -164,11 +137,6 @@ void ZooKeeperAuthRequest::writeImpl(WriteBuffer & out) const
Coordination::write(data, out);
}
size_t ZooKeeperAuthRequest::sizeImpl() const
{
return Coordination::size(type) + Coordination::size(scheme) + Coordination::size(data);
}
void ZooKeeperAuthRequest::readImpl(ReadBuffer & in)
{
Coordination::read(type, in);
@ -207,12 +175,6 @@ void ZooKeeperCreateRequest::writeImpl(WriteBuffer & out) const
Coordination::write(flags, out);
}
size_t ZooKeeperCreateRequest::sizeImpl() const
{
int32_t flags = 0;
return Coordination::size(path) + Coordination::size(data) + Coordination::size(acls) + Coordination::size(flags);
}
void ZooKeeperCreateRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -249,22 +211,12 @@ void ZooKeeperCreateResponse::writeImpl(WriteBuffer & out) const
Coordination::write(path_created, out);
}
size_t ZooKeeperCreateResponse::sizeImpl() const
{
return Coordination::size(path_created);
}
void ZooKeeperRemoveRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(path, out);
Coordination::write(version, out);
}
size_t ZooKeeperRemoveRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(version);
}
std::string ZooKeeperRemoveRequest::toStringImpl(bool /*short_format*/) const
{
return fmt::format(
@ -292,11 +244,6 @@ void ZooKeeperRemoveRecursiveRequest::readImpl(ReadBuffer & in)
Coordination::read(remove_nodes_limit, in);
}
size_t ZooKeeperRemoveRecursiveRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(remove_nodes_limit);
}
std::string ZooKeeperRemoveRecursiveRequest::toStringImpl(bool /*short_format*/) const
{
return fmt::format(
@ -312,11 +259,6 @@ void ZooKeeperExistsRequest::writeImpl(WriteBuffer & out) const
Coordination::write(has_watch, out);
}
size_t ZooKeeperExistsRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(has_watch);
}
void ZooKeeperExistsRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -338,22 +280,12 @@ void ZooKeeperExistsResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperExistsResponse::sizeImpl() const
{
return Coordination::size(stat);
}
void ZooKeeperGetRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(path, out);
Coordination::write(has_watch, out);
}
size_t ZooKeeperGetRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(has_watch);
}
void ZooKeeperGetRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -377,11 +309,6 @@ void ZooKeeperGetResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperGetResponse::sizeImpl() const
{
return Coordination::size(data) + Coordination::size(stat);
}
void ZooKeeperSetRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(path, out);
@ -389,11 +316,6 @@ void ZooKeeperSetRequest::writeImpl(WriteBuffer & out) const
Coordination::write(version, out);
}
size_t ZooKeeperSetRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(data) + Coordination::size(version);
}
void ZooKeeperSetRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -420,22 +342,12 @@ void ZooKeeperSetResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperSetResponse::sizeImpl() const
{
return Coordination::size(stat);
}
void ZooKeeperListRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(path, out);
Coordination::write(has_watch, out);
}
size_t ZooKeeperListRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(has_watch);
}
void ZooKeeperListRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -454,11 +366,6 @@ void ZooKeeperFilteredListRequest::writeImpl(WriteBuffer & out) const
Coordination::write(static_cast<uint8_t>(list_request_type), out);
}
size_t ZooKeeperFilteredListRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(has_watch) + Coordination::size(static_cast<uint8_t>(list_request_type));
}
void ZooKeeperFilteredListRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -490,11 +397,6 @@ void ZooKeeperListResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperListResponse::sizeImpl() const
{
return Coordination::size(names) + Coordination::size(stat);
}
void ZooKeeperSimpleListResponse::readImpl(ReadBuffer & in)
{
Coordination::read(names, in);
@ -505,11 +407,6 @@ void ZooKeeperSimpleListResponse::writeImpl(WriteBuffer & out) const
Coordination::write(names, out);
}
size_t ZooKeeperSimpleListResponse::sizeImpl() const
{
return Coordination::size(names);
}
void ZooKeeperSetACLRequest::writeImpl(WriteBuffer & out) const
{
Coordination::write(path, out);
@ -517,11 +414,6 @@ void ZooKeeperSetACLRequest::writeImpl(WriteBuffer & out) const
Coordination::write(version, out);
}
size_t ZooKeeperSetACLRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(acls) + Coordination::size(version);
}
void ZooKeeperSetACLRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -539,11 +431,6 @@ void ZooKeeperSetACLResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperSetACLResponse::sizeImpl() const
{
return Coordination::size(stat);
}
void ZooKeeperSetACLResponse::readImpl(ReadBuffer & in)
{
Coordination::read(stat, in);
@ -559,11 +446,6 @@ void ZooKeeperGetACLRequest::writeImpl(WriteBuffer & out) const
Coordination::write(path, out);
}
size_t ZooKeeperGetACLRequest::sizeImpl() const
{
return Coordination::size(path);
}
std::string ZooKeeperGetACLRequest::toStringImpl(bool /*short_format*/) const
{
return fmt::format("path = {}", path);
@ -575,11 +457,6 @@ void ZooKeeperGetACLResponse::writeImpl(WriteBuffer & out) const
Coordination::write(stat, out);
}
size_t ZooKeeperGetACLResponse::sizeImpl() const
{
return Coordination::size(acl) + Coordination::size(stat);
}
void ZooKeeperGetACLResponse::readImpl(ReadBuffer & in)
{
Coordination::read(acl, in);
@ -592,11 +469,6 @@ void ZooKeeperCheckRequest::writeImpl(WriteBuffer & out) const
Coordination::write(version, out);
}
size_t ZooKeeperCheckRequest::sizeImpl() const
{
return Coordination::size(path) + Coordination::size(version);
}
void ZooKeeperCheckRequest::readImpl(ReadBuffer & in)
{
Coordination::read(path, in);
@ -622,11 +494,6 @@ void ZooKeeperErrorResponse::writeImpl(WriteBuffer & out) const
Coordination::write(error, out);
}
size_t ZooKeeperErrorResponse::sizeImpl() const
{
return Coordination::size(error);
}
void ZooKeeperMultiRequest::checkOperationType(OperationType type)
{
chassert(!operation_type.has_value() || *operation_type == type);
@ -729,27 +596,6 @@ void ZooKeeperMultiRequest::writeImpl(WriteBuffer & out) const
Coordination::write(error, out);
}
size_t ZooKeeperMultiRequest::sizeImpl() const
{
size_t total_size = 0;
for (const auto & request : requests)
{
const auto & zk_request = dynamic_cast<const ZooKeeperRequest &>(*request);
bool done = false;
int32_t error = -1;
total_size
+= Coordination::size(zk_request.getOpNum()) + Coordination::size(done) + Coordination::size(error) + zk_request.sizeImpl();
}
OpNum op_num = OpNum::Error;
bool done = true;
int32_t error = -1;
return total_size + Coordination::size(op_num) + Coordination::size(done) + Coordination::size(error);
}
void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
{
while (true)
@ -883,54 +729,31 @@ void ZooKeeperMultiResponse::writeImpl(WriteBuffer & out) const
}
}
size_t ZooKeeperMultiResponse::sizeImpl() const
{
size_t total_size = 0;
for (const auto & response : responses)
{
const ZooKeeperResponse & zk_response = dynamic_cast<const ZooKeeperResponse &>(*response);
OpNum op_num = zk_response.getOpNum();
bool done = false;
Error op_error = zk_response.error;
total_size += Coordination::size(op_num) + Coordination::size(done) + Coordination::size(op_error);
if (op_error == Error::ZOK || op_num == OpNum::Error)
total_size += zk_response.sizeImpl();
}
/// Footer.
OpNum op_num = OpNum::Error;
bool done = true;
int32_t error_read = - 1;
return total_size + Coordination::size(op_num) + Coordination::size(done) + Coordination::size(error_read);
}
ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return std::make_shared<ZooKeeperHeartbeatResponse>(); }
ZooKeeperResponsePtr ZooKeeperSyncRequest::makeResponse() const { return std::make_shared<ZooKeeperSyncResponse>(); }
ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return std::make_shared<ZooKeeperAuthResponse>(); }
ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return std::make_shared<ZooKeeperRemoveResponse>(); }
ZooKeeperResponsePtr ZooKeeperRemoveRecursiveRequest::makeResponse() const { return std::make_shared<ZooKeeperRemoveRecursiveResponse>(); }
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return std::make_shared<ZooKeeperExistsResponse>(); }
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return std::make_shared<ZooKeeperGetResponse>(); }
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return std::make_shared<ZooKeeperSetResponse>(); }
ZooKeeperResponsePtr ZooKeeperReconfigRequest::makeResponse() const { return std::make_shared<ZooKeeperReconfigResponse>(); }
ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return std::make_shared<ZooKeeperListResponse>(); }
ZooKeeperResponsePtr ZooKeeperSimpleListRequest::makeResponse() const { return std::make_shared<ZooKeeperSimpleListResponse>(); }
ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperHeartbeatResponse>()); }
ZooKeeperResponsePtr ZooKeeperSyncRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSyncResponse>()); }
ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperAuthResponse>()); }
ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperRemoveResponse>()); }
ZooKeeperResponsePtr ZooKeeperRemoveRecursiveRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperRemoveRecursiveResponse>()); }
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperExistsResponse>()); }
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperGetResponse>()); }
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSetResponse>()); }
ZooKeeperResponsePtr ZooKeeperReconfigRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperReconfigResponse>()); }
ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperListResponse>()); }
ZooKeeperResponsePtr ZooKeeperSimpleListRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSimpleListResponse>()); }
ZooKeeperResponsePtr ZooKeeperCreateRequest::makeResponse() const
{
if (not_exists)
return std::make_shared<ZooKeeperCreateIfNotExistsResponse>();
return std::make_shared<ZooKeeperCreateResponse>();
return setTime(std::make_shared<ZooKeeperCreateIfNotExistsResponse>());
return setTime(std::make_shared<ZooKeeperCreateResponse>());
}
ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const
{
if (not_exists)
return std::make_shared<ZooKeeperCheckNotExistsResponse>();
return setTime(std::make_shared<ZooKeeperCheckNotExistsResponse>());
return std::make_shared<ZooKeeperCheckResponse>();
return setTime(std::make_shared<ZooKeeperCheckResponse>());
}
ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const
@ -941,12 +764,11 @@ ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const
else
response = std::make_shared<ZooKeeperMultiReadResponse>(requests);
return std::move(response);
return setTime(std::move(response));
}
ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared<ZooKeeperCloseResponse>(); }
ZooKeeperResponsePtr ZooKeeperSetACLRequest::makeResponse() const { return std::make_shared<ZooKeeperSetACLResponse>(); }
ZooKeeperResponsePtr ZooKeeperGetACLRequest::makeResponse() const { return std::make_shared<ZooKeeperGetACLResponse>(); }
ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperCloseResponse>()); }
ZooKeeperResponsePtr ZooKeeperSetACLRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSetACLResponse>()); }
ZooKeeperResponsePtr ZooKeeperGetACLRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperGetACLResponse>()); }
void ZooKeeperSessionIDRequest::writeImpl(WriteBuffer & out) const
{
@ -955,11 +777,6 @@ void ZooKeeperSessionIDRequest::writeImpl(WriteBuffer & out) const
Coordination::write(server_id, out);
}
size_t ZooKeeperSessionIDRequest::sizeImpl() const
{
return Coordination::size(internal_id) + Coordination::size(session_timeout_ms) + Coordination::size(server_id);
}
void ZooKeeperSessionIDRequest::readImpl(ReadBuffer & in)
{
Coordination::read(internal_id, in);
@ -986,11 +803,6 @@ void ZooKeeperSessionIDResponse::writeImpl(WriteBuffer & out) const
Coordination::write(server_id, out);
}
size_t ZooKeeperSessionIDResponse::sizeImpl() const
{
return Coordination::size(internal_id) + Coordination::size(session_id) + Coordination::size(server_id);
}
void ZooKeeperRequest::createLogElements(LogElements & elems) const
{
@ -1148,6 +960,40 @@ std::shared_ptr<ZooKeeperRequest> ZooKeeperRequest::read(ReadBuffer & in)
return request;
}
ZooKeeperRequest::~ZooKeeperRequest()
{
if (!request_created_time_ns)
return;
UInt64 elapsed_ns = clock_gettime_ns() - request_created_time_ns;
constexpr UInt64 max_request_time_ns = 1000000000ULL; /// 1 sec
if (max_request_time_ns < elapsed_ns)
{
LOG_TEST(getLogger(__PRETTY_FUNCTION__), "Processing of request xid={} took {} ms", xid, elapsed_ns / 1000000UL);
}
}
ZooKeeperResponsePtr ZooKeeperRequest::setTime(ZooKeeperResponsePtr response) const
{
if (request_created_time_ns)
{
response->response_created_time_ns = clock_gettime_ns();
}
return response;
}
ZooKeeperResponse::~ZooKeeperResponse()
{
if (!response_created_time_ns)
return;
UInt64 elapsed_ns = clock_gettime_ns() - response_created_time_ns;
constexpr UInt64 max_request_time_ns = 1000000000ULL; /// 1 sec
if (max_request_time_ns < elapsed_ns)
{
LOG_TEST(getLogger(__PRETTY_FUNCTION__), "Processing of response xid={} took {} ms", xid, elapsed_ns / 1000000UL);
}
}
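
The added destructors measure how long a request or response object lived and log anything above a one-second threshold. A stripped-down sketch of the same lifetime-timing pattern with `std::chrono` standing in for `clock_gettime_ns()` (the names and the demo delay are illustrative):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

/// Records its creation time and, on destruction, reports if it lived
/// longer than a threshold -- the same idea as the added destructors.
struct TimedRequest
{
    std::chrono::steady_clock::time_point created = std::chrono::steady_clock::now();
    int32_t xid = 0;

    ~TimedRequest()
    {
        using namespace std::chrono;
        auto elapsed = steady_clock::now() - created;
        constexpr auto max_request_time = seconds(1);
        if (elapsed > max_request_time)
            std::cout << "Processing of request xid=" << xid << " took "
                      << duration_cast<milliseconds>(elapsed).count() << " ms\n";
    }
};

int main()
{
    TimedRequest slow;
    slow.xid = 42;
    std::this_thread::sleep_for(std::chrono::milliseconds(1100));
    /// destructor fires here and reports the slow request
}
```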
ZooKeeperRequestPtr ZooKeeperRequestFactory::get(OpNum op_num) const
{
auto it = op_num_to_request.find(op_num);
@ -1169,6 +1015,7 @@ void registerZooKeeperRequest(ZooKeeperRequestFactory & factory)
factory.registerRequest(num, []
{
auto res = std::make_shared<RequestT>();
res->request_created_time_ns = clock_gettime_ns();
if constexpr (num == OpNum::MultiRead)
res->operation_type = ZooKeeperMultiRequest::OperationType::Read;

View File

@ -7,11 +7,13 @@
#include <boost/noncopyable.hpp>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <unordered_map>
#include <vector>
#include <memory>
#include <cstdint>
#include <optional>
#include <functional>
#include <span>
namespace Coordination
@ -23,11 +25,13 @@ struct ZooKeeperResponse : virtual Response
{
XID xid = 0;
UInt64 response_created_time_ns = 0;
ZooKeeperResponse() = default;
ZooKeeperResponse(const ZooKeeperResponse &) = default;
~ZooKeeperResponse() override;
virtual void readImpl(ReadBuffer &) = 0;
virtual void writeImpl(WriteBuffer &) const = 0;
virtual size_t sizeImpl() const = 0;
virtual void write(WriteBuffer & out) const;
virtual OpNum getOpNum() const = 0;
virtual void fillLogElements(LogElements & elems, size_t idx) const;
@ -47,11 +51,13 @@ struct ZooKeeperRequest : virtual Request
bool restored_from_zookeeper_log = false;
UInt64 request_created_time_ns = 0;
UInt64 thread_id = 0;
String query_id;
ZooKeeperRequest() = default;
ZooKeeperRequest(const ZooKeeperRequest &) = default;
~ZooKeeperRequest() override;
virtual OpNum getOpNum() const = 0;
@ -60,7 +66,6 @@ struct ZooKeeperRequest : virtual Request
std::string toString(bool short_format = false) const;
virtual void writeImpl(WriteBuffer &) const = 0;
virtual size_t sizeImpl() const = 0;
virtual void readImpl(ReadBuffer &) = 0;
virtual std::string toStringImpl(bool /*short_format*/) const { return ""; }
@ -68,6 +73,7 @@ struct ZooKeeperRequest : virtual Request
static std::shared_ptr<ZooKeeperRequest> read(ReadBuffer & in);
virtual ZooKeeperResponsePtr makeResponse() const = 0;
ZooKeeperResponsePtr setTime(ZooKeeperResponsePtr response) const;
virtual bool isReadRequest() const = 0;
virtual void createLogElements(LogElements & elems) const;
@ -80,7 +86,6 @@ struct ZooKeeperHeartbeatRequest final : ZooKeeperRequest
String getPath() const override { return {}; }
OpNum getOpNum() const override { return OpNum::Heartbeat; }
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
void readImpl(ReadBuffer &) override {}
ZooKeeperResponsePtr makeResponse() const override;
bool isReadRequest() const override { return false; }
@ -92,7 +97,6 @@ struct ZooKeeperSyncRequest final : ZooKeeperRequest
String getPath() const override { return path; }
OpNum getOpNum() const override { return OpNum::Sync; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -105,7 +109,6 @@ struct ZooKeeperSyncResponse final : SyncResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Sync; }
};
@ -119,7 +122,6 @@ struct ZooKeeperReconfigRequest final : ZooKeeperRequest
String getPath() const override { return keeper_config_path; }
OpNum getOpNum() const override { return OpNum::Reconfig; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -136,7 +138,6 @@ struct ZooKeeperReconfigResponse final : ReconfigResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Reconfig; }
};
@ -144,7 +145,6 @@ struct ZooKeeperHeartbeatResponse final : ZooKeeperResponse
{
void readImpl(ReadBuffer &) override {}
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
OpNum getOpNum() const override { return OpNum::Heartbeat; }
};
@ -153,7 +153,6 @@ struct ZooKeeperWatchResponse final : WatchResponse, ZooKeeperResponse
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void write(WriteBuffer & out) const override;
@ -176,7 +175,6 @@ struct ZooKeeperAuthRequest final : ZooKeeperRequest
String getPath() const override { return {}; }
OpNum getOpNum() const override { return OpNum::Auth; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -191,7 +189,6 @@ struct ZooKeeperAuthResponse final : ZooKeeperResponse
{
void readImpl(ReadBuffer &) override {}
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
OpNum getOpNum() const override { return OpNum::Auth; }
@ -203,7 +200,6 @@ struct ZooKeeperCloseRequest final : ZooKeeperRequest
String getPath() const override { return {}; }
OpNum getOpNum() const override { return OpNum::Close; }
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
void readImpl(ReadBuffer &) override {}
ZooKeeperResponsePtr makeResponse() const override;
@ -218,7 +214,6 @@ struct ZooKeeperCloseResponse final : ZooKeeperResponse
}
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
OpNum getOpNum() const override { return OpNum::Close; }
};
@ -233,7 +228,6 @@ struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
OpNum getOpNum() const override { return not_exists ? OpNum::CreateIfNotExists : OpNum::Create; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -250,7 +244,6 @@ struct ZooKeeperCreateResponse : CreateResponse, ZooKeeperResponse
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Create; }
@ -272,7 +265,6 @@ struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
OpNum getOpNum() const override { return OpNum::Remove; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -288,7 +280,6 @@ struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer &) override {}
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
OpNum getOpNum() const override { return OpNum::Remove; }
size_t bytesSize() const override { return RemoveResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -302,7 +293,6 @@ struct ZooKeeperRemoveRecursiveRequest final : RemoveRecursiveRequest, ZooKeeper
OpNum getOpNum() const override { return OpNum::RemoveRecursive; }
void writeImpl(WriteBuffer & out) const override;
void readImpl(ReadBuffer & in) override;
size_t sizeImpl() const override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -315,7 +305,6 @@ struct ZooKeeperRemoveRecursiveResponse : RemoveRecursiveResponse, ZooKeeperResp
{
void readImpl(ReadBuffer &) override {}
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
OpNum getOpNum() const override { return OpNum::RemoveRecursive; }
size_t bytesSize() const override { return RemoveRecursiveResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -328,7 +317,6 @@ struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
OpNum getOpNum() const override { return OpNum::Exists; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -342,7 +330,6 @@ struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Exists; }
size_t bytesSize() const override { return ExistsResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -357,7 +344,6 @@ struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
OpNum getOpNum() const override { return OpNum::Get; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -371,7 +357,6 @@ struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Get; }
size_t bytesSize() const override { return GetResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -386,7 +371,6 @@ struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
OpNum getOpNum() const override { return OpNum::Set; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -401,7 +385,6 @@ struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Set; }
size_t bytesSize() const override { return SetResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -416,7 +399,6 @@ struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
OpNum getOpNum() const override { return OpNum::List; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -437,7 +419,6 @@ struct ZooKeeperFilteredListRequest final : ZooKeeperListRequest
OpNum getOpNum() const override { return OpNum::FilteredList; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -448,7 +429,6 @@ struct ZooKeeperListResponse : ListResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::List; }
size_t bytesSize() const override { return ListResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -460,7 +440,6 @@ struct ZooKeeperSimpleListResponse final : ZooKeeperListResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::SimpleList; }
size_t bytesSize() const override { return ZooKeeperListResponse::bytesSize() - sizeof(stat); }
@ -473,7 +452,6 @@ struct ZooKeeperCheckRequest : CheckRequest, ZooKeeperRequest
OpNum getOpNum() const override { return not_exists ? OpNum::CheckNotExists : OpNum::Check; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -489,7 +467,6 @@ struct ZooKeeperCheckResponse : CheckResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer &) override {}
void writeImpl(WriteBuffer &) const override {}
size_t sizeImpl() const override { return 0; }
OpNum getOpNum() const override { return OpNum::Check; }
size_t bytesSize() const override { return CheckResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -506,7 +483,6 @@ struct ZooKeeperErrorResponse final : ErrorResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::Error; }
@ -517,7 +493,6 @@ struct ZooKeeperSetACLRequest final : SetACLRequest, ZooKeeperRequest
{
OpNum getOpNum() const override { return OpNum::SetACL; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -530,7 +505,6 @@ struct ZooKeeperSetACLResponse final : SetACLResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::SetACL; }
size_t bytesSize() const override { return SetACLResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -540,7 +514,6 @@ struct ZooKeeperGetACLRequest final : GetACLRequest, ZooKeeperRequest
{
OpNum getOpNum() const override { return OpNum::GetACL; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
ZooKeeperResponsePtr makeResponse() const override;
@ -553,13 +526,12 @@ struct ZooKeeperGetACLResponse final : GetACLResponse, ZooKeeperResponse
{
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
OpNum getOpNum() const override { return OpNum::GetACL; }
size_t bytesSize() const override { return GetACLResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
};
struct ZooKeeperMultiRequest final : MultiRequest<ZooKeeperRequestPtr>, ZooKeeperRequest
struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
{
OpNum getOpNum() const override;
ZooKeeperMultiRequest() = default;
@ -568,7 +540,6 @@ struct ZooKeeperMultiRequest final : MultiRequest<ZooKeeperRequestPtr>, ZooKeepe
ZooKeeperMultiRequest(std::span<const Coordination::RequestPtr> generic_requests, const ACLs & default_acls);
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
std::string toStringImpl(bool short_format) const override;
@ -592,14 +563,12 @@ private:
struct ZooKeeperMultiResponse : MultiResponse, ZooKeeperResponse
{
ZooKeeperMultiResponse() = default;
explicit ZooKeeperMultiResponse(const std::vector<ZooKeeperRequestPtr> & requests)
explicit ZooKeeperMultiResponse(const Requests & requests)
{
responses.reserve(requests.size());
for (const auto & request : requests)
responses.emplace_back(request->makeResponse());
responses.emplace_back(dynamic_cast<const ZooKeeperRequest &>(*request).makeResponse());
}
explicit ZooKeeperMultiResponse(const Responses & responses_)
@ -610,7 +579,6 @@ struct ZooKeeperMultiResponse : MultiResponse, ZooKeeperResponse
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
size_t bytesSize() const override { return MultiResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
@ -641,7 +609,6 @@ struct ZooKeeperSessionIDRequest final : ZooKeeperRequest
Coordination::OpNum getOpNum() const override { return OpNum::SessionID; }
String getPath() const override { return {}; }
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
void readImpl(ReadBuffer & in) override;
Coordination::ZooKeeperResponsePtr makeResponse() const override;
@ -660,7 +627,6 @@ struct ZooKeeperSessionIDResponse final : ZooKeeperResponse
void readImpl(ReadBuffer & in) override;
void writeImpl(WriteBuffer & out) const override;
size_t sizeImpl() const override;
Coordination::OpNum getOpNum() const override { return OpNum::SessionID; }
};

View File

@ -42,32 +42,6 @@ void write(const Error & x, WriteBuffer & out)
write(static_cast<int32_t>(x), out);
}
size_t size(OpNum x)
{
return size(static_cast<int32_t>(x));
}
size_t size(const std::string & s)
{
return size(static_cast<int32_t>(s.size())) + s.size();
}
size_t size(const ACL & acl)
{
return size(acl.permissions) + size(acl.scheme) + size(acl.id);
}
size_t size(const Stat & stat)
{
return size(stat.czxid) + size(stat.mzxid) + size(stat.ctime) + size(stat.mtime) + size(stat.version) + size(stat.cversion)
+ size(stat.aversion) + size(stat.ephemeralOwner) + size(stat.dataLength) + size(stat.numChildren) + size(stat.pzxid);
}
size_t size(const Error & x)
{
return size(static_cast<int32_t>(x));
}
void read(OpNum & x, ReadBuffer & in)
{
int32_t raw_op_num;

View File

@ -43,36 +43,6 @@ void write(const std::vector<T> & arr, WriteBuffer & out)
write(elem, out);
}
template <typename T>
requires is_arithmetic_v<T>
size_t size(T x)
{
return sizeof(x);
}
size_t size(OpNum x);
size_t size(const std::string & s);
size_t size(const ACL & acl);
size_t size(const Stat & stat);
size_t size(const Error & x);
template <size_t N>
size_t size(const std::array<char, N>)
{
return size(static_cast<int32_t>(N)) + N;
}
template <typename T>
size_t size(const std::vector<T> & arr)
{
size_t total_size = size(static_cast<int32_t>(arr.size()));
for (const auto & elem : arr)
total_size += size(elem);
return total_size;
}
template <typename T>
requires is_arithmetic_v<T>
void read(T & x, ReadBuffer & in)

View File

@ -1,4 +1,4 @@
clickhouse_add_executable(integer_hash_tables_and_hashes integer_hash_tables_and_hashes.cpp orc_string_dictionary.cpp)
clickhouse_add_executable(integer_hash_tables_and_hashes integer_hash_tables_and_hashes.cpp)
target_link_libraries (integer_hash_tables_and_hashes PRIVATE
ch_contrib::gbenchmark_all
dbms
@ -7,8 +7,3 @@ target_link_libraries (integer_hash_tables_and_hashes PRIVATE
ch_contrib::wyhash
ch_contrib::farmhash
ch_contrib::xxHash)
clickhouse_add_executable(orc_string_dictionary orc_string_dictionary.cpp)
target_link_libraries (orc_string_dictionary PRIVATE
ch_contrib::gbenchmark_all
dbms)

View File

@ -1,311 +0,0 @@
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <base/defines.h>
#include <benchmark/benchmark.h>
class OldSortedStringDictionary
{
public:
struct DictEntry
{
DictEntry(const char * str, size_t len) : data(str), length(len) { }
const char * data;
size_t length;
};
OldSortedStringDictionary() : totalLength(0) { }
// insert a new string into dictionary, return its insertion order
size_t insert(const char * str, size_t len);
// reorder input index buffer from insertion order to dictionary order
void reorder(std::vector<int64_t> & idxBuffer) const;
// get dict entries in insertion order
void getEntriesInInsertionOrder(std::vector<const DictEntry *> &) const;
size_t size() const;
// return total length of strings in the dictionary
uint64_t length() const;
void clear();
// store indexes of insertion order in the dictionary for not-null rows
std::vector<int64_t> idxInDictBuffer;
private:
struct LessThan
{
bool operator()(const DictEntry & left, const DictEntry & right) const
{
int ret = memcmp(left.data, right.data, std::min(left.length, right.length));
if (ret != 0)
{
return ret < 0;
}
return left.length < right.length;
}
};
std::map<DictEntry, size_t, LessThan> dict;
std::vector<std::vector<char>> data;
uint64_t totalLength;
};
// insert a new string into dictionary, return its insertion order
size_t OldSortedStringDictionary::insert(const char * str, size_t len)
{
auto ret = dict.insert({DictEntry(str, len), dict.size()});
if (ret.second)
{
// make a copy to internal storage
data.push_back(std::vector<char>(len));
memcpy(data.back().data(), str, len);
// update dictionary entry to link pointer to internal storage
DictEntry * entry = const_cast<DictEntry *>(&(ret.first->first));
entry->data = data.back().data();
totalLength += len;
}
return ret.first->second;
}
/**
* Reorder input index buffer from insertion order to dictionary order
*
* We require this function because string values are buffered by indexes
* in their insertion order. Not until the entire dictionary is complete can
* we get their sorted indexes, because the ORC specification demands that
* the dictionary be ordered. Therefore this function transforms
* the indexes from insertion order to dictionary value order for final
* output.
*/
void OldSortedStringDictionary::reorder(std::vector<int64_t> & idxBuffer) const
{
// iterate the dictionary to get mapping from insertion order to value order
std::vector<size_t> mapping(dict.size());
size_t dictIdx = 0;
for (auto it = dict.cbegin(); it != dict.cend(); ++it)
{
mapping[it->second] = dictIdx++;
}
// do the transformation
for (size_t i = 0; i != idxBuffer.size(); ++i)
{
idxBuffer[i] = static_cast<int64_t>(mapping[static_cast<size_t>(idxBuffer[i])]);
}
}
// get dict entries in insertion order
void OldSortedStringDictionary::getEntriesInInsertionOrder(std::vector<const DictEntry *> & entries) const
{
entries.resize(dict.size());
for (auto it = dict.cbegin(); it != dict.cend(); ++it)
{
entries[it->second] = &(it->first);
}
}
// return count of entries
size_t OldSortedStringDictionary::size() const
{
return dict.size();
}
// return total length of strings in the dictionary
uint64_t OldSortedStringDictionary::length() const
{
return totalLength;
}
void OldSortedStringDictionary::clear()
{
totalLength = 0;
data.clear();
dict.clear();
}
/**
* Implementation of increasing sorted string dictionary
*/
class NewSortedStringDictionary
{
public:
struct DictEntry
{
DictEntry(const char * str, size_t len) : data(str), length(len) { }
const char * data;
size_t length;
};
struct DictEntryWithIndex
{
DictEntryWithIndex(const char * str, size_t len, size_t index_) : entry(str, len), index(index_) { }
DictEntry entry;
size_t index;
};
NewSortedStringDictionary() : totalLength_(0) { }
// insert a new string into dictionary, return its insertion order
size_t insert(const char * str, size_t len);
// reorder input index buffer from insertion order to dictionary order
void reorder(std::vector<int64_t> & idxBuffer) const;
// get dict entries in insertion order
void getEntriesInInsertionOrder(std::vector<const DictEntry *> &) const;
// return count of entries
size_t size() const;
// return total length of strings in the dictionary
uint64_t length() const;
void clear();
// store indexes of insertion order in the dictionary for not-null rows
std::vector<int64_t> idxInDictBuffer;
private:
struct LessThan
{
bool operator()(const DictEntryWithIndex & l, const DictEntryWithIndex & r)
{
const auto & left = l.entry;
const auto & right = r.entry;
int ret = memcmp(left.data, right.data, std::min(left.length, right.length));
if (ret != 0)
{
return ret < 0;
}
return left.length < right.length;
}
};
mutable std::vector<DictEntryWithIndex> flatDict_;
std::unordered_map<std::string, size_t> keyToIndex;
uint64_t totalLength_;
};
// insert a new string into dictionary, return its insertion order
size_t NewSortedStringDictionary::insert(const char * str, size_t len)
{
size_t index = flatDict_.size();
auto ret = keyToIndex.emplace(std::string(str, len), index);
if (ret.second)
{
flatDict_.emplace_back(ret.first->first.data(), ret.first->first.size(), index);
totalLength_ += len;
}
return ret.first->second;
}
/**
* Reorder input index buffer from insertion order to dictionary order
*
* We require this function because string values are buffered by indexes
* in their insertion order. Not until the entire dictionary is complete can
* we get their sorted indexes, because the ORC specification demands that
* the dictionary be ordered. Therefore this function transforms
* the indexes from insertion order to dictionary value order for final
* output.
*/
void NewSortedStringDictionary::reorder(std::vector<int64_t> & idxBuffer) const
{
// iterate the dictionary to get mapping from insertion order to value order
std::vector<size_t> mapping(flatDict_.size());
for (size_t i = 0; i < flatDict_.size(); ++i)
{
mapping[flatDict_[i].index] = i;
}
// do the transformation
for (size_t i = 0; i != idxBuffer.size(); ++i)
{
idxBuffer[i] = static_cast<int64_t>(mapping[static_cast<size_t>(idxBuffer[i])]);
}
}
// get dict entries in insertion order
void NewSortedStringDictionary::getEntriesInInsertionOrder(std::vector<const DictEntry *> & entries) const
{
std::sort(
flatDict_.begin(),
flatDict_.end(),
[](const DictEntryWithIndex & left, const DictEntryWithIndex & right) { return left.index < right.index; });
entries.resize(flatDict_.size());
for (size_t i = 0; i < flatDict_.size(); ++i)
{
entries[i] = &(flatDict_[i].entry);
}
}
// return count of entries
size_t NewSortedStringDictionary::size() const
{
return flatDict_.size();
}
// return total length of strings in the dictionary
uint64_t NewSortedStringDictionary::length() const
{
return totalLength_;
}
void NewSortedStringDictionary::clear()
{
totalLength_ = 0;
keyToIndex.clear();
flatDict_.clear();
}
template <size_t cardinality>
static std::vector<std::string> mockStrings()
{
std::vector<std::string> res(1000000);
for (auto & s : res)
{
s = "test string dictionary " + std::to_string(rand() % cardinality);
}
return res;
}
template <typename DictionaryImpl>
static NO_INLINE std::unique_ptr<DictionaryImpl> createAndWriteStringDictionary(const std::vector<std::string> & strs)
{
auto dict = std::make_unique<DictionaryImpl>();
for (const auto & str : strs)
{
auto index = dict->insert(str.data(), str.size());
dict->idxInDictBuffer.push_back(index);
}
dict->reorder(dict->idxInDictBuffer);
return dict;
}
template <typename DictionaryImpl, size_t cardinality>
static void BM_writeStringDictionary(benchmark::State & state)
{
auto strs = mockStrings<cardinality>();
for (auto _ : state)
{
auto dict = createAndWriteStringDictionary<DictionaryImpl>(strs);
benchmark::DoNotOptimize(dict);
}
}
BENCHMARK_TEMPLATE(BM_writeStringDictionary, OldSortedStringDictionary, 10);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, NewSortedStringDictionary, 10);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, OldSortedStringDictionary, 100);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, NewSortedStringDictionary, 100);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, OldSortedStringDictionary, 1000);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, NewSortedStringDictionary, 1000);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, OldSortedStringDictionary, 10000);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, NewSortedStringDictionary, 10000);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, OldSortedStringDictionary, 100000);
BENCHMARK_TEMPLATE(BM_writeStringDictionary, NewSortedStringDictionary, 100000);
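
Illustration only, not part of the diff: in a standalone Google Benchmark build (ClickHouse links these registrations into its own benchmark binary, so this entry point is an assumption), the registered templates above would typically be driven as follows; a subset can then be selected with the standard --benchmark_filter flag.

#include <benchmark/benchmark.h>

// Expands to a main() that parses flags (e.g. --benchmark_filter='NewSortedStringDictionary')
// and runs every registered benchmark, including the BENCHMARK_TEMPLATE registrations above.
BENCHMARK_MAIN();
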

View File

@ -45,7 +45,6 @@ uint64_t ACLMap::convertACLs(const Coordination::ACLs & acls)
if (acls.empty())
return 0;
std::lock_guard lock(map_mutex);
if (acl_to_num.contains(acls))
return acl_to_num[acls];
@ -63,7 +62,6 @@ Coordination::ACLs ACLMap::convertNumber(uint64_t acls_id) const
if (acls_id == 0)
return Coordination::ACLs{};
std::lock_guard lock(map_mutex);
if (!num_to_acl.contains(acls_id))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown ACL id {}. It's a bug", acls_id);
@ -72,7 +70,6 @@ Coordination::ACLs ACLMap::convertNumber(uint64_t acls_id) const
void ACLMap::addMapping(uint64_t acls_id, const Coordination::ACLs & acls)
{
std::lock_guard lock(map_mutex);
num_to_acl[acls_id] = acls;
acl_to_num[acls] = acls_id;
max_acl_id = std::max(acls_id + 1, max_acl_id); /// max_acl_id points to the next slot
@ -80,13 +77,11 @@ void ACLMap::addMapping(uint64_t acls_id, const Coordination::ACLs & acls)
void ACLMap::addUsage(uint64_t acl_id)
{
std::lock_guard lock(map_mutex);
usage_counter[acl_id]++;
}
void ACLMap::removeUsage(uint64_t acl_id)
{
std::lock_guard lock(map_mutex);
if (!usage_counter.contains(acl_id))
return;

View File

@ -32,8 +32,6 @@ private:
NumToACLMap num_to_acl;
UsageCounter usage_counter;
uint64_t max_acl_id{1};
mutable std::mutex map_mutex;
public:
/// Convert ACL to number. If it's a new ACL then add it to the map

View File

@ -301,13 +301,11 @@ String MonitorCommand::run()
print(ret, "server_state", keeper_info.getRole());
const auto & storage_stats = state_machine.getStorageStats();
print(ret, "znode_count", storage_stats.nodes_count.load(std::memory_order_relaxed));
print(ret, "watch_count", storage_stats.total_watches_count.load(std::memory_order_relaxed));
print(ret, "ephemerals_count", storage_stats.total_emphemeral_nodes_count.load(std::memory_order_relaxed));
print(ret, "approximate_data_size", storage_stats.approximate_data_size.load(std::memory_order_relaxed));
print(ret, "key_arena_size", 0);
print(ret, "znode_count", state_machine.getNodesCount());
print(ret, "watch_count", state_machine.getTotalWatchesCount());
print(ret, "ephemerals_count", state_machine.getTotalEphemeralNodesCount());
print(ret, "approximate_data_size", state_machine.getApproximateDataSize());
print(ret, "key_arena_size", state_machine.getKeyArenaSize());
print(ret, "latest_snapshot_size", state_machine.getLatestSnapshotSize());
#if defined(OS_LINUX) || defined(OS_DARWIN)
@ -389,7 +387,6 @@ String ServerStatCommand::run()
auto & stats = keeper_dispatcher.getKeeperConnectionStats();
Keeper4LWInfo keeper_info = keeper_dispatcher.getKeeper4LWInfo();
const auto & storage_stats = keeper_dispatcher.getStateMachine().getStorageStats();
write("ClickHouse Keeper version", String(VERSION_DESCRIBE) + "-" + VERSION_GITHASH);
@ -401,9 +398,9 @@ String ServerStatCommand::run()
write("Sent", toString(stats.getPacketsSent()));
write("Connections", toString(keeper_info.alive_connections_count));
write("Outstanding", toString(keeper_info.outstanding_requests_count));
write("Zxid", formatZxid(storage_stats.last_zxid.load(std::memory_order_relaxed)));
write("Zxid", formatZxid(keeper_info.last_zxid));
write("Mode", keeper_info.getRole());
write("Node count", toString(storage_stats.nodes_count.load(std::memory_order_relaxed)));
write("Node count", toString(keeper_info.total_nodes_count));
return buf.str();
}
@ -419,7 +416,6 @@ String StatCommand::run()
auto & stats = keeper_dispatcher.getKeeperConnectionStats();
Keeper4LWInfo keeper_info = keeper_dispatcher.getKeeper4LWInfo();
const auto & storage_stats = keeper_dispatcher.getStateMachine().getStorageStats();
write("ClickHouse Keeper version", String(VERSION_DESCRIBE) + "-" + VERSION_GITHASH);
@ -435,9 +431,9 @@ String StatCommand::run()
write("Sent", toString(stats.getPacketsSent()));
write("Connections", toString(keeper_info.alive_connections_count));
write("Outstanding", toString(keeper_info.outstanding_requests_count));
write("Zxid", formatZxid(storage_stats.last_zxid.load(std::memory_order_relaxed)));
write("Zxid", formatZxid(keeper_info.last_zxid));
write("Mode", keeper_info.getRole());
write("Node count", toString(storage_stats.nodes_count.load(std::memory_order_relaxed)));
write("Node count", toString(keeper_info.total_nodes_count));
return buf.str();
}

View File

@ -1,5 +1,7 @@
#pragma once
#include <string>
#include <base/types.h>
#include <Common/Exception.h>
@ -28,6 +30,9 @@ struct Keeper4LWInfo
uint64_t follower_count;
uint64_t synced_follower_count;
uint64_t total_nodes_count;
int64_t last_zxid;
String getRole() const
{
if (is_standalone)

View File

@ -38,16 +38,15 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM
is_follower = static_cast<size_t>(keeper_info.is_follower);
is_exceeding_mem_soft_limit = static_cast<size_t>(keeper_info.is_exceeding_mem_soft_limit);
zxid = keeper_info.last_zxid;
const auto & state_machine = keeper_dispatcher.getStateMachine();
const auto & storage_stats = state_machine.getStorageStats();
zxid = storage_stats.last_zxid.load(std::memory_order_relaxed);
znode_count = storage_stats.nodes_count.load(std::memory_order_relaxed);
watch_count = storage_stats.total_watches_count.load(std::memory_order_relaxed);
ephemerals_count = storage_stats.total_emphemeral_nodes_count.load(std::memory_order_relaxed);
approximate_data_size = storage_stats.approximate_data_size.load(std::memory_order_relaxed);
key_arena_size = 0;
session_with_watches = storage_stats.sessions_with_watches_count.load(std::memory_order_relaxed);
paths_watched = storage_stats.watched_paths_count.load(std::memory_order_relaxed);
znode_count = state_machine.getNodesCount();
watch_count = state_machine.getTotalWatchesCount();
ephemerals_count = state_machine.getTotalEphemeralNodesCount();
approximate_data_size = state_machine.getApproximateDataSize();
key_arena_size = state_machine.getKeyArenaSize();
session_with_watches = state_machine.getSessionsWithWatchesCount();
paths_watched = state_machine.getWatchedPathsCount();
# if defined(__linux__) || defined(__APPLE__)
open_file_descriptor_count = getCurrentProcessFDCount();

View File

@ -305,7 +305,7 @@ void KeeperDispatcher::requestThread()
if (has_read_request)
{
if (server->isLeaderAlive())
server->putLocalReadRequest({request});
server->putLocalReadRequest(request);
else
addErrorResponses({request}, Coordination::Error::ZCONNECTIONLOSS);
}

View File

@ -28,16 +28,6 @@
#include <Common/getMultipleKeysFromConfig.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
#if USE_SSL
# include <Server/CertificateReloader.h>
# include <openssl/ssl.h>
# include <Poco/Crypto/EVPPKey.h>
# include <Poco/Net/Context.h>
# include <Poco/Net/SSLManager.h>
# include <Poco/Net/Utility.h>
# include <Poco/StringTokenizer.h>
#endif
#include <chrono>
#include <mutex>
#include <string>
@ -58,7 +48,6 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
extern const int LOGICAL_ERROR;
extern const int INVALID_CONFIG_PARAMETER;
extern const int BAD_ARGUMENTS;
}
using namespace std::chrono_literals;
@ -67,16 +56,6 @@ namespace
{
#if USE_SSL
int callSetCertificate(SSL * ssl, void * arg)
{
if (!arg)
return -1;
const CertificateReloader::Data * data = reinterpret_cast<CertificateReloader::Data *>(arg);
return setCertificateCallback(ssl, data, getLogger("SSLContext"));
}
void setSSLParams(nuraft::asio_service::options & asio_opts)
{
const Poco::Util::LayeredConfiguration & config = Poco::Util::Application::instance().config();
@ -90,55 +69,18 @@ void setSSLParams(nuraft::asio_service::options & asio_opts)
if (!config.has(private_key_file_property))
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Server private key file is not set.");
Poco::Net::Context::Params params;
params.certificateFile = config.getString(certificate_file_property);
if (params.certificateFile.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Server certificate file in config '{}' is empty", certificate_file_property);
params.privateKeyFile = config.getString(private_key_file_property);
if (params.privateKeyFile.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Server key file in config '{}' is empty", private_key_file_property);
auto pass_phrase = config.getString("openSSL.server.privateKeyPassphraseHandler.options.password", "");
auto certificate_data = std::make_shared<CertificateReloader::Data>(params.certificateFile, params.privateKeyFile, pass_phrase);
asio_opts.enable_ssl_ = true;
asio_opts.server_cert_file_ = config.getString(certificate_file_property);
asio_opts.server_key_file_ = config.getString(private_key_file_property);
if (config.has(root_ca_file_property))
params.caLocation = config.getString(root_ca_file_property);
asio_opts.root_cert_file_ = config.getString(root_ca_file_property);
params.loadDefaultCAs = config.getBool("openSSL.server.loadDefaultCAFile", false);
params.verificationMode = Poco::Net::Utility::convertVerificationMode(config.getString("openSSL.server.verificationMode", "none"));
if (config.getBool("openSSL.server.loadDefaultCAFile", false))
asio_opts.load_default_ca_file_ = true;
std::string disabled_protocols_list = config.getString("openSSL.server.disableProtocols", "");
Poco::StringTokenizer dp_tok(disabled_protocols_list, ";,", Poco::StringTokenizer::TOK_TRIM | Poco::StringTokenizer::TOK_IGNORE_EMPTY);
int disabled_protocols = 0;
for (const auto & token : dp_tok)
{
if (token == "sslv2")
disabled_protocols |= Poco::Net::Context::PROTO_SSLV2;
else if (token == "sslv3")
disabled_protocols |= Poco::Net::Context::PROTO_SSLV3;
else if (token == "tlsv1")
disabled_protocols |= Poco::Net::Context::PROTO_TLSV1;
else if (token == "tlsv1_1")
disabled_protocols |= Poco::Net::Context::PROTO_TLSV1_1;
else if (token == "tlsv1_2")
disabled_protocols |= Poco::Net::Context::PROTO_TLSV1_2;
}
asio_opts.ssl_context_provider_server_ = [params, certificate_data, disabled_protocols]
{
Poco::Net::Context context(Poco::Net::Context::Usage::TLSV1_2_SERVER_USE, params);
context.disableProtocols(disabled_protocols);
SSL_CTX * ssl_ctx = context.takeSslContext();
SSL_CTX_set_cert_cb(ssl_ctx, callSetCertificate, reinterpret_cast<void *>(certificate_data.get()));
return ssl_ctx;
};
asio_opts.ssl_context_provider_client_ = [ctx_params = std::move(params)]
{
Poco::Net::Context context(Poco::Net::Context::Usage::TLSV1_2_CLIENT_USE, ctx_params);
return context.takeSslContext();
};
if (config.getString("openSSL.server.verificationMode", "none") == "none")
asio_opts.skip_verification_ = true;
}
#endif
@ -1207,6 +1149,8 @@ Keeper4LWInfo KeeperServer::getPartiallyFilled4LWInfo() const
result.synced_follower_count = getSyncedFollowerCount();
}
result.is_exceeding_mem_soft_limit = isExceedingMemorySoftLimit();
result.total_nodes_count = getKeeperStateMachine()->getNodesCount();
result.last_zxid = getKeeperStateMachine()->getLastProcessedZxid();
return result;
}

View File

@ -78,20 +78,20 @@ namespace
writeBinary(false, out);
/// Serialize stat
writeBinary(node.stats.czxid, out);
writeBinary(node.stats.mzxid, out);
writeBinary(node.stats.ctime(), out);
writeBinary(node.stats.mtime, out);
writeBinary(node.stats.version, out);
writeBinary(node.stats.cversion, out);
writeBinary(node.stats.aversion, out);
writeBinary(node.stats.ephemeralOwner(), out);
writeBinary(node.czxid, out);
writeBinary(node.mzxid, out);
writeBinary(node.ctime(), out);
writeBinary(node.mtime, out);
writeBinary(node.version, out);
writeBinary(node.cversion, out);
writeBinary(node.aversion, out);
writeBinary(node.ephemeralOwner(), out);
if (version < SnapshotVersion::V6)
writeBinary(static_cast<int32_t>(node.stats.data_size), out);
writeBinary(node.stats.numChildren(), out);
writeBinary(node.stats.pzxid, out);
writeBinary(static_cast<int32_t>(node.getData().size()), out);
writeBinary(node.numChildren(), out);
writeBinary(node.pzxid, out);
writeBinary(node.stats.seqNum(), out);
writeBinary(node.seqNum(), out);
if (version >= SnapshotVersion::V4 && version <= SnapshotVersion::V5)
writeBinary(node.sizeInBytes(), out);
@ -100,11 +100,11 @@ namespace
template<typename Node>
void readNode(Node & node, ReadBuffer & in, SnapshotVersion version, ACLMap & acl_map)
{
readVarUInt(node.stats.data_size, in);
if (node.stats.data_size != 0)
readVarUInt(node.data_size, in);
if (node.data_size != 0)
{
node.data = std::unique_ptr<char[]>(new char[node.stats.data_size]);
in.readStrict(node.data.get(), node.stats.data_size);
node.data = std::unique_ptr<char[]>(new char[node.data_size]);
in.readStrict(node.data.get(), node.data_size);
}
if (version >= SnapshotVersion::V1)
@ -141,19 +141,19 @@ namespace
}
/// Deserialize stat
readBinary(node.stats.czxid, in);
readBinary(node.stats.mzxid, in);
readBinary(node.czxid, in);
readBinary(node.mzxid, in);
int64_t ctime;
readBinary(ctime, in);
node.stats.setCtime(ctime);
readBinary(node.stats.mtime, in);
readBinary(node.stats.version, in);
readBinary(node.stats.cversion, in);
readBinary(node.stats.aversion, in);
node.setCtime(ctime);
readBinary(node.mtime, in);
readBinary(node.version, in);
readBinary(node.cversion, in);
readBinary(node.aversion, in);
int64_t ephemeral_owner = 0;
readBinary(ephemeral_owner, in);
if (ephemeral_owner != 0)
node.stats.setEphemeralOwner(ephemeral_owner);
node.setEphemeralOwner(ephemeral_owner);
if (version < SnapshotVersion::V6)
{
@ -163,14 +163,14 @@ namespace
int32_t num_children = 0;
readBinary(num_children, in);
if (ephemeral_owner == 0)
node.stats.setNumChildren(num_children);
node.setNumChildren(num_children);
readBinary(node.stats.pzxid, in);
readBinary(node.pzxid, in);
int32_t seq_num = 0;
readBinary(seq_num, in);
if (ephemeral_owner == 0)
node.stats.setSeqNum(seq_num);
node.setSeqNum(seq_num);
if (version >= SnapshotVersion::V4 && version <= SnapshotVersion::V5)
{
@ -256,7 +256,7 @@ void KeeperStorageSnapshot<Storage>::serialize(const KeeperStorageSnapshot<Stora
/// A benign race condition is possible while taking a snapshot: NuRaft decides to create a snapshot at some log id
/// and only after some time do we lock the storage and enable snapshot mode. So snapshot_container_size can be
/// slightly bigger than required.
if (node.stats.mzxid > snapshot.zxid)
if (node.mzxid > snapshot.zxid)
break;
writeBinary(path, out);
writeNode(node, snapshot.version, out);
@ -306,7 +306,7 @@ void KeeperStorageSnapshot<Storage>::serialize(const KeeperStorageSnapshot<Stora
}
template<typename Storage>
void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<Storage> & deserialization_result, ReadBuffer & in, KeeperContextPtr keeper_context) TSA_NO_THREAD_SAFETY_ANALYSIS
void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<Storage> & deserialization_result, ReadBuffer & in, KeeperContextPtr keeper_context)
{
uint8_t version;
readBinary(version, in);
@ -435,13 +435,13 @@ void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<S
}
}
auto ephemeral_owner = node.stats.ephemeralOwner();
auto ephemeral_owner = node.ephemeralOwner();
if constexpr (!use_rocksdb)
if (!node.stats.isEphemeral() && node.stats.numChildren() > 0)
node.getChildren().reserve(node.stats.numChildren());
if (!node.isEphemeral() && node.numChildren() > 0)
node.getChildren().reserve(node.numChildren());
if (ephemeral_owner != 0)
storage.committed_ephemerals[node.stats.ephemeralOwner()].insert(std::string{path});
storage.ephemerals[node.ephemeralOwner()].insert(std::string{path});
if (recalculate_digest)
storage.nodes_digest += node.getDigest(path);
@ -467,25 +467,16 @@ void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<S
{
if (itr.key != "/")
{
if (itr.value.stats.numChildren() != static_cast<int32_t>(itr.value.getChildren().size()))
if (itr.value.numChildren() != static_cast<int32_t>(itr.value.getChildren().size()))
{
#ifdef NDEBUG
/// TODO (alesapin) remove this, it should be always CORRUPTED_DATA.
LOG_ERROR(
getLogger("KeeperSnapshotManager"),
"Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}",
itr.value.stats.numChildren(),
itr.value.getChildren().size(),
itr.key);
LOG_ERROR(getLogger("KeeperSnapshotManager"), "Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}", itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
#else
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}",
itr.value.stats.numChildren(),
itr.value.getChildren().size(),
itr.key);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Children counter in stat.numChildren {}"
" is different from actual children size {} for node {}",
itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
#endif
}
}
@ -520,7 +511,7 @@ void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<S
session_auth_counter++;
}
if (!ids.empty())
storage.committed_session_and_auth[active_session_id] = ids;
storage.session_and_auth[active_session_id] = ids;
}
current_session_size++;
}
@ -536,8 +527,6 @@ void KeeperStorageSnapshot<Storage>::deserialize(SnapshotDeserializationResult<S
buffer->pos(0);
deserialization_result.cluster_config = ClusterConfig::deserialize(*buffer);
}
storage.updateStats();
}
template<typename Storage>
@ -555,7 +544,7 @@ KeeperStorageSnapshot<Storage>::KeeperStorageSnapshot(Storage * storage_, uint64
begin = storage->getSnapshotIteratorBegin();
session_and_timeout = storage->getActiveSessions();
acl_map = storage->acl_map.getMapping();
session_and_auth = storage->committed_session_and_auth;
session_and_auth = storage->session_and_auth;
}
template<typename Storage>
@ -574,7 +563,7 @@ KeeperStorageSnapshot<Storage>::KeeperStorageSnapshot(
begin = storage->getSnapshotIteratorBegin();
session_and_timeout = storage->getActiveSessions();
acl_map = storage->acl_map.getMapping();
session_and_auth = storage->committed_session_and_auth;
session_and_auth = storage->session_and_auth;
}
template<typename Storage>

View File

@ -36,11 +36,6 @@ namespace ProfileEvents
extern const Event KeeperStorageLockWaitMicroseconds;
}
namespace CurrentMetrics
{
extern const Metric KeeperAliveConnections;
}
namespace DB
{
@ -61,7 +56,6 @@ IKeeperStateMachine::IKeeperStateMachine(
, snapshots_queue(snapshots_queue_)
, min_request_size_to_cache(keeper_context_->getCoordinationSettings()->min_request_size_for_cache)
, log(getLogger("KeeperStateMachine"))
, read_pool(CurrentMetrics::KeeperAliveConnections, CurrentMetrics::KeeperAliveConnections, CurrentMetrics::KeeperAliveConnections, 100, 10000, 10000)
, superdigest(superdigest_)
, keeper_context(keeper_context_)
, snapshot_manager_s3(snapshot_manager_s3_)
@ -181,20 +175,18 @@ void assertDigest(
}
}
template <bool shared = false>
struct LockGuardWithStats final
struct TSA_SCOPED_LOCKABLE LockGuardWithStats final
{
using LockType = std::conditional_t<shared, std::shared_lock<SharedMutex>, std::unique_lock<SharedMutex>>;
LockType lock;
explicit LockGuardWithStats(SharedMutex & mutex)
std::unique_lock<std::mutex> lock;
explicit LockGuardWithStats(std::mutex & mutex) TSA_ACQUIRE(mutex)
{
Stopwatch watch;
LockType l(mutex);
std::unique_lock l(mutex);
ProfileEvents::increment(ProfileEvents::KeeperStorageLockWaitMicroseconds, watch.elapsedMicroseconds());
lock = std::move(l);
}
~LockGuardWithStats() = default;
~LockGuardWithStats() TSA_RELEASE() = default;
};
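
Illustration only, not part of the diff: the lock-wait accounting pattern used by LockGuardWithStats, in isolation, with the ProfileEvents counter replaced by a plain atomic; every name here is hypothetical.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <mutex>

std::atomic<uint64_t> lock_wait_us{0}; // stand-in for a ProfileEvents counter

struct TimedLockGuard
{
    std::unique_lock<std::mutex> lock;

    explicit TimedLockGuard(std::mutex & mutex)
    {
        // measure only the acquisition time, then keep holding the lock
        auto start = std::chrono::steady_clock::now();
        std::unique_lock<std::mutex> l(mutex);
        auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
        lock_wait_us.fetch_add(static_cast<uint64_t>(elapsed.count()), std::memory_order_relaxed);
        lock = std::move(l);
    }
};

int main()
{
    std::mutex m;
    TimedLockGuard guard(m); // uncontended here, so the accounted wait is ~0
    return 0;
}
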
}
@ -320,12 +312,13 @@ bool KeeperStateMachine<Storage>::preprocess(const KeeperStorageBase::RequestFor
if (op_num == Coordination::OpNum::SessionID || op_num == Coordination::OpNum::Reconfig)
return true;
LockGuardWithStats lock(storage_and_responses_lock);
if (storage->isFinalized())
return false;
try
{
LockGuardWithStats<true> lock(storage_mutex);
storage->preprocessRequest(
request_for_session.request,
request_for_session.session_id,
@ -342,12 +335,7 @@ bool KeeperStateMachine<Storage>::preprocess(const KeeperStorageBase::RequestFor
}
if (keeper_context->digestEnabled() && request_for_session.digest)
assertDigest(
*request_for_session.digest,
storage->getNodesDigest(false, /*lock_transaction_mutex=*/true),
*request_for_session.request,
request_for_session.log_idx,
false);
assertDigest(*request_for_session.digest, storage->getNodesDigest(false), *request_for_session.request, request_for_session.log_idx, false);
return true;
}
@ -355,7 +343,7 @@ bool KeeperStateMachine<Storage>::preprocess(const KeeperStorageBase::RequestFor
template<typename Storage>
void KeeperStateMachine<Storage>::reconfigure(const KeeperStorageBase::RequestForSession& request_for_session)
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
KeeperStorageBase::ResponseForSession response = processReconfiguration(request_for_session);
if (!responses_queue.push(response))
{
@ -473,7 +461,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine<Storage>::commit(const uint64_t l
response_for_session.response = response;
response_for_session.request = request_for_session->request;
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
session_id = storage->getSessionID(session_id_request.session_timeout_ms);
LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_id_request.session_timeout_ms);
response->session_id = session_id;
@ -484,31 +472,24 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine<Storage>::commit(const uint64_t l
if (op_num == Coordination::OpNum::Close)
{
std::lock_guard cache_lock(request_cache_mutex);
std::lock_guard lock(request_cache_mutex);
parsed_request_cache.erase(request_for_session->session_id);
}
{
LockGuardWithStats<true> lock(storage_mutex);
std::lock_guard response_lock(process_and_responses_lock);
KeeperStorageBase::ResponsesForSessions responses_for_sessions
= storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
for (auto & response_for_session : responses_for_sessions)
{
if (response_for_session.response->xid != Coordination::WATCH_XID)
response_for_session.request = request_for_session->request;
LockGuardWithStats lock(storage_and_responses_lock);
KeeperStorageBase::ResponsesForSessions responses_for_sessions
= storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
try_push(response_for_session);
}
for (auto & response_for_session : responses_for_sessions)
{
if (response_for_session.response->xid != Coordination::WATCH_XID)
response_for_session.request = request_for_session->request;
try_push(response_for_session);
}
if (keeper_context->digestEnabled() && request_for_session->digest)
assertDigest(
*request_for_session->digest,
storage->getNodesDigest(true, /*lock_transaction_mutex=*/true),
*request_for_session->request,
request_for_session->log_idx,
true);
assertDigest(*request_for_session->digest, storage->getNodesDigest(true), *request_for_session->request, request_for_session->log_idx, true);
}
ProfileEvents::increment(ProfileEvents::KeeperCommits);
@ -553,6 +534,8 @@ bool KeeperStateMachine<Storage>::apply_snapshot(nuraft::snapshot & s)
}
{ /// deserialize and apply snapshot to storage
LockGuardWithStats lock(storage_and_responses_lock);
SnapshotDeserializationResult<Storage> snapshot_deserialization_result;
if (latest_snapshot_ptr)
snapshot_deserialization_result = snapshot_manager.deserializeSnapshotFromBuffer(latest_snapshot_ptr);
@ -560,7 +543,6 @@ bool KeeperStateMachine<Storage>::apply_snapshot(nuraft::snapshot & s)
snapshot_deserialization_result
= snapshot_manager.deserializeSnapshotFromBuffer(snapshot_manager.deserializeSnapshotBufferFromDisk(s.get_last_log_idx()));
LockGuardWithStats storage_lock(storage_mutex);
/// maybe some logs were preprocessed with log idx larger than the snapshot idx
/// we have to apply them to the new storage
storage->applyUncommittedState(*snapshot_deserialization_result.storage, snapshot_deserialization_result.snapshot_meta->get_last_log_idx());
@ -605,7 +587,16 @@ void KeeperStateMachine<Storage>::rollbackRequest(const KeeperStorageBase::Reque
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
return;
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
storage->rollbackRequest(request_for_session.zxid, allow_missing);
}
template<typename Storage>
void KeeperStateMachine<Storage>::rollbackRequestNoLock(const KeeperStorageBase::RequestForSession & request_for_session, bool allow_missing)
{
if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
return;
storage->rollbackRequest(request_for_session.zxid, allow_missing);
}
@ -625,7 +616,7 @@ void KeeperStateMachine<Storage>::create_snapshot(nuraft::snapshot & s, nuraft::
auto snapshot_meta_copy = nuraft::snapshot::deserialize(*snp_buf);
CreateSnapshotTask snapshot_task;
{ /// lock storage for a short period of time to turn on "snapshot mode". After that we can read a consistent storage state without locking.
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
snapshot_task.snapshot = std::make_shared<KeeperStorageSnapshot<Storage>>(storage.get(), snapshot_meta_copy, getClusterConfig());
}
@ -690,7 +681,7 @@ void KeeperStateMachine<Storage>::create_snapshot(nuraft::snapshot & s, nuraft::
}
{
/// Destroy snapshot with lock
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
LOG_TRACE(log, "Clearing garbage after snapshot");
/// Turn off "snapshot mode" and clear outdate part of storage state
storage->clearGarbageAfterSnapshot();
@ -833,10 +824,10 @@ template<typename Storage>
void KeeperStateMachine<Storage>::processReadRequest(const KeeperStorageBase::RequestForSession & request_for_session)
{
/// Pure local request, just process it with storage
LockGuardWithStats<true> storage_lock(storage_mutex);
std::lock_guard response_lock(process_and_responses_lock);
LockGuardWithStats lock(storage_and_responses_lock);
auto responses = storage->processRequest(
request_for_session.request, request_for_session.session_id, std::nullopt, true /*check_acl*/, true /*is_local*/);
for (auto & response_for_session : responses)
{
if (response_for_session.response->xid != Coordination::WATCH_XID)
@ -849,116 +840,112 @@ void KeeperStateMachine<Storage>::processReadRequest(const KeeperStorageBase::Re
template<typename Storage>
void KeeperStateMachine<Storage>::shutdownStorage()
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
storage->finalize();
}
template<typename Storage>
std::vector<int64_t> KeeperStateMachine<Storage>::getDeadSessions()
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getDeadSessions();
}
template<typename Storage>
int64_t KeeperStateMachine<Storage>::getNextZxid() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getNextZXID();
}
template<typename Storage>
KeeperStorageBase::Digest KeeperStateMachine<Storage>::getNodesDigest() const
{
LockGuardWithStats lock(storage_mutex);
return storage->getNodesDigest(false, /*lock_transaction_mutex=*/true);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getNodesDigest(false);
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getLastProcessedZxid() const
{
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getZXID();
}
template<typename Storage>
const KeeperStorageBase::Stats & KeeperStateMachine<Storage>::getStorageStats() const TSA_NO_THREAD_SAFETY_ANALYSIS
{
return storage->getStorageStats();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getNodesCount() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getNodesCount();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getTotalWatchesCount() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getTotalWatchesCount();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getWatchedPathsCount() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getWatchedPathsCount();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getSessionsWithWatchesCount() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getSessionsWithWatchesCount();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getTotalEphemeralNodesCount() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getTotalEphemeralNodesCount();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getSessionWithEphemeralNodesCount() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getSessionWithEphemeralNodesCount();
}
template<typename Storage>
void KeeperStateMachine<Storage>::dumpWatches(WriteBufferFromOwnString & buf) const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
storage->dumpWatches(buf);
}
template<typename Storage>
void KeeperStateMachine<Storage>::dumpWatchesByPath(WriteBufferFromOwnString & buf) const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
storage->dumpWatchesByPath(buf);
}
template<typename Storage>
void KeeperStateMachine<Storage>::dumpSessionsAndEphemerals(WriteBufferFromOwnString & buf) const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
storage->dumpSessionsAndEphemerals(buf);
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getApproximateDataSize() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getApproximateDataSize();
}
template<typename Storage>
uint64_t KeeperStateMachine<Storage>::getKeyArenaSize() const
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
return storage->getArenaDataSize();
}
@ -1001,7 +988,7 @@ ClusterConfigPtr IKeeperStateMachine::getClusterConfig() const
template<typename Storage>
void KeeperStateMachine<Storage>::recalculateStorageStats()
{
LockGuardWithStats lock(storage_mutex);
LockGuardWithStats lock(storage_and_responses_lock);
LOG_INFO(log, "Recalculating storage stats");
storage->recalculateStats();
LOG_INFO(log, "Done recalculating storage stats");

View File

@ -85,8 +85,6 @@ public:
/// Introspection functions for 4lw commands
virtual uint64_t getLastProcessedZxid() const = 0;
virtual const KeeperStorageBase::Stats & getStorageStats() const = 0;
virtual uint64_t getNodesCount() const = 0;
virtual uint64_t getTotalWatchesCount() const = 0;
virtual uint64_t getWatchedPathsCount() const = 0;
@ -126,16 +124,12 @@ protected:
/// Mutex for snapshots
mutable std::mutex snapshots_lock;
/// Lock for the storage
/// Storage works in thread-safe way ONLY for preprocessing/processing
/// In any other case, unique storage lock needs to be taken
mutable SharedMutex storage_mutex;
/// Lock for processing and responses_queue. It's important to process requests
/// Lock for storage and responses_queue. It's important to process requests
/// and push them to the responses queue while holding this lock. Otherwise
/// we can get strange cases when, for example, a client sends a read request with
/// a watch and then receives the watch response before the response to the
/// request itself.
mutable std::mutex process_and_responses_lock;
mutable std::mutex storage_and_responses_lock;
std::unordered_map<int64_t, std::unordered_map<Coordination::XID, std::shared_ptr<KeeperStorageBase::RequestForSession>>> parsed_request_cache;
uint64_t min_request_size_to_cache{0};
@ -152,7 +146,6 @@ protected:
mutable std::mutex cluster_config_lock;
ClusterConfigPtr cluster_config;
ThreadPool read_pool;
/// Special part of ACL system -- superdigest specified in server config.
const std::string superdigest;
@ -160,8 +153,10 @@ protected:
KeeperSnapshotManagerS3 * snapshot_manager_s3;
virtual KeeperStorageBase::ResponseForSession processReconfiguration(const KeeperStorageBase::RequestForSession & request_for_session)
= 0;
virtual KeeperStorageBase::ResponseForSession processReconfiguration(
const KeeperStorageBase::RequestForSession& request_for_session)
TSA_REQUIRES(storage_and_responses_lock) = 0;
};
/// ClickHouse Keeper state machine. Wrapper for KeeperStorage.
@ -194,6 +189,10 @@ public:
// (can happen in case of exception during preprocessing)
void rollbackRequest(const KeeperStorageBase::RequestForSession & request_for_session, bool allow_missing) override;
void rollbackRequestNoLock(
const KeeperStorageBase::RequestForSession & request_for_session,
bool allow_missing) TSA_NO_THREAD_SAFETY_ANALYSIS;
/// Apply preliminarily saved (save_logical_snp_obj) snapshot to our state.
bool apply_snapshot(nuraft::snapshot & s) override;
@ -206,7 +205,7 @@ public:
// This should be used only for tests or keeper-data-dumper because it violates
// TSA -- we can't acquire the lock outside of this class or return a storage under lock
// in a reasonable way.
Storage & getStorageUnsafe()
Storage & getStorageUnsafe() TSA_NO_THREAD_SAFETY_ANALYSIS
{
return *storage;
}
@ -225,8 +224,6 @@ public:
/// Introspection functions for 4lw commands
uint64_t getLastProcessedZxid() const override;
const KeeperStorageBase::Stats & getStorageStats() const override;
uint64_t getNodesCount() const override;
uint64_t getTotalWatchesCount() const override;
uint64_t getWatchedPathsCount() const override;
@ -248,12 +245,12 @@ public:
private:
/// Main state machine logic
std::unique_ptr<Storage> storage;
std::unique_ptr<Storage> storage; //TSA_PT_GUARDED_BY(storage_and_responses_lock);
/// Save/Load and Serialize/Deserialize logic for snapshots.
KeeperSnapshotManager<Storage> snapshot_manager;
KeeperStorageBase::ResponseForSession processReconfiguration(const KeeperStorageBase::RequestForSession & request_for_session) override;
KeeperStorageBase::ResponseForSession processReconfiguration(const KeeperStorageBase::RequestForSession & request_for_session)
TSA_REQUIRES(storage_and_responses_lock) override;
};
}

File diff suppressed because it is too large

View File

@ -1,16 +1,10 @@
#pragma once
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <Coordination/ACLMap.h>
#include <Coordination/SessionExpiryQueue.h>
#include <Coordination/SnapshotableHashTable.h>
#include "Common/StringHashForHeterogeneousLookup.h"
#include <Common/SharedMutex.h>
#include <Common/Concepts.h>
#include <base/defines.h>
#include <absl/container/flat_hash_set.h>
@ -29,11 +23,14 @@ using ResponseCallback = std::function<void(const Coordination::ZooKeeperRespons
using ChildrenSet = absl::flat_hash_set<StringRef, StringRefHash>;
using SessionAndTimeout = std::unordered_map<int64_t, int64_t>;
struct NodeStats
/// KeeperRocksNodeInfo is used in RocksDB keeper.
/// It is serialized directly as POD to RocksDB.
struct KeeperRocksNodeInfo
{
int64_t czxid{0};
int64_t mzxid{0};
int64_t pzxid{0};
uint64_t acl_id = 0; /// 0 -- no ACL by default
int64_t mtime{0};
@ -41,9 +38,225 @@ struct NodeStats
int32_t cversion{0};
int32_t aversion{0};
uint32_t data_size{0};
int32_t seq_num = 0;
mutable UInt64 digest = 0; /// we cached digest for this node.
void copyStats(const Coordination::Stat & stat);
/// as ctime can't be negative because it stores the timestamp when the
/// node was created, we can use the MSB for a bool
struct
{
bool is_ephemeral : 1;
int64_t ctime : 63;
} is_ephemeral_and_ctime{false, 0};
/// ephemeral nodes cannot have children, so a node can set either
/// ephemeral_owner OR seq_num + num_children
union
{
int64_t ephemeral_owner;
struct
{
int32_t seq_num;
int32_t num_children;
} children_info;
} ephemeral_or_children_data{0};
bool isEphemeral() const
{
return is_ephemeral_and_ctime.is_ephemeral;
}
int64_t ephemeralOwner() const
{
if (isEphemeral())
return ephemeral_or_children_data.ephemeral_owner;
return 0;
}
void setEphemeralOwner(int64_t ephemeral_owner)
{
is_ephemeral_and_ctime.is_ephemeral = ephemeral_owner != 0;
ephemeral_or_children_data.ephemeral_owner = ephemeral_owner;
}
int32_t numChildren() const
{
if (isEphemeral())
return 0;
return ephemeral_or_children_data.children_info.num_children;
}
void setNumChildren(int32_t num_children)
{
ephemeral_or_children_data.children_info.num_children = num_children;
}
/// dummy interface for test
void addChild(StringRef) {}
auto getChildren() const
{
return std::vector<int>(numChildren());
}
void increaseNumChildren()
{
chassert(!isEphemeral());
++ephemeral_or_children_data.children_info.num_children;
}
void decreaseNumChildren()
{
chassert(!isEphemeral());
--ephemeral_or_children_data.children_info.num_children;
}
int32_t seqNum() const
{
if (isEphemeral())
return 0;
return ephemeral_or_children_data.children_info.seq_num;
}
void setSeqNum(int32_t seq_num_)
{
ephemeral_or_children_data.children_info.seq_num = seq_num_;
}
void increaseSeqNum()
{
chassert(!isEphemeral());
++ephemeral_or_children_data.children_info.seq_num;
}
int64_t ctime() const
{
return is_ephemeral_and_ctime.ctime;
}
void setCtime(uint64_t ctime)
{
is_ephemeral_and_ctime.ctime = ctime;
}
void copyStats(const Coordination::Stat & stat);
};
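
Illustration only, not part of the diff: the two space-saving tricks documented in the comments above (the MSB of a non-negative ctime reused as a flag, and one 8-byte slot shared between ephemeral_owner and seq_num/num_children), shown in a standalone struct with hypothetical names. The layout of mixed-type bit-fields is implementation-defined; 16 bytes is what GCC/Clang on x86-64 typically produce.

#include <cstdint>
#include <iostream>

struct PackedNode
{
    // ctime is a creation timestamp, hence never negative, so its sign bit can carry a flag
    struct
    {
        bool is_ephemeral : 1;
        int64_t ctime : 63;
    } is_ephemeral_and_ctime{false, 0};

    // an ephemeral node never has children, so one 8-byte slot serves either purpose
    union
    {
        int64_t ephemeral_owner;
        struct
        {
            int32_t seq_num;
            int32_t num_children;
        } children_info;
    } ephemeral_or_children_data{0};
};

int main()
{
    std::cout << sizeof(PackedNode) << '\n'; // typically 16 instead of ~32 bytes for separate fields

    PackedNode node;
    node.is_ephemeral_and_ctime.is_ephemeral = true;
    node.ephemeral_or_children_data.ephemeral_owner = 42;
    std::cout << node.is_ephemeral_and_ctime.is_ephemeral << ' '
              << node.ephemeral_or_children_data.ephemeral_owner << '\n';
    return 0;
}
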
/// KeeperRocksNode is the memory structure used by RocksDB
struct KeeperRocksNode : public KeeperRocksNodeInfo
{
#if USE_ROCKSDB
friend struct RocksDBContainer<KeeperRocksNode>;
#endif
using Meta = KeeperRocksNodeInfo;
uint64_t size_bytes = 0; // only for compatibility, should be deprecated
uint64_t sizeInBytes() const { return data_size + sizeof(KeeperRocksNodeInfo); }
void setData(String new_data)
{
data_size = static_cast<uint32_t>(new_data.size());
if (data_size != 0)
{
data = std::unique_ptr<char[]>(new char[new_data.size()]);
memcpy(data.get(), new_data.data(), data_size);
}
}
void shallowCopy(const KeeperRocksNode & other)
{
czxid = other.czxid;
mzxid = other.mzxid;
pzxid = other.pzxid;
acl_id = other.acl_id; /// 0 -- no ACL by default
mtime = other.mtime;
is_ephemeral_and_ctime = other.is_ephemeral_and_ctime;
ephemeral_or_children_data = other.ephemeral_or_children_data;
data_size = other.data_size;
if (data_size != 0)
{
data = std::unique_ptr<char[]>(new char[data_size]);
memcpy(data.get(), other.data.get(), data_size);
}
version = other.version;
cversion = other.cversion;
aversion = other.aversion;
/// cached_digest = other.cached_digest;
}
void invalidateDigestCache() const;
UInt64 getDigest(std::string_view path) const;
String getEncodedString();
void decodeFromString(const String & buffer_str);
void recalculateSize() {}
std::string_view getData() const noexcept { return {data.get(), data_size}; }
void setResponseStat(Coordination::Stat & response_stat) const
{
response_stat.czxid = czxid;
response_stat.mzxid = mzxid;
response_stat.ctime = ctime();
response_stat.mtime = mtime;
response_stat.version = version;
response_stat.cversion = cversion;
response_stat.aversion = aversion;
response_stat.ephemeralOwner = ephemeralOwner();
response_stat.dataLength = static_cast<int32_t>(data_size);
response_stat.numChildren = numChildren();
response_stat.pzxid = pzxid;
}
void reset()
{
serialized = false;
}
bool empty() const
{
return data_size == 0 && mzxid == 0;
}
std::unique_ptr<char[]> data{nullptr};
uint32_t data_size{0};
private:
bool serialized = false;
};
/// KeeperMemNode should be as small as possible to reduce the memory footprint
/// of stored nodes
/// New fields should be added to the struct only if they're really necessary
struct KeeperMemNode
{
int64_t czxid{0};
int64_t mzxid{0};
int64_t pzxid{0};
uint64_t acl_id = 0; /// 0 -- no ACL by default
int64_t mtime{0};
std::unique_ptr<char[]> data{nullptr};
uint32_t data_size{0};
int32_t version{0};
int32_t cversion{0};
int32_t aversion{0};
mutable uint64_t cached_digest = 0;
KeeperMemNode() = default;
KeeperMemNode & operator=(const KeeperMemNode & other);
KeeperMemNode(const KeeperMemNode & other);
KeeperMemNode & operator=(KeeperMemNode && other) noexcept;
KeeperMemNode(KeeperMemNode && other) noexcept;
bool empty() const;
bool isEphemeral() const
{
@ -74,7 +287,6 @@ struct NodeStats
void setNumChildren(int32_t num_children)
{
is_ephemeral_and_ctime.is_ephemeral = false;
ephemeral_or_children_data.children_info.num_children = num_children;
}
@ -119,6 +331,34 @@ struct NodeStats
is_ephemeral_and_ctime.ctime = ctime;
}
void copyStats(const Coordination::Stat & stat);
void setResponseStat(Coordination::Stat & response_stat) const;
/// Object memory size
uint64_t sizeInBytes() const;
void setData(const String & new_data);
std::string_view getData() const noexcept { return {data.get(), data_size}; }
void addChild(StringRef child_path);
void removeChild(StringRef child_path);
const auto & getChildren() const noexcept { return children; }
auto & getChildren() { return children; }
// Invalidate the calculated digest so it's recalculated again on the next
// getDigest call
void invalidateDigestCache() const;
// get the calculated digest of the node
UInt64 getDigest(std::string_view path) const;
// copy only necessary information for preprocessing and digest calculation
// (e.g. we don't need to copy list of children)
void shallowCopy(const KeeperMemNode & other);
private:
/// as ctime can't be negative because it stores the timestamp when the
/// node was created, we can use the MSB for a bool
@ -139,132 +379,7 @@ private:
int32_t num_children;
} children_info;
} ephemeral_or_children_data{0};
};
/// KeeperRocksNodeInfo is used in RocksDB keeper.
/// It is serialized directly as POD to RocksDB.
struct KeeperRocksNodeInfo
{
NodeStats stats;
uint64_t acl_id = 0; /// 0 -- no ACL by default
/// dummy interface for test
void addChild(StringRef) {}
auto getChildren() const
{
return std::vector<int>(stats.numChildren());
}
void copyStats(const Coordination::Stat & stat);
};
/// KeeperRocksNode is the memory structure used by RocksDB
struct KeeperRocksNode : public KeeperRocksNodeInfo
{
#if USE_ROCKSDB
friend struct RocksDBContainer<KeeperRocksNode>;
#endif
using Meta = KeeperRocksNodeInfo;
uint64_t size_bytes = 0; // only for compatibility, should be deprecated
uint64_t sizeInBytes() const { return stats.data_size + sizeof(KeeperRocksNodeInfo); }
void setData(String new_data)
{
stats.data_size = static_cast<uint32_t>(new_data.size());
if (stats.data_size != 0)
{
data = std::unique_ptr<char[]>(new char[new_data.size()]);
memcpy(data.get(), new_data.data(), stats.data_size);
}
}
void shallowCopy(const KeeperRocksNode & other)
{
stats = other.stats;
acl_id = other.acl_id;
if (stats.data_size != 0)
{
data = std::unique_ptr<char[]>(new char[stats.data_size]);
memcpy(data.get(), other.data.get(), stats.data_size);
}
/// cached_digest = other.cached_digest;
}
void invalidateDigestCache() const;
UInt64 getDigest(std::string_view path) const;
String getEncodedString();
void decodeFromString(const String & buffer_str);
void recalculateSize() {}
std::string_view getData() const noexcept { return {data.get(), stats.data_size}; }
void setResponseStat(Coordination::Stat & response_stat) const;
void reset()
{
serialized = false;
}
bool empty() const
{
return stats.data_size == 0 && stats.mzxid == 0;
}
std::unique_ptr<char[]> data{nullptr};
mutable UInt64 cached_digest = 0; /// we cached digest for this node.
private:
bool serialized = false;
};
/// KeeperMemNode should be as small as possible to reduce the memory footprint
/// of stored nodes
/// New fields should be added to the struct only if they're really necessary
struct KeeperMemNode
{
NodeStats stats;
std::unique_ptr<char[]> data{nullptr};
mutable uint64_t cached_digest = 0;
uint64_t acl_id = 0; /// 0 -- no ACL by default
KeeperMemNode() = default;
KeeperMemNode & operator=(const KeeperMemNode & other);
KeeperMemNode(const KeeperMemNode & other);
KeeperMemNode & operator=(KeeperMemNode && other) noexcept;
KeeperMemNode(KeeperMemNode && other) noexcept;
bool empty() const;
void copyStats(const Coordination::Stat & stat);
void setResponseStat(Coordination::Stat & response_stat) const;
/// Object memory size
uint64_t sizeInBytes() const;
void setData(const String & new_data);
std::string_view getData() const noexcept { return {data.get(), stats.data_size}; }
void addChild(StringRef child_path);
void removeChild(StringRef child_path);
const auto & getChildren() const noexcept { return children; }
auto & getChildren() { return children; }
// Invalidate the calculated digest so it's recalculated again on the next
// getDigest call
void invalidateDigestCache() const;
// get the calculated digest of the node
UInt64 getDigest(std::string_view path) const;
// copy only necessary information for preprocessing and digest calculation
// (e.g. we don't need to copy list of children)
void shallowCopy(const KeeperMemNode & other);
private:
ChildrenSet children{};
};
@ -315,186 +430,17 @@ public:
};
using Ephemerals = std::unordered_map<int64_t, std::unordered_set<std::string>>;
struct WatchInfo
{
std::string_view path;
bool is_list_watch;
bool operator==(const WatchInfo &) const = default;
};
struct WatchInfoHash
{
auto operator()(WatchInfo info) const
{
SipHash hash;
hash.update(info.path);
hash.update(info.is_list_watch);
return hash.get64();
}
};
using SessionAndWatcher = std::unordered_map<int64_t, std::unordered_set<WatchInfo, WatchInfoHash>>;
using SessionAndWatcher = std::unordered_map<int64_t, std::unordered_set<std::string>>;
using SessionIDs = std::unordered_set<int64_t>;
/// Just vector of SHA1 from user:password
using AuthIDs = std::vector<AuthID>;
using SessionAndAuth = std::unordered_map<int64_t, AuthIDs>;
using Watches = std::unordered_map<
String /* path, relative of root_path */,
SessionIDs,
StringHashForHeterogeneousLookup,
StringHashForHeterogeneousLookup::transparent_key_equal>;
// Applying ZooKeeper request to storage consists of two steps:
// - preprocessing which, instead of applying the changes directly to storage,
// generates deltas with those changes, denoted with the request ZXID
// - processing which applies deltas with the correct ZXID to the storage
//
// Delta objects allow us two things:
// - fetch the latest, uncommitted state of an object by getting the committed
// state of that same object from the storage and applying the deltas
// in the same order as they are defined
// - quickly commit the changes to the storage
struct CreateNodeDelta
{
Coordination::Stat stat;
Coordination::ACLs acls;
String data;
};
struct RemoveNodeDelta
{
int32_t version{-1};
NodeStats stat;
Coordination::ACLs acls;
String data;
};
struct UpdateNodeStatDelta
{
template <is_any_of<KeeperMemNode, KeeperRocksNode> Node>
explicit UpdateNodeStatDelta(const Node & node)
: old_stats(node.stats)
, new_stats(node.stats)
{}
NodeStats old_stats;
NodeStats new_stats;
int32_t version{-1};
};
struct UpdateNodeDataDelta
{
std::string old_data;
std::string new_data;
int32_t version{-1};
};
struct SetACLDelta
{
Coordination::ACLs old_acls;
Coordination::ACLs new_acls;
int32_t version{-1};
};
struct ErrorDelta
{
Coordination::Error error;
};
struct FailedMultiDelta
{
std::vector<Coordination::Error> error_codes;
Coordination::Error global_error{Coordination::Error::ZOK};
};
// Denotes end of a subrequest in multi request
struct SubDeltaEnd
{
};
struct AddAuthDelta
{
int64_t session_id;
std::shared_ptr<AuthID> auth_id;
};
struct CloseSessionDelta
{
int64_t session_id;
};
using Operation = std::variant<
CreateNodeDelta,
RemoveNodeDelta,
UpdateNodeStatDelta,
UpdateNodeDataDelta,
SetACLDelta,
AddAuthDelta,
ErrorDelta,
SubDeltaEnd,
FailedMultiDelta,
CloseSessionDelta>;
struct Delta
{
Delta(String path_, int64_t zxid_, Operation operation_) : path(std::move(path_)), zxid(zxid_), operation(std::move(operation_)) { }
Delta(int64_t zxid_, Coordination::Error error) : Delta("", zxid_, ErrorDelta{error}) { }
Delta(int64_t zxid_, Operation subdelta) : Delta("", zxid_, subdelta) { }
String path;
int64_t zxid;
Operation operation;
};
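
Illustration only, not part of the diff: a toy sketch of the preprocess/commit split described in the comment above, under the heavy simplification that a node is just a string value and the only delta kind is "set new data"; every name here is hypothetical.

#include <cassert>
#include <cstdint>
#include <list>
#include <map>
#include <string>

struct ToyStorage
{
    std::map<std::string, std::string> committed; // committed state

    struct Delta
    {
        std::string path;
        int64_t zxid;
        std::string new_data;
    };
    std::list<Delta> deltas; // uncommitted changes, ordered by zxid

    // "preprocessing": record the change without touching the committed state
    void preprocess(std::string path, int64_t zxid, std::string data)
    {
        deltas.push_back({std::move(path), zxid, std::move(data)});
    }

    // latest (uncommitted) view: committed value with deltas applied in order
    std::string latest(const std::string & path) const
    {
        std::string value;
        if (auto it = committed.find(path); it != committed.end())
            value = it->second;
        for (const auto & delta : deltas)
            if (delta.path == path)
                value = delta.new_data;
        return value;
    }

    // "processing": quickly commit every delta up to and including commit_zxid
    void commit(int64_t commit_zxid)
    {
        while (!deltas.empty() && deltas.front().zxid <= commit_zxid)
        {
            committed[deltas.front().path] = deltas.front().new_data;
            deltas.pop_front();
        }
    }
};

int main()
{
    ToyStorage storage;
    storage.preprocess("/a", 1, "v1");
    assert(storage.latest("/a") == "v1");       // visible in the uncommitted view
    assert(storage.committed.count("/a") == 0); // but not committed yet
    storage.commit(1);
    assert(storage.committed.at("/a") == "v1");
    return 0;
}
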
using DeltaIterator = std::list<KeeperStorageBase::Delta>::const_iterator;
struct DeltaRange
{
DeltaIterator begin_it;
DeltaIterator end_it;
auto begin() const
{
return begin_it;
}
auto end() const
{
return end_it;
}
bool empty() const
{
return begin_it == end_it;
}
const auto & front() const
{
return *begin_it;
}
};
struct Stats
{
std::atomic<uint64_t> nodes_count = 0;
std::atomic<uint64_t> approximate_data_size = 0;
std::atomic<uint64_t> total_watches_count = 0;
std::atomic<uint64_t> watched_paths_count = 0;
std::atomic<uint64_t> sessions_with_watches_count = 0;
std::atomic<uint64_t> session_with_ephemeral_nodes_count = 0;
std::atomic<uint64_t> total_emphemeral_nodes_count = 0;
std::atomic<int64_t> last_zxid = 0;
};
Stats stats;
using Watches = std::unordered_map<String /* path, relative of root_path */, SessionIDs>;
static bool checkDigest(const Digest & first, const Digest & second);
};
};
/// Keeper state machine almost equal to the ZooKeeper's state machine.
/// Implements all logic of operations, data changes, sessions allocation.
@ -526,73 +472,160 @@ public:
int64_t session_id_counter{1};
mutable SharedMutex auth_mutex;
SessionAndAuth committed_session_and_auth;
SessionAndAuth session_and_auth;
mutable SharedMutex storage_mutex;
/// Main hashtable with nodes. Contain all information about data.
/// All other structures except session_and_timeout can be restored from
/// container.
Container container;
// Applying ZooKeeper request to storage consists of two steps:
// - preprocessing which, instead of applying the changes directly to storage,
// generates deltas with those changes, denoted with the request ZXID
// - processing which applies deltas with the correct ZXID to the storage
//
// Delta objects allow us two things:
// - fetch the latest, uncommitted state of an object by getting the committed
// state of that same object from the storage and applying the deltas
// in the same order as they are defined
// - quickly commit the changes to the storage
struct CreateNodeDelta
{
Coordination::Stat stat;
Coordination::ACLs acls;
String data;
};
struct RemoveNodeDelta
{
int32_t version{-1};
int64_t ephemeral_owner{0};
};
struct UpdateNodeDelta
{
std::function<void(Node &)> update_fn;
int32_t version{-1};
};
struct SetACLDelta
{
Coordination::ACLs acls;
int32_t version{-1};
};
struct ErrorDelta
{
Coordination::Error error;
};
struct FailedMultiDelta
{
std::vector<Coordination::Error> error_codes;
Coordination::Error global_error{Coordination::Error::ZOK};
};
// Denotes end of a subrequest in multi request
struct SubDeltaEnd
{
};
struct AddAuthDelta
{
int64_t session_id;
AuthID auth_id;
};
struct CloseSessionDelta
{
int64_t session_id;
};
using Operation = std::
variant<CreateNodeDelta, RemoveNodeDelta, UpdateNodeDelta, SetACLDelta, AddAuthDelta, ErrorDelta, SubDeltaEnd, FailedMultiDelta, CloseSessionDelta>;
struct Delta
{
Delta(String path_, int64_t zxid_, Operation operation_) : path(std::move(path_)), zxid(zxid_), operation(std::move(operation_)) { }
Delta(int64_t zxid_, Coordination::Error error) : Delta("", zxid_, ErrorDelta{error}) { }
Delta(int64_t zxid_, Operation subdelta) : Delta("", zxid_, subdelta) { }
String path;
int64_t zxid;
Operation operation;
};
struct UncommittedState
{
explicit UncommittedState(KeeperStorage & storage_) : storage(storage_) { }
void addDeltas(std::list<Delta> new_deltas);
void cleanup(int64_t commit_zxid);
void addDelta(Delta new_delta);
void addDeltas(std::vector<Delta> new_deltas);
void commit(int64_t commit_zxid);
void rollback(int64_t rollback_zxid);
void rollback(std::list<Delta> rollback_deltas);
std::shared_ptr<Node> getNode(StringRef path, bool should_lock_storage = true) const;
std::shared_ptr<Node> getNode(StringRef path) const;
const Node * getActualNodeView(StringRef path, const Node & storage_node) const;
Coordination::ACLs getACLs(StringRef path) const;
void applyDeltas(const std::list<Delta> & new_deltas);
void applyDelta(const Delta & delta);
void rollbackDelta(const Delta & delta);
bool hasACL(int64_t session_id, bool is_local, std::function<bool(const AuthID &)> predicate) const;
void forEachAuthInSession(int64_t session_id, std::function<void(const AuthID &)> func) const;
std::shared_ptr<Node> tryGetNodeFromStorage(StringRef path, bool should_lock_storage = true) const;
std::shared_ptr<Node> tryGetNodeFromStorage(StringRef path) const;
std::unordered_map<int64_t, std::list<const AuthID *>> session_and_auth;
std::unordered_set<int64_t> closed_sessions;
using ZxidToNodes = std::map<int64_t, std::unordered_set<std::string_view>>;
struct UncommittedNode
{
std::shared_ptr<Node> node{nullptr};
std::optional<Coordination::ACLs> acls{};
std::unordered_set<uint64_t> applied_zxids{};
void materializeACL(const ACLMap & current_acl_map);
Coordination::ACLs acls{};
int64_t zxid{0};
};
struct PathCmp
struct Hash
{
auto operator()(const std::string_view a,
const std::string_view b) const
auto operator()(const std::string_view view) const
{
size_t level_a = std::count(a.begin(), a.end(), '/');
size_t level_b = std::count(b.begin(), b.end(), '/');
return level_a < level_b || (level_a == level_b && a < b);
SipHash hash;
hash.update(view);
return hash.get64();
}
using is_transparent = void; // required to make find() work with a different type than key_type
};
Ephemerals ephemerals;
struct Equal
{
auto operator()(const std::string_view a,
const std::string_view b) const
{
return a == b;
}
std::unordered_map<int64_t, std::list<std::pair<int64_t, std::shared_ptr<AuthID>>>> session_and_auth;
using is_transparent = void; // required to make find() work with a different type than key_type
};
struct PathCmp
{
using is_transparent = std::true_type;
auto operator()(const std::string_view a,
const std::string_view b) const
{
return a.size() < b.size() || (a.size() == b.size() && a < b);
}
};
mutable std::map<std::string, UncommittedNode, PathCmp> nodes;
mutable ZxidToNodes zxid_to_nodes;
std::unordered_map<std::string, std::list<const Delta *>, Hash, Equal> deltas_for_path;
mutable std::mutex deltas_mutex;
std::list<Delta> deltas TSA_GUARDED_BY(deltas_mutex);
std::list<Delta> deltas;
KeeperStorage<Container> & storage;
};
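
Illustration only, not part of the diff: the is_transparent marker seen on the comparator and hash functors above is what allows these containers to be probed with a std::string_view key without first materialising a std::string; below is a minimal standalone sketch with hypothetical names, using the depth-then-lexicographic ordering of one of the PathCmp variants above.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <string_view>

struct TransparentPathCmp
{
    using is_transparent = void; // opts the comparator into heterogeneous lookup

    bool operator()(std::string_view a, std::string_view b) const
    {
        // order by path depth first, then lexicographically
        size_t level_a = std::count(a.begin(), a.end(), '/');
        size_t level_b = std::count(b.begin(), b.end(), '/');
        return level_a < level_b || (level_a == level_b && a < b);
    }
};

int main()
{
    std::map<std::string, int, TransparentPathCmp> nodes;
    nodes["/a"] = 1;
    nodes["/a/b"] = 2;

    std::string_view key = "/a/b";
    auto it = nodes.find(key); // no temporary std::string is constructed for the lookup
    assert(it != nodes.end() && it->second == 2);
    return 0;
}
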
@ -602,7 +635,7 @@ public:
// with zxid > last_zxid
void applyUncommittedState(KeeperStorage & other, int64_t last_log_idx);
Coordination::Error commit(DeltaRange deltas);
Coordination::Error commit(int64_t zxid);
// Create node in the storage
// Returns false if it failed to create the node, true otherwise
@ -620,11 +653,12 @@ public:
bool checkACL(StringRef path, int32_t permissions, int64_t session_id, bool is_local);
std::mutex ephemeral_mutex;
/// Mapping session_id -> set of ephemeral nodes paths
Ephemerals committed_ephemerals;
size_t committed_ephemeral_nodes{0};
void unregisterEphemeralPath(int64_t session_id, const std::string & path);
/// Mapping session_id -> set of ephemeral nodes paths
Ephemerals ephemerals;
/// Mapping session_id -> set of watched nodes paths
SessionAndWatcher sessions_and_watchers;
/// Expiration queue for session, allows to get dead sessions at some point of time
SessionExpiryQueue session_expiry_queue;
/// All active sessions with timeout
@ -633,10 +667,8 @@ public:
/// ACLMap for more compact ACLs storage inside nodes.
ACLMap acl_map;
mutable std::mutex transaction_mutex;
/// Global id of all requests applied to storage
int64_t zxid TSA_GUARDED_BY(transaction_mutex) = 0;
int64_t zxid{0};
// older Keeper node (pre V5 snapshots) can create snapshots and receive logs from newer Keeper nodes
// this can lead to some inconsistencies, e.g. from snapshot it will use log_idx as zxid
@ -653,16 +685,11 @@ public:
int64_t log_idx = 0;
};
std::list<TransactionInfo> uncommitted_transactions TSA_GUARDED_BY(transaction_mutex);
std::deque<TransactionInfo> uncommitted_transactions;
uint64_t nodes_digest = 0;
uint64_t nodes_digest{0};
std::atomic<bool> finalized{false};
/// Mapping session_id -> set of watched nodes paths
SessionAndWatcher sessions_and_watchers;
size_t total_watches_count = 0;
bool finalized{false};
/// Currently active watches (node_path -> subscribed sessions)
Watches watches;
@ -671,30 +698,45 @@ public:
void clearDeadWatches(int64_t session_id);
/// Get current committed zxid
int64_t getZXID() const;
int64_t getZXID() const { return zxid; }
int64_t getNextZXID() const;
int64_t getNextZXIDLocked() const TSA_REQUIRES(transaction_mutex);
int64_t getNextZXID() const
{
if (uncommitted_transactions.empty())
return zxid + 1;
Digest getNodesDigest(bool committed, bool lock_transaction_mutex) const;
return uncommitted_transactions.back().zxid + 1;
}
Digest getNodesDigest(bool committed) const;
KeeperContextPtr keeper_context;
const String superdigest;
std::atomic<bool> initialized{false};
bool initialized{false};
KeeperStorage(int64_t tick_time_ms, const String & superdigest_, const KeeperContextPtr & keeper_context_, bool initialize_system_nodes = true);
void initializeSystemNodes() TSA_NO_THREAD_SAFETY_ANALYSIS;
void initializeSystemNodes();
/// Allocate new session id with the specified timeouts
int64_t getSessionID(int64_t session_timeout_ms);
int64_t getSessionID(int64_t session_timeout_ms)
{
auto result = session_id_counter++;
session_and_timeout.emplace(result, session_timeout_ms);
session_expiry_queue.addNewSessionOrUpdate(result, session_timeout_ms);
return result;
}
/// Add session id. Used when restoring KeeperStorage from snapshot.
void addSessionID(int64_t session_id, int64_t session_timeout_ms) TSA_NO_THREAD_SAFETY_ANALYSIS;
void addSessionID(int64_t session_id, int64_t session_timeout_ms)
{
session_and_timeout.emplace(session_id, session_timeout_ms);
session_expiry_queue.addNewSessionOrUpdate(session_id, session_timeout_ms);
}
UInt64 calculateNodesDigest(UInt64 current_digest, const std::list<Delta> & new_deltas) const;
UInt64 calculateNodesDigest(UInt64 current_digest, const std::vector<Delta> & new_deltas) const;
/// Process user request and return response.
/// check_acl = false only when converting data from ZooKeeper.
@ -721,39 +763,42 @@ public:
/// Set of methods for creating snapshots
/// Turn on snapshot mode, so data inside Container is not deleted, but replaced with new version.
void enableSnapshotMode(size_t up_to_version);
void enableSnapshotMode(size_t up_to_version)
{
container.enableSnapshotMode(up_to_version);
}
/// Turn off snapshot mode.
void disableSnapshotMode();
void disableSnapshotMode()
{
container.disableSnapshotMode();
}
Container::const_iterator getSnapshotIteratorBegin() const;
Container::const_iterator getSnapshotIteratorBegin() const { return container.begin(); }
/// Clear outdated data from internal container.
void clearGarbageAfterSnapshot();
void clearGarbageAfterSnapshot() { container.clearOutdatedNodes(); }
/// Get all active sessions
SessionAndTimeout getActiveSessions() const;
const SessionAndTimeout & getActiveSessions() const { return session_and_timeout; }
/// Get all dead sessions
std::vector<int64_t> getDeadSessions() const;
void updateStats();
const Stats & getStorageStats() const;
std::vector<int64_t> getDeadSessions() const { return session_expiry_queue.getExpiredSessions(); }
/// Introspection functions mostly used in 4-letter commands
uint64_t getNodesCount() const;
uint64_t getNodesCount() const { return container.size(); }
uint64_t getApproximateDataSize() const;
uint64_t getApproximateDataSize() const { return container.getApproximateDataSize(); }
uint64_t getArenaDataSize() const;
uint64_t getArenaDataSize() const { return container.keyArenaSize(); }
uint64_t getTotalWatchesCount() const;
uint64_t getWatchedPathsCount() const;
uint64_t getWatchedPathsCount() const { return watches.size() + list_watches.size(); }
uint64_t getSessionsWithWatchesCount() const;
uint64_t getSessionWithEphemeralNodesCount() const;
uint64_t getSessionWithEphemeralNodesCount() const { return ephemerals.size(); }
uint64_t getTotalEphemeralNodesCount() const;
void dumpWatches(WriteBufferFromOwnString & buf) const;
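The `TSA_*` annotations added and removed throughout this hunk (`TSA_GUARDED_BY`, `TSA_REQUIRES`, `TSA_NO_THREAD_SAFETY_ANALYSIS`) wrap Clang's static thread-safety analysis. A hedged sketch of the underlying attributes follows; the macro spellings and the `Storage` example are illustrative, not ClickHouse's actual definitions, and it is meant to be built with `clang++ -std=c++17 -Wthread-safety -c`.

// Illustrative thread-safety-analysis sketch (Clang attributes per the Clang docs).
#include <cstdint>
#include <mutex>

#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(x)   __attribute__((requires_capability(x)))
#define ACQUIRE()     __attribute__((acquire_capability()))
#define RELEASE()     __attribute__((release_capability()))

class CAPABILITY("mutex") Mutex
{
public:
    void lock() ACQUIRE() { impl.lock(); }
    void unlock() RELEASE() { impl.unlock(); }
private:
    std::mutex impl;
};

class Storage
{
public:
    // Analogous to getNextZXIDLocked() TSA_REQUIRES(transaction_mutex):
    // the analyzer rejects callers that do not already hold the mutex.
    int64_t getNextZXIDLocked() const REQUIRES(transaction_mutex) { return zxid + 1; }

    int64_t getNextZXID() const
    {
        transaction_mutex.lock();
        int64_t next = getNextZXIDLocked(); // OK: the lock is held here
        transaction_mutex.unlock();
        return next;
    }

private:
    mutable Mutex transaction_mutex;
    // Analogous to `int64_t zxid TSA_GUARDED_BY(transaction_mutex)`:
    // unlocked reads and writes are reported at compile time.
    int64_t zxid GUARDED_BY(transaction_mutex) = 0;
};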

View File

@ -155,11 +155,11 @@ public:
ReadBufferFromOwnString buffer(iter->value().ToStringView());
typename Node::Meta & meta = new_pair->value;
readPODBinary(meta, buffer);
readVarUInt(new_pair->value.stats.data_size, buffer);
if (new_pair->value.stats.data_size)
readVarUInt(new_pair->value.data_size, buffer);
if (new_pair->value.data_size)
{
new_pair->value.data = std::unique_ptr<char[]>(new char[new_pair->value.stats.data_size]);
buffer.readStrict(new_pair->value.data.get(), new_pair->value.stats.data_size);
new_pair->value.data = std::unique_ptr<char[]>(new char[new_pair->value.data_size]);
buffer.readStrict(new_pair->value.data.get(), new_pair->value.data_size);
}
pair = new_pair;
}
@ -211,7 +211,7 @@ public:
}
}
std::vector<std::pair<std::string, Node>> getChildren(const std::string & key_, bool read_data = false)
std::vector<std::pair<std::string, Node>> getChildren(const std::string & key_)
{
rocksdb::ReadOptions read_options;
read_options.total_order_seek = true;
@ -232,15 +232,6 @@ public:
typename Node::Meta & meta = node;
/// We do not read data here
readPODBinary(meta, buffer);
if (read_data)
{
readVarUInt(meta.stats.data_size, buffer);
if (meta.stats.data_size)
{
node.data = std::unique_ptr<char[]>(new char[meta.stats.data_size]);
buffer.readStrict(node.data.get(), meta.stats.data_size);
}
}
std::string real_key(iter->key().data() + len, iter->key().size() - len);
// std::cout << "real key: " << real_key << std::endl;
result.emplace_back(std::move(real_key), std::move(node));
@ -277,11 +268,11 @@ public:
typename Node::Meta & meta = kv->value;
readPODBinary(meta, buffer);
/// TODO: Sometimes we don't need to load data.
readVarUInt(kv->value.stats.data_size, buffer);
if (kv->value.stats.data_size)
readVarUInt(kv->value.data_size, buffer);
if (kv->value.data_size)
{
kv->value.data = std::unique_ptr<char[]>(new char[kv->value.stats.data_size]);
buffer.readStrict(kv->value.data.get(), kv->value.stats.data_size);
kv->value.data = std::unique_ptr<char[]>(new char[kv->value.data_size]);
buffer.readStrict(kv->value.data.get(), kv->value.data_size);
}
return const_iterator(kv);
}
@ -290,7 +281,7 @@ public:
{
auto it = find(key);
chassert(it != end());
return MockNode(it->value.stats.numChildren(), it->value.getData());
return MockNode(it->value.numChildren(), it->value.getData());
}
const_iterator updateValue(StringRef key_, ValueUpdater updater)

View File

@ -93,7 +93,7 @@ void deserializeACLMap(Storage & storage, ReadBuffer & in)
}
template<typename Storage>
int64_t deserializeStorageData(Storage & storage, ReadBuffer & in, LoggerPtr log) TSA_NO_THREAD_SAFETY_ANALYSIS
int64_t deserializeStorageData(Storage & storage, ReadBuffer & in, LoggerPtr log)
{
int64_t max_zxid = 0;
std::string path;
@ -108,33 +108,33 @@ int64_t deserializeStorageData(Storage & storage, ReadBuffer & in, LoggerPtr log
Coordination::read(node.acl_id, in);
/// Deserialize stat
Coordination::read(node.stats.czxid, in);
Coordination::read(node.stats.mzxid, in);
Coordination::read(node.czxid, in);
Coordination::read(node.mzxid, in);
/// For some reason ZXID specified in filename can be smaller
/// than the actual zxid from the nodes. In this case we will use the zxid from the nodes.
max_zxid = std::max(max_zxid, node.stats.mzxid);
max_zxid = std::max(max_zxid, node.mzxid);
int64_t ctime;
Coordination::read(ctime, in);
node.stats.setCtime(ctime);
Coordination::read(node.stats.mtime, in);
Coordination::read(node.stats.version, in);
Coordination::read(node.stats.cversion, in);
Coordination::read(node.stats.aversion, in);
node.setCtime(ctime);
Coordination::read(node.mtime, in);
Coordination::read(node.version, in);
Coordination::read(node.cversion, in);
Coordination::read(node.aversion, in);
int64_t ephemeral_owner;
Coordination::read(ephemeral_owner, in);
if (ephemeral_owner != 0)
node.stats.setEphemeralOwner(ephemeral_owner);
Coordination::read(node.stats.pzxid, in);
node.setEphemeralOwner(ephemeral_owner);
Coordination::read(node.pzxid, in);
if (!path.empty())
{
if (ephemeral_owner == 0)
node.stats.setSeqNum(node.stats.cversion);
node.setSeqNum(node.cversion);
storage.container.insertOrReplace(path, node);
if (ephemeral_owner != 0)
storage.committed_ephemerals[ephemeral_owner].insert(path);
storage.ephemerals[ephemeral_owner].insert(path);
storage.acl_map.addUsage(node.acl_id);
}
@ -149,13 +149,7 @@ int64_t deserializeStorageData(Storage & storage, ReadBuffer & in, LoggerPtr log
if (itr.key != "/")
{
auto parent_path = parentNodePath(itr.key);
storage.container.updateValue(
parent_path,
[my_path = itr.key](typename Storage::Node & value)
{
value.addChild(getBaseNodeName(my_path));
value.stats.increaseNumChildren();
});
storage.container.updateValue(parent_path, [my_path = itr.key] (typename Storage::Node & value) { value.addChild(getBaseNodeName(my_path)); value.increaseNumChildren(); });
}
}
@ -163,7 +157,7 @@ int64_t deserializeStorageData(Storage & storage, ReadBuffer & in, LoggerPtr log
}
template<typename Storage>
void deserializeKeeperStorageFromSnapshot(Storage & storage, const std::string & snapshot_path, LoggerPtr log) TSA_NO_THREAD_SAFETY_ANALYSIS
void deserializeKeeperStorageFromSnapshot(Storage & storage, const std::string & snapshot_path, LoggerPtr log)
{
LOG_INFO(log, "Deserializing storage snapshot {}", snapshot_path);
int64_t zxid = getZxidFromName(snapshot_path);
@ -493,7 +487,7 @@ bool hasErrorsInMultiRequest(Coordination::ZooKeeperRequestPtr request)
}
template<typename Storage>
bool deserializeTxn(Storage & storage, ReadBuffer & in, LoggerPtr /*log*/) TSA_NO_THREAD_SAFETY_ANALYSIS
bool deserializeTxn(Storage & storage, ReadBuffer & in, LoggerPtr /*log*/)
{
int64_t checksum;
Coordination::read(checksum, in);
@ -574,7 +568,7 @@ void deserializeLogAndApplyToStorage(Storage & storage, const std::string & log_
}
template<typename Storage>
void deserializeLogsAndApplyToStorage(Storage & storage, const std::string & path, LoggerPtr log) TSA_NO_THREAD_SAFETY_ANALYSIS
void deserializeLogsAndApplyToStorage(Storage & storage, const std::string & path, LoggerPtr log)
{
std::map<int64_t, std::string> existing_logs;
for (const auto & p : fs::directory_iterator(path))

View File

@ -1,7 +1,6 @@
#include <chrono>
#include <gtest/gtest.h>
#include "base/defines.h"
#include "config.h"
#if USE_NURAFT
@ -1541,7 +1540,7 @@ void addNode(Storage & storage, const std::string & path, const std::string & da
using Node = typename Storage::Node;
Node node{};
node.setData(data);
node.stats.setEphemeralOwner(ephemeral_owner);
node.setEphemeralOwner(ephemeral_owner);
storage.container.insertOrReplace(path, node);
auto child_it = storage.container.find(path);
auto child_path = DB::getBaseNodeName(child_it->key);
@ -1550,7 +1549,7 @@ void addNode(Storage & storage, const std::string & path, const std::string & da
[&](auto & parent)
{
parent.addChild(child_path);
parent.stats.increaseNumChildren();
parent.increaseNumChildren();
});
}
@ -1571,9 +1570,9 @@ TYPED_TEST(CoordinationTest, TestStorageSnapshotSimple)
addNode(storage, "/hello1", "world", 1);
addNode(storage, "/hello2", "somedata", 3);
storage.session_id_counter = 5;
TSA_SUPPRESS_WARNING_FOR_WRITE(storage.zxid) = 2;
storage.committed_ephemerals[3] = {"/hello2"};
storage.committed_ephemerals[1] = {"/hello1"};
storage.zxid = 2;
storage.ephemerals[3] = {"/hello2"};
storage.ephemerals[1] = {"/hello1"};
storage.getSessionID(130);
storage.getSessionID(130);
@ -1602,10 +1601,10 @@ TYPED_TEST(CoordinationTest, TestStorageSnapshotSimple)
EXPECT_EQ(restored_storage->container.getValue("/hello1").getData(), "world");
EXPECT_EQ(restored_storage->container.getValue("/hello2").getData(), "somedata");
EXPECT_EQ(restored_storage->session_id_counter, 7);
EXPECT_EQ(restored_storage->getZXID(), 2);
EXPECT_EQ(restored_storage->committed_ephemerals.size(), 2);
EXPECT_EQ(restored_storage->committed_ephemerals[3].size(), 1);
EXPECT_EQ(restored_storage->committed_ephemerals[1].size(), 1);
EXPECT_EQ(restored_storage->zxid, 2);
EXPECT_EQ(restored_storage->ephemerals.size(), 2);
EXPECT_EQ(restored_storage->ephemerals[3].size(), 1);
EXPECT_EQ(restored_storage->ephemerals[1].size(), 1);
EXPECT_EQ(restored_storage->session_and_timeout.size(), 2);
}
@ -2028,7 +2027,7 @@ TYPED_TEST(CoordinationTest, TestEphemeralNodeRemove)
state_machine->commit(1, entry_c->get_buf());
const auto & storage = state_machine->getStorageUnsafe();
EXPECT_EQ(storage.committed_ephemerals.size(), 1);
EXPECT_EQ(storage.ephemerals.size(), 1);
std::shared_ptr<ZooKeeperRemoveRequest> request_d = std::make_shared<ZooKeeperRemoveRequest>();
request_d->path = "/hello";
/// Delete from other session
@ -2036,7 +2035,7 @@ TYPED_TEST(CoordinationTest, TestEphemeralNodeRemove)
state_machine->pre_commit(2, entry_d->get_buf());
state_machine->commit(2, entry_d->get_buf());
EXPECT_EQ(storage.committed_ephemerals.size(), 0);
EXPECT_EQ(storage.ephemerals.size(), 0);
}
@ -2591,9 +2590,9 @@ TYPED_TEST(CoordinationTest, TestStorageSnapshotDifferentCompressions)
addNode(storage, "/hello1", "world", 1);
addNode(storage, "/hello2", "somedata", 3);
storage.session_id_counter = 5;
TSA_SUPPRESS_WARNING_FOR_WRITE(storage.zxid) = 2;
storage.committed_ephemerals[3] = {"/hello2"};
storage.committed_ephemerals[1] = {"/hello1"};
storage.zxid = 2;
storage.ephemerals[3] = {"/hello2"};
storage.ephemerals[1] = {"/hello1"};
storage.getSessionID(130);
storage.getSessionID(130);
@ -2618,10 +2617,10 @@ TYPED_TEST(CoordinationTest, TestStorageSnapshotDifferentCompressions)
EXPECT_EQ(restored_storage->container.getValue("/hello1").getData(), "world");
EXPECT_EQ(restored_storage->container.getValue("/hello2").getData(), "somedata");
EXPECT_EQ(restored_storage->session_id_counter, 7);
EXPECT_EQ(restored_storage->getZXID(), 2);
EXPECT_EQ(restored_storage->committed_ephemerals.size(), 2);
EXPECT_EQ(restored_storage->committed_ephemerals[3].size(), 1);
EXPECT_EQ(restored_storage->committed_ephemerals[1].size(), 1);
EXPECT_EQ(restored_storage->zxid, 2);
EXPECT_EQ(restored_storage->ephemerals.size(), 2);
EXPECT_EQ(restored_storage->ephemerals[3].size(), 1);
EXPECT_EQ(restored_storage->ephemerals[1].size(), 1);
EXPECT_EQ(restored_storage->session_and_timeout.size(), 2);
}
@ -2806,13 +2805,13 @@ TYPED_TEST(CoordinationTest, TestStorageSnapshotEqual)
storage.session_id_counter = 5;
storage.committed_ephemerals[3] = {"/hello"};
storage.committed_ephemerals[1] = {"/hello/somepath"};
storage.ephemerals[3] = {"/hello"};
storage.ephemerals[1] = {"/hello/somepath"};
for (size_t j = 0; j < 3333; ++j)
storage.getSessionID(130 * j);
DB::KeeperStorageSnapshot<Storage> snapshot(&storage, storage.getZXID());
DB::KeeperStorageSnapshot<Storage> snapshot(&storage, storage.zxid);
auto buf = manager.serializeSnapshotToBuffer(snapshot);
@ -3316,7 +3315,7 @@ TYPED_TEST(CoordinationTest, TestCheckNotExistsRequest)
create_path("/test_node");
auto node_it = storage.container.find("/test_node");
ASSERT_NE(node_it, storage.container.end());
auto node_version = node_it->value.stats.version;
auto node_version = node_it->value.version;
{
SCOPED_TRACE("CheckNotExists returns ZNODEEXISTS");
@ -3567,12 +3566,12 @@ TYPED_TEST(CoordinationTest, TestRemoveRecursiveRequest)
{
SCOPED_TRACE("Recursive Remove Ephemeral");
create("/T7", zkutil::CreateMode::Ephemeral);
ASSERT_EQ(storage.committed_ephemerals.size(), 1);
ASSERT_EQ(storage.ephemerals.size(), 1);
auto responses = remove_recursive("/T7", 100);
ASSERT_EQ(responses.size(), 1);
ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
ASSERT_EQ(storage.committed_ephemerals.size(), 0);
ASSERT_EQ(storage.ephemerals.size(), 0);
ASSERT_FALSE(exists("/T7"));
}
@ -3582,12 +3581,12 @@ TYPED_TEST(CoordinationTest, TestRemoveRecursiveRequest)
create("/T8/A", zkutil::CreateMode::Persistent);
create("/T8/B", zkutil::CreateMode::Ephemeral);
create("/T8/A/C", zkutil::CreateMode::Ephemeral);
ASSERT_EQ(storage.committed_ephemerals.size(), 1);
ASSERT_EQ(storage.ephemerals.size(), 1);
auto responses = remove_recursive("/T8", 4);
ASSERT_EQ(responses.size(), 1);
ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
ASSERT_EQ(storage.committed_ephemerals.size(), 0);
ASSERT_EQ(storage.ephemerals.size(), 0);
ASSERT_FALSE(exists("/T8"));
ASSERT_FALSE(exists("/T8/A"));
ASSERT_FALSE(exists("/T8/B"));
@ -3739,72 +3738,6 @@ TYPED_TEST(CoordinationTest, TestRemoveRecursiveInMultiRequest)
ASSERT_FALSE(exists("/A/B"));
ASSERT_FALSE(exists("/A/B/D"));
}
{
SCOPED_TRACE("Recursive Remove For Subtree With Updated Node");
int create_zxid = ++zxid;
auto ops = prepare_create_tree();
/// First create nodes
const auto create_request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
storage.preprocessRequest(create_request, 1, 0, create_zxid);
auto create_responses = storage.processRequest(create_request, 1, create_zxid);
ASSERT_EQ(create_responses.size(), 1);
ASSERT_TRUE(is_multi_ok(create_responses[0].response));
/// Small limit
int remove_zxid = ++zxid;
ops = {
zkutil::makeSetRequest("/A/B", "", -1),
zkutil::makeRemoveRecursiveRequest("/A", 3),
};
auto remove_request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
storage.preprocessRequest(remove_request, 1, 0, remove_zxid);
auto remove_responses = storage.processRequest(remove_request, 1, remove_zxid);
ASSERT_EQ(remove_responses.size(), 1);
ASSERT_FALSE(is_multi_ok(remove_responses[0].response));
/// Big limit
remove_zxid = ++zxid;
ops[1] = zkutil::makeRemoveRecursiveRequest("/A", 4);
remove_request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
storage.preprocessRequest(remove_request, 1, 0, remove_zxid);
remove_responses = storage.processRequest(remove_request, 1, remove_zxid);
ASSERT_EQ(remove_responses.size(), 1);
ASSERT_TRUE(is_multi_ok(remove_responses[0].response));
ASSERT_FALSE(exists("/A"));
ASSERT_FALSE(exists("/A/C"));
ASSERT_FALSE(exists("/A/B"));
ASSERT_FALSE(exists("/A/B/D"));
}
{
SCOPED_TRACE("[BUG] Recursive Remove Level Sorting");
int new_zxid = ++zxid;
Coordination::Requests ops = {
zkutil::makeCreateRequest("/a", "", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest("/a/bbbbbb", "", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest("/A", "", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest("/A/B", "", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest("/A/CCCCCCCCCCCC", "", zkutil::CreateMode::Persistent),
zkutil::makeRemoveRecursiveRequest("/A", 3),
};
auto remove_request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
storage.preprocessRequest(remove_request, 1, 0, new_zxid);
auto remove_responses = storage.processRequest(remove_request, 1, new_zxid);
ASSERT_EQ(remove_responses.size(), 1);
ASSERT_TRUE(is_multi_ok(remove_responses[0].response));
ASSERT_TRUE(exists("/a"));
ASSERT_TRUE(exists("/a/bbbbbb"));
ASSERT_FALSE(exists("/A"));
ASSERT_FALSE(exists("/A/B"));
ASSERT_FALSE(exists("/A/CCCCCCCCCCCC"));
}
}
TYPED_TEST(CoordinationTest, TestRemoveRecursiveWatches)
@ -3890,26 +3823,14 @@ TYPED_TEST(CoordinationTest, TestRemoveRecursiveWatches)
auto responses = storage.processRequest(remove_request, 1, new_zxid);
ASSERT_EQ(responses.size(), 7);
/// request response is last
ASSERT_EQ(dynamic_cast<Coordination::ZooKeeperWatchResponse *>(responses.back().response.get()), nullptr);
std::unordered_map<std::string, std::vector<Coordination::Event>> expected_watch_responses
{
{"/A/B/D", {Coordination::Event::DELETED}},
{"/A/B", {Coordination::Event::CHILD, Coordination::Event::DELETED}},
{"/A/C", {Coordination::Event::DELETED}},
{"/A", {Coordination::Event::CHILD, Coordination::Event::DELETED}},
};
std::unordered_map<std::string, std::vector<Coordination::Event>> actual_watch_responses;
for (size_t i = 0; i < 6; ++i)
for (size_t i = 0; i < 7; ++i)
{
ASSERT_EQ(responses[i].response->error, Coordination::Error::ZOK);
const auto & watch_response = dynamic_cast<Coordination::ZooKeeperWatchResponse &>(*responses[i].response);
actual_watch_responses[watch_response.path].push_back(static_cast<Coordination::Event>(watch_response.type));
if (const auto * watch_response = dynamic_cast<Coordination::ZooKeeperWatchResponse *>(responses[i].response.get()))
ASSERT_EQ(watch_response->type, Coordination::Event::DELETED);
}
ASSERT_EQ(expected_watch_responses, actual_watch_responses);
ASSERT_EQ(storage.watches.size(), 0);
ASSERT_EQ(storage.list_watches.size(), 0);

View File

@ -151,15 +151,6 @@ Names NamesAndTypesList::getNames() const
return res;
}
NameSet NamesAndTypesList::getNameSet() const
{
NameSet res;
res.reserve(size());
for (const NameAndTypePair & column : *this)
res.insert(column.name);
return res;
}
DataTypes NamesAndTypesList::getTypes() const
{
DataTypes res;

View File

@ -100,7 +100,6 @@ public:
void getDifference(const NamesAndTypesList & rhs, NamesAndTypesList & deleted, NamesAndTypesList & added) const;
Names getNames() const;
NameSet getNameSet() const;
DataTypes getTypes() const;
/// Remove columns which names are not in the `names`.

View File

@ -33,8 +33,7 @@ static constexpr auto DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING = 54
static constexpr auto DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION = 1;
static constexpr auto DBMS_MIN_SUPPORTED_PARALLEL_REPLICAS_PROTOCOL_VERSION = 3;
static constexpr auto DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION = 4;
static constexpr auto DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION = 3;
static constexpr auto DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453;
static constexpr auto DBMS_MERGE_TREE_PART_INFO_VERSION = 1;
@ -87,8 +86,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469;
/// Packets size header
static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS = 54470;
static constexpr auto DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCOL = 54471;
/// Version of ClickHouse TCP protocol.
///
/// Should be incremented manually on protocol changes.
@ -96,6 +93,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCO
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing in common with VERSION_REVISION;
/// the latter is just a number for the server version (one number instead of a commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54471;
static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54470;
}

View File

@ -946,7 +946,7 @@ class IColumn;
M(Bool, parallel_replicas_for_non_replicated_merge_tree, false, "If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables", 0) \
M(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, "Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'", 0) \
M(Bool, parallel_replicas_prefer_local_join, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.", 0) \
M(UInt64, parallel_replicas_mark_segment_size, 0, "Parts are virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure of what you're doing. Value should be in range [128; 16384]", 0) \
M(UInt64, parallel_replicas_mark_segment_size, 128, "Parts are virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure of what you're doing", 0) \
M(Bool, allow_archive_path_syntax, true, "File/S3 engines/table function will parse paths with '::' as '<archive> :: <file>' if archive has correct extension", 0) \
M(Bool, parallel_replicas_local_plan, false, "Build local plan for local replica", 0) \
\
@ -1271,7 +1271,6 @@ class IColumn;
M(Bool, output_format_orc_string_as_string, true, "Use ORC String type instead of Binary for String columns", 0) \
M(ORCCompression, output_format_orc_compression_method, "zstd", "Compression method for ORC output format. Supported codecs: lz4, snappy, zlib, zstd, none (uncompressed)", 0) \
M(UInt64, output_format_orc_row_index_stride, 10'000, "Target row index stride in ORC output format", 0) \
M(Double, output_format_orc_dictionary_key_size_threshold, 0.0, "For a string column in ORC output format, if the number of distinct values is greater than this fraction of the total number of non-null rows, turn off dictionary encoding. Otherwise dictionary encoding is enabled", 0) \
\
M(CapnProtoEnumComparingMode, format_capn_proto_enum_comparising_mode, FormatSettings::CapnProtoEnumComparingMode::BY_VALUES, "How to map ClickHouse Enum and CapnProto Enum", 0) \
\

View File

@ -71,7 +71,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
},
{"24.9",
{
{"output_format_orc_dictionary_key_size_threshold", 0.0, 0.0, "For a string column in ORC output format, if the number of distinct values is greater than this fraction of the total number of non-null rows, turn off dictionary encoding. Otherwise dictionary encoding is enabled"},
{"input_format_json_empty_as_default", false, false, "Added new setting to allow to treat empty fields in JSON input as default values."},
{"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
{"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
@ -79,7 +78,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
{"output_format_always_quote_identifiers", false, false, "New setting."},
{"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."},
{"parallel_replicas_mark_segment_size", 128, 0, "Value for this setting now determined automatically"},
{"database_replicated_allow_replicated_engine_arguments", 1, 0, "Don't allow explicit arguments by default"},
{"database_replicated_allow_explicit_uuid", 0, 0, "Added a new setting to disallow explicitly specifying table UUID"},
{"parallel_replicas_local_plan", false, false, "Use local plan for local replica in a query with parallel replicas"},

View File

@ -59,27 +59,35 @@ void cckMetadataPathForOrdinary(const ASTCreateQuery & create, const String & me
}
void DatabaseFactory::validate(const ASTCreateQuery & create_query) const
/// validate validates the database engine that's specified in the create query for
/// engine arguments, settings and table overrides.
void validate(const ASTCreateQuery & create_query)
{
auto * storage = create_query.storage;
const String & engine_name = storage->engine->name;
const EngineFeatures & engine_features = database_engines.at(engine_name).features;
/// Check engine may have arguments
if (storage->engine->arguments && !engine_features.supports_arguments)
static const std::unordered_set<std::string_view> engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL",
"Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};
const String & engine_name = storage->engine->name;
bool engine_may_have_arguments = engines_with_arguments.contains(engine_name);
if (storage->engine->arguments && !engine_may_have_arguments)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine `{}` cannot have arguments", engine_name);
/// Check engine may have settings
bool may_have_settings = endsWith(engine_name, "MySQL") || engine_name == "Replicated" || engine_name == "MaterializedPostgreSQL";
bool has_unexpected_element = storage->engine->parameters || storage->partition_by ||
storage->primary_key || storage->order_by ||
storage->sample_by;
if (has_unexpected_element || (!engine_features.supports_settings && storage->settings))
if (has_unexpected_element || (!may_have_settings && storage->settings))
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_AST,
"Database engine `{}` cannot have parameters, primary_key, order_by, sample_by, settings", engine_name);
/// Check engine with table overrides
if (create_query.table_overrides && !engine_features.supports_table_overrides)
static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"};
if (create_query.table_overrides && !engines_with_table_overrides.contains(engine_name))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine `{}` cannot have table overrides", engine_name);
}
@ -113,9 +121,9 @@ DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & m
return impl;
}
void DatabaseFactory::registerDatabase(const std::string & name, CreatorFn creator_fn, EngineFeatures features)
void DatabaseFactory::registerDatabase(const std::string & name, CreatorFn creator_fn)
{
if (!database_engines.emplace(name, Creator{std::move(creator_fn), features}).second)
if (!database_engines.emplace(name, std::move(creator_fn)).second)
throw Exception(ErrorCodes::LOGICAL_ERROR, "DatabaseFactory: the database engine name '{}' is not unique", name);
}
@ -146,7 +154,7 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
.context = context};
// creator_fn creates and returns a DatabasePtr with the supplied arguments
auto creator_fn = database_engines.at(engine_name).creator_fn;
auto creator_fn = database_engines.at(engine_name);
return creator_fn(arguments);
}

View File

@ -43,30 +43,13 @@ public:
ContextPtr & context;
};
struct EngineFeatures
{
bool supports_arguments = false;
bool supports_settings = false;
bool supports_table_overrides = false;
};
DatabasePtr get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context);
using CreatorFn = std::function<DatabasePtr(const Arguments & arguments)>;
struct Creator
{
CreatorFn creator_fn;
EngineFeatures features;
};
using DatabaseEngines = std::unordered_map<std::string, CreatorFn>;
DatabasePtr get(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context);
using DatabaseEngines = std::unordered_map<std::string, Creator>;
void registerDatabase(const std::string & name, CreatorFn creator_fn, EngineFeatures features = EngineFeatures{
.supports_arguments = false,
.supports_settings = false,
.supports_table_overrides = false,
});
void registerDatabase(const std::string & name, CreatorFn creator_fn);
const DatabaseEngines & getDatabaseEngines() const { return database_engines; }
@ -82,10 +65,6 @@ private:
DatabaseEngines database_engines;
DatabasePtr getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context);
/// validate validates the database engine that's specified in the create query for
/// engine arguments, settings and table overrides.
void validate(const ASTCreateQuery & create_query) const;
};
}
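The change above moves engine capabilities out of hard-coded name sets in `validate()` and into per-engine `EngineFeatures` flags supplied at registration time. A minimal hedged sketch of that registration-plus-validation pattern, assuming C++20; the `EngineRegistry` type and the creator signature are illustrative stand-ins, not the real `DatabaseFactory` API.

// Illustrative registry: each engine registers a creator plus its feature flags,
// and validation consults the flags instead of hard-coded engine-name sets.
#include <functional>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>

struct EngineFeatures
{
    bool supports_arguments = false;
    bool supports_settings = false;
    bool supports_table_overrides = false;
};

class EngineRegistry
{
public:
    using CreatorFn = std::function<int()>; // stand-in for the real creator signature

    void registerEngine(const std::string & name, CreatorFn fn, EngineFeatures features = {})
    {
        if (!engines.emplace(name, Entry{std::move(fn), features}).second)
            throw std::logic_error("engine name is not unique: " + name);
    }

    void validate(const std::string & name, bool has_arguments, bool has_settings) const
    {
        const auto & features = engines.at(name).features;
        if (has_arguments && !features.supports_arguments)
            throw std::invalid_argument("engine cannot have arguments: " + name);
        if (has_settings && !features.supports_settings)
            throw std::invalid_argument("engine cannot have settings: " + name);
    }

private:
    struct Entry
    {
        CreatorFn creator_fn;
        EngineFeatures features;
    };
    std::unordered_map<std::string, Entry> engines;
};

int main()
{
    EngineRegistry registry;
    registry.registerEngine("Atomic", [] { return 0; }); // no arguments or settings allowed
    registry.registerEngine("Replicated", [] { return 0; }, {.supports_arguments = true, .supports_settings = true});

    registry.validate("Replicated", /*has_arguments=*/true, /*has_settings=*/true); // passes
    registry.validate("Atomic", /*has_arguments=*/false, /*has_settings=*/false);   // passes
}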

View File

@ -257,6 +257,6 @@ void registerDatabaseFilesystem(DatabaseFactory & factory)
return std::make_shared<DatabaseFilesystem>(args.database_name, init_path, args.context);
};
factory.registerDatabase("Filesystem", create_fn, {.supports_arguments = true});
factory.registerDatabase("Filesystem", create_fn);
}
}

View File

@ -253,7 +253,7 @@ void registerDatabaseHDFS(DatabaseFactory & factory)
return std::make_shared<DatabaseHDFS>(args.database_name, source_url, args.context);
};
factory.registerDatabase("HDFS", create_fn, {.supports_arguments = true});
factory.registerDatabase("HDFS", create_fn);
}
} // DB

View File

@ -398,6 +398,6 @@ void registerDatabaseLazy(DatabaseFactory & factory)
cache_expiration_time_seconds,
args.context);
};
factory.registerDatabase("Lazy", create_fn, {.supports_arguments = true});
factory.registerDatabase("Lazy", create_fn);
}
}

View File

@ -2001,6 +2001,6 @@ void registerDatabaseReplicated(DatabaseFactory & factory)
replica_name,
std::move(database_replicated_settings), args.context);
};
factory.registerDatabase("Replicated", create_fn, {.supports_arguments = true, .supports_settings = true});
factory.registerDatabase("Replicated", create_fn);
}
}

View File

@ -326,7 +326,7 @@ void registerDatabaseS3(DatabaseFactory & factory)
return std::make_shared<DatabaseS3>(args.database_name, config, args.context);
};
factory.registerDatabase("S3", create_fn, {.supports_arguments = true});
factory.registerDatabase("S3", create_fn);
}
}
#endif

View File

@ -290,14 +290,8 @@ void registerDatabaseMaterializedMySQL(DatabaseFactory & factory)
binlog_client,
std::move(materialize_mode_settings));
};
DatabaseFactory::EngineFeatures features{
.supports_arguments = true,
.supports_settings = true,
.supports_table_overrides = true,
};
factory.registerDatabase("MaterializeMySQL", create_fn, features);
factory.registerDatabase("MaterializedMySQL", create_fn, features);
factory.registerDatabase("MaterializeMySQL", create_fn);
factory.registerDatabase("MaterializedMySQL", create_fn);
}
}

View File

@ -584,7 +584,7 @@ void registerDatabaseMySQL(DatabaseFactory & factory)
throw Exception(ErrorCodes::CANNOT_CREATE_DATABASE, "Cannot create MySQL database, because {}", exception_message);
}
};
factory.registerDatabase("MySQL", create_fn, {.supports_arguments = true, .supports_settings = true});
factory.registerDatabase("MySQL", create_fn);
}
}

View File

@ -546,11 +546,7 @@ void registerDatabaseMaterializedPostgreSQL(DatabaseFactory & factory)
args.database_name, configuration.database, connection_info,
std::move(postgresql_replica_settings));
};
factory.registerDatabase("MaterializedPostgreSQL", create_fn, {
.supports_arguments = true,
.supports_settings = true,
.supports_table_overrides = true,
});
factory.registerDatabase("MaterializedPostgreSQL", create_fn);
}
}

View File

@ -558,7 +558,7 @@ void registerDatabasePostgreSQL(DatabaseFactory & factory)
pool,
use_table_cache);
};
factory.registerDatabase("PostgreSQL", create_fn, {.supports_arguments = true});
factory.registerDatabase("PostgreSQL", create_fn);
}
}

View File

@ -220,7 +220,7 @@ void registerDatabaseSQLite(DatabaseFactory & factory)
return std::make_shared<DatabaseSQLite>(args.context, engine_define, args.create_query.attach, database_path);
};
factory.registerDatabase("SQLite", create_fn, {.supports_arguments = true});
factory.registerDatabase("SQLite", create_fn);
}
}

View File

@ -28,7 +28,6 @@ extern const Event CachedReadBufferReadFromCacheMicroseconds;
extern const Event CachedReadBufferCacheWriteMicroseconds;
extern const Event CachedReadBufferReadFromSourceBytes;
extern const Event CachedReadBufferReadFromCacheBytes;
extern const Event CachedReadBufferPredownloadedBytes;
extern const Event CachedReadBufferCacheWriteBytes;
extern const Event CachedReadBufferCreateBufferMicroseconds;
@ -645,7 +644,6 @@ void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment)
size_t current_predownload_size = std::min(current_impl_buffer_size, bytes_to_predownload);
ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size);
ProfileEvents::increment(ProfileEvents::CachedReadBufferPredownloadedBytes, current_impl_buffer_size);
std::string failure_reason;
bool continue_predownload = file_segment.reserve(

View File

@ -244,7 +244,6 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
format_settings.orc.output_string_as_string = settings.output_format_orc_string_as_string;
format_settings.orc.output_compression_method = settings.output_format_orc_compression_method;
format_settings.orc.output_row_index_stride = settings.output_format_orc_row_index_stride;
format_settings.orc.output_dictionary_key_size_threshold = settings.output_format_orc_dictionary_key_size_threshold;
format_settings.orc.use_fast_decoder = settings.input_format_orc_use_fast_decoder;
format_settings.orc.filter_push_down = settings.input_format_orc_filter_push_down;
format_settings.orc.reader_time_zone_name = settings.input_format_orc_reader_time_zone_name;

View File

@ -415,7 +415,6 @@ struct FormatSettings
bool filter_push_down = true;
UInt64 output_row_index_stride = 10'000;
String reader_time_zone_name = "GMT";
double output_dictionary_key_size_threshold = 0.0;
} orc{};
/// For capnProto format we should determine how to

View File

@ -15,6 +15,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_COLUMN;
}
@ -37,6 +38,13 @@ public:
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.empty())
throw Exception(
ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Function {} needs at least one argument; passed {}.",
getName(),
arguments.size());
DataTypes arguments_types;
for (size_t index = 0; index < arguments.size(); ++index)
{
@ -60,16 +68,9 @@ public:
}
ColumnPtr
executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & /*result_type*/, size_t input_rows_count) const override
{
size_t num_arguments = arguments.size();
if (num_arguments == 0)
{
auto res_col = result_type->createColumn();
res_col->insertDefault();
return ColumnConst::create(std::move(res_col), input_rows_count);
}
Columns holders(num_arguments);
Columns tuple_columns(num_arguments);

View File

@ -718,12 +718,7 @@ FileCache::getOrSet(
}
}
chassert(file_segments_limit
? file_segments.back()->range().left <= result_range.right
: file_segments.back()->range().contains(result_range.right),
fmt::format("Unexpected state. Back: {}, result range: {}, limit: {}",
file_segments.back()->range().toString(), result_range.toString(), file_segments_limit));
chassert(file_segments_limit ? file_segments.back()->range().left <= result_range.right : file_segments.back()->range().contains(result_range.right));
chassert(!file_segments_limit || file_segments.size() <= file_segments_limit);
return std::make_unique<FileSegmentsHolder>(std::move(file_segments));

View File

@ -532,7 +532,7 @@ void executeQueryWithParallelReplicas(
max_replicas_to_use = shard.getAllNodeCount();
}
auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>(max_replicas_to_use);
auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>(max_replicas_to_use, settings.parallel_replicas_mark_segment_size);
auto external_tables = new_context->getExternalTables();

View File

@ -338,8 +338,11 @@ size_t HashJoin::getTotalRowCount() const
return res;
}
void HashJoin::doDebugAsserts() const
size_t HashJoin::getTotalByteCount() const
{
if (!data)
return 0;
#ifndef NDEBUG
size_t debug_blocks_allocated_size = 0;
for (const auto & block : data->blocks)
@ -357,14 +360,6 @@ void HashJoin::doDebugAsserts() const
throw Exception(ErrorCodes::LOGICAL_ERROR, "data->blocks_nullmaps_allocated_size != debug_blocks_nullmaps_allocated_size ({} != {})",
data->blocks_nullmaps_allocated_size, debug_blocks_nullmaps_allocated_size);
#endif
}
size_t HashJoin::getTotalByteCount() const
{
if (!data)
return 0;
doDebugAsserts();
size_t res = 0;
@ -549,11 +544,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
have_compressed = true;
}
doDebugAsserts();
data->blocks_allocated_size += block_to_save.allocatedBytes();
data->blocks.emplace_back(std::move(block_to_save));
Block * stored_block = &data->blocks.back();
doDebugAsserts();
if (rows)
data->empty = false;
@ -641,11 +634,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
if (!flag_per_row && !is_inserted)
{
doDebugAsserts();
LOG_TRACE(log, "Skipping inserting block with {} rows", rows);
data->blocks_allocated_size -= stored_block->allocatedBytes();
data->blocks.pop_back();
doDebugAsserts();
}
if (!check_limits)
@ -692,8 +683,6 @@ void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join, bool force_
for (auto & stored_block : data->blocks)
{
doDebugAsserts();
size_t old_size = stored_block.allocatedBytes();
stored_block = stored_block.shrinkToFit();
size_t new_size = stored_block.allocatedBytes();
@ -711,8 +700,6 @@ void HashJoin::shrinkStoredBlocksToFit(size_t & total_bytes_in_join, bool force_
else
/// Sometimes, after cloning, the resized block can be bigger than the original
data->blocks_allocated_size += new_size - old_size;
doDebugAsserts();
}
auto new_total_bytes_in_join = getTotalByteCount();
@ -1429,13 +1416,7 @@ void HashJoin::tryRerangeRightTableDataImpl(Map & map [[maybe_unused]])
};
BlocksList sorted_blocks;
visit_rows_map(sorted_blocks, map);
doDebugAsserts();
data->blocks.swap(sorted_blocks);
size_t new_blocks_allocated_size = 0;
for (const auto & block : data->blocks)
new_blocks_allocated_size += block.allocatedBytes();
data->blocks_allocated_size = new_blocks_allocated_size;
doDebugAsserts();
}
}

View File

@ -470,7 +470,6 @@ private:
void tryRerangeRightTableData() override;
template <JoinKind KIND, typename Map, JoinStrictness STRICTNESS>
void tryRerangeRightTableDataImpl(Map & map);
void doDebugAsserts() const;
};
}

View File

@ -380,99 +380,100 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query)
BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector<UUID> & uuids_to_wait)
{
if (query.kind != ASTDropQuery::Kind::Detach && query.kind != ASTDropQuery::Kind::Drop && query.kind != ASTDropQuery::Kind::Truncate)
return {};
const auto & database_name = query.getDatabase();
auto ddl_guard = DatabaseCatalog::instance().getDDLGuard(database_name, "");
database = tryGetDatabase(database_name, query.if_exists);
if (!database)
return {};
bool drop = query.kind == ASTDropQuery::Kind::Drop;
bool truncate = query.kind == ASTDropQuery::Kind::Truncate;
getContext()->checkAccess(AccessType::DROP_DATABASE, database_name);
if (query.kind == ASTDropQuery::Kind::Detach && query.permanently)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DETACH PERMANENTLY is not implemented for databases");
if (query.if_empty)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP IF EMPTY is not implemented for databases");
if (!truncate && database->hasReplicationThread())
database->stopReplication();
if (database->shouldBeEmptyOnDetach())
if (database)
{
/// Cancel restarting replicas in that database, wait for remaining RESTART queries to finish.
/// So it will not startup tables concurrently with the flushAndPrepareForShutdown call below.
auto restart_replica_lock = DatabaseCatalog::instance().getLockForDropDatabase(database_name);
ASTDropQuery query_for_table;
query_for_table.kind = query.kind;
// For truncate operation on database, drop the tables
if (truncate)
query_for_table.kind = query.has_all_tables ? ASTDropQuery::Kind::Truncate : ASTDropQuery::Kind::Drop;
query_for_table.if_exists = true;
query_for_table.if_empty = false;
query_for_table.setDatabase(database_name);
query_for_table.sync = query.sync;
/// Flush should not be done if shouldBeEmptyOnDetach() == false,
/// since in this case getTablesIterator() may do some additional work,
/// see DatabaseMaterializedMySQL::getTablesIterator()
auto table_context = Context::createCopy(getContext());
table_context->setInternalQuery(true);
/// Do not hold extra shared pointers to tables
std::vector<std::pair<StorageID, bool>> tables_to_drop;
// NOTE: This means we wait for all tables to be loaded inside getTablesIterator() call in case of `async_load_databases = true`.
for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next())
if (query.kind == ASTDropQuery::Kind::Detach || query.kind == ASTDropQuery::Kind::Drop
|| query.kind == ASTDropQuery::Kind::Truncate)
{
auto table_ptr = iterator->table();
tables_to_drop.push_back({table_ptr->getStorageID(), table_ptr->isDictionary()});
}
bool drop = query.kind == ASTDropQuery::Kind::Drop;
bool truncate = query.kind == ASTDropQuery::Kind::Truncate;
/// Prepare tables for shutdown in parallel.
ThreadPoolCallbackRunnerLocal<void> runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables");
for (const auto & [name, _] : tables_to_drop)
{
auto table_ptr = DatabaseCatalog::instance().getTable(name, table_context);
runner([my_table_ptr = std::move(table_ptr)]()
getContext()->checkAccess(AccessType::DROP_DATABASE, database_name);
if (query.kind == ASTDropQuery::Kind::Detach && query.permanently)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DETACH PERMANENTLY is not implemented for databases");
if (query.if_empty)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP IF EMPTY is not implemented for databases");
if (!truncate && database->hasReplicationThread())
database->stopReplication();
if (database->shouldBeEmptyOnDetach())
{
my_table_ptr->flushAndPrepareForShutdown();
});
}
runner.waitForAllToFinishAndRethrowFirstError();
/// Cancel restarting replicas in that database, wait for remaining RESTART queries to finish.
/// So it will not startup tables concurrently with the flushAndPrepareForShutdown call below.
auto restart_replica_lock = DatabaseCatalog::instance().getLockForDropDatabase(database_name);
for (const auto & table : tables_to_drop)
{
query_for_table.setTable(table.first.getTableName());
query_for_table.is_dictionary = table.second;
DatabasePtr db;
UUID table_to_wait = UUIDHelpers::Nil;
executeToTableImpl(table_context, query_for_table, db, table_to_wait);
uuids_to_wait.push_back(table_to_wait);
ASTDropQuery query_for_table;
query_for_table.kind = query.kind;
// For truncate operation on database, drop the tables
if (truncate)
query_for_table.kind = query.has_all_tables ? ASTDropQuery::Kind::Truncate : ASTDropQuery::Kind::Drop;
query_for_table.if_exists = true;
query_for_table.if_empty = false;
query_for_table.setDatabase(database_name);
query_for_table.sync = query.sync;
/// Flush should not be done if shouldBeEmptyOnDetach() == false,
/// since in this case getTablesIterator() may do some additional work,
/// see DatabaseMaterializedMySQL::getTablesIterator()
auto table_context = Context::createCopy(getContext());
table_context->setInternalQuery(true);
/// Do not hold extra shared pointers to tables
std::vector<std::pair<StorageID, bool>> tables_to_drop;
// NOTE: This means we wait for all tables to be loaded inside getTablesIterator() call in case of `async_load_databases = true`.
for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next())
{
auto table_ptr = iterator->table();
tables_to_drop.push_back({table_ptr->getStorageID(), table_ptr->isDictionary()});
}
/// Prepare tables for shutdown in parallel.
ThreadPoolCallbackRunnerLocal<void> runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables");
for (const auto & [name, _] : tables_to_drop)
{
auto table_ptr = DatabaseCatalog::instance().getTable(name, table_context);
runner([my_table_ptr = std::move(table_ptr)]()
{
my_table_ptr->flushAndPrepareForShutdown();
});
}
runner.waitForAllToFinishAndRethrowFirstError();
for (const auto & table : tables_to_drop)
{
query_for_table.setTable(table.first.getTableName());
query_for_table.is_dictionary = table.second;
DatabasePtr db;
UUID table_to_wait = UUIDHelpers::Nil;
executeToTableImpl(table_context, query_for_table, db, table_to_wait);
uuids_to_wait.push_back(table_to_wait);
}
}
// only if operation is DETACH
if ((!drop || !truncate) && query.sync)
{
/// Avoid "some tables are still in use" when sync mode is enabled
for (const auto & table_uuid : uuids_to_wait)
database->waitDetachedTableNotInUse(table_uuid);
}
/// Protects from concurrent CREATE TABLE queries
auto db_guard = DatabaseCatalog::instance().getExclusiveDDLGuardForDatabase(database_name);
// only if operation is DETACH
if (!drop || !truncate)
database->assertCanBeDetached(true);
/// DETACH or DROP database itself. If TRUNCATE skip dropping/erasing the database.
if (!truncate)
DatabaseCatalog::instance().detachDatabase(getContext(), database_name, drop, database->shouldBeEmptyOnDetach());
}
}
// only if operation is DETACH
if ((!drop || !truncate) && query.sync)
{
/// Avoid "some tables are still in use" when sync mode is enabled
for (const auto & table_uuid : uuids_to_wait)
database->waitDetachedTableNotInUse(table_uuid);
}
/// Protects from concurrent CREATE TABLE queries
auto db_guard = DatabaseCatalog::instance().getExclusiveDDLGuardForDatabase(database_name);
// only if operation is DETACH
if (!drop || !truncate)
database->assertCanBeDetached(true);
/// DETACH or DROP database itself. If TRUNCATE skip dropping/erasing the database.
if (!truncate)
DatabaseCatalog::instance().detachDatabase(getContext(), database_name, drop, database->shouldBeEmptyOnDetach());
return {};
}

View File

@ -1,42 +1,10 @@
#pragma once
#include <Common/KnownObjectNames.h>
#include <Core/QualifiedTableName.h>
#include <base/defines.h>
#include <boost/algorithm/string/predicate.hpp>
#include <vector>
namespace DB
{
class AbstractFunction
{
friend class FunctionSecretArgumentsFinder;
public:
class Argument
{
public:
virtual ~Argument() = default;
virtual std::unique_ptr<AbstractFunction> getFunction() const = 0;
virtual bool isIdentifier() const = 0;
virtual bool tryGetString(String * res, bool allow_identifier) const = 0;
};
class Arguments
{
public:
virtual ~Arguments() = default;
virtual size_t size() const = 0;
virtual std::unique_ptr<Argument> at(size_t n) const = 0;
};
virtual ~AbstractFunction() = default;
virtual String name() const = 0;
bool hasArguments() const { return !!arguments; }
protected:
std::unique_ptr<Arguments> arguments;
};
class FunctionSecretArgumentsFinder
{
public:
@ -55,485 +23,6 @@ public:
return count != 0 || !nested_maps.empty();
}
};
explicit FunctionSecretArgumentsFinder(std::unique_ptr<AbstractFunction> && function_) : function(std::move(function_)) {}
FunctionSecretArgumentsFinder::Result getResult() const { return result; }
protected:
const std::unique_ptr<AbstractFunction> function;
Result result;
void markSecretArgument(size_t index, bool argument_is_named = false)
{
if (index >= function->arguments->size())
return;
if (!result.count)
{
result.start = index;
result.are_named = argument_is_named;
}
chassert(index >= result.start); /// We always check arguments consecutively
result.count = index + 1 - result.start;
if (!argument_is_named)
result.are_named = false;
}
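markSecretArgument() only records a consecutive [start, start + count) range plus an `are_named` flag; whoever formats the query afterwards substitutes those positions. A hedged sketch of that substitution step, using a hypothetical `formatCall` helper rather than the real query-formatting code:

// Hypothetical helper: render a call with arguments in [start, start + count)
// replaced by '[HIDDEN]', which is how a (start, count) secret range is consumed.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::string formatCall(const std::string & name, const std::vector<std::string> & args,
                       std::size_t start, std::size_t count)
{
    std::string out = name + "(";
    for (std::size_t i = 0; i < args.size(); ++i)
    {
        if (i)
            out += ", ";
        out += (i >= start && i < start + count) ? "'[HIDDEN]'" : "'" + args[i] + "'";
    }
    return out + ")";
}

int main()
{
    // mysql('host:port', 'database', 'table', 'user', 'password'): index 4 is the
    // secret, so the finder reports start = 4, count = 1.
    std::vector<std::string> args{"localhost:3306", "db", "hits", "reader", "top_secret"};
    std::cout << formatCall("mysql", args, /*start=*/4, /*count=*/1) << '\n';
    // prints: mysql('localhost:3306', 'db', 'hits', 'reader', '[HIDDEN]')
}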
void findOrdinaryFunctionSecretArguments()
{
if ((function->name() == "mysql") || (function->name() == "postgresql") || (function->name() == "mongodb"))
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
/// postgresql('host:port', 'database', 'table', 'user', 'password', ...)
/// mongodb('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if ((function->name() == "s3") || (function->name() == "cosn") || (function->name() == "oss") ||
(function->name() == "deltaLake") || (function->name() == "hudi") || (function->name() == "iceberg") ||
(function->name() == "gcs"))
{
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
findS3FunctionSecretArguments(/* is_cluster_function= */ false);
}
else if (function->name() == "s3Cluster")
{
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', ...)
findS3FunctionSecretArguments(/* is_cluster_function= */ true);
}
else if (function->name() == "azureBlobStorage")
{
/// azureBlobStorage(connection_string|storage_account_url, container_name, blobpath, account_name, account_key, format, compression, structure)
findAzureBlobStorageFunctionSecretArguments(/* is_cluster_function= */ false);
}
else if (function->name() == "azureBlobStorageCluster")
{
/// azureBlobStorageCluster(cluster, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure])
findAzureBlobStorageFunctionSecretArguments(/* is_cluster_function= */ true);
}
else if ((function->name() == "remote") || (function->name() == "remoteSecure"))
{
/// remote('addresses_expr', 'db', 'table', 'user', 'password', ...)
findRemoteFunctionSecretArguments();
}
else if ((function->name() == "encrypt") || (function->name() == "decrypt") ||
(function->name() == "aes_encrypt_mysql") || (function->name() == "aes_decrypt_mysql") ||
(function->name() == "tryDecrypt"))
{
/// encrypt('mode', 'plaintext', 'key' [, iv, aad])
findEncryptionFunctionSecretArguments();
}
else if (function->name() == "url")
{
findURLSecretArguments();
}
}
void findMySQLFunctionSecretArguments()
{
if (isNamedCollectionName(0))
{
/// mysql(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
}
else
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
markSecretArgument(4);
}
}
/// Returns the number of arguments excluding "headers" and "extra_credentials" (which should
/// always be at the end). Marks "headers" as secret, if found.
size_t excludeS3OrURLNestedMaps()
{
size_t count = function->arguments->size();
while (count > 0)
{
const auto f = function->arguments->at(count - 1)->getFunction();
if (!f)
break;
if (f->name() == "headers")
result.nested_maps.push_back(f->name());
else if (f->name() != "extra_credentials")
break;
count -= 1;
}
return count;
}
void findS3FunctionSecretArguments(bool is_cluster_function)
{
/// s3Cluster('cluster_name', 'url', ...) has 'url' as its second argument.
size_t url_arg_idx = is_cluster_function ? 1 : 0;
if (!is_cluster_function && isNamedCollectionName(0))
{
/// s3(named_collection, ..., secret_access_key = 'secret_access_key', ...)
findSecretNamedArgument("secret_access_key", 1);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case of
/// s3('url', NOSIGN, 'format' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
/// s3('url', 'format', 'structure' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
size_t count = excludeS3OrURLNestedMaps();
if ((url_arg_idx + 3 <= count) && (count <= url_arg_idx + 4))
{
String second_arg;
if (tryGetStringFromArgument(url_arg_idx + 1, &second_arg))
{
if (boost::iequals(second_arg, "NOSIGN"))
return; /// The argument after 'url' is "NOSIGN".
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'url' is a format: s3('url', 'format', ...)
}
}
/// We're going to replace 'aws_secret_access_key' with '[HIDDEN]' for the following signatures:
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
if (url_arg_idx + 2 < count)
markSecretArgument(url_arg_idx + 2);
}
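findS3FunctionSecretArguments() has to rule out the credential-less call forms before hiding anything. A compressed hedged sketch of that decision, with a hypothetical helper, an illustrative format list, and the NOSIGN case-sensitivity simplified; it is not the real finder:

// Hypothetical decision helper: returns the index of the secret argument of an
// s3(...) call, or -1 when the call form carries no credentials.
#include <cassert>
#include <string>
#include <unordered_set>
#include <vector>

int secretS3ArgumentIndex(const std::vector<std::string> & args)
{
    static const std::unordered_set<std::string> known_formats{"auto", "CSV", "TSV", "Parquet"}; // illustrative subset

    // s3('url', NOSIGN, 'format' [, 'compression']) and
    // s3('url', 'format', 'structure' [, 'compression']) have no secret to hide.
    if (args.size() >= 3 && args.size() <= 4
        && (args[1] == "NOSIGN" || known_formats.contains(args[1])))
        return -1;

    // s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...): hide index 2.
    return args.size() > 2 ? 2 : -1;
}

int main()
{
    assert(secretS3ArgumentIndex({"https://bucket/path", "NOSIGN", "CSV"}) == -1);
    assert(secretS3ArgumentIndex({"https://bucket/path", "CSV", "id String"}) == -1);
    assert(secretS3ArgumentIndex({"https://bucket/path", "key_id", "secret", "CSV"}) == 2);
}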
void findAzureBlobStorageFunctionSecretArguments(bool is_cluster_function)
{
/// azureBlobStorageCluster('cluster_name', 'conn_string/storage_account_url', ...) has 'conn_string/storage_account_url' as its second argument.
size_t url_arg_idx = is_cluster_function ? 1 : 0;
if (!is_cluster_function && isNamedCollectionName(0))
{
/// azureBlobStorage(named_collection, ..., account_key = 'account_key', ...)
findSecretNamedArgument("account_key", 1);
return;
}
else if (is_cluster_function && isNamedCollectionName(1))
{
/// azureBlobStorageCluster(cluster, named_collection, ..., account_key = 'account_key', ...)
findSecretNamedArgument("account_key", 2);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case storage_account_url is not used
/// azureBlobStorage(connection_string|storage_account_url, container_name, blobpath, account_name, account_key, format, compression, structure)
/// azureBlobStorageCluster(cluster, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure])
size_t count = function->arguments->size();
if ((url_arg_idx + 4 <= count) && (count <= url_arg_idx + 7))
{
String second_arg;
if (tryGetStringFromArgument(url_arg_idx + 3, &second_arg))
{
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'blobpath' is a format: azureBlobStorage('connection_string', 'container_name', 'blobpath', 'format', ...)
}
}
/// We're going to replace 'account_key' with '[HIDDEN]' if account_key is used in the signature
if (url_arg_idx + 4 < count)
markSecretArgument(url_arg_idx + 4);
}
void findURLSecretArguments()
{
if (!isNamedCollectionName(0))
excludeS3OrURLNestedMaps();
}
bool tryGetStringFromArgument(size_t arg_idx, String * res, bool allow_identifier = true) const
{
if (arg_idx >= function->arguments->size())
return false;
return tryGetStringFromArgument(*function->arguments->at(arg_idx), res, allow_identifier);
}
static bool tryGetStringFromArgument(const AbstractFunction::Argument & argument, String * res, bool allow_identifier = true)
{
return argument.tryGetString(res, allow_identifier);
}
void findRemoteFunctionSecretArguments()
{
if (isNamedCollectionName(0))
{
/// remote(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
return;
}
/// We're going to replace 'password' with '[HIDDEN]' for the following signatures:
/// remote('addresses_expr', db.table, 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', 'db', 'table', 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', table_function(), 'user' [, 'password'] [, sharding_key])
/// But we should check the number of arguments first because we don't need to do any replacements in case of
/// remote('addresses_expr', db.table)
if (function->arguments->size() < 3)
return;
size_t arg_num = 1;
/// Skip 1 or 2 arguments with table_function() or db.table or 'db', 'table'.
auto table_function = function->arguments->at(arg_num)->getFunction();
if (table_function && KnownTableFunctionNames::instance().exists(table_function->name()))
{
++arg_num;
}
else
{
std::optional<String> database;
std::optional<QualifiedTableName> qualified_table_name;
if (!tryGetDatabaseNameOrQualifiedTableName(arg_num, database, qualified_table_name))
{
/// We couldn't evaluate the argument so we don't know whether it is 'db.table' or just 'db'.
/// Hence we can't figure out whether we should skip one argument 'user' or two arguments 'table', 'user'
/// before the argument 'password'. So it's safer to wipe two arguments just in case.
/// The last argument can also be a `sharding_key`, so we need to check that the argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
if (tryGetStringFromArgument(arg_num + 2, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `user`.
markSecretArgument(arg_num + 2);
}
if (tryGetStringFromArgument(arg_num + 3, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `sharding_key`.
markSecretArgument(arg_num + 3);
}
return;
}
/// Skip the current argument (which is either a database name or a qualified table name).
++arg_num;
if (database)
{
/// Skip the 'table' argument if the previous argument was a database name.
++arg_num;
}
}
/// Skip username.
++arg_num;
/// Do our replacement:
/// remote('addresses_expr', db.table, 'user', 'password', ...) -> remote('addresses_expr', db.table, 'user', '[HIDDEN]', ...)
/// The last argument can also be a `sharding_key`, so we need to check that the argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
bool can_be_password = tryGetStringFromArgument(arg_num, nullptr, /* allow_identifier= */ false);
if (can_be_password)
markSecretArgument(arg_num);
}
/// Tries to get either a database name or a qualified table name from an argument.
/// Empty string is also allowed (it means the default database).
/// The function is used by findRemoteFunctionSecretArguments() to determine how many arguments to skip before a password.
bool tryGetDatabaseNameOrQualifiedTableName(
size_t arg_idx,
std::optional<String> & res_database,
std::optional<QualifiedTableName> & res_qualified_table_name) const
{
res_database.reset();
res_qualified_table_name.reset();
String str;
if (!tryGetStringFromArgument(arg_idx, &str, /* allow_identifier= */ true))
return false;
if (str.empty())
{
res_database = "";
return true;
}
auto qualified_table_name = QualifiedTableName::tryParseFromString(str);
if (!qualified_table_name)
return false;
if (qualified_table_name->database.empty())
res_database = std::move(qualified_table_name->table);
else
res_qualified_table_name = std::move(qualified_table_name);
return true;
}
void findEncryptionFunctionSecretArguments()
{
if (function->arguments->size() == 0)
return;
/// We replace all arguments after 'mode' with '[HIDDEN]':
/// encrypt('mode', 'plaintext', 'key' [, iv, aad]) -> encrypt('mode', '[HIDDEN]')
result.start = 1;
result.count = function->arguments->size() - 1;
}
void findTableEngineSecretArguments()
{
const String & engine_name = function->name();
if (engine_name == "ExternalDistributed")
{
/// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
findExternalDistributedTableEngineSecretArguments();
}
else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") ||
(engine_name == "MaterializedPostgreSQL") || (engine_name == "MongoDB"))
{
/// MySQL('host:port', 'database', 'table', 'user', 'password', ...)
/// PostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
/// MaterializedPostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
/// MongoDB('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS") ||
(engine_name == "DeltaLake") || (engine_name == "Hudi") || (engine_name == "Iceberg") || (engine_name == "S3Queue"))
{
/// S3('url', ['aws_access_key_id', 'aws_secret_access_key',] ...)
findS3TableEngineSecretArguments();
}
else if (engine_name == "URL")
{
findURLSecretArguments();
}
}
void findExternalDistributedTableEngineSecretArguments()
{
if (isNamedCollectionName(1))
{
/// ExternalDistributed('engine', named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 2);
}
else
{
/// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
markSecretArgument(5);
}
}
void findS3TableEngineSecretArguments()
{
if (isNamedCollectionName(0))
{
/// S3(named_collection, ..., secret_access_key = 'secret_access_key')
findSecretNamedArgument("secret_access_key", 1);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case of
/// S3('url', NOSIGN, 'format' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
/// S3('url', 'format', 'compression' [, extra_credentials(..)] [, headers(..)])
size_t count = excludeS3OrURLNestedMaps();
if ((3 <= count) && (count <= 4))
{
String second_arg;
if (tryGetStringFromArgument(1, &second_arg))
{
if (boost::iequals(second_arg, "NOSIGN"))
return; /// The argument after 'url' is "NOSIGN".
if (count == 3)
{
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'url' is a format: S3('url', 'format', ...)
}
}
}
/// We replace 'aws_secret_access_key' with '[HIDDEN]' for the following signatures:
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key')
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format')
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
if (2 < count)
markSecretArgument(2);
}
void findDatabaseEngineSecretArguments()
{
const String & engine_name = function->name();
if ((engine_name == "MySQL") || (engine_name == "MaterializeMySQL") ||
(engine_name == "MaterializedMySQL") || (engine_name == "PostgreSQL") ||
(engine_name == "MaterializedPostgreSQL"))
{
/// MySQL('host:port', 'database', 'user', 'password')
/// PostgreSQL('host:port', 'database', 'user', 'password')
findMySQLDatabaseSecretArguments();
}
else if (engine_name == "S3")
{
/// S3('url', 'access_key_id', 'secret_access_key')
findS3DatabaseSecretArguments();
}
}
void findMySQLDatabaseSecretArguments()
{
if (isNamedCollectionName(0))
{
/// MySQL(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
}
else
{
/// MySQL('host:port', 'database', 'user', 'password')
markSecretArgument(3);
}
}
void findS3DatabaseSecretArguments()
{
if (isNamedCollectionName(0))
{
/// S3(named_collection, ..., secret_access_key = 'password', ...)
findSecretNamedArgument("secret_access_key", 1);
}
else
{
/// S3('url', 'access_key_id', 'secret_access_key')
markSecretArgument(2);
}
}
void findBackupNameSecretArguments()
{
const String & engine_name = function->name();
if (engine_name == "S3")
{
/// BACKUP ... TO S3(url, [aws_access_key_id, aws_secret_access_key])
markSecretArgument(2);
}
}
/// Whether the specified argument can be the name of a named collection.
bool isNamedCollectionName(size_t arg_idx) const
{
if (function->arguments->size() <= arg_idx)
return false;
return function->arguments->at(arg_idx)->isIdentifier();
}
/// Looks for a secret argument with the specified name among arguments of the form `key = value`.
void findSecretNamedArgument(const std::string_view & key, size_t start = 0)
{
for (size_t i = start; i < function->arguments->size(); ++i)
{
const auto & argument = function->arguments->at(i);
const auto equals_func = argument->getFunction();
if (!equals_func || (equals_func->name() != "equals"))
continue;
if (!equals_func->arguments || equals_func->arguments->size() != 2)
continue;
String found_key;
if (!tryGetStringFromArgument(*equals_func->arguments->at(0), &found_key))
continue;
if (found_key == key)
markSecretArgument(i, /* argument_is_named= */ true);
}
}
};
}
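
For context, the finders above only record which argument positions hold secrets; the caller is expected to render those positions as '[HIDDEN]' (as the inline comments describe). The following standalone sketch illustrates how such a contiguous start/count range could be applied when formatting a call for logs. MaskedRange and formatMasked are hypothetical names for illustration, not ClickHouse code.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

/// Hypothetical, simplified mirror of FunctionSecretArgumentsFinder::Result:
/// a contiguous range of argument positions whose values must not be shown.
struct MaskedRange
{
    size_t start = 0;       /// index of the first secret argument
    size_t count = 0;       /// number of consecutive secret arguments (0 means nothing to hide)
    bool are_named = false; /// true when the secrets are `key = value` pairs, e.g. password = '...'
};

/// Formats a function call, replacing the secret range with '[HIDDEN]',
/// in the same spirit as the masking described in the comments above.
std::string formatMasked(const std::string & name, const std::vector<std::string> & args, const MaskedRange & range)
{
    std::string out = name + "(";
    for (size_t i = 0; i < args.size(); ++i)
    {
        if (i)
            out += ", ";
        bool is_secret = range.count && (i >= range.start) && (i < range.start + range.count);
        out += is_secret ? std::string("'[HIDDEN]'") : args[i];
    }
    out += ")";
    return out;
}

int main()
{
    /// mysql('host:port', 'database', 'table', 'user', 'password', ...) masks argument #4.
    std::cout << formatMasked("mysql", {"'127.0.0.1:9004'", "'db'", "'t'", "'user'", "'secret'"}, {.start = 4, .count = 1}) << '\n';
    /// Prints: mysql('127.0.0.1:9004', 'db', 't', 'user', '[HIDDEN]')
}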

View File

@ -1,97 +1,35 @@
#pragma once
#include <Parsers/FunctionSecretArgumentsFinder.h>
#include <Core/QualifiedTableName.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTIdentifier.h>
#include <Common/KnownObjectNames.h>
#include <boost/algorithm/string/predicate.hpp>
namespace DB
{
class FunctionAST : public AbstractFunction
{
public:
class ArgumentAST : public Argument
{
public:
explicit ArgumentAST(const IAST * argument_) : argument(argument_) {}
std::unique_ptr<AbstractFunction> getFunction() const override
{
if (const auto * f = argument->as<ASTFunction>())
return std::make_unique<FunctionAST>(*f);
return nullptr;
}
bool isIdentifier() const override { return argument->as<ASTIdentifier>(); }
bool tryGetString(String * res, bool allow_identifier) const override
{
if (const auto * literal = argument->as<ASTLiteral>())
{
if (literal->value.getType() != Field::Types::String)
return false;
if (res)
*res = literal->value.safeGet<String>();
return true;
}
if (allow_identifier)
{
if (const auto * id = argument->as<ASTIdentifier>())
{
if (res)
*res = id->name();
return true;
}
}
return false;
}
private:
const IAST * argument = nullptr;
};
class ArgumentsAST : public Arguments
{
public:
explicit ArgumentsAST(const ASTs * arguments_) : arguments(arguments_) {}
size_t size() const override { return arguments ? arguments->size() : 0; }
std::unique_ptr<Argument> at(size_t n) const override
{
return std::make_unique<ArgumentAST>(arguments->at(n).get());
}
private:
const ASTs * arguments = nullptr;
};
explicit FunctionAST(const ASTFunction & function_) : function(&function_)
{
if (!function->arguments)
return;
const auto * expr_list = function->arguments->as<ASTExpressionList>();
if (!expr_list)
return;
arguments = std::make_unique<ArgumentsAST>(&expr_list->children);
}
String name() const override { return function->name; }
private:
const ASTFunction * function = nullptr;
};
/// Finds arguments of a specified function which should not be displayed for most users for security reasons.
/// That involves passwords and secret keys.
class FunctionSecretArgumentsFinderAST : public FunctionSecretArgumentsFinder
class FunctionSecretArgumentsFinderAST
{
public:
explicit FunctionSecretArgumentsFinderAST(const ASTFunction & function_)
: FunctionSecretArgumentsFinder(std::make_unique<FunctionAST>(function_))
explicit FunctionSecretArgumentsFinderAST(const ASTFunction & function_) : function(function_)
{
if (!function->hasArguments())
if (!function.arguments)
return;
switch (function_.kind)
const auto * expr_list = function.arguments->as<ASTExpressionList>();
if (!expr_list)
return;
arguments = &expr_list->children;
switch (function.kind)
{
case ASTFunction::Kind::ORDINARY_FUNCTION: findOrdinaryFunctionSecretArguments(); break;
case ASTFunction::Kind::WINDOW_FUNCTION: break;
@ -105,7 +43,507 @@ public:
}
FunctionSecretArgumentsFinder::Result getResult() const { return result; }
private:
const ASTFunction & function;
const ASTs * arguments = nullptr;
FunctionSecretArgumentsFinder::Result result;
void markSecretArgument(size_t index, bool argument_is_named = false)
{
if (index >= arguments->size())
return;
if (!result.count)
{
result.start = index;
result.are_named = argument_is_named;
}
chassert(index >= result.start); /// We always check arguments consecutively
result.count = index + 1 - result.start;
if (!argument_is_named)
result.are_named = false;
}
void findOrdinaryFunctionSecretArguments()
{
if ((function.name == "mysql") || (function.name == "postgresql") || (function.name == "mongodb"))
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
/// postgresql('host:port', 'database', 'table', 'user', 'password', ...)
/// mongodb('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if ((function.name == "s3") || (function.name == "cosn") || (function.name == "oss") ||
(function.name == "deltaLake") || (function.name == "hudi") || (function.name == "iceberg") ||
(function.name == "gcs"))
{
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
findS3FunctionSecretArguments(/* is_cluster_function= */ false);
}
else if (function.name == "s3Cluster")
{
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', ...)
findS3FunctionSecretArguments(/* is_cluster_function= */ true);
}
else if (function.name == "azureBlobStorage")
{
/// azureBlobStorage(connection_string|storage_account_url, container_name, blobpath, account_name, account_key, format, compression, structure)
findAzureBlobStorageFunctionSecretArguments(/* is_cluster_function= */ false);
}
else if (function.name == "azureBlobStorageCluster")
{
/// azureBlobStorageCluster(cluster, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure])
findAzureBlobStorageFunctionSecretArguments(/* is_cluster_function= */ true);
}
else if ((function.name == "remote") || (function.name == "remoteSecure"))
{
/// remote('addresses_expr', 'db', 'table', 'user', 'password', ...)
findRemoteFunctionSecretArguments();
}
else if ((function.name == "encrypt") || (function.name == "decrypt") ||
(function.name == "aes_encrypt_mysql") || (function.name == "aes_decrypt_mysql") ||
(function.name == "tryDecrypt"))
{
/// encrypt('mode', 'plaintext', 'key' [, iv, aad])
findEncryptionFunctionSecretArguments();
}
else if (function.name == "url")
{
findURLSecretArguments();
}
}
void findMySQLFunctionSecretArguments()
{
if (isNamedCollectionName(0))
{
/// mysql(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
}
else
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
markSecretArgument(4);
}
}
/// Returns the number of arguments excluding "headers" and "extra_credentials" (which should
/// always be at the end). Marks "headers" as secret, if found.
size_t excludeS3OrURLNestedMaps()
{
size_t count = arguments->size();
while (count > 0)
{
const ASTFunction * f = arguments->at(count - 1)->as<ASTFunction>();
if (!f)
break;
if (f->name == "headers")
result.nested_maps.push_back(f->name);
else if (f->name != "extra_credentials")
break;
count -= 1;
}
return count;
}
void findS3FunctionSecretArguments(bool is_cluster_function)
{
/// s3Cluster('cluster_name', 'url', ...) has 'url' as its second argument.
size_t url_arg_idx = is_cluster_function ? 1 : 0;
if (!is_cluster_function && isNamedCollectionName(0))
{
/// s3(named_collection, ..., secret_access_key = 'secret_access_key', ...)
findSecretNamedArgument("secret_access_key", 1);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case of
/// s3('url', NOSIGN, 'format' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
/// s3('url', 'format', 'structure' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
size_t count = excludeS3OrURLNestedMaps();
if ((url_arg_idx + 3 <= count) && (count <= url_arg_idx + 4))
{
String second_arg;
if (tryGetStringFromArgument(url_arg_idx + 1, &second_arg))
{
if (boost::iequals(second_arg, "NOSIGN"))
return; /// The argument after 'url' is "NOSIGN".
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'url' is a format: s3('url', 'format', ...)
}
}
/// We're going to replace 'aws_secret_access_key' with '[HIDDEN]' for the following signatures:
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
if (url_arg_idx + 2 < count)
markSecretArgument(url_arg_idx + 2);
}
void findAzureBlobStorageFunctionSecretArguments(bool is_cluster_function)
{
/// azureBlobStorageCluster('cluster_name', 'conn_string/storage_account_url', ...) has 'conn_string/storage_account_url' as its second argument.
size_t url_arg_idx = is_cluster_function ? 1 : 0;
if (!is_cluster_function && isNamedCollectionName(0))
{
/// azureBlobStorage(named_collection, ..., account_key = 'account_key', ...)
findSecretNamedArgument("account_key", 1);
return;
}
else if (is_cluster_function && isNamedCollectionName(1))
{
/// azureBlobStorageCluster(cluster, named_collection, ..., account_key = 'account_key', ...)
findSecretNamedArgument("account_key", 2);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case storage_account_url is not used
/// azureBlobStorage(connection_string|storage_account_url, container_name, blobpath, account_name, account_key, format, compression, structure)
/// azureBlobStorageCluster(cluster, connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression, structure])
size_t count = arguments->size();
if ((url_arg_idx + 4 <= count) && (count <= url_arg_idx + 7))
{
String second_arg;
if (tryGetStringFromArgument(url_arg_idx + 3, &second_arg))
{
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'blobpath' is a format: azureBlobStorage('connection_string', 'container_name', 'blobpath', 'format', ...)
}
}
/// We're going to replace 'account_key' with '[HIDDEN]' if account_key is used in the signature
if (url_arg_idx + 4 < count)
markSecretArgument(url_arg_idx + 4);
}
void findURLSecretArguments()
{
if (!isNamedCollectionName(0))
excludeS3OrURLNestedMaps();
}
bool tryGetStringFromArgument(size_t arg_idx, String * res, bool allow_identifier = true) const
{
if (arg_idx >= arguments->size())
return false;
return tryGetStringFromArgument(*(*arguments)[arg_idx], res, allow_identifier);
}
static bool tryGetStringFromArgument(const IAST & argument, String * res, bool allow_identifier = true)
{
if (const auto * literal = argument.as<ASTLiteral>())
{
if (literal->value.getType() != Field::Types::String)
return false;
if (res)
*res = literal->value.safeGet<String>();
return true;
}
if (allow_identifier)
{
if (const auto * id = argument.as<ASTIdentifier>())
{
if (res)
*res = id->name();
return true;
}
}
return false;
}
void findRemoteFunctionSecretArguments()
{
if (isNamedCollectionName(0))
{
/// remote(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
return;
}
/// We're going to replace 'password' with '[HIDDEN]' for the following signatures:
/// remote('addresses_expr', db.table, 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', 'db', 'table', 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', table_function(), 'user' [, 'password'] [, sharding_key])
/// But we should check the number of arguments first because we don't need to do any replacements in case of
/// remote('addresses_expr', db.table)
if (arguments->size() < 3)
return;
size_t arg_num = 1;
/// Skip 1 or 2 arguments with table_function() or db.table or 'db', 'table'.
const auto * table_function = (*arguments)[arg_num]->as<ASTFunction>();
if (table_function && KnownTableFunctionNames::instance().exists(table_function->name))
{
++arg_num;
}
else
{
std::optional<String> database;
std::optional<QualifiedTableName> qualified_table_name;
if (!tryGetDatabaseNameOrQualifiedTableName(arg_num, database, qualified_table_name))
{
/// We couldn't evaluate the argument so we don't know whether it is 'db.table' or just 'db'.
/// Hence we can't figure out whether we should skip one argument 'user' or two arguments 'table', 'user'
/// before the argument 'password'. So it's safer to wipe two arguments just in case.
/// The last argument can also be a `sharding_key`, so we need to check that the argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
if (tryGetStringFromArgument(arg_num + 2, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `user`.
markSecretArgument(arg_num + 2);
}
if (tryGetStringFromArgument(arg_num + 3, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `sharding_key`.
markSecretArgument(arg_num + 3);
}
return;
}
/// Skip the current argument (which is either a database name or a qualified table name).
++arg_num;
if (database)
{
/// Skip the 'table' argument if the previous argument was a database name.
++arg_num;
}
}
/// Skip username.
++arg_num;
/// Do our replacement:
/// remote('addresses_expr', db.table, 'user', 'password', ...) -> remote('addresses_expr', db.table, 'user', '[HIDDEN]', ...)
/// The last argument can also be a `sharding_key`, so we need to check that the argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
bool can_be_password = tryGetStringFromArgument(arg_num, nullptr, /* allow_identifier= */ false);
if (can_be_password)
markSecretArgument(arg_num);
}
/// Tries to get either a database name or a qualified table name from an argument.
/// Empty string is also allowed (it means the default database).
/// The function is used by findRemoteFunctionSecretArguments() to determine how many arguments to skip before a password.
bool tryGetDatabaseNameOrQualifiedTableName(
size_t arg_idx,
std::optional<String> & res_database,
std::optional<QualifiedTableName> & res_qualified_table_name) const
{
res_database.reset();
res_qualified_table_name.reset();
String str;
if (!tryGetStringFromArgument(arg_idx, &str, /* allow_identifier= */ true))
return false;
if (str.empty())
{
res_database = "";
return true;
}
auto qualified_table_name = QualifiedTableName::tryParseFromString(str);
if (!qualified_table_name)
return false;
if (qualified_table_name->database.empty())
res_database = std::move(qualified_table_name->table);
else
res_qualified_table_name = std::move(qualified_table_name);
return true;
}
void findEncryptionFunctionSecretArguments()
{
if (arguments->empty())
return;
/// We replace all arguments after 'mode' with '[HIDDEN]':
/// encrypt('mode', 'plaintext', 'key' [, iv, aad]) -> encrypt('mode', '[HIDDEN]')
result.start = 1;
result.count = arguments->size() - 1;
}
void findTableEngineSecretArguments()
{
const String & engine_name = function.name;
if (engine_name == "ExternalDistributed")
{
/// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
findExternalDistributedTableEngineSecretArguments();
}
else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") ||
(engine_name == "MaterializedPostgreSQL") || (engine_name == "MongoDB"))
{
/// MySQL('host:port', 'database', 'table', 'user', 'password', ...)
/// PostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
/// MaterializedPostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
/// MongoDB('host:port', 'database', 'collection', 'user', 'password', ...)
findMySQLFunctionSecretArguments();
}
else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS") ||
(engine_name == "DeltaLake") || (engine_name == "Hudi") || (engine_name == "Iceberg") || (engine_name == "S3Queue"))
{
/// S3('url', ['aws_access_key_id', 'aws_secret_access_key',] ...)
findS3TableEngineSecretArguments();
}
else if (engine_name == "URL")
{
findURLSecretArguments();
}
}
void findExternalDistributedTableEngineSecretArguments()
{
if (isNamedCollectionName(1))
{
/// ExternalDistributed('engine', named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 2);
}
else
{
/// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
markSecretArgument(5);
}
}
void findS3TableEngineSecretArguments()
{
if (isNamedCollectionName(0))
{
/// S3(named_collection, ..., secret_access_key = 'secret_access_key')
findSecretNamedArgument("secret_access_key", 1);
return;
}
/// We should check other arguments first because we don't need to do any replacement in case of
/// S3('url', NOSIGN, 'format' [, 'compression'] [, extra_credentials(..)] [, headers(..)])
/// S3('url', 'format', 'compression' [, extra_credentials(..)] [, headers(..)])
size_t count = excludeS3OrURLNestedMaps();
if ((3 <= count) && (count <= 4))
{
String second_arg;
if (tryGetStringFromArgument(1, &second_arg))
{
if (boost::iequals(second_arg, "NOSIGN"))
return; /// The argument after 'url' is "NOSIGN".
if (count == 3)
{
if (second_arg == "auto" || KnownFormatNames::instance().exists(second_arg))
return; /// The argument after 'url' is a format: S3('url', 'format', ...)
}
}
}
/// We replace 'aws_secret_access_key' with '[HIDDEN]' for the following signatures:
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key')
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format')
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
if (2 < count)
markSecretArgument(2);
}
void findDatabaseEngineSecretArguments()
{
const String & engine_name = function.name;
if ((engine_name == "MySQL") || (engine_name == "MaterializeMySQL") ||
(engine_name == "MaterializedMySQL") || (engine_name == "PostgreSQL") ||
(engine_name == "MaterializedPostgreSQL"))
{
/// MySQL('host:port', 'database', 'user', 'password')
/// PostgreSQL('host:port', 'database', 'user', 'password')
findMySQLDatabaseSecretArguments();
}
else if (engine_name == "S3")
{
/// S3('url', 'access_key_id', 'secret_access_key')
findS3DatabaseSecretArguments();
}
}
void findMySQLDatabaseSecretArguments()
{
if (isNamedCollectionName(0))
{
/// MySQL(named_collection, ..., password = 'password', ...)
findSecretNamedArgument("password", 1);
}
else
{
/// MySQL('host:port', 'database', 'user', 'password')
markSecretArgument(3);
}
}
void findS3DatabaseSecretArguments()
{
if (isNamedCollectionName(0))
{
/// S3(named_collection, ..., secret_access_key = 'password', ...)
findSecretNamedArgument("secret_access_key", 1);
}
else
{
/// S3('url', 'access_key_id', 'secret_access_key')
markSecretArgument(2);
}
}
void findBackupNameSecretArguments()
{
const String & engine_name = function.name;
if (engine_name == "S3")
{
/// BACKUP ... TO S3(url, [aws_access_key_id, aws_secret_access_key])
markSecretArgument(2);
}
}
/// Whether the specified argument can be the name of a named collection.
bool isNamedCollectionName(size_t arg_idx) const
{
if (arguments->size() <= arg_idx)
return false;
const auto * identifier = (*arguments)[arg_idx]->as<ASTIdentifier>();
return identifier != nullptr;
}
/// Looks for a secret argument with the specified name among arguments of the form `key = value`.
void findSecretNamedArgument(const std::string_view & key, size_t start = 0)
{
for (size_t i = start; i < arguments->size(); ++i)
{
const auto & argument = (*arguments)[i];
const auto * equals_func = argument->as<ASTFunction>();
if (!equals_func || (equals_func->name != "equals"))
continue;
const auto * expr_list = equals_func->arguments->as<ASTExpressionList>();
if (!expr_list)
continue;
const auto & equal_args = expr_list->children;
if (equal_args.size() != 2)
continue;
String found_key;
if (!tryGetStringFromArgument(*equal_args[0], &found_key))
continue;
if (found_key == key)
markSecretArgument(i, /* argument_is_named= */ true);
}
}
};
}
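
For reference, markSecretArgument() above keeps a single contiguous range and relies on arguments being inspected left to right (hence the chassert). A minimal standalone sketch of that bookkeeping, using hypothetical names (Range, markSecret) rather than the real classes, makes the invariant explicit:

#include <cassert>
#include <cstddef>

/// Hypothetical mirror of the result range kept by the finder.
struct Range
{
    size_t start = 0;
    size_t count = 0;
    bool are_named = false;
};

/// Same bookkeeping as markSecretArgument(): the first secret fixes `start`,
/// later secrets only extend `count`, and a positional secret clears `are_named`.
void markSecret(Range & r, size_t index, size_t total_args, bool argument_is_named = false)
{
    if (index >= total_args)
        return;
    if (!r.count)
    {
        r.start = index;
        r.are_named = argument_is_named;
    }
    assert(index >= r.start); /// arguments are always checked consecutively
    r.count = index + 1 - r.start;
    if (!argument_is_named)
        r.are_named = false;
}

int main()
{
    Range r;
    /// The remote(...) fallback path wipes two consecutive arguments, as in
    /// markSecretArgument(arg_num + 2) followed by markSecretArgument(arg_num + 3).
    markSecret(r, 3, 6);
    markSecret(r, 4, 6);
    assert(r.start == 3 && r.count == 2 && !r.are_named);
}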

View File

@ -78,9 +78,7 @@ void ORCOutputStream::write(const void* buf, size_t length)
}
ORCBlockOutputFormat::ORCBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_)
: IOutputFormat(header_, out_)
, format_settings{format_settings_}
, output_stream(out_)
: IOutputFormat(header_, out_), format_settings{format_settings_}, output_stream(out_)
{
for (const auto & type : header_.getDataTypes())
data_types.push_back(recursiveRemoveLowCardinality(type));
@ -567,7 +565,6 @@ void ORCBlockOutputFormat::prepareWriter()
schema = orc::createStructType();
options.setCompression(getORCCompression(format_settings.orc.output_compression_method));
options.setRowIndexStride(format_settings.orc.output_row_index_stride);
options.setDictionaryKeySizeThreshold(format_settings.orc.output_dictionary_key_size_threshold);
size_t columns_count = header.columns();
for (size_t i = 0; i != columns_count; ++i)
schema->addStructField(header.safeGetByPosition(i).name, getORCType(recursiveRemoveLowCardinality(data_types[i])));

View File

@ -1,8 +1,6 @@
#include <Processors/QueryPlan/ReadFromMergeTree.h>
#include <Core/Settings.h>
#include <IO/Operators.h>
#include <Interpreters/Cluster.h>
#include <Interpreters/Context.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/InterpreterSelectQuery.h>
@ -10,8 +8,6 @@
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/parseIdentifierOrStringLiteral.h>
#include <Processors/ConcatProcessor.h>
#include <Processors/Merges/AggregatingSortedTransform.h>
#include <Processors/Merges/CollapsingSortedTransform.h>
@ -20,7 +16,6 @@
#include <Processors/Merges/ReplacingSortedTransform.h>
#include <Processors/Merges/SummingSortedTransform.h>
#include <Processors/Merges/VersionedCollapsingTransform.h>
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <Processors/QueryPlan/PartsSplitter.h>
#include <Processors/Sources/NullSource.h>
#include <Processors/Transforms/ExpressionTransform.h>
@ -29,11 +24,10 @@
#include <Processors/Transforms/SelectByIndicesTransform.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
#include <Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h>
#include <Storages/MergeTree/MergeTreeIndexMinMax.h>
#include <Storages/MergeTree/MergeTreeIndexVectorSimilarity.h>
#include <Storages/MergeTree/MergeTreePrefetchedReadPool.h>
#include <Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h>
#include <Storages/MergeTree/MergeTreeReadPool.h>
#include <Storages/MergeTree/MergeTreePrefetchedReadPool.h>
#include <Storages/MergeTree/MergeTreeReadPoolInOrder.h>
#include <Storages/MergeTree/MergeTreeReadPoolParallelReplicas.h>
#include <Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.h>
@ -47,6 +41,11 @@
#include <Common/JSONBuilder.h>
#include <Common/isLocalAddress.h>
#include <Common/logger_useful.h>
#include <Core/Settings.h>
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <Parsers/parseIdentifierOrStringLiteral.h>
#include <Parsers/ExpressionListParsers.h>
#include <Storages/MergeTree/MergeTreeIndexMinMax.h>
#include <algorithm>
#include <iterator>
@ -382,7 +381,6 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas(RangesInDataParts parts_wit
.all_callback = all_ranges_callback.value(),
.callback = read_task_callback.value(),
.number_of_current_replica = number_of_current_replica.value_or(client_info.number_of_current_replica),
.total_nodes_count = context->getClusterForParallelReplicas()->getShardsInfo().at(0).getAllNodeCount(),
};
/// We have a special logic for local replica. It has to read less data, because in some cases it should
@ -565,7 +563,6 @@ Pipe ReadFromMergeTree::readInOrder(
.all_callback = all_ranges_callback.value(),
.callback = read_task_callback.value(),
.number_of_current_replica = number_of_current_replica.value_or(client_info.number_of_current_replica),
.total_nodes_count = context->getClusterForParallelReplicas()->getShardsInfo().at(0).getAllNodeCount(),
};
auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier;

View File

@ -34,12 +34,8 @@ int CertificateReloader::setCertificate(SSL * ssl, const CertificateReloader::Mu
auto current = pdata->data.get();
if (!current)
return -1;
return setCertificateCallback(ssl, current.get(), log);
}
int setCertificateCallback(SSL * ssl, const CertificateReloader::Data * current_data, LoggerPtr log)
{
if (current_data->certs_chain.empty())
if (current->certs_chain.empty())
return -1;
if (auto err = SSL_clear_chain_certs(ssl); err != 1)
@ -47,12 +43,12 @@ int setCertificateCallback(SSL * ssl, const CertificateReloader::Data * current_
LOG_ERROR(log, "Clear certificates {}", Poco::Net::Utility::getLastError());
return -1;
}
if (auto err = SSL_use_certificate(ssl, const_cast<X509 *>(current_data->certs_chain[0].certificate())); err != 1)
if (auto err = SSL_use_certificate(ssl, const_cast<X509 *>(current->certs_chain[0].certificate())); err != 1)
{
LOG_ERROR(log, "Use certificate {}", Poco::Net::Utility::getLastError());
return -1;
}
for (auto cert = current_data->certs_chain.begin() + 1; cert != current_data->certs_chain.end(); cert++)
for (auto cert = current->certs_chain.begin() + 1; cert != current->certs_chain.end(); cert++)
{
if (auto err = SSL_add1_chain_cert(ssl, const_cast<X509 *>(cert->certificate())); err != 1)
{
@ -60,7 +56,7 @@ int setCertificateCallback(SSL * ssl, const CertificateReloader::Data * current_
return -1;
}
}
if (auto err = SSL_use_PrivateKey(ssl, const_cast<EVP_PKEY *>(static_cast<const EVP_PKEY *>(current_data->key))); err != 1)
if (auto err = SSL_use_PrivateKey(ssl, const_cast<EVP_PKEY *>(static_cast<const EVP_PKEY *>(current->key))); err != 1)
{
LOG_ERROR(log, "Use private key {}", Poco::Net::Utility::getLastError());
return -1;

View File

@ -104,9 +104,6 @@ private:
mutable std::mutex data_mutex;
};
/// A callback for OpenSSL
int setCertificateCallback(SSL * ssl, const CertificateReloader::Data * current_data, LoggerPtr log);
}
#endif

View File

@ -1270,7 +1270,7 @@ void TCPHandler::sendReadTaskRequestAssumeLocked()
void TCPHandler::sendMergeTreeAllRangesAnnouncementAssumeLocked(InitialAllRangesAnnouncement announcement)
{
writeVarUInt(Protocol::Server::MergeTreeAllRangesAnnouncement, *out);
announcement.serialize(*out, client_parallel_replicas_protocol_version);
announcement.serialize(*out);
out->finishChunk();
out->next();
@ -1280,7 +1280,7 @@ void TCPHandler::sendMergeTreeAllRangesAnnouncementAssumeLocked(InitialAllRanges
void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest request)
{
writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out);
request.serialize(*out, client_parallel_replicas_protocol_version);
request.serialize(*out);
out->finishChunk();
out->next();
@ -1662,9 +1662,6 @@ void TCPHandler::receiveAddendum()
readStringBinary(proto_send_chunked_cl, *in);
readStringBinary(proto_recv_chunked_cl, *in);
}
if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCOL)
readVarUInt(client_parallel_replicas_protocol_version, *in);
}
@ -1692,8 +1689,6 @@ void TCPHandler::sendHello()
writeVarUInt(VERSION_MAJOR, *out);
writeVarUInt(VERSION_MINOR, *out);
writeVarUInt(DBMS_TCP_PROTOCOL_VERSION, *out);
if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCOL)
writeVarUInt(DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION, *out);
if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE)
writeStringBinary(DateLUT::instance().getTimeZone(), *out);
if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME)

View File

@ -188,7 +188,6 @@ private:
UInt64 client_version_minor = 0;
UInt64 client_version_patch = 0;
UInt32 client_tcp_protocol_version = 0;
UInt32 client_parallel_replicas_protocol_version = 0;
String proto_send_chunked_cl = "notchunked";
String proto_recv_chunked_cl = "notchunked";
String quota_key;
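
The TCPHandler hunks above follow the usual pattern for evolving the native protocol: a field added after the base handshake is read or written only when the negotiated client revision is new enough, which is why removing the versioned parallel-replicas protocol also removes its revision checks. A self-contained sketch of that gating is shown below; Buffer, sendHelloSketch, MIN_REVISION_WITH_EXTRA_FIELD, and all numeric values are placeholders, not the real DBMS_* constants.

#include <cstdint>
#include <vector>

/// Placeholder stand-ins for ClickHouse's WriteBuffer and writeVarUInt();
/// only the revision-gating pattern from sendHello() is illustrated.
using Buffer = std::vector<uint8_t>;

void writeVarUInt(uint64_t x, Buffer & out)
{
    /// LEB128-style variable-length encoding (the same idea as the real writeVarUInt).
    do
    {
        uint8_t byte = x & 0x7F;
        x >>= 7;
        if (x)
            byte |= 0x80;
        out.push_back(byte);
    } while (x);
}

/// Hypothetical revision at which an optional field was introduced.
constexpr uint64_t MIN_REVISION_WITH_EXTRA_FIELD = 1000;

void sendHelloSketch(Buffer & out, uint64_t client_revision,
                     uint64_t version_major, uint64_t version_minor, uint64_t server_revision,
                     uint64_t extra_field)
{
    writeVarUInt(version_major, out);
    writeVarUInt(version_minor, out);
    writeVarUInt(server_revision, out);
    /// Older clients do not expect the extra field, so it is written only when the
    /// negotiated revision is new enough -- the same shape as the
    /// `if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_...)` checks above.
    if (client_revision >= MIN_REVISION_WITH_EXTRA_FIELD)
        writeVarUInt(extra_field, out);
}

int main()
{
    Buffer out;
    /// A client below revision 1000 would not receive the extra field; this one does.
    sendHelloSketch(out, /* client_revision = */ 1000, 24, 1, 1000, /* extra_field = */ 7);
}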

Some files were not shown because too many files have changed in this diff.