Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-09-20 08:40:50 +00:00)

Commit ef4dfa6c3a: Merge remote-tracking branch 'rschu1ze/master' into follow-up-62700

.github/workflows/pull_request.yml (vendored)
@@ -97,7 +97,7 @@ jobs:
  # for main CI chain
  #
  Builds_1:
-    needs: [RunConfig, FastTest]
+    needs: [RunConfig, StyleCheck, FastTest]
    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
    # using callable wf (reusable_stage.yml) allows to group all nested jobs under a tab
    uses: ./.github/workflows/reusable_build_stage.yml
@@ -25,7 +25,7 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &

config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

cache_policy=""
-if [ $(($RANDOM%2)) -eq 1 ]; then
+if [ $((RANDOM % 2)) -eq 1 ]; then
    cache_policy="SLRU"
else
    cache_policy="LRU"

@@ -72,7 +72,7 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c

# Randomize cache policies.
cache_policy=""
-if [ $(($RANDOM%2)) -eq 1 ]; then
+if [ $((RANDOM % 2)) -eq 1 ]; then
    cache_policy="SLRU"
else
    cache_policy="LRU"
@@ -13,7 +13,7 @@ And the presentation: https://presentations.clickhouse.com/fosdem2023/

Data source: https://dumps.wikimedia.org/other/pageviews/

Getting the list of links:
-```
+``` shell
for i in {2015..2023}; do
  for j in {01..12}; do
    echo "${i}-${j}" >&2
@@ -24,7 +24,7 @@ done | sort | uniq | tee links.txt
```

Downloading the data:
-```
+``` shell
sed -r 's!pageviews-([0-9]{4})([0-9]{2})[0-9]{2}-[0-9]+\.gz!https://dumps.wikimedia.org/other/pageviews/\1/\1-\2/\0!' \
  links.txt | xargs -P3 wget --continue
```
@@ -40,8 +40,7 @@ CREATE TABLE wikistat
    project LowCardinality(String),
    subproject LowCardinality(String),
    path String CODEC(ZSTD(3)),
-    hits UInt64 CODEC(ZSTD(3)),
-    size UInt64 CODEC(ZSTD(3))
+    hits UInt64 CODEC(ZSTD(3))
)
ENGINE = MergeTree
ORDER BY (path, time);
@@ -49,7 +48,7 @@ ORDER BY (path, time);

Loading the data:

-```
+``` shell
clickhouse-local --query "
  WITH replaceRegexpOne(_path, '^.+pageviews-(\\d{4})(\\d{2})(\\d{2})-(\\d{2})(\\d{2})(\\d{2}).gz$', '\1-\2-\3 \4-\5-\6')::DateTime AS time,
       extractGroups(line, '^([^ \\.]+)(\\.[^ ]+)? +([^ ]+) +(\\d+) +(\\d+)$') AS values
@@ -58,9 +57,27 @@ clickhouse-local --query "
    values[1] AS project,
    values[2] AS subproject,
    values[3] AS path,
-    (values[4])::UInt64 AS hits,
-    (values[5])::UInt64 AS size
+    (values[4])::UInt64 AS hits
  FROM file('pageviews*.gz', LineAsString)
  WHERE length(values) = 5 FORMAT Native
" | clickhouse-client --query "INSERT INTO wikistat FORMAT Native"
```

+Or loading the cleaned data:
+
+``` sql
+INSERT INTO wikistat WITH
+    parseDateTimeBestEffort(extract(_file, '^pageviews-([\\d\\-]+)\\.gz$')) AS time,
+    splitByChar(' ', line) AS values,
+    splitByChar('.', values[1]) AS projects
+SELECT
+    time,
+    projects[1] AS project,
+    projects[2] AS subproject,
+    decodeURLComponent(values[2]) AS path,
+    CAST(values[3], 'UInt64') AS hits
+FROM s3(
+    'https://clickhouse-public-datasets.s3.amazonaws.com/wikistat/original/pageviews*.gz',
+    LineAsString)
+WHERE length(values) >= 3
+```
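Once the data is in place, a quick aggregate query is a convenient sanity check. The sketch below is illustrative only; it assumes the `wikistat` table defined above and the `en` project code used by the Wikimedia dumps, and it is not part of the documented loading steps:

``` sql
-- Top pages of the English-language project by total hits.
SELECT path, sum(hits) AS total_hits
FROM wikistat
WHERE project = 'en'
GROUP BY path
ORDER BY total_hits DESC
LIMIT 10
```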
@@ -557,6 +557,37 @@ Result:
│ 0 │
└──────────┘
```
+
+## tanh
+
+Returns the [hyperbolic tangent](https://www.mathworks.com/help/matlab/ref/tanh.html).
+
+**Syntax**
+
+``` sql
+tanh(x)
+```
+
+**Arguments**
+
+- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- Values from the interval: `-1 < tanh(x) < 1`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+``` sql
+SELECT tanh(0);
+```
+
+Result:
+
+```result
+0
+```
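As a quick cross-check of the definition, `tanh(x)` can be compared with its exponential form `(exp(2*x) - 1) / (exp(2*x) + 1)`; the difference should be zero up to floating-point rounding. This is an illustrative query, not part of the documented example:

``` sql
-- tanh(1) and (e^2 - 1) / (e^2 + 1) should agree up to rounding error.
SELECT tanh(1) - (exp(2) - 1) / (exp(2) + 1) AS diff
```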

## atanh
@@ -209,6 +209,7 @@ Hierarchy of privileges:
    - `SHOW NAMED COLLECTIONS`
    - `SHOW NAMED COLLECTIONS SECRETS`
    - `NAMED COLLECTION`
+- [TABLE ENGINE](#grant-table-engine)

Examples of how this hierarchy is treated:
@@ -505,6 +506,7 @@ and
[`format_display_secrets_in_show_and_select` format setting](../../operations/settings/formats#format_display_secrets_in_show_and_select)
are turned on.

### NAMED COLLECTION ADMIN

Allows a certain operation on a specified named collection. Before version 23.7 this privilege was called NAMED COLLECTION CONTROL; in 23.7 NAMED COLLECTION ADMIN was added, and NAMED COLLECTION CONTROL is kept as an alias.
@@ -524,6 +526,17 @@ Unlike all other grants (CREATE, DROP, ALTER, SHOW) grant NAMED COLLECTION was a
Assuming a named collection is called abc, we grant privilege CREATE NAMED COLLECTION to user john.
- `GRANT CREATE NAMED COLLECTION ON abc TO john`
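With that privilege granted, john can then create the collection himself. A minimal sketch, with placeholder key/value pairs that are not part of this change:

``` sql
-- Executed as user john, who holds CREATE NAMED COLLECTION ON abc.
CREATE NAMED COLLECTION abc AS
    host = 'example.com',
    port = 5432
```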
+### TABLE ENGINE
+
+Allows using a specified table engine when creating a table. Applies to [table engines](../../engines/table-engines/index.md).
+
+**Examples**
+
+- `GRANT TABLE ENGINE ON * TO john`
+- `GRANT TABLE ENGINE ON TinyLog TO john`
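For illustration, when `table_engines_require_grant` is enabled the grant has to be in place before the engine can be used in `CREATE TABLE`. A hedged sketch with placeholder database and table names:

``` sql
-- As an administrator:
GRANT TABLE ENGINE ON TinyLog TO john;

-- As john; without the grant above this would fail with an access-denied error
-- when table_engines_require_grant is set to true.
CREATE TABLE default.grant_demo (x UInt64) ENGINE = TinyLog
```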

### ALL

Grants all the privileges on a regulated entity to a user account or a role.
@@ -138,6 +138,7 @@

#if USE_AZURE_BLOB_STORAGE
#    include <azure/storage/common/internal/xml_wrapper.hpp>
+#    include <azure/core/diagnostics/logger.hpp>
#endif

#include <incbin.h>
@@ -612,6 +613,45 @@ static void sanityChecks(Server & server)
    }
}

+static void initializeAzureSDKLogger(
+    [[ maybe_unused ]] const ServerSettings & server_settings,
+    [[ maybe_unused ]] int server_logs_level)
+{
+#if USE_AZURE_BLOB_STORAGE
+    if (!server_settings.enable_azure_sdk_logging)
+        return;
+
+    using AzureLogsLevel = Azure::Core::Diagnostics::Logger::Level;
+
+    static const std::unordered_map<AzureLogsLevel, std::pair<Poco::Message::Priority, DB::LogsLevel>> azure_to_server_mapping =
+    {
+        {AzureLogsLevel::Error, {Poco::Message::PRIO_DEBUG, LogsLevel::debug}},
+        {AzureLogsLevel::Warning, {Poco::Message::PRIO_DEBUG, LogsLevel::debug}},
+        {AzureLogsLevel::Informational, {Poco::Message::PRIO_TRACE, LogsLevel::trace}},
+        {AzureLogsLevel::Verbose, {Poco::Message::PRIO_TEST, LogsLevel::test}},
+    };
+
+    static const std::map<Poco::Message::Priority, AzureLogsLevel> server_to_azure_mapping =
+    {
+        {Poco::Message::PRIO_DEBUG, AzureLogsLevel::Warning},
+        {Poco::Message::PRIO_TRACE, AzureLogsLevel::Informational},
+        {Poco::Message::PRIO_TEST, AzureLogsLevel::Verbose},
+    };
+
+    static const LoggerPtr azure_sdk_logger = getLogger("AzureSDK");
+
+    auto it = server_to_azure_mapping.lower_bound(static_cast<Poco::Message::Priority>(server_logs_level));
+    chassert(it != server_to_azure_mapping.end());
+    Azure::Core::Diagnostics::Logger::SetLevel(it->second);
+
+    Azure::Core::Diagnostics::Logger::SetListener([](AzureLogsLevel level, const std::string & message)
+    {
+        auto [poco_level, db_level] = azure_to_server_mapping.at(level);
+        LOG_IMPL(azure_sdk_logger, db_level, poco_level, fmt::runtime(message));
+    });
+#endif
+}
+
int Server::main(const std::vector<std::string> & /*args*/)
try
{
@@ -1860,6 +1900,7 @@ try
    /// Build loggers before tables startup to make log messages from tables
    /// attach available in system.text_log
    buildLoggers(config(), logger());
+    initializeAzureSDKLogger(server_settings, logger().getLevel());
    /// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
    attachSystemTablesServer(global_context, *database_catalog.getSystemDatabase(), has_zookeeper);
    attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA));
@@ -742,6 +742,10 @@
             It also enables 'changeable_in_readonly' constraint type -->
        <settings_constraints_replace_previous>true</settings_constraints_replace_previous>

+        <!-- By default, for backward compatibility, creating a table with a specific table engine ignores the grant;
+             you can change this behaviour by setting this to true -->
+        <table_engines_require_grant>true</table_engines_require_grant>
+
        <!-- Number of seconds since last access a role is stored in the Role Cache -->
        <role_cache_expiration_time_seconds>600</role_cache_expiration_time_seconds>
    </access_control_improvements>
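Roughly, the effect of the toggle is: with the default `false`, engines without a source counterpart (such as MergeTree) remain implicitly granted and source-backed engines (S3, MySQL, and so on) keep following their source grants, while with `true` every engine needs an explicit `TABLE ENGINE` grant. A sketch with placeholder names, shown here only to illustrate the setting:

``` sql
-- table_engines_require_grant = false (default): any user with CREATE TABLE may run this.
-- table_engines_require_grant = true: additionally requires
--   GRANT TABLE ENGINE ON MergeTree TO john;
CREATE TABLE default.t_local (x UInt64) ENGINE = MergeTree ORDER BY x
```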
@@ -7,6 +7,10 @@ max-branches=50
max-nested-blocks=10
max-statements=200

+[tool.isort]
+profile = "black"
+src_paths = ["src", "tests/ci", "tests/sqllogic"]
+
[tool.pylint.FORMAT]
#ignore-long-lines = (# )?<?https?://\S+>?$
@@ -285,6 +285,7 @@ void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration
    setSelectFromSystemDatabaseRequiresGrant(config_.getBool("access_control_improvements.select_from_system_db_requires_grant", false));
    setSelectFromInformationSchemaRequiresGrant(config_.getBool("access_control_improvements.select_from_information_schema_requires_grant", false));
    setSettingsConstraintsReplacePrevious(config_.getBool("access_control_improvements.settings_constraints_replace_previous", false));
+    setTableEnginesRequireGrant(config_.getBool("access_control_improvements.table_engines_require_grant", false));

    addStoragesFromMainConfig(config_, config_path_, get_zookeeper_function_);
@@ -182,6 +182,9 @@ public:
    void setSettingsConstraintsReplacePrevious(bool enable) { settings_constraints_replace_previous = enable; }
    bool doesSettingsConstraintsReplacePrevious() const { return settings_constraints_replace_previous; }

+    void setTableEnginesRequireGrant(bool enable) { table_engines_require_grant = enable; }
+    bool doesTableEnginesRequireGrant() const { return table_engines_require_grant; }
+
    std::shared_ptr<const ContextAccess> getContextAccess(const ContextAccessParams & params) const;

    std::shared_ptr<const EnabledRoles> getEnabledRoles(
@@ -258,6 +261,7 @@ private:
    std::atomic_bool select_from_system_db_requires_grant = false;
    std::atomic_bool select_from_information_schema_requires_grant = false;
    std::atomic_bool settings_constraints_replace_previous = false;
+    std::atomic_bool table_engines_require_grant = false;
    std::atomic_int bcrypt_workfactor = 12;
    std::atomic<AuthenticationType> default_password_type = AuthenticationType::SHA256_PASSWORD;
};
@@ -101,8 +101,9 @@ namespace
        const Flags & getTableFlags() const { return all_flags_for_target[TABLE]; }
        const Flags & getColumnFlags() const { return all_flags_for_target[COLUMN]; }
        const Flags & getDictionaryFlags() const { return all_flags_for_target[DICTIONARY]; }
+        const Flags & getNamedCollectionFlags() const { return all_flags_for_target[NAMED_COLLECTION]; }
+        const Flags & getTableEngineFlags() const { return all_flags_for_target[TABLE_ENGINE]; }
        const Flags & getUserNameFlags() const { return all_flags_for_target[USER_NAME]; }
-        const Flags & getNamedCollectionFlags() const { return all_flags_for_target[NAMED_COLLECTION]; }
        const Flags & getAllFlagsGrantableOnGlobalLevel() const { return getAllFlags(); }
        const Flags & getAllFlagsGrantableOnGlobalWithParameterLevel() const { return getGlobalWithParameterFlags(); }
        const Flags & getAllFlagsGrantableOnDatabaseLevel() const { return all_flags_grantable_on_database_level; }
@@ -122,6 +123,7 @@ namespace
            DICTIONARY,
            NAMED_COLLECTION,
            USER_NAME,
+            TABLE_ENGINE,
        };

        struct Node;
@@ -301,7 +303,7 @@ namespace
                collectAllFlags(child.get());

            all_flags_grantable_on_table_level = all_flags_for_target[TABLE] | all_flags_for_target[DICTIONARY] | all_flags_for_target[COLUMN];
-            all_flags_grantable_on_global_with_parameter_level = all_flags_for_target[NAMED_COLLECTION] | all_flags_for_target[USER_NAME];
+            all_flags_grantable_on_global_with_parameter_level = all_flags_for_target[NAMED_COLLECTION] | all_flags_for_target[USER_NAME] | all_flags_for_target[TABLE_ENGINE];
            all_flags_grantable_on_database_level = all_flags_for_target[DATABASE] | all_flags_grantable_on_table_level;
        }

@@ -352,7 +354,7 @@ namespace
        std::unordered_map<std::string_view, Flags> keyword_to_flags_map;
        std::vector<Flags> access_type_to_flags_mapping;
        Flags all_flags;
-        Flags all_flags_for_target[static_cast<size_t>(USER_NAME) + 1];
+        Flags all_flags_for_target[static_cast<size_t>(TABLE_ENGINE) + 1];
        Flags all_flags_grantable_on_database_level;
        Flags all_flags_grantable_on_table_level;
        Flags all_flags_grantable_on_global_with_parameter_level;
@@ -376,7 +378,11 @@ std::unordered_map<AccessFlags::ParameterType, AccessFlags> AccessFlags::splitIn
    if (user_flags)
        result.emplace(ParameterType::USER_NAME, user_flags);

-    auto other_flags = (~named_collection_flags & ~user_flags) & *this;
+    auto table_engine_flags = AccessFlags::allTableEngineFlags() & *this;
+    if (table_engine_flags)
+        result.emplace(ParameterType::TABLE_ENGINE, table_engine_flags);
+
+    auto other_flags = (~named_collection_flags & ~user_flags & ~table_engine_flags) & *this;
    if (other_flags)
        result.emplace(ParameterType::NONE, other_flags);

@@ -395,6 +401,10 @@ AccessFlags::ParameterType AccessFlags::getParameterType() const
    if (AccessFlags::allUserNameFlags().contains(*this))
        return AccessFlags::USER_NAME;

+    /// All flags refer to TABLE ENGINE access type.
+    if (AccessFlags::allTableEngineFlags().contains(*this))
+        return AccessFlags::TABLE_ENGINE;
+
    throw Exception(ErrorCodes::MIXED_ACCESS_PARAMETER_TYPES, "Having mixed parameter types: {}", toString());
}

@@ -414,6 +424,7 @@ AccessFlags AccessFlags::allColumnFlags() { return Helper::instance().getColumnF
AccessFlags AccessFlags::allDictionaryFlags() { return Helper::instance().getDictionaryFlags(); }
AccessFlags AccessFlags::allNamedCollectionFlags() { return Helper::instance().getNamedCollectionFlags(); }
AccessFlags AccessFlags::allUserNameFlags() { return Helper::instance().getUserNameFlags(); }
+AccessFlags AccessFlags::allTableEngineFlags() { return Helper::instance().getTableEngineFlags(); }
AccessFlags AccessFlags::allFlagsGrantableOnGlobalLevel() { return Helper::instance().getAllFlagsGrantableOnGlobalLevel(); }
AccessFlags AccessFlags::allFlagsGrantableOnGlobalWithParameterLevel() { return Helper::instance().getAllFlagsGrantableOnGlobalWithParameterLevel(); }
AccessFlags AccessFlags::allFlagsGrantableOnDatabaseLevel() { return Helper::instance().getAllFlagsGrantableOnDatabaseLevel(); }
@@ -56,6 +56,7 @@ public:
    enum ParameterType
    {
        NONE,
+        TABLE_ENGINE,
        NAMED_COLLECTION,
        USER_NAME,
    };
@@ -107,6 +108,9 @@ public:
    /// Returns all the flags related to a user.
    static AccessFlags allUserNameFlags();

+    /// Returns all the flags related to a table engine.
+    static AccessFlags allTableEngineFlags();
+
    /// Returns all the flags which could be granted on the global level.
    /// The same as allFlags().
    static AccessFlags allFlagsGrantableOnGlobalLevel();
@@ -12,7 +12,7 @@ enum class AccessType
/// Macro M should be defined as M(name, aliases, node_type, parent_group_name)
/// where name is identifier with underscores (instead of spaces);
/// aliases is a string containing comma-separated list;
-/// node_type either specifies access type's level (GLOBAL/NAMED_COLLECTION/USER_NAME/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS),
+/// node_type either specifies access type's level (GLOBAL/NAMED_COLLECTION/USER_NAME/TABLE_ENGINE/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS),
/// or specifies that the access type is a GROUP of other access types;
/// parent_group_name is the name of the group containing this access type (or NONE if there is no such group).
/// NOTE A parent group must be declared AFTER all its children.
@@ -153,6 +153,8 @@ enum class AccessType
    M(NAMED_COLLECTION_ADMIN, "NAMED COLLECTION CONTROL", NAMED_COLLECTION, ALL) \
    M(SET_DEFINER, "", USER_NAME, ALL) \
    \
+    M(TABLE_ENGINE, "TABLE ENGINE", TABLE_ENGINE, ALL) \
+    \
    M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
    M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \
    M(SYSTEM_DROP_CONNECTIONS_CACHE, "SYSTEM DROP CONNECTIONS CACHE, DROP CONNECTIONS CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
@@ -204,6 +204,52 @@ namespace
            res.grant(AccessType::SELECT, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE);
        }

+        /// There is an overlap between AccessType sources and table engines, so the following code avoids granting the same access twice.
+        static const std::vector<std::tuple<AccessFlags, std::string>> source_and_table_engines = {
+            {AccessType::FILE, "File"},
+            {AccessType::URL, "URL"},
+            {AccessType::REMOTE, "Distributed"},
+            {AccessType::MONGO, "MongoDB"},
+            {AccessType::REDIS, "Redis"},
+            {AccessType::MYSQL, "MySQL"},
+            {AccessType::POSTGRES, "PostgreSQL"},
+            {AccessType::SQLITE, "SQLite"},
+            {AccessType::ODBC, "ODBC"},
+            {AccessType::JDBC, "JDBC"},
+            {AccessType::HDFS, "HDFS"},
+            {AccessType::S3, "S3"},
+            {AccessType::HIVE, "Hive"},
+            {AccessType::AZURE, "AzureBlobStorage"}
+        };
+
+        /// Sync SOURCE and TABLE_ENGINE, so that only TABLE_ENGINE needs to be checked later.
+        if (access_control.doesTableEnginesRequireGrant())
+        {
+            for (const auto & source_and_table_engine : source_and_table_engines)
+            {
+                const auto & source = std::get<0>(source_and_table_engine);
+                if (res.isGranted(source))
+                {
+                    const auto & table_engine = std::get<1>(source_and_table_engine);
+                    res.grant(AccessType::TABLE_ENGINE, table_engine);
+                }
+            }
+        }
+        else
+        {
+            /// Add TABLE_ENGINE on * and then remove TABLE_ENGINE on particular engines.
+            res.grant(AccessType::TABLE_ENGINE);
+            for (const auto & source_and_table_engine : source_and_table_engines)
+            {
+                const auto & source = std::get<0>(source_and_table_engine);
+                if (!res.isGranted(source))
+                {
+                    const auto & table_engine = std::get<1>(source_and_table_engine);
+                    res.revoke(AccessType::TABLE_ENGINE, table_engine);
+                }
+            }
+        }
+
        return res;
    }
@@ -371,6 +371,7 @@ namespace
        if (databases)
        {
            user->access.revoke(AccessFlags::allFlags() - AccessFlags::allGlobalFlags());
+            user->access.grantWithGrantOption(AccessType::TABLE_ENGINE);
            user->access.grantWithGrantOption(AccessFlags::allDictionaryFlags(), IDictionary::NO_DATABASE_TAG);
            for (const String & database : *databases)
                user->access.grantWithGrantOption(AccessFlags::allFlags(), database);
@ -54,7 +54,7 @@ TEST(AccessRights, Union)
|
||||
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM VIRTUAL PARTS UPDATE, "
|
||||
"SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
|
||||
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, "
|
||||
"GRANT SET DEFINER ON db1, GRANT NAMED COLLECTION ADMIN ON db1");
|
||||
"GRANT TABLE ENGINE ON db1, GRANT SET DEFINER ON db1, GRANT NAMED COLLECTION ADMIN ON db1");
|
||||
}
|
||||
|
||||
|
||||
|
@ -68,9 +68,9 @@ public:
|
||||
String getName() const override
|
||||
{
|
||||
if constexpr (isMin)
|
||||
return "ArgMin";
|
||||
return nested_function->getName() + "ArgMin";
|
||||
else
|
||||
return "ArgMax";
|
||||
return nested_function->getName() + "ArgMax";
|
||||
}
|
||||
|
||||
bool isState() const override { return nested_function->isState(); }
|
||||
|
@ -5174,8 +5174,8 @@ ProjectionNames QueryAnalyzer::resolveLambda(const QueryTreeNodePtr & lambda_nod
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Lambda {} expect {} arguments. Actual {}. In scope {}",
|
||||
lambda_to_resolve.formatASTForErrorMessage(),
|
||||
arguments_size,
|
||||
lambda_arguments_nodes_size,
|
||||
arguments_size,
|
||||
scope.scope_node->formatASTForErrorMessage());
|
||||
|
||||
/// Initialize aliases in lambda scope
|
||||
|
@ -154,7 +154,7 @@ public:
|
||||
void updatePermutation(PermutationSortDirection, PermutationSortStability,
|
||||
size_t, int, Permutation &, EqualRanges &) const override
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getPermutation is not supported for ColumnUnique.");
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method updatePermutation is not supported for ColumnUnique.");
|
||||
}
|
||||
|
||||
std::vector<MutableColumnPtr> scatter(IColumn::ColumnIndex, const IColumn::Selector &) const override
|
||||
|
@ -140,6 +140,7 @@ namespace DB
|
||||
M(UInt64, http_connections_store_limit, 5000, "Connections above this limit reset after use. Set to 0 to turn connection cache off. The limit applies to the http connections which do not belong to any disk or storage.", 0) \
|
||||
M(UInt64, global_profiler_real_time_period_ns, 0, "Period for real clock timer of global profiler (in nanoseconds). Set 0 value to turn off the real clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
||||
M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for CPU clock timer of global profiler (in nanoseconds). Set 0 value to turn off the CPU clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
||||
M(Bool, enable_azure_sdk_logging, false, "Enables logging from Azure sdk", 0) \
|
||||
|
||||
/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp
|
||||
|
||||
|
@ -42,6 +42,7 @@ public:
|
||||
bool equals(const IDataType & rhs) const override;
|
||||
|
||||
bool isParametric() const override { return true; }
|
||||
bool isComparable() const override { return true; }
|
||||
bool haveSubtypes() const override { return true; }
|
||||
bool textCanContainOnlyValidUTF8() const override;
|
||||
bool haveMaximumSizeOfValue() const override;
|
||||
|
@ -4,8 +4,8 @@
|
||||
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/re2.h>
|
||||
#include <optional>
|
||||
#include <azure/identity/managed_identity_credential.hpp>
|
||||
#include <azure/core/http/curl_transport.hpp>
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
@ -138,35 +138,34 @@ AzureBlobStorageEndpoint processAzureBlobStorageEndpoint(const Poco::Util::Abstr
|
||||
|
||||
|
||||
template <class T>
|
||||
std::unique_ptr<T> getClientWithConnectionString(const String & connection_str, const String & container_name) = delete;
|
||||
|
||||
std::unique_ptr<T> getClientWithConnectionString(const String & connection_str, const String & container_name, const BlobClientOptions & client_options) = delete;
|
||||
|
||||
template<>
|
||||
std::unique_ptr<BlobServiceClient> getClientWithConnectionString(
|
||||
const String & connection_str, const String & /*container_name*/)
|
||||
std::unique_ptr<BlobServiceClient> getClientWithConnectionString(const String & connection_str, const String & /*container_name*/, const BlobClientOptions & client_options)
|
||||
{
|
||||
return std::make_unique<BlobServiceClient>(BlobServiceClient::CreateFromConnectionString(connection_str));
|
||||
return std::make_unique<BlobServiceClient>(BlobServiceClient::CreateFromConnectionString(connection_str, client_options));
|
||||
}
|
||||
|
||||
|
||||
template<>
|
||||
std::unique_ptr<BlobContainerClient> getClientWithConnectionString(
|
||||
const String & connection_str, const String & container_name)
|
||||
std::unique_ptr<BlobContainerClient> getClientWithConnectionString(const String & connection_str, const String & container_name, const BlobClientOptions & client_options)
|
||||
{
|
||||
return std::make_unique<BlobContainerClient>(BlobContainerClient::CreateFromConnectionString(connection_str, container_name));
|
||||
return std::make_unique<BlobContainerClient>(BlobContainerClient::CreateFromConnectionString(connection_str, container_name, client_options));
|
||||
}
|
||||
|
||||
|
||||
template <class T>
|
||||
std::unique_ptr<T> getAzureBlobStorageClientWithAuth(
|
||||
const String & url, const String & container_name, const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
|
||||
const String & url,
|
||||
const String & container_name,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const String & config_prefix,
|
||||
const Azure::Storage::Blobs::BlobClientOptions & client_options)
|
||||
{
|
||||
std::string connection_str;
|
||||
if (config.has(config_prefix + ".connection_string"))
|
||||
connection_str = config.getString(config_prefix + ".connection_string");
|
||||
|
||||
if (!connection_str.empty())
|
||||
return getClientWithConnectionString<T>(connection_str, container_name);
|
||||
return getClientWithConnectionString<T>(connection_str, container_name, client_options);
|
||||
|
||||
if (config.has(config_prefix + ".account_key") && config.has(config_prefix + ".account_name"))
|
||||
{
|
||||
@ -174,38 +173,63 @@ std::unique_ptr<T> getAzureBlobStorageClientWithAuth(
|
||||
config.getString(config_prefix + ".account_name"),
|
||||
config.getString(config_prefix + ".account_key")
|
||||
);
|
||||
return std::make_unique<T>(url, storage_shared_key_credential);
|
||||
return std::make_unique<T>(url, storage_shared_key_credential, client_options);
|
||||
}
|
||||
|
||||
auto managed_identity_credential = std::make_shared<Azure::Identity::ManagedIdentityCredential>();
|
||||
return std::make_unique<T>(url, managed_identity_credential);
|
||||
return std::make_unique<T>(url, managed_identity_credential, client_options);
|
||||
}
|
||||
|
||||
Azure::Storage::Blobs::BlobClientOptions getAzureBlobClientOptions(const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
|
||||
{
|
||||
Azure::Core::Http::Policies::RetryOptions retry_options;
|
||||
retry_options.MaxRetries = config.getUInt(config_prefix + ".max_tries", 10);
|
||||
retry_options.RetryDelay = std::chrono::milliseconds(config.getUInt(config_prefix + ".retry_initial_backoff_ms", 10));
|
||||
retry_options.MaxRetryDelay = std::chrono::milliseconds(config.getUInt(config_prefix + ".retry_max_backoff_ms", 1000));
|
||||
|
||||
std::unique_ptr<BlobContainerClient> getAzureBlobContainerClient(
|
||||
const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
|
||||
using CurlOptions = Azure::Core::Http::CurlTransportOptions;
|
||||
CurlOptions curl_options{.NoSignal = true};
|
||||
|
||||
if (config.has(config_prefix + ".curl_ip_resolve"))
|
||||
{
|
||||
auto value = config.getString(config_prefix + ".curl_ip_resolve");
|
||||
if (value == "ipv4")
|
||||
curl_options.IPResolve = CurlOptions::CURL_IPRESOLVE_V4;
|
||||
else if (value == "ipv6")
|
||||
curl_options.IPResolve = CurlOptions::CURL_IPRESOLVE_V6;
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected value for option 'curl_ip_resolve': {}. Expected one of 'ipv4' or 'ipv6'", value);
|
||||
}
|
||||
|
||||
Azure::Storage::Blobs::BlobClientOptions client_options;
|
||||
client_options.Retry = retry_options;
|
||||
client_options.Transport.Transport = std::make_shared<Azure::Core::Http::CurlTransport>(curl_options);
|
||||
|
||||
return client_options;
|
||||
}
|
||||
|
||||
std::unique_ptr<BlobContainerClient> getAzureBlobContainerClient(const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
|
||||
{
|
||||
auto endpoint = processAzureBlobStorageEndpoint(config, config_prefix);
|
||||
auto container_name = endpoint.container_name;
|
||||
auto final_url = endpoint.getEndpoint();
|
||||
auto client_options = getAzureBlobClientOptions(config, config_prefix);
|
||||
|
||||
if (endpoint.container_already_exists.value_or(false))
|
||||
return getAzureBlobStorageClientWithAuth<BlobContainerClient>(final_url, container_name, config, config_prefix);
|
||||
return getAzureBlobStorageClientWithAuth<BlobContainerClient>(final_url, container_name, config, config_prefix, client_options);
|
||||
|
||||
auto blob_service_client = getAzureBlobStorageClientWithAuth<BlobServiceClient>(
|
||||
endpoint.getEndpointWithoutContainer(), container_name, config, config_prefix);
|
||||
auto blob_service_client = getAzureBlobStorageClientWithAuth<BlobServiceClient>(endpoint.getEndpointWithoutContainer(), container_name, config, config_prefix, client_options);
|
||||
|
||||
try
|
||||
{
|
||||
return std::make_unique<BlobContainerClient>(
|
||||
blob_service_client->CreateBlobContainer(container_name).Value);
|
||||
return std::make_unique<BlobContainerClient>(blob_service_client->CreateBlobContainer(container_name).Value);
|
||||
}
|
||||
catch (const Azure::Storage::StorageException & e)
|
||||
{
|
||||
/// If container_already_exists is not set (in config), ignore already exists error.
|
||||
/// (Conflict - The specified container already exists)
|
||||
if (!endpoint.container_already_exists.has_value() && e.StatusCode == Azure::Core::Http::HttpStatusCode::Conflict)
|
||||
return getAzureBlobStorageClientWithAuth<BlobContainerClient>(final_url, container_name, config, config_prefix);
|
||||
return getAzureBlobStorageClientWithAuth<BlobContainerClient>(final_url, container_name, config, config_prefix, client_options);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
@ -45,12 +45,11 @@ struct AzureBlobStorageEndpoint
|
||||
}
|
||||
};
|
||||
|
||||
std::unique_ptr<Azure::Storage::Blobs::BlobContainerClient> getAzureBlobContainerClient(
|
||||
const Poco::Util::AbstractConfiguration & config, const String & config_prefix);
|
||||
std::unique_ptr<Azure::Storage::Blobs::BlobContainerClient> getAzureBlobContainerClient(const Poco::Util::AbstractConfiguration & config, const String & config_prefix);
|
||||
|
||||
AzureBlobStorageEndpoint processAzureBlobStorageEndpoint(const Poco::Util::AbstractConfiguration & config, const String & config_prefix);
|
||||
|
||||
std::unique_ptr<AzureObjectStorageSettings> getAzureBlobStorageSettings(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr /*context*/);
|
||||
std::unique_ptr<AzureObjectStorageSettings> getAzureBlobStorageSettings(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr context);
|
||||
|
||||
}
|
||||
|
||||
|
@ -732,11 +732,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
|
||||
|
||||
/// We have to check access rights again (in case engine was changed).
|
||||
if (create.storage && create.storage->engine)
|
||||
{
|
||||
auto source_access_type = StorageFactory::instance().getSourceAccessType(create.storage->engine->name);
|
||||
if (source_access_type != AccessType::NONE)
|
||||
getContext()->checkAccess(source_access_type);
|
||||
}
|
||||
getContext()->checkAccess(AccessType::TABLE_ENGINE, create.storage->engine->name);
|
||||
|
||||
TableProperties properties;
|
||||
TableLockHolder as_storage_lock;
|
||||
@ -1842,11 +1838,7 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const
|
||||
required_access.emplace_back(AccessType::SELECT | AccessType::INSERT, create.to_table_id.database_name, create.to_table_id.table_name);
|
||||
|
||||
if (create.storage && create.storage->engine)
|
||||
{
|
||||
auto source_access_type = StorageFactory::instance().getSourceAccessType(create.storage->engine->name);
|
||||
if (source_access_type != AccessType::NONE)
|
||||
required_access.emplace_back(source_access_type);
|
||||
}
|
||||
required_access.emplace_back(AccessType::TABLE_ENGINE, create.storage->engine->name);
|
||||
|
||||
return required_access;
|
||||
}
|
||||
|
@ -915,11 +915,22 @@ void StorageKafka::updateGlobalConfiguration(cppkafka::Configuration & kafka_con
|
||||
#endif // USE_KRB5
|
||||
|
||||
// No need to add any prefix, messages can be distinguished
|
||||
kafka_config.set_log_callback([this](cppkafka::KafkaHandleBase &, int level, const std::string & facility, const std::string & message)
|
||||
{
|
||||
auto [poco_level, client_logs_level] = parseSyslogLevel(level);
|
||||
LOG_IMPL(log, client_logs_level, poco_level, "[rdk:{}] {}", facility, message);
|
||||
});
|
||||
kafka_config.set_log_callback(
|
||||
[this](cppkafka::KafkaHandleBase & handle, int level, const std::string & facility, const std::string & message)
|
||||
{
|
||||
auto [poco_level, client_logs_level] = parseSyslogLevel(level);
|
||||
const auto & kafka_object_config = handle.get_configuration();
|
||||
const std::string client_id_key{"client.id"};
|
||||
chassert(kafka_object_config.has_property(client_id_key) && "Kafka configuration doesn't have expected client.id set");
|
||||
LOG_IMPL(
|
||||
log,
|
||||
client_logs_level,
|
||||
poco_level,
|
||||
"[client.id:{}] [rdk:{}] {}",
|
||||
kafka_object_config.get(client_id_key),
|
||||
facility,
|
||||
message);
|
||||
});
|
||||
|
||||
/// NOTE: statistics should be consumed, otherwise it creates too much
|
||||
/// entries in the queue, that leads to memory leak and slow shutdown.
|
||||
|
@ -204,7 +204,6 @@ void MergeTreeReaderCompact::readPrefix(
|
||||
else
|
||||
serialization = getSerializationInPart(name_and_type);
|
||||
|
||||
|
||||
deserialize_settings.getter = buffer_getter;
|
||||
serialization->deserializeBinaryBulkStatePrefix(deserialize_settings, deserialize_binary_bulk_state_map[name_and_type.name]);
|
||||
}
|
||||
|
@ -68,8 +68,8 @@ protected:
|
||||
const auto & part_name_column = StorageMergeTreeIndex::part_name_column;
|
||||
const auto & mark_number_column = StorageMergeTreeIndex::mark_number_column;
|
||||
const auto & rows_in_granule_column = StorageMergeTreeIndex::rows_in_granule_column;
|
||||
|
||||
const auto & index = part->getIndex();
|
||||
|
||||
Columns result_columns(num_columns);
|
||||
for (size_t pos = 0; pos < num_columns; ++pos)
|
||||
{
|
||||
@ -79,7 +79,19 @@ protected:
|
||||
if (index_header.has(column_name))
|
||||
{
|
||||
size_t index_position = index_header.getPositionByName(column_name);
|
||||
result_columns[pos] = index[index_position];
|
||||
|
||||
/// Some of the columns from suffix of primary index may be not loaded
|
||||
/// according to setting 'primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns'.
|
||||
if (index_position < index.size())
|
||||
{
|
||||
result_columns[pos] = index[index_position];
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto & index_type = index_header.getByPosition(index_position).type;
|
||||
auto index_column = index_type->createColumnConstWithDefaultValue(num_rows);
|
||||
result_columns[pos] = index_column->convertToFullColumnIfConst();
|
||||
}
|
||||
}
|
||||
else if (column_name == part_name_column.name)
|
||||
{
|
||||
|
@ -30,6 +30,7 @@ namespace
|
||||
COLUMN,
|
||||
NAMED_COLLECTION,
|
||||
USER_NAME,
|
||||
TABLE_ENGINE,
|
||||
};
|
||||
|
||||
DataTypeEnum8::Values getLevelEnumValues()
|
||||
@ -43,6 +44,7 @@ namespace
|
||||
enum_values.emplace_back("COLUMN", static_cast<Int8>(COLUMN));
|
||||
enum_values.emplace_back("NAMED_COLLECTION", static_cast<Int8>(NAMED_COLLECTION));
|
||||
enum_values.emplace_back("USER_NAME", static_cast<Int8>(USER_NAME));
|
||||
enum_values.emplace_back("TABLE_ENGINE", static_cast<Int8>(TABLE_ENGINE));
|
||||
return enum_values;
|
||||
}
|
||||
}
|
||||
|
@ -622,6 +622,20 @@ Chunk SystemZooKeeperSource::generate()
|
||||
ZooKeeperRetriesControl("", nullptr, retries_seetings, query_status).retryLoop(
|
||||
[&]() { get_responses = get_zookeeper()->tryGet(paths_to_get); });
|
||||
|
||||
    /// Add children count to the query's total rows. We cannot get the total rows in advance,
    /// because it is too heavy to get the row count for non-exact paths.
    /// Please be aware that there might be minor setbacks in the query progress,
    /// but overall it should reflect the advancement of the query.
|
||||
size_t children_count = 0;
|
||||
for (size_t i = 0, size = get_tasks.size(); i < size; ++i)
|
||||
{
|
||||
auto & res = get_responses[i];
|
||||
if (res.error == Coordination::Error::ZNONODE)
|
||||
continue; /// Node was deleted meanwhile.
|
||||
children_count += res.stat.numChildren;
|
||||
}
|
||||
addTotalRowsApprox(children_count);
|
||||
|
||||
for (size_t i = 0, size = get_tasks.size(); i < size; ++i)
|
||||
{
|
||||
auto & res = get_responses[i];
|
||||
|
@ -10,11 +10,8 @@ from pathlib import Path
|
||||
from shutil import copy2
|
||||
from typing import List, Optional, Union
|
||||
|
||||
# isort: off
|
||||
from github.Commit import Commit
|
||||
|
||||
# isort: on
|
||||
|
||||
from build_download_helper import download_build_with_progress
|
||||
from commit_status_helper import post_commit_status
|
||||
from compress_files import SUFFIX, compress_fast, decompress_fast
|
||||
|
@ -8,11 +8,8 @@ import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, List, Optional, Union
|
||||
|
||||
# isort: off
|
||||
import requests
|
||||
|
||||
# isort: on
|
||||
|
||||
import get_robot_token as grt # we need an updated ROBOT_TOKEN
|
||||
from ci_config import CI_CONFIG
|
||||
|
||||
@ -23,6 +20,10 @@ class DownloadException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class APIException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def get_with_retries(
|
||||
url: str,
|
||||
retries: int = DOWNLOAD_RETRIES_COUNT,
|
||||
@ -109,7 +110,7 @@ def get_gh_api(
|
||||
logging.info("Exception '%s' while getting, retry %i", exc, try_cnt)
|
||||
time.sleep(sleep)
|
||||
|
||||
raise exc
|
||||
raise APIException("Unable to request data from GH API") from exc
|
||||
|
||||
|
||||
def get_build_name_for_check(check_name: str) -> str:
|
||||
|
@ -50,7 +50,7 @@ def main():
|
||||
|
||||
builds_for_check = CI_CONFIG.get_builds_for_report(
|
||||
build_check_name,
|
||||
release=pr_info.is_release(),
|
||||
release=pr_info.is_release,
|
||||
backport=pr_info.head_ref.startswith("backport/"),
|
||||
)
|
||||
required_builds = len(builds_for_check)
|
||||
@ -139,7 +139,8 @@ def main():
|
||||
additional_files=[report_path],
|
||||
).dump()
|
||||
|
||||
if summary_status == ERROR:
|
||||
# We should fail the report job to rerun it in the following attempts
|
||||
if summary_status != SUCCESS:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
|
@ -17,7 +17,7 @@ from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union
|
||||
import docker_images_helper
|
||||
import upload_result_helper
|
||||
from build_check import get_release_or_pr
|
||||
from ci_config import CI_CONFIG, Build, CIStages, Labels, JobNames
|
||||
from ci_config import CI_CONFIG, Build, CIStages, JobNames, Labels
|
||||
from ci_utils import GHActions, is_hex, normalize_string
|
||||
from clickhouse_helper import (
|
||||
CiLogsCredentials,
|
||||
@ -49,7 +49,7 @@ from env_helper import (
|
||||
from get_robot_token import get_best_robot_token
|
||||
from git_helper import GIT_PREFIX, Git
|
||||
from git_helper import Runner as GitRunner
|
||||
from github import Github
|
||||
from github_helper import GitHub
|
||||
from pr_info import PRInfo
|
||||
from report import ERROR, SUCCESS, BuildResult, JobReport
|
||||
from s3_helper import S3Helper
|
||||
@ -770,7 +770,7 @@ class CiOptions:
|
||||
res = CiOptions()
|
||||
pr_info = PRInfo()
|
||||
if (
|
||||
not pr_info.is_pr() and not debug_message
|
||||
not pr_info.is_pr and not debug_message
|
||||
): # if commit_message is provided it's test/debug scenario - do not return
|
||||
# CI options can be configured in PRs only
|
||||
# if debug_message is provided - it's a test
|
||||
@ -1218,19 +1218,19 @@ def _mark_success_action(
|
||||
if job_config.run_always or job_config.run_by_label:
|
||||
print(f"Job [{job}] runs always or by label in CI - do not cache")
|
||||
else:
|
||||
if pr_info.is_master():
|
||||
if pr_info.is_master:
|
||||
pass
|
||||
# delete method is disabled for ci_cache. need it?
|
||||
# pending enabled for master branch jobs only
|
||||
# ci_cache.delete_pending(job, batch, num_batches, release_branch=True)
|
||||
if job_status and job_status.is_ok():
|
||||
ci_cache.push_successful(
|
||||
job, batch, num_batches, job_status, pr_info.is_release_branch()
|
||||
job, batch, num_batches, job_status, pr_info.is_release_branch
|
||||
)
|
||||
print(f"Job [{job}] is ok")
|
||||
elif job_status and not job_status.is_ok():
|
||||
ci_cache.push_failed(
|
||||
job, batch, num_batches, job_status, pr_info.is_release_branch()
|
||||
job, batch, num_batches, job_status, pr_info.is_release_branch
|
||||
)
|
||||
print(f"Job [{job}] is failed with status [{job_status.status}]")
|
||||
else:
|
||||
@ -1238,7 +1238,7 @@ def _mark_success_action(
|
||||
description="dummy description", status=ERROR, report_url="dummy url"
|
||||
)
|
||||
ci_cache.push_failed(
|
||||
job, batch, num_batches, job_status, pr_info.is_release_branch()
|
||||
job, batch, num_batches, job_status, pr_info.is_release_branch
|
||||
)
|
||||
print(f"No CommitStatusData for [{job}], push dummy failure to ci_cache")
|
||||
|
||||
@ -1354,9 +1354,9 @@ def _configure_jobs(
|
||||
batches_to_do: List[int] = []
|
||||
add_to_skip = False
|
||||
|
||||
if job_config.pr_only and pr_info.is_release_branch():
|
||||
if job_config.pr_only and pr_info.is_release_branch:
|
||||
continue
|
||||
if job_config.release_only and not pr_info.is_release_branch():
|
||||
if job_config.release_only and not pr_info.is_release_branch:
|
||||
continue
|
||||
|
||||
# fill job randomization buckets (for jobs with configured @random_bucket property))
|
||||
@ -1379,7 +1379,7 @@ def _configure_jobs(
|
||||
job,
|
||||
batch,
|
||||
num_batches,
|
||||
release_branch=pr_info.is_release_branch()
|
||||
release_branch=pr_info.is_release_branch
|
||||
and job_config.required_on_release_branch,
|
||||
):
|
||||
# ci cache is enabled and job is not in the cache - add
|
||||
@ -1390,7 +1390,7 @@ def _configure_jobs(
|
||||
job,
|
||||
batch,
|
||||
num_batches,
|
||||
release_branch=pr_info.is_release_branch()
|
||||
release_branch=pr_info.is_release_branch
|
||||
and job_config.required_on_release_branch,
|
||||
):
|
||||
if job in jobs_to_wait:
|
||||
@ -1413,7 +1413,7 @@ def _configure_jobs(
|
||||
# treat job as being skipped only if it's controlled by digest
|
||||
jobs_to_skip.append(job)
|
||||
|
||||
if not pr_info.is_release_branch():
|
||||
if not pr_info.is_release_branch:
|
||||
# randomization bucket filtering (pick one random job from each bucket, for jobs with configured random_bucket property)
|
||||
for _, jobs in randomization_buckets.items():
|
||||
jobs_to_remove_randomization = set()
|
||||
@ -1435,7 +1435,7 @@ def _configure_jobs(
|
||||
jobs_to_do, jobs_to_skip, jobs_params
|
||||
)
|
||||
|
||||
if pr_info.is_merge_queue():
|
||||
if pr_info.is_merge_queue:
|
||||
# FIXME: Quick support for MQ workflow which is only StyleCheck for now
|
||||
jobs_to_do = [JobNames.STYLE_CHECK]
|
||||
jobs_to_skip = []
|
||||
@ -1504,7 +1504,7 @@ def _update_gh_statuses_action(indata: Dict, s3: S3Helper) -> None:
|
||||
|
||||
# create GH status
|
||||
pr_info = PRInfo()
|
||||
commit = get_commit(Github(get_best_robot_token(), per_page=100), pr_info.sha)
|
||||
commit = get_commit(GitHub(get_best_robot_token(), per_page=100), pr_info.sha)
|
||||
|
||||
def _concurrent_create_status(job: str, batch: int, num_batches: int) -> None:
|
||||
job_status = ci_cache.get_successful(job, batch, num_batches)
|
||||
@ -1551,7 +1551,7 @@ def _fetch_commit_tokens(message: str, pr_info: PRInfo) -> List[str]:
|
||||
]
|
||||
print(f"CI modifyers from commit message: [{res}]")
|
||||
res_2 = []
|
||||
if pr_info.is_pr():
|
||||
if pr_info.is_pr:
|
||||
matches = [match[-1] for match in re.findall(pattern, pr_info.body)]
|
||||
res_2 = [
|
||||
match
|
||||
@ -1626,7 +1626,7 @@ def _upload_build_artifacts(
|
||||
|
||||
# Upload head master binaries
|
||||
static_bin_name = CI_CONFIG.build_config[build_name].static_binary_name
|
||||
if pr_info.is_master() and static_bin_name:
|
||||
if pr_info.is_master and static_bin_name:
|
||||
# Full binary with debug info:
|
||||
s3_path_full = "/".join((pr_info.base_ref, static_bin_name, "clickhouse-full"))
|
||||
binary_full = Path(job_report.build_dir_for_upload) / "clickhouse"
|
||||
@ -1908,11 +1908,11 @@ def main() -> int:
|
||||
if not args.skip_jobs:
|
||||
ci_cache = CiCache(s3, jobs_data["digests"])
|
||||
|
||||
if pr_info.is_master():
|
||||
if pr_info.is_master:
|
||||
# wait for pending jobs to be finished, await_jobs is a long blocking call
|
||||
# wait pending jobs (for now only on release/master branches)
|
||||
ready_jobs_batches_dict = ci_cache.await_jobs(
|
||||
jobs_data.get("jobs_to_wait", {}), pr_info.is_release_branch()
|
||||
jobs_data.get("jobs_to_wait", {}), pr_info.is_release_branch
|
||||
)
|
||||
jobs_to_do = jobs_data["jobs_to_do"]
|
||||
jobs_to_skip = jobs_data["jobs_to_skip"]
|
||||
@ -1929,7 +1929,7 @@ def main() -> int:
|
||||
del jobs_params[job]
|
||||
|
||||
# set planned jobs as pending in the CI cache if on the master
|
||||
if pr_info.is_master():
|
||||
if pr_info.is_master:
|
||||
for job in jobs_data["jobs_to_do"]:
|
||||
config = CI_CONFIG.get_job_config(job)
|
||||
if config.run_always or config.run_by_label:
|
||||
@ -1939,7 +1939,7 @@ def main() -> int:
|
||||
job,
|
||||
job_params["batches"],
|
||||
config.num_batches,
|
||||
release_branch=pr_info.is_release_branch(),
|
||||
release_branch=pr_info.is_release_branch,
|
||||
)
|
||||
|
||||
if "jobs_to_wait" in jobs_data:
|
||||
@ -1994,7 +1994,7 @@ def main() -> int:
|
||||
else:
|
||||
# this is a test job - check if GH commit status or cache record is present
|
||||
commit = get_commit(
|
||||
Github(get_best_robot_token(), per_page=100), pr_info.sha
|
||||
GitHub(get_best_robot_token(), per_page=100), pr_info.sha
|
||||
)
|
||||
|
||||
# rerun helper check
|
||||
@ -2110,7 +2110,7 @@ def main() -> int:
|
||||
additional_urls=additional_urls or None,
|
||||
)
|
||||
commit = get_commit(
|
||||
Github(get_best_robot_token(), per_page=100), pr_info.sha
|
||||
GitHub(get_best_robot_token(), per_page=100), pr_info.sha
|
||||
)
|
||||
post_commit_status(
|
||||
commit,
|
||||
@ -2121,7 +2121,7 @@ def main() -> int:
|
||||
pr_info,
|
||||
dump_to_file=True,
|
||||
)
|
||||
if not pr_info.is_merge_queue():
|
||||
if not pr_info.is_merge_queue:
|
||||
# in the merge queue mergeable status must be set only in FinishCheck (last job in wf)
|
||||
update_mergeable_check(
|
||||
commit,
|
||||
|
@ -9,7 +9,6 @@ from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
# isort: off
|
||||
from github import Github
|
||||
from github.Commit import Commit
|
||||
from github.CommitStatus import CommitStatus
|
||||
@ -18,8 +17,6 @@ from github.GithubObject import NotSet
|
||||
from github.IssueComment import IssueComment
|
||||
from github.Repository import Repository
|
||||
|
||||
# isort: on
|
||||
|
||||
from ci_config import CHECK_DESCRIPTIONS, REQUIRED_CHECKS, CheckDescription
|
||||
from env_helper import GITHUB_REPOSITORY, GITHUB_RUN_URL, TEMP_PATH
|
||||
from pr_info import SKIP_MERGEABLE_CHECK_LABEL, PRInfo
|
||||
@ -149,7 +146,7 @@ def set_status_comment(commit: Commit, pr_info: PRInfo) -> None:
|
||||
one, so the method does nothing for simple pushes and pull requests with
|
||||
`release`/`release-lts` labels"""
|
||||
|
||||
if pr_info.is_merge_queue():
|
||||
if pr_info.is_merge_queue:
|
||||
# skip report creation for the MQ
|
||||
return
|
||||
|
||||
@ -448,7 +445,7 @@ def update_mergeable_check(commit: Commit, pr_info: PRInfo, check_name: str) ->
|
||||
)
|
||||
|
||||
# FIXME: For now, always set mergeable check in the Merge Queue. It's required to pass MQ
|
||||
if not_run and not pr_info.is_merge_queue():
|
||||
if not_run and not pr_info.is_merge_queue:
|
||||
# Let's avoid unnecessary work
|
||||
return
|
||||
|
||||
|
@ -8,11 +8,8 @@ import sys
|
||||
from pathlib import Path
|
||||
from typing import List, Tuple
|
||||
|
||||
# isort: off
|
||||
from pip._vendor.packaging.version import Version
|
||||
|
||||
# isort: on
|
||||
|
||||
from build_download_helper import download_builds_filter
|
||||
from docker_images_helper import DockerImage, get_docker_image, pull_image
|
||||
from env_helper import REPORT_PATH, TEMP_PATH
|
||||
|
@ -8,11 +8,8 @@ import time
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
# isort: off
|
||||
from github import Github
|
||||
|
||||
# isort: on
|
||||
|
||||
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
|
||||
from commit_status_helper import format_description, get_commit, post_commit_status
|
||||
from docker_images_helper import DockerImageData, docker_login, get_images_oredered_list
|
||||
@ -225,9 +222,11 @@ def main():
|
||||
parent_version = (
|
||||
None
|
||||
if not image.parent
|
||||
else image_tags[image.parent]
|
||||
if not args.suffix
|
||||
else f"{image_tags[image.parent]}-{args.suffix}"
|
||||
else (
|
||||
image_tags[image.parent]
|
||||
if not args.suffix
|
||||
else f"{image_tags[image.parent]}-{args.suffix}"
|
||||
)
|
||||
)
|
||||
|
||||
res = process_single_image(
|
||||
|
@ -8,11 +8,8 @@ import subprocess
|
||||
import sys
|
||||
from typing import List, Tuple
|
||||
|
||||
# isort: off
|
||||
from github import Github
|
||||
|
||||
# isort: on
|
||||
|
||||
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
|
||||
from commit_status_helper import format_description, get_commit, post_commit_status
|
||||
from docker_images_helper import docker_login, get_images_oredered_list
|
||||
|
@ -362,7 +362,7 @@ def main():
|
||||
del args.image_repo
|
||||
del args.push
|
||||
|
||||
if pr_info.is_master():
|
||||
if pr_info.is_master:
|
||||
push = True
|
||||
|
||||
image = DockerImageData(image_path, image_repo, False)
|
||||
@ -374,9 +374,10 @@ def main():
|
||||
|
||||
for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")):
|
||||
if not args.bucket_prefix:
|
||||
repo_urls[
|
||||
arch
|
||||
] = f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/{release_or_pr}/{pr_info.sha}/{build_name}"
|
||||
repo_urls[arch] = (
|
||||
f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/"
|
||||
f"{release_or_pr}/{pr_info.sha}/{build_name}"
|
||||
)
|
||||
else:
|
||||
repo_urls[arch] = f"{args.bucket_prefix}/{build_name}"
|
||||
if args.allow_build_reuse:
|
||||
|
@ -5,7 +5,7 @@ import os
|
||||
from os import path as p
|
||||
from typing import Tuple
|
||||
|
||||
from build_download_helper import get_gh_api
|
||||
from build_download_helper import APIException, get_gh_api
|
||||
|
||||
module_dir = p.abspath(p.dirname(__file__))
|
||||
git_root = p.abspath(p.join(module_dir, "..", ".."))
|
||||
@ -42,23 +42,37 @@ _GITHUB_JOB_URL = ""
|
||||
_GITHUB_JOB_API_URL = ""
|
||||
|
||||
|
||||
def GITHUB_JOB_ID() -> str:
|
||||
def GITHUB_JOB_ID(safe: bool = True) -> str:
|
||||
global _GITHUB_JOB_ID
|
||||
global _GITHUB_JOB_URL
|
||||
global _GITHUB_JOB_API_URL
|
||||
if _GITHUB_JOB_ID:
|
||||
return _GITHUB_JOB_ID
|
||||
_GITHUB_JOB_ID, _GITHUB_JOB_URL, _GITHUB_JOB_API_URL = get_job_id_url(GITHUB_JOB)
|
||||
try:
|
||||
_GITHUB_JOB_ID, _GITHUB_JOB_URL, _GITHUB_JOB_API_URL = get_job_id_url(
|
||||
GITHUB_JOB
|
||||
)
|
||||
except APIException as e:
|
||||
logging.warning("Unable to retrieve the job info from GH API: %s", e)
|
||||
if not safe:
|
||||
raise e
|
||||
return _GITHUB_JOB_ID
|
||||
|
||||
|
||||
def GITHUB_JOB_URL() -> str:
|
||||
GITHUB_JOB_ID()
|
||||
def GITHUB_JOB_URL(safe: bool = True) -> str:
|
||||
try:
|
||||
GITHUB_JOB_ID()
|
||||
except APIException:
|
||||
if safe:
|
||||
logging.warning("Using run URL as a fallback to not fail the job")
|
||||
return GITHUB_RUN_URL
|
||||
raise
|
||||
|
||||
return _GITHUB_JOB_URL
|
||||
|
||||
|
||||
def GITHUB_JOB_API_URL() -> str:
|
||||
GITHUB_JOB_ID()
|
||||
def GITHUB_JOB_API_URL(safe: bool = True) -> str:
|
||||
GITHUB_JOB_ID(safe)
|
||||
return _GITHUB_JOB_API_URL
|
||||
|
||||
|
||||
@ -93,7 +107,6 @@ def get_job_id_url(job_name: str) -> Tuple[str, str, str]:
|
||||
):
|
||||
job_id = "0"
|
||||
|
||||
# FIXME: until it's here, we can't move to reusable workflows
|
||||
if not job_url:
|
||||
# This is a terrible workaround for the case of another broken part of
|
||||
# GitHub actions. For nested workflows it doesn't provide a proper job_name
|
||||
|
@ -1,11 +1,8 @@
|
||||
#!/usr/bin/env python3
|
||||
import logging
|
||||
|
||||
# isort: off
|
||||
from github import Github
|
||||
|
||||
# isort: on
|
||||
|
||||
from commit_status_helper import (
|
||||
CI_STATUS_NAME,
|
||||
get_commit,
|
||||
@ -28,7 +25,7 @@ def main():
|
||||
statuses = get_commit_filtered_statuses(commit)
|
||||
trigger_mergeable_check(commit, statuses)
|
||||
|
||||
if not pr_info.is_merge_queue():
|
||||
if not pr_info.is_merge_queue:
|
||||
statuses = [s for s in statuses if s.context == CI_STATUS_NAME]
|
||||
if not statuses:
|
||||
return
|
||||
|
@ -200,7 +200,7 @@ def main():
|
||||
# always use latest
|
||||
docker_image = KEEPER_IMAGE_NAME if args.program == "keeper" else SERVER_IMAGE_NAME
|
||||
|
||||
if pr_info.is_scheduled() or pr_info.is_dispatched():
|
||||
if pr_info.is_scheduled or pr_info.is_dispatched:
|
||||
# get latest clcikhouse by the static link for latest master buit - get its version and provide permanent url for this version to the jepsen
|
||||
build_url = f"{S3_URL}/{S3_BUILDS_BUCKET}/master/amd64/clickhouse"
|
||||
download_build_with_progress(build_url, Path(TEMP_PATH) / "clickhouse")
|
||||
|
@ -9,13 +9,10 @@ from os import getenv
from pprint import pformat
from typing import Dict, List

# isort: off
from github.PaginatedList import PaginatedList
from github.PullRequestReview import PullRequestReview
from github.WorkflowRun import WorkflowRun

# isort: on

from commit_status_helper import get_commit_filtered_statuses
from get_robot_token import get_best_robot_token
from github_helper import GitHub, NamedUser, PullRequest, Repository

@ -9,11 +9,8 @@ import sys
import traceback
from pathlib import Path

# isort: off
from github import Github

# isort: on

from build_download_helper import download_builds_filter
from ci_config import CI_CONFIG
from clickhouse_helper import get_instance_id, get_instance_type

@ -6,12 +6,8 @@ import re
from typing import Dict, List, Set, Union
from urllib.parse import quote

# isort: off
# for some reason this line moves to the end
from unidiff import PatchSet # type: ignore

# isort: on

from build_download_helper import get_gh_api
from env_helper import (
GITHUB_EVENT_PATH,
@ -310,27 +306,34 @@ class PRInfo:
if need_changed_files:
self.fetch_changed_files()

@property
def is_master(self) -> bool:
return self.number == 0 and self.head_ref == "master"

@property
def is_release(self) -> bool:
return self.number == 0 and bool(
re.match(r"^2[1-9]\.[1-9][0-9]*$", self.head_ref)
)

@property
def is_release_branch(self) -> bool:
return self.number == 0

@property
def is_pr(self):
return self.event_type == EventType.PULL_REQUEST

def is_scheduled(self):
@property
def is_scheduled(self) -> bool:
return self.event_type == EventType.SCHEDULE

def is_merge_queue(self):
@property
def is_merge_queue(self) -> bool:
return self.event_type == EventType.MERGE_QUEUE

def is_dispatched(self):
@property
def is_dispatched(self) -> bool:
return self.event_type == EventType.DISPATCH

def compare_pr_url(self, pr_object: dict) -> str:

@ -3,11 +3,9 @@ import logging
import sys
from typing import Tuple

# isort: off
from github import Github

# isort: on

from cherry_pick import Labels
from commit_status_helper import (
CI_STATUS_NAME,
create_ci_report,
@ -26,7 +24,6 @@ from lambda_shared_package.lambda_shared.pr import (
)
from pr_info import PRInfo
from report import FAILURE, PENDING, SUCCESS
from cherry_pick import Labels

TRUSTED_ORG_IDS = {
54801242, # clickhouse
@ -202,7 +199,7 @@ def main():
ci_report_url = create_ci_report(pr_info, [])
print("::notice ::Can run")

if not pr_info.is_merge_queue():
if not pr_info.is_merge_queue:
# we need clean CI status for MQ to merge (no pending statuses)
post_commit_status(
commit,

@ -132,7 +132,7 @@ def main():

pr_info = PRInfo()

if pr_info.is_merge_queue() and args.push:
if pr_info.is_merge_queue and args.push:
print("Auto style fix will be disabled for Merge Queue workflow")
args.push = False


@ -5,5 +5,6 @@
<select_from_system_db_requires_grant>true</select_from_system_db_requires_grant>
<select_from_information_schema_requires_grant>true</select_from_information_schema_requires_grant>
<settings_constraints_replace_previous>true</settings_constraints_replace_previous>
<table_engines_require_grant>true</table_engines_require_grant>
</access_control_improvements>
</clickhouse>

@ -1036,6 +1036,7 @@ def test_required_privileges():
)

instance.query("GRANT INSERT, CREATE ON test.table2 TO u1")
instance.query("GRANT TABLE ENGINE ON MergeTree TO u1")
instance.query(
f"RESTORE TABLE test.table AS test.table2 FROM {backup_name}", user="u1"
)

@ -300,11 +300,13 @@ def test_allowed_databases(test_cluster):
"CREATE TABLE db2.t2 ON CLUSTER cluster (i Int8) ENGINE = Memory",
settings={"user": "restricted_user"},
)

with pytest.raises(Exception):
instance.query(
"CREATE TABLE t3 ON CLUSTER cluster (i Int8) ENGINE = Memory",
settings={"user": "restricted_user"},
)

with pytest.raises(Exception):
instance.query(
"DROP DATABASE db2 ON CLUSTER cluster", settings={"user": "restricted_user"}

@ -0,0 +1,5 @@
<clickhouse>
<access_control_improvements>
<table_engines_require_grant>true</table_engines_require_grant>
</access_control_improvements>
</clickhouse>
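The new `table_engines_require_grant` option above makes the table engine itself a grantable entity, which is what the tests below exercise. A minimal SQL sketch of the resulting workflow, assuming the option is enabled; the user and table names (`alice`, `db1.events`) are illustrative only:

``` sql
-- Sketch only: names are illustrative, not taken from the tests.
CREATE USER alice;
GRANT CREATE TABLE ON db1.events TO alice;
-- With table_engines_require_grant enabled, CREATE TABLE alone is not enough:
GRANT TABLE ENGINE ON MergeTree TO alice;
-- alice can now create a MergeTree table:
-- CREATE TABLE db1.events (d Date, x UInt32) ENGINE = MergeTree ORDER BY d;
```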
@ -5,9 +5,8 @@ from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
"instance",
user_configs=[
"configs/users.d/users.xml",
],
main_configs=["configs/config.xml"],
user_configs=["configs/users.d/users.xml"],
)


@ -370,6 +369,7 @@ def test_implicit_create_temporary_table_grant():
)

instance.query("GRANT CREATE TABLE ON test.* TO A")
instance.query("GRANT TABLE ENGINE ON Memory TO A")
instance.query("CREATE TEMPORARY TABLE tmp(name String)", user="A")

instance.query("REVOKE CREATE TABLE ON *.* FROM A")
@ -718,3 +718,74 @@ def test_current_grants_override():
"REVOKE SELECT ON test.* FROM B",
]
)


def test_table_engine_grant_and_revoke():
instance.query("DROP USER IF EXISTS A")
instance.query("CREATE USER A")
instance.query("GRANT CREATE TABLE ON test.table1 TO A")
assert "Not enough privileges" in instance.query_and_get_error(
"CREATE TABLE test.table1(a Integer) engine=TinyLog", user="A"
)

instance.query("GRANT TABLE ENGINE ON TinyLog TO A")

instance.query("CREATE TABLE test.table1(a Integer) engine=TinyLog", user="A")

assert instance.query("SHOW GRANTS FOR A") == TSV(
[
"GRANT TABLE ENGINE ON TinyLog TO A",
"GRANT CREATE TABLE ON test.table1 TO A",
]
)

instance.query("REVOKE TABLE ENGINE ON TinyLog FROM A")

assert "Not enough privileges" in instance.query_and_get_error(
"CREATE TABLE test.table1(a Integer) engine=TinyLog", user="A"
)

instance.query("REVOKE CREATE TABLE ON test.table1 FROM A")
instance.query("DROP TABLE test.table1")

assert instance.query("SHOW GRANTS FOR A") == TSV([])


def test_table_engine_and_source_grant():
instance.query("DROP USER IF EXISTS A")
instance.query("CREATE USER A")
instance.query("GRANT CREATE TABLE ON test.table1 TO A")

instance.query("GRANT TABLE ENGINE ON PostgreSQL TO A")

instance.query(
"""
CREATE TABLE test.table1(a Integer)
engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy');
""",
user="A",
)

instance.query("DROP TABLE test.table1")

instance.query("REVOKE TABLE ENGINE ON PostgreSQL FROM A")

assert "Not enough privileges" in instance.query_and_get_error(
"""
CREATE TABLE test.table1(a Integer)
engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy');
""",
user="A",
)

instance.query("GRANT SOURCES ON *.* TO A")

instance.query(
"""
CREATE TABLE test.table1(a Integer)
engine=PostgreSQL('localhost:5432', 'dummy', 'dummy', 'dummy', 'dummy');
""",
user="A",
)

instance.query("DROP TABLE test.table1")

@ -621,6 +621,7 @@ def test_allow_ddl():
)

instance.query("GRANT CREATE ON tbl TO robin")
instance.query("GRANT TABLE ENGINE ON Log TO robin")
instance.query("CREATE TABLE tbl(a Int32) ENGINE=Log", user="robin")
instance.query("DROP TABLE tbl")


@ -1,5 +1,6 @@
-- Tags: zookeeper, no-replicated-database
-- Tags: zookeeper, no-replicated-database, no-shared-merge-tree
-- Tag no-replicated-database: Old syntax is not allowed
-- no-shared-merge-tree: implemented replacement

DROP TABLE IF EXISTS alter_00121 SYNC;
set allow_deprecated_syntax_for_merge_tree=1;

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: long, no-debug
# Tags: long, no-debug, no-asan, no-tsan, no-msan

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh

@ -1,5 +1,6 @@
-- Tags: long, zookeeper, no-replicated-database
-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
-- Tag no-replicated-database: Old syntax is not allowed
-- no-shared-merge-tree: implemented replacement

DROP TABLE IF EXISTS primary_key;
set allow_deprecated_syntax_for_merge_tree=1;

@ -1,5 +1,6 @@
-- Tags: long, zookeeper, no-replicated-database
-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
-- Tag no-replicated-database: Old syntax is not allowed
-- no-shared-merge-tree: implemented replacement

DROP TABLE IF EXISTS deduplication;
set allow_deprecated_syntax_for_merge_tree=1;

@ -1,5 +1,6 @@
-- Tags: long, replica, no-replicated-database
-- Tags: long, replica, no-replicated-database, no-shared-merge-tree
-- Tag no-replicated-database: Old syntax is not allowed
-- no-shared-merge-tree: implemented replacement

SET replication_alter_partitions_sync = 2;


@ -1,6 +1,7 @@
#!/usr/bin/env bash
# Tags: zookeeper, no-replicated-database
# Tags: zookeeper, no-replicated-database, no-shared-merge-tree
# Tag no-replicated-database: Old syntax is not allowed
# no-shared-merge-tree -- old syntax

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh

@ -1,4 +1,5 @@
-- Tags: long, zookeeper
-- Tags: long, zookeeper, no-shared-merge-tree
-- no-shared-merge-tree: different synchronization, replaced with another test

SELECT '===Ordinary case===';


@ -1,4 +1,5 @@
-- Tags: long, replica
-- Tags: long, replica, no-shared-merge-tree
-- no-shared-merge-tree: different synchronization

SET replication_alter_partitions_sync = 2;
SET insert_keeper_fault_injection_probability=0;

@ -1,4 +1,5 @@
-- Tags: zookeeper
-- Tags: zookeeper, no-shared-merge-tree
-- no-shared-merge-tree: boring test, nothing new

SET optimize_on_insert = 0;


@ -1,7 +1,6 @@
-- Tags: zookeeper

DROP TABLE IF EXISTS deduplication_by_partition;
set allow_deprecated_syntax_for_merge_tree=1;
CREATE TABLE deduplication_by_partition(d Date, x UInt32) ENGINE =
ReplicatedMergeTree('/clickhouse/tables/{database}/test_00516/deduplication_by_partition', 'r1') order by x partition by toYYYYMM(d);


@ -103,6 +103,7 @@ SHOW NAMED COLLECTIONS SECRETS ['SHOW NAMED COLLECTIONS SECRETS'] NAMED_COLLECTI
NAMED COLLECTION ['NAMED COLLECTION USAGE','USE NAMED COLLECTION'] NAMED_COLLECTION NAMED COLLECTION ADMIN
NAMED COLLECTION ADMIN ['NAMED COLLECTION CONTROL'] NAMED_COLLECTION ALL
SET DEFINER [] USER_NAME ALL
TABLE ENGINE ['TABLE ENGINE'] TABLE_ENGINE ALL
SYSTEM SHUTDOWN ['SYSTEM KILL','SHUTDOWN'] GLOBAL SYSTEM
SYSTEM DROP DNS CACHE ['SYSTEM DROP DNS','DROP DNS CACHE','DROP DNS'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP CONNECTIONS CACHE ['SYSTEM DROP CONNECTIONS CACHE','DROP CONNECTIONS CACHE'] GLOBAL SYSTEM DROP CACHE

@ -9,6 +9,7 @@ ${CLICKHOUSE_CLIENT} -q "create table mute_stylecheck (x UInt32) engine = Replic

${CLICKHOUSE_CLIENT} -q "CREATE USER user_${CLICKHOUSE_DATABASE} settings database_replicated_allow_only_replicated_engine=1"
${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON Memory, TABLE ENGINE ON MergeTree, TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} --allow_experimental_database_replicated=1 --query "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_memory (x UInt32) engine = Memory;"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine"

@ -12,6 +12,7 @@ ${CLICKHOUSE_CLIENT} -q "create table mute_stylecheck (x UInt32) engine = Replic

${CLICKHOUSE_CLIENT} -q "CREATE USER user_${CLICKHOUSE_DATABASE} settings database_replicated_allow_replicated_engine_arguments=0"
${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} --allow_experimental_database_replicated=1 --query "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }"

@ -13,21 +13,24 @@ $CLICKHOUSE_CLIENT --query "CREATE USER $user IDENTIFIED WITH PLAINTEXT_PASSWORD
$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_memory_02561(name String)" 2>&1 | grep -F "Not enough privileges. To execute this query, it's necessary to have the grant CREATE TEMPORARY TABLE" > /dev/null && echo "OK"

$CLICKHOUSE_CLIENT --query "GRANT CREATE TEMPORARY TABLE ON *.* TO $user"
$CLICKHOUSE_CLIENT --query "GRANT TABLE ENGINE ON Memory TO $user"

$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_memory_02561(name String)"

$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_merge_tree_02561(name String) ENGINE = MergeTree() ORDER BY name" 2>&1 | grep -F "Not enough privileges. To execute this query, it's necessary to have the grant CREATE ARBITRARY TEMPORARY TABLE" > /dev/null && echo "OK"

$CLICKHOUSE_CLIENT --query "GRANT CREATE ARBITRARY TEMPORARY TABLE ON *.* TO $user"
$CLICKHOUSE_CLIENT --query "GRANT TABLE ENGINE ON MergeTree TO $user"

$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_merge_tree_02561(name String) ENGINE = MergeTree() ORDER BY name"

$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_file_02561(name String) ENGINE = File(TabSeparated)" 2>&1 | grep -F "Not enough privileges. To execute this query, it's necessary to have the grant FILE" > /dev/null && echo "OK"
$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_file_02561(name String) ENGINE = File(TabSeparated)" 2>&1 | grep -F "Not enough privileges. To execute this query, it's necessary to have the grant TABLE ENGINE ON File" > /dev/null && echo "OK"

$CLICKHOUSE_CLIENT --query "GRANT FILE ON *.* TO $user"

$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_file_02561(name String) ENGINE = File(TabSeparated)"

$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_url_02561(name String) ENGINE = URL('http://127.0.0.1:8123?query=select+12', 'RawBLOB')" 2>&1 | grep -F "Not enough privileges. To execute this query, it's necessary to have the grant URL" > /dev/null && echo "OK"
$CLICKHOUSE_CLIENT --user $user --password hello --query "CREATE TEMPORARY TABLE table_url_02561(name String) ENGINE = URL('http://127.0.0.1:8123?query=select+12', 'RawBLOB')" 2>&1 | grep -F "Not enough privileges. To execute this query, it's necessary to have the grant TABLE ENGINE ON URL" > /dev/null && echo "OK"

$CLICKHOUSE_CLIENT --query "GRANT URL ON *.* TO $user"

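The shell test above also shows that the privilege errors now name the missing engine grant (`TABLE ENGINE ON File`, `TABLE ENGINE ON URL`) rather than the old `FILE`/`URL` wording. A hedged SQL restatement of the grants it walks through; `bob` is an illustrative user name, not one from the test:

``` sql
-- Sketch based on the test above; not an authoritative reference.
CREATE USER bob;
GRANT CREATE TEMPORARY TABLE ON *.* TO bob;
GRANT TABLE ENGINE ON Memory TO bob;
-- bob may now run: CREATE TEMPORARY TABLE t (name String);
GRANT CREATE ARBITRARY TEMPORARY TABLE ON *.* TO bob;
GRANT TABLE ENGINE ON MergeTree TO bob;
-- ...and: CREATE TEMPORARY TABLE t2 (name String) ENGINE = MergeTree ORDER BY name;
```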
@ -12,10 +12,11 @@ function install_test_backup()
local test_backup_filename="$1"
local test_backup_path="$CURDIR/backups/${test_backup_filename}"

local backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'")
local backups_disk_root
backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'")

if [ -z "${backups_disk_root}" ]; then
echo Disk \'${backups_disk_root}\' not found
echo "Disk '${backups_disk_root}' not found"
exit 1
fi


@ -12,10 +12,11 @@ function install_test_backup()
local test_backup_filename="$1"
local test_backup_path="$CURDIR/backups/${test_backup_filename}"

local backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'")
local backups_disk_root
backups_disk_root=$($CLICKHOUSE_CLIENT --query "SELECT path FROM system.disks WHERE name='backups'")

if [ -z "${backups_disk_root}" ]; then
echo Disk \'${backups_disk_root}\' not found
echo "Disk '${backups_disk_root}' not found"
exit 1
fi


@ -15,7 +15,7 @@ function start_async()
{
local command="$1"
local first_column="s/^\([^\t]*\)\t.*/\1/"
echo $(${CLICKHOUSE_CLIENT} --query "$command" | sed "${first_column}")
${CLICKHOUSE_CLIENT} --query "$command" | sed "${first_column}"
}

function wait_status()
@ -25,7 +25,8 @@ function wait_status()
local timeout=60
local start=$EPOCHSECONDS
while true; do
local current_status=$(${CLICKHOUSE_CLIENT} --query "SELECT status FROM system.backups WHERE id='${operation_id}'")
local current_status
current_status=$(${CLICKHOUSE_CLIENT} --query "SELECT status FROM system.backups WHERE id='${operation_id}'")
if [ "${current_status}" == "${expected_status}" ]; then
echo "${current_status}"
break
@ -41,11 +42,11 @@ function wait_status()
# Making a backup.
backup_name="Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}')"
backup_operation_id=$(start_async "BACKUP TABLE tbl TO ${backup_name} ASYNC")
wait_status ${backup_operation_id} "BACKUP_CREATED"
wait_status "${backup_operation_id}" "BACKUP_CREATED"

# Restoring from that backup.
restore_operation_id=$(start_async "RESTORE TABLE tbl AS tbl2 FROM ${backup_name} ASYNC")
wait_status ${restore_operation_id} "RESTORED"
wait_status "${restore_operation_id}" "RESTORED"

# Check the result of that restoration.
${CLICKHOUSE_CLIENT} --query "SELECT * FROM tbl2"

@ -19,7 +19,6 @@ prev_max_size=$($CLICKHOUSE_CLIENT --query "SELECT max_size FROM system.filesyst
$CLICKHOUSE_CLIENT --query "SELECT current_size > 0 FROM system.filesystem_cache_settings WHERE cache_name = '$disk_name' FORMAT TabSeparated"

config_path=/etc/clickhouse-server/config.d/storage_conf.xml
config_path_tmp=$config_path.tmp

new_max_size=$($CLICKHOUSE_CLIENT --query "SELECT divide(max_size, 2) FROM system.filesystem_cache_settings WHERE cache_name = '$disk_name'")
sed -i "s|<max_size>$prev_max_size<\/max_size>|<max_size>$new_max_size<\/max_size>|" $config_path

@ -5,5 +5,5 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

SCHEMADIR=$CURDIR/format_schemas
$CLICKHOUSE_LOCAL -q "DESCRIBE TABLE file('nonexist', 'Protobuf') SETTINGS format_schema='$SCHEMADIR/03094_recursive_type.proto:Struct'" |& grep -c CANNOT_PARSE_PROTOBUF_SCHEMA
SCHEMADIR="$CUR_DIR/format_schemas"
$CLICKHOUSE_LOCAL -q "DESCRIBE TABLE file('nonexist', 'Protobuf') SETTINGS format_schema='$SCHEMADIR/03094_recursive_type.proto:Struct'" |& grep -c BAD_ARGUMENTS

@ -0,0 +1,4 @@
1 str_1
1 str_2
1 1
1 2
@ -0,0 +1,7 @@
set allow_experimental_variant_type=1;
drop table if exists test;
create table test (id UInt64, v Variant(UInt64, String)) engine=MergeTree order by (id, v);
insert into test values (1, 1), (1, 'str_1'), (1, 2), (1, 'str_2');
select * from test;
drop table test;

@ -0,0 +1,12 @@
AggregateFunction(sumArgMin, UInt64, UInt64)
54
0 45
1 46
2 47
3 48
4 49
5 50
6 51
7 52
8 53
9 54
tests/queries/0_stateless/03127_argMin_combinator_state.sql
@ -0,0 +1,22 @@
SELECT toTypeName(sumArgMinState(number, number)) FROM numbers(1);
SELECT sumArgMinState(number, number) AS a FROM numbers(3) FORMAT Null;

DROP TABLE IF EXISTS argmax_comb;
CREATE TABLE argmax_comb(
id UInt64,
state AggregateFunction(avgArgMax, Float64, UInt64)
)
ENGINE=MergeTree() ORDER BY tuple();
INSERT INTO argmax_comb
SELECT
CAST(number % 10, 'UInt64') AS id,
avgArgMaxState(CAST(number, 'Float64'), id)
FROM numbers(100)
GROUP BY id;
SELECT avgArgMaxMerge(state) FROM argmax_comb;
SELECT
id,
avgArgMaxMerge(state)
FROM argmax_comb
GROUP BY id
ORDER BY id ASC;
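The test above combines the new -ArgMin/-ArgMax aggregate-function combinators with -State/-Merge. A small standalone sketch of the same idea; my reading of the combinator is that the outer aggregate is applied to its first argument only over rows where the second argument is minimal (or maximal), so treat the exact semantics as an assumption rather than a reference:

``` sql
-- Sketch only; illustrative query, not taken from the test files.
SELECT
    sumArgMin(number, number % 3) AS sum_where_mod_is_min,
    avgArgMax(number, number % 3) AS avg_where_mod_is_max
FROM numbers(10);
```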
@ -0,0 +1,20 @@
0 2024-01-01 00:00:00 2024-03-31 00:00:00
1 2024-01-02 00:00:00 2024-04-01 00:00:00
2 2024-01-03 00:00:00 2024-04-02 00:00:00
3 2024-01-04 00:00:00 2024-04-03 00:00:00
4 2024-01-05 00:00:00 2024-04-04 00:00:00
5 2024-01-06 00:00:00 2024-04-05 00:00:00
6 2024-01-07 00:00:00 2024-04-06 00:00:00
7 2024-01-08 00:00:00 2024-04-07 00:00:00
8 2024-01-09 00:00:00 2024-04-08 00:00:00
9 2024-01-10 00:00:00 2024-04-09 00:00:00
0 2024-01-01 00:00:00 2024-03-31 00:00:00
1 2024-01-02 00:00:00 2024-04-01 00:00:00
2 2024-01-03 00:00:00 2024-04-02 00:00:00
3 2024-01-04 00:00:00 2024-04-03 00:00:00
4 2024-01-05 00:00:00 2024-04-04 00:00:00
5 2024-01-06 00:00:00 2024-04-05 00:00:00
6 2024-01-07 00:00:00 2024-04-06 00:00:00
7 2024-01-08 00:00:00 2024-04-07 00:00:00
8 2024-01-09 00:00:00 2024-04-08 00:00:00
9 2024-01-10 00:00:00 2024-04-09 00:00:00
@ -0,0 +1,73 @@
DROP TABLE IF EXISTS combinator_argMin_table_r1 SYNC;
DROP TABLE IF EXISTS combinator_argMin_table_r2 SYNC;

CREATE TABLE combinator_argMin_table_r1
(
`id` Int32,
`value` Int32,
`agg_time` DateTime,
PROJECTION first_items
(
SELECT
id,
minArgMin(agg_time, value),
maxArgMax(agg_time, value)
GROUP BY id
)
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03128/combinator_argMin_table', 'r1')
ORDER BY (id);

INSERT INTO combinator_argMin_table_r1
SELECT
number % 10 as id,
number as value,
'01-01-2024 00:00:00' + INTERVAL number DAY
FROM
numbers(100);

INSERT INTO combinator_argMin_table_r1
SELECT
number % 10 as id,
number * 10 as value,
'01-01-2024 00:00:00' + INTERVAL number DAY
FROM
numbers(100);

SELECT
id,
minArgMin(agg_time, value),
maxArgMax(agg_time, value)
FROM combinator_argMin_table_r1
GROUP BY id
ORDER BY id
SETTINGS force_optimize_projection=1;

-- We check replication by creating another replica
CREATE TABLE combinator_argMin_table_r2
(
`id` Int32,
`value` Int32,
`agg_time` DateTime,
PROJECTION first_items
(
SELECT
id,
minArgMin(agg_time, value),
maxArgMax(agg_time, value)
GROUP BY id
)
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03128/combinator_argMin_table', 'r2')
ORDER BY (id);

SYSTEM SYNC REPLICA combinator_argMin_table_r2;

SELECT
id,
minArgMin(agg_time, value),
maxArgMax(agg_time, value)
FROM combinator_argMin_table_r2
GROUP BY id
ORDER BY id
SETTINGS force_optimize_projection=1;
@ -0,0 +1,8 @@
0 0 0
1 4 4
2 8 8
3 9 9
0 0 0
1 4 0
2 8 0
3 9 0
@ -0,0 +1,16 @@
DROP TABLE IF EXISTS t_index_lazy_load;

CREATE TABLE t_index_lazy_load (a UInt64, b UInt64)
ENGINE = MergeTree ORDER BY (a, b)
SETTINGS index_granularity = 4, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 0.5;

INSERT INTO t_index_lazy_load SELECT number, number FROM numbers(10);

SELECT mark_number, a, b FROM mergeTreeIndex(currentDatabase(), t_index_lazy_load) ORDER BY mark_number;

DETACH TABLE t_index_lazy_load;
ATTACH TABLE t_index_lazy_load;

SELECT mark_number, a, b FROM mergeTreeIndex(currentDatabase(), t_index_lazy_load) ORDER BY mark_number;

DROP TABLE t_index_lazy_load;
@ -10,11 +10,8 @@ import multiprocessing
import os
from functools import reduce

# isort: off
from deepdiff import DeepDiff # pylint:disable=import-error; for style check

# isort: on

from connection import Engines, default_clickhouse_odbc_conn_str, setup_connection
from test_runner import RequestType, Status, TestRunner


@ -7,14 +7,10 @@ from functools import reduce
from hashlib import md5
from itertools import chain

# isort: off
# pylint:disable=import-error; for style check
import sqlglot
from sqlglot.expressions import ColumnDef, PrimaryKeyColumnConstraint

# pylint:enable=import-error; for style check
# isort: on

from exceptions import (
DataResultDiffer,
Error,
@ -23,6 +19,9 @@ from exceptions import (
QueryExecutionError,
)

# pylint:enable=import-error; for style check


logger = logging.getLogger("parser")
logger.setLevel(logging.DEBUG)

@ -474,9 +473,11 @@ class QueryResult:
f"values_count: {self.values_count}" if self.values_count else "",
f"data_hash: {self.data_hash}" if self.data_hash else "",
f"exception: {self.exception}" if self.exception else "",
f"hash_threshold: {self.hash_threshold}"
if self.hash_threshold
else "",
(
f"hash_threshold: {self.hash_threshold}"
if self.hash_threshold
else ""
),
]
if x
)

@ -949,6 +949,7 @@ TablesLoaderForegroundThreads
TablesLoaderForegroundThreadsActive
TablesToDropQueueSize
TargetSpecific
tanh
Telegraf
TemplateIgnoreSpaces
TemporaryFilesForAggregation

@ -5,7 +5,7 @@ cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_ou
start_total=$(date +%s)

start=$(date +%s)
./shellcheck-run.sh |& tee /test_output/shellcheck.txt
./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt
runtime=$(($(date +%s)-start))
echo "Check shellcheck. Done. $runtime seconds."
