Merge remote-tracking branch 'origin/master' into pr-right-joins
Commit 77bd0b2903
@@ -28,7 +28,7 @@ COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
ENV LC_ALL en_US.UTF-8
ENV LC_ALL=en_US.UTF-8

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
@@ -12,6 +12,7 @@ charset-normalizer==3.3.2
click==8.1.7
codespell==2.2.1
cryptography==43.0.1
datacompy==0.7.3
Deprecated==1.2.14
dill==0.3.8
flake8==4.0.1
@@ -23,6 +24,7 @@ mccabe==0.6.1
multidict==6.0.5
mypy==1.8.0
mypy-extensions==1.0.0
pandas==2.2.3
packaging==24.1
pathspec==0.9.0
pip==24.1.1
@@ -18,6 +18,11 @@ Columns:
- `1` — Current user can’t change the setting.
- `type` ([String](../../sql-reference/data-types/string.md)) — Setting type (implementation specific string value).
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) - Shows whether a setting is obsolete.
- `tier` ([Enum8](../../sql-reference/data-types/enum.md)) — Support level for this feature. ClickHouse features are organized in tiers, varying depending on the current status of their development and the expectations one might have when using them. Values:
- `'Production'` — The feature is stable, safe to use and does not have issues interacting with other **production** features.
- `'Beta'` — The feature is stable and safe. The outcome of using it together with other features is unknown and correctness is not guaranteed. Testing and reports are welcome.
- `'Experimental'` — The feature is under development. Only intended for developers and ClickHouse enthusiasts. The feature might or might not work and could be removed at any time.
- `'Obsolete'` — No longer supported. Either it is already removed or it will be removed in future releases.

**Example**
```sql
@@ -18,6 +18,11 @@ Columns:
- `1` — Current user can’t change the setting.
- `default` ([String](../../sql-reference/data-types/string.md)) — Setting default value.
- `is_obsolete` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) - Shows whether a setting is obsolete.
- `tier` ([Enum8](../../sql-reference/data-types/enum.md)) — Support level for this feature. ClickHouse features are organized in tiers, varying depending on the current status of their development and the expectations one might have when using them. Values:
- `'Production'` — The feature is stable, safe to use and does not have issues interacting with other **production** features.
- `'Beta'` — The feature is stable and safe. The outcome of using it together with other features is unknown and correctness is not guaranteed. Testing and reports are welcome.
- `'Experimental'` — The feature is under development. Only intended for developers and ClickHouse enthusiasts. The feature might or might not work and could be removed at any time.
- `'Obsolete'` — No longer supported. Either it is already removed or it will be removed in future releases.

**Example**

@@ -26,19 +31,99 @@ The following example shows how to get information about settings which name contains
``` sql
SELECT *
FROM system.settings
WHERE name LIKE '%min_i%'
WHERE name LIKE '%min_insert_block_size_%'
FORMAT Vertical
```

``` text
┌─name─┬─value─┬─changed─┬─description─┬─min─┬─max─┬─readonly─┬─type─┬─default─┬─alias_for─┬─is_obsolete─┐
│ min_insert_block_size_rows │ 1048449 │ 0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 1048449 │ │ 0 │
│ min_insert_block_size_bytes │ 268402944 │ 0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 268402944 │ │ 0 │
│ min_insert_block_size_rows_for_materialized_views │ 0 │ 0 │ Like min_insert_block_size_rows, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_rows) │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 0 │ │ 0 │
│ min_insert_block_size_bytes_for_materialized_views │ 0 │ 0 │ Like min_insert_block_size_bytes, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_bytes) │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ UInt64 │ 0 │ │ 0 │
│ read_backoff_min_interval_between_events_ms │ 1000 │ 0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 0 │ Milliseconds │ 1000 │ │ 0 │
└──────┴───────┴─────────┴─────────────┴─────┴─────┴──────────┴──────┴─────────┴───────────┴─────────────┘
```
Row 1:
──────
name: min_insert_block_size_rows
value: 1048449
changed: 0
description: Sets the minimum number of rows in the block that can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones.

Possible values:

- Positive integer.
- 0 — Squashing disabled.
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
default: 1048449
alias_for:
is_obsolete: 0
tier: Production

Row 2:
──────
name: min_insert_block_size_bytes
value: 268402944
changed: 0
description: Sets the minimum number of bytes in the block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones.

Possible values:

- Positive integer.
- 0 — Squashing disabled.
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
default: 268402944
alias_for:
is_obsolete: 0
tier: Production

Row 3:
──────
name: min_insert_block_size_rows_for_materialized_views
value: 0
changed: 0
description: Sets the minimum number of rows in the block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create/view.md). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.

Possible values:

- Any positive integer.
- 0 — Squashing disabled.

**See Also**

- [min_insert_block_size_rows](#min-insert-block-size-rows)
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
default: 0
alias_for:
is_obsolete: 0
tier: Production

Row 4:
──────
name: min_insert_block_size_bytes_for_materialized_views
value: 0
changed: 0
description: Sets the minimum number of bytes in the block which can be inserted into a table by an `INSERT` query. Smaller-sized blocks are squashed into bigger ones. This setting is applied only for blocks inserted into [materialized view](../../sql-reference/statements/create/view.md). By adjusting this setting, you control blocks squashing while pushing to materialized view and avoid excessive memory usage.

Possible values:

- Any positive integer.
- 0 — Squashing disabled.

**See also**

- [min_insert_block_size_bytes](#min-insert-block-size-bytes)
min: ᴺᵁᴸᴸ
max: ᴺᵁᴸᴸ
readonly: 0
type: UInt64
default: 0
alias_for:
is_obsolete: 0
tier: Production
```

Using `WHERE changed` can be useful, for example, when you want to check:

@@ -83,7 +83,7 @@ The presence of long-running or incomplete mutations often indicates that a Click
- Or manually kill some of these mutations by sending a `KILL` command.

``` sql
KILL MUTATION [ON CLUSTER cluster]
KILL MUTATION
WHERE <where expression to SELECT FROM system.mutations query>
[TEST]
[FORMAT format]
@@ -135,7 +135,6 @@ KILL MUTATION WHERE database = 'default' AND table = 'table'
-- Cancel the specific mutation:
KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
```
:::tip
If you are killing a mutation in ClickHouse Cloud or in a self-managed cluster, be sure to use the `ON CLUSTER [cluster-name]` option to ensure the mutation is killed on all replicas.
:::

The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).
@@ -1454,8 +1454,22 @@ void ClientBase::resetOutput()

/// Order is important: format, compression, file

if (output_format)
output_format->finalize();
try
{
if (output_format)
output_format->finalize();
}
catch (...)
{
/// We need to make sure we continue resetting output_format (will stop threads on parallel output)
/// as well as cleaning other output related setup
if (!have_error)
{
client_exception
= std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}
}
output_format.reset();

logs_out_stream.reset();
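The change above wraps `finalize()` in a try/catch so that a failure during finalization is recorded without skipping the rest of the cleanup. A minimal standalone sketch of the same pattern (the `Output`/`Client` types and strings here are illustrative, not ClickHouse code):

```cpp
#include <memory>
#include <stdexcept>
#include <string>

struct Output
{
    void finalize() { throw std::runtime_error("flush failed"); } // may throw during cleanup
};

struct Client
{
    std::unique_ptr<Output> output;     // stands in for output_format
    std::unique_ptr<std::string> error; // stands in for client_exception
    bool have_error = false;

    void resetOutput()
    {
        try
        {
            if (output)
                output->finalize();     // the only call that may throw
        }
        catch (...)
        {
            // Record the failure, but keep going so the resources below are still released.
            if (!have_error)
            {
                error = std::make_unique<std::string>("finalize failed");
                have_error = true;
            }
        }
        output.reset();                 // always runs, even if finalize() threw
    }
};

int main()
{
    Client client;
    client.output = std::make_unique<Output>();
    client.resetOutput();               // does not terminate; the error is recorded instead
    return client.have_error ? 0 : 1;
}
```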
@@ -8,6 +8,7 @@ namespace DB
{
namespace ErrorCodes
{
extern const int INCORRECT_DATA;
extern const int UNKNOWN_SETTING;
}

@@ -31,11 +32,19 @@ void BaseSettingsHelpers::writeFlags(Flags flags, WriteBuffer & out)
}


BaseSettingsHelpers::Flags BaseSettingsHelpers::readFlags(ReadBuffer & in)
UInt64 BaseSettingsHelpers::readFlags(ReadBuffer & in)
{
UInt64 res;
readVarUInt(res, in);
return static_cast<Flags>(res);
return res;
}

SettingsTierType BaseSettingsHelpers::getTier(UInt64 flags)
{
int8_t tier = static_cast<int8_t>(flags & Flags::TIER);
if (tier > SettingsTierType::BETA)
throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown tier value: '{}'", tier);
return SettingsTierType{tier};
}
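For reference, the helpers above treat the serialized flags as a plain UInt64 in which bit 0 is IMPORTANT, bit 1 is CUSTOM, and bits 2–3 (mask 0x0c) hold the tier. A minimal standalone sketch of that extraction, mirroring the masks in this diff (the type and function names here are illustrative, not the real ClickHouse ones):

```cpp
#include <cstdint>
#include <stdexcept>

enum Tier : int8_t { PRODUCTION = 0b0000, OBSOLETE = 0b0100, EXPERIMENTAL = 0b1000, BETA = 0b1100 };

enum Flags : uint64_t
{
    IMPORTANT = 0x01,
    CUSTOM    = 0x02,
    TIER      = 0x0c, // two bits reserved for the tier
};

// Same idea as BaseSettingsHelpers::getTier: mask out the tier bits and validate the result.
inline Tier getTier(uint64_t flags)
{
    auto tier = static_cast<int8_t>(flags & TIER);
    if (tier > BETA)
        throw std::invalid_argument("Unknown tier value");
    return static_cast<Tier>(tier);
}

int main()
{
    uint64_t flags = IMPORTANT | BETA;     // e.g. what an "important, beta-tier" setting would carry
    bool is_important = flags & IMPORTANT; // true
    Tier tier = getTier(flags);            // BETA
    return (is_important && tier == BETA) ? 0 : 1;
}
```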
@@ -2,6 +2,7 @@

#include <unordered_map>
#include <Core/SettingsFields.h>
#include <Core/SettingsTierType.h>
#include <Core/SettingsWriteFormat.h>
#include <IO/Operators.h>
#include <base/range.h>
@@ -21,6 +22,27 @@ namespace DB
class ReadBuffer;
class WriteBuffer;

struct BaseSettingsHelpers
{
[[noreturn]] static void throwSettingNotFound(std::string_view name);
static void warningSettingNotFound(std::string_view name);

static void writeString(std::string_view str, WriteBuffer & out);
static String readString(ReadBuffer & in);

enum Flags : UInt64
{
IMPORTANT = 0x01,
CUSTOM = 0x02,
TIER = 0x0c, /// 0b1100 == 2 bits
/// If adding new flags, consider first if Tier might need more bits
};

static SettingsTierType getTier(UInt64 flags);
static void writeFlags(Flags flags, WriteBuffer & out);
static UInt64 readFlags(ReadBuffer & in);
};

/** Template class to define collections of settings.
 * If you create a new setting, please also add it to ./utils/check-style/check-settings-style
 * for validation
@@ -138,7 +160,7 @@ public:
const char * getTypeName() const;
const char * getDescription() const;
bool isCustom() const;
bool isObsolete() const;
SettingsTierType getTier() const;

bool operator==(const SettingFieldRef & other) const { return (getName() == other.getName()) && (getValue() == other.getValue()); }
bool operator!=(const SettingFieldRef & other) const { return !(*this == other); }
@@ -225,24 +247,6 @@ private:
std::conditional_t<Traits::allow_custom_settings, CustomSettingMap, boost::blank> custom_settings_map;
};

struct BaseSettingsHelpers
{
[[noreturn]] static void throwSettingNotFound(std::string_view name);
static void warningSettingNotFound(std::string_view name);

static void writeString(std::string_view str, WriteBuffer & out);
static String readString(ReadBuffer & in);

enum Flags : UInt64
{
IMPORTANT = 0x01,
CUSTOM = 0x02,
OBSOLETE = 0x04,
};
static void writeFlags(Flags flags, WriteBuffer & out);
static Flags readFlags(ReadBuffer & in);
};

template <typename TTraits>
void BaseSettings<TTraits>::set(std::string_view name, const Field & value)
{
@@ -477,7 +481,7 @@ void BaseSettings<TTraits>::read(ReadBuffer & in, SettingsWriteFormat format)
size_t index = accessor.find(name);

using Flags = BaseSettingsHelpers::Flags;
Flags flags{0};
UInt64 flags{0};
if (format >= SettingsWriteFormat::STRINGS_WITH_FLAGS)
flags = BaseSettingsHelpers::readFlags(in);
bool is_important = (flags & Flags::IMPORTANT);
@@ -797,14 +801,14 @@ bool BaseSettings<TTraits>::SettingFieldRef::isCustom() const
}

template <typename TTraits>
bool BaseSettings<TTraits>::SettingFieldRef::isObsolete() const
SettingsTierType BaseSettings<TTraits>::SettingFieldRef::getTier() const
{
if constexpr (Traits::allow_custom_settings)
{
if (custom_setting)
return false;
return SettingsTierType::PRODUCTION;
}
return accessor->isObsolete(index);
return accessor->getTier(index);
}

using AliasMap = std::unordered_map<std::string_view, std::string_view>;
@@ -835,8 +839,8 @@ using AliasMap = std::unordered_map<std::string_view, std::string_view>;
const String & getName(size_t index) const { return field_infos[index].name; } \
const char * getTypeName(size_t index) const { return field_infos[index].type; } \
const char * getDescription(size_t index) const { return field_infos[index].description; } \
bool isImportant(size_t index) const { return field_infos[index].is_important; } \
bool isObsolete(size_t index) const { return field_infos[index].is_obsolete; } \
bool isImportant(size_t index) const { return field_infos[index].flags & BaseSettingsHelpers::Flags::IMPORTANT; } \
SettingsTierType getTier(size_t index) const { return BaseSettingsHelpers::getTier(field_infos[index].flags); } \
Field castValueUtil(size_t index, const Field & value) const { return field_infos[index].cast_value_util_function(value); } \
String valueToStringUtil(size_t index, const Field & value) const { return field_infos[index].value_to_string_util_function(value); } \
Field stringToValueUtil(size_t index, const String & str) const { return field_infos[index].string_to_value_util_function(str); } \
@@ -856,8 +860,7 @@ using AliasMap = std::unordered_map<std::string_view, std::string_view>;
String name; \
const char * type; \
const char * description; \
bool is_important; \
bool is_obsolete; \
UInt64 flags; \
Field (*cast_value_util_function)(const Field &); \
String (*value_to_string_util_function)(const Field &); \
Field (*string_to_value_util_function)(const String &); \
@@ -968,8 +971,8 @@ struct DefineAliases
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTINGS_TRAITS_(TYPE, NAME, DEFAULT, DESCRIPTION, FLAGS) \
res.field_infos.emplace_back( \
FieldInfo{#NAME, #TYPE, DESCRIPTION, (FLAGS) & IMPORTANT, \
static_cast<bool>((FLAGS) & BaseSettingsHelpers::Flags::OBSOLETE), \
FieldInfo{#NAME, #TYPE, DESCRIPTION, \
static_cast<UInt64>(FLAGS), \
[](const Field & value) -> Field { return static_cast<Field>(SettingField##TYPE{value}); }, \
[](const Field & value) -> String { return SettingField##TYPE{value}.toString(); }, \
[](const String & str) -> Field { SettingField##TYPE temp; temp.parseFromString(str); return static_cast<Field>(temp); }, \
@@ -192,6 +192,13 @@ namespace DB
DECLARE(UInt64, parts_killer_pool_size, 128, "Threads for cleanup of shared merge tree outdated threads. Only available in ClickHouse Cloud", 0) \
DECLARE(UInt64, keeper_multiread_batch_size, 10'000, "Maximum size of batch for MultiRead request to [Zoo]Keeper that support batching. If set to 0, batching is disabled. Available only in ClickHouse Cloud.", 0) \
DECLARE(Bool, use_legacy_mongodb_integration, true, "Use the legacy MongoDB integration implementation. Note: it's highly recommended to set this option to false, since legacy implementation will be removed in the future. Please submit any issues you encounter with the new implementation.", 0) \
\
DECLARE(UInt64, prefetch_threadpool_pool_size, 100, "Size of background pool for prefetches for remote object storages", 0) \
DECLARE(UInt64, prefetch_threadpool_queue_size, 1000000, "Number of tasks which is possible to push into prefetches pool", 0) \
DECLARE(UInt64, load_marks_threadpool_pool_size, 50, "Size of background pool for marks loading", 0) \
DECLARE(UInt64, load_marks_threadpool_queue_size, 1000000, "Number of tasks which is possible to push into prefetches pool", 0) \
DECLARE(UInt64, threadpool_writer_pool_size, 100, "Size of background pool for write requests to object storages", 0) \
DECLARE(UInt64, threadpool_writer_queue_size, 1000000, "Number of tasks which is possible to push into background pool for write requests to object storages", 0)

/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in dumpToSystemServerSettingsColumns below

@@ -339,7 +346,7 @@ void ServerSettings::dumpToSystemServerSettingsColumns(ServerSettingColumnsParam
res_columns[4]->insert(setting.getDescription());
res_columns[5]->insert(setting.getTypeName());
res_columns[6]->insert(is_changeable ? changeable_settings_it->second.second : ChangeableWithoutRestart::No);
res_columns[7]->insert(setting.isObsolete());
res_columns[7]->insert(setting.getTier() == SettingsTierType::OBSOLETE);
}
}
}
@@ -1,7 +1,5 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnMap.h>
#include <Core/BaseSettings.h>
#include <Core/BaseSettingsFwdMacros.h>
#include <Core/BaseSettingsFwdMacrosImpl.h>
#include <Core/BaseSettingsProgramOptions.h>
#include <Core/DistributedCacheProtocol.h>
@@ -40,10 +38,17 @@ namespace ErrorCodes
 * Note: as an alternative, we could implement settings to be completely dynamic in the form of the map: String -> Field,
 * but we are not going to do it, because settings are used everywhere as static struct fields.
 *
 * `flags` can be either 0 or IMPORTANT.
 * A setting is "IMPORTANT" if it affects the results of queries and can't be ignored by older versions.
 * `flags` can include a Tier (BETA | EXPERIMENTAL), optionally combined (bitwise OR) with IMPORTANT.
 * The default (0) means a PRODUCTION-ready setting.
 *
 * When adding new or changing existing settings add them to the settings changes history in SettingsChangesHistory.h
 * A setting is "IMPORTANT" if it affects the results of queries and can't be ignored by older versions.
 * Tiers:
 * EXPERIMENTAL: The feature is in active development stage. Mostly for developers or for ClickHouse enthusiasts.
 * BETA: There are no known bugs in the functionality, but the outcome of using it together with other
 * features/components is unknown and correctness is not guaranteed.
 * PRODUCTION (Default): The feature is safe to use along with other features from the PRODUCTION tier.
 *
 * When adding new or changing existing settings add them to the settings changes history in SettingsChangesHistory.cpp
 * for tracking settings changes in different versions and for special `compatibility` settings to work correctly.
 */
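Given the tier scheme described in this comment block, a declaration passes its tier, optionally OR-ed with IMPORTANT, as the FLAGS argument; the hunks below contain real instances (e.g. `EXPERIMENTAL`, `BETA`, and `IMPORTANT | BETA` for `allow_experimental_analyzer`). The following is a condensed, hypothetical illustration of what the macro machinery ends up recording — the setting names and the toy `FieldInfo` here are invented, only the flag values mirror the diff:

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Toy stand-in for the FieldInfo records built by IMPLEMENT_SETTINGS_TRAITS_:
// the whole FLAGS argument (tier bits plus IMPORTANT) is stored as one UInt64.
struct FieldInfo
{
    std::string name;
    uint64_t flags;
};

enum : uint64_t { IMPORTANT = 0x01 };
enum : uint64_t { PRODUCTION = 0b0000, OBSOLETE = 0b0100, EXPERIMENTAL = 0b1000, BETA = 0b1100 };

int main()
{
    std::vector<FieldInfo> infos = {
        {"regular_production_setting", 0},              // default: PRODUCTION tier, not important
        {"allow_experimental_something", EXPERIMENTAL}, // experimental tier
        {"some_beta_setting", IMPORTANT | BETA},        // beta tier, affects query results
    };
    return static_cast<int>(infos.size()) == 3 ? 0 : 1;
}
```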
@@ -5106,6 +5111,9 @@ Only in ClickHouse Cloud. A maximum number of unacknowledged in-flight packets i
)", 0) \
DECLARE(UInt64, distributed_cache_data_packet_ack_window, DistributedCache::ACK_DATA_PACKET_WINDOW, R"(
Only in ClickHouse Cloud. A window for sending ACK for DataPacket sequence in a single distributed cache read request
)", 0) \
DECLARE(Bool, distributed_cache_discard_connection_if_unread_data, true, R"(
Only in ClickHouse Cloud. Discard connection if some data is unread.
)", 0) \
\
DECLARE(Bool, parallelize_output_from_storages, true, R"(
@@ -5505,90 +5513,102 @@ For testing purposes. Replaces all external table functions to Null to not initi
DECLARE(Bool, restore_replace_external_dictionary_source_to_null, false, R"(
Replace external dictionary sources to Null on restore. Useful for testing purposes
)", 0) \
DECLARE(Bool, create_if_not_exists, false, R"(
Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown.
)", 0) \
DECLARE(Bool, enforce_strict_identifier_format, false, R"(
If enabled, only allow identifiers containing alphanumeric characters and underscores.
)", 0) \
DECLARE(Bool, mongodb_throw_on_unsupported_query, true, R"(
If enabled, MongoDB tables will return an error when a MongoDB query cannot be built. Otherwise, ClickHouse reads the full table and processes it locally. This option does not apply to the legacy implementation or when 'allow_experimental_analyzer=0'.
)", 0) \
\
/* ###################################### */ \
/* ######## EXPERIMENTAL FEATURES ####### */ \
/* ###################################### */ \
DECLARE(Bool, allow_experimental_materialized_postgresql_table, false, R"(
Allows to use the MaterializedPostgreSQL table engine. Disabled by default, because this feature is experimental
)", 0) \
DECLARE(Bool, allow_experimental_funnel_functions, false, R"(
Enable experimental functions for funnel analysis.
)", 0) \
DECLARE(Bool, allow_experimental_nlp_functions, false, R"(
Enable experimental functions for natural language processing.
)", 0) \
DECLARE(Bool, allow_experimental_hash_functions, false, R"(
Enable experimental hash functions
)", 0) \
DECLARE(Bool, allow_experimental_object_type, false, R"(
Allow Object and JSON data types
)", 0) \
DECLARE(Bool, allow_experimental_time_series_table, false, R"(
Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine.
/* Parallel replicas */ \
DECLARE(UInt64, allow_experimental_parallel_reading_from_replicas, 0, R"(
Use up to `max_parallel_replicas` the number of replicas from each shard for SELECT query execution. Reading is parallelized and coordinated dynamically. 0 - disabled, 1 - enabled, silently disable them in case of failure, 2 - enabled, throw an exception in case of failure
)", BETA) ALIAS(enable_parallel_replicas) \
DECLARE(NonZeroUInt64, max_parallel_replicas, 1, R"(
The maximum number of replicas for each shard when executing a query.

Possible values:

- 0 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is disabled.
- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled.
)", 0) \
DECLARE(Bool, allow_experimental_vector_similarity_index, false, R"(
Allow experimental vector similarity index
)", 0) \
DECLARE(Bool, allow_experimental_variant_type, false, R"(
Allows creation of experimental [Variant](../../sql-reference/data-types/variant.md).
)", 0) \
DECLARE(Bool, allow_experimental_dynamic_type, false, R"(
Allow Dynamic data type
)", 0) \
DECLARE(Bool, allow_experimental_json_type, false, R"(
Allow JSON data type
)", 0) \
DECLARE(Bool, allow_experimental_codecs, false, R"(
If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).
)", 0) \
DECLARE(Bool, allow_experimental_shared_set_join, true, R"(
Only in ClickHouse Cloud. Allow to create ShareSet and SharedJoin
)", 0) \
DECLARE(UInt64, max_limit_for_ann_queries, 1'000'000, R"(
SELECT queries with LIMIT bigger than this setting cannot use vector similarity indexes. Helps to prevent memory overflows in vector similarity indexes.
)", 0) \
DECLARE(UInt64, hnsw_candidate_list_size_for_search, 256, R"(
The size of the dynamic candidate list when searching the vector similarity index, also known as 'ef_search'.
)", 0) \
DECLARE(Bool, throw_on_unsupported_query_inside_transaction, true, R"(
Throw exception if unsupported query is used inside transaction
)", 0) \
DECLARE(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, R"(
Wait for committed changes to become actually visible in the latest snapshot
)", 0) \
DECLARE(Bool, implicit_transaction, false, R"(
If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)
)", 0) \
DECLARE(UInt64, grace_hash_join_initial_buckets, 1, R"(
Initial number of grace hash join buckets
)", 0) \
DECLARE(UInt64, grace_hash_join_max_buckets, 1024, R"(
Limit on the number of grace hash join buckets
)", 0) \
DECLARE(UInt64, join_to_sort_minimum_perkey_rows, 40, R"(
The lower limit of per-key average rows in the right table to determine whether to rerange the right table by key in left or inner join. This setting ensures that the optimization is not applied for sparse table keys
)", 0) \
DECLARE(UInt64, join_to_sort_maximum_table_rows, 10000, R"(
The maximum number of rows in the right table to determine whether to rerange the right table by key in left or inner join.
)", 0) \
DECLARE(Bool, allow_experimental_join_right_table_sorting, false, R"(
If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join.
- Positive integer.

**Additional Info**

This option will produce different results depending on the settings used.

:::note
This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md/#max_parallel_replica-subqueries) for more details.
:::

### Parallel processing using `SAMPLE` key

A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases:

- The position of the sampling key in the partitioning key does not allow efficient range scans.
- Adding a sampling key to the table makes filtering by other columns less efficient.
- The sampling key is an expression that is expensive to calculate.
- The cluster latency distribution has a long tail, so that querying more servers increases the query overall latency.

### Parallel processing using [parallel_replicas_custom_key](#parallel_replicas_custom_key)

This setting is useful for any replicated table.
)", 0) \
DECLARE(ParallelReplicasMode, parallel_replicas_mode, ParallelReplicasMode::READ_TASKS, R"(
Type of filter to use with custom key for parallel replicas. default - use modulo operation on the custom key, range - use range filter on custom key using all possible values for the value type of custom key.
)", BETA) \
DECLARE(UInt64, parallel_replicas_count, 0, R"(
This is internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the number of parallel replicas participating in query processing.
)", BETA) \
DECLARE(UInt64, parallel_replica_offset, 0, R"(
This is internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.
)", BETA) \
DECLARE(String, parallel_replicas_custom_key, "", R"(
An arbitrary integer expression that can be used to split work between replicas for a specific table.
The value can be any integer expression.

Simple expressions using primary keys are preferred.

If the setting is used on a cluster that consists of a single shard with multiple replicas, those replicas will be converted into virtual shards.
Otherwise, it will behave same as for `SAMPLE` key, it will use multiple replicas of each shard.
)", BETA) \
DECLARE(UInt64, parallel_replicas_custom_key_range_lower, 0, R"(
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.

When used in conjunction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.

Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
)", BETA) \
DECLARE(UInt64, parallel_replicas_custom_key_range_upper, 0, R"(
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[0, parallel_replicas_custom_key_range_upper]`. A value of 0 disables the upper bound, setting it to the max value of the custom key expression.

When used in conjunction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.

Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing
)", BETA) \
DECLARE(String, cluster_for_parallel_replicas, "", R"(
Cluster for a shard in which current server is located
)", BETA) \
DECLARE(Bool, parallel_replicas_allow_in_with_subquery, true, R"(
If true, subquery for IN will be executed on every follower replica.
)", BETA) \
DECLARE(Float, parallel_replicas_single_task_marks_count_multiplier, 2, R"(
A multiplier which will be added during calculation for minimal number of marks to retrieve from coordinator. This will be applied only for remote replicas.
)", BETA) \
DECLARE(Bool, parallel_replicas_for_non_replicated_merge_tree, false, R"(
If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables
)", BETA) \
DECLARE(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, R"(
Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'
)", BETA) \
DECLARE(Bool, parallel_replicas_prefer_local_join, true, R"(
If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.
)", BETA) \
DECLARE(UInt64, parallel_replicas_mark_segment_size, 0, R"(
Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing. Value should be in range [128; 16384]
)", BETA) \
DECLARE(Bool, parallel_replicas_local_plan, false, R"(
Build local plan for local replica
)", BETA) \
\
DECLARE(Bool, allow_experimental_analyzer, true, R"(
Allow new query analyzer.
)", IMPORTANT | BETA) ALIAS(enable_analyzer) \
DECLARE(Bool, analyzer_compatibility_join_using_top_level_identifier, false, R"(
Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather than `t1.b = t2.b`).
)", BETA) \
\
DECLARE(Timezone, session_timezone, "", R"(
Sets the implicit time zone of the current session or query.
The implicit time zone is the time zone applied to values of type DateTime/DateTime64 which have no explicitly specified time zone.
@@ -5648,126 +5668,121 @@ This happens due to different parsing pipelines:
**See also**

- [timezone](../server-configuration-parameters/settings.md#timezone)
)", BETA) \
DECLARE(Bool, create_if_not_exists, false, R"(
Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown.
)", 0) \
DECLARE(Bool, enforce_strict_identifier_format, false, R"(
If enabled, only allow identifiers containing alphanumeric characters and underscores.
)", 0) \
DECLARE(Bool, mongodb_throw_on_unsupported_query, true, R"(
If enabled, MongoDB tables will return an error when a MongoDB query cannot be built. Otherwise, ClickHouse reads the full table and processes it locally. This option does not apply to the legacy implementation or when 'allow_experimental_analyzer=0'.
)", 0) \
DECLARE(Bool, implicit_select, false, R"(
Allow writing simple SELECT queries without the leading SELECT keyword, which makes it simple for calculator-style usage, e.g. `1 + 2` becomes a valid query.
)", 0) \
DECLARE(Bool, use_hive_partitioning, false, R"(
When enabled, ClickHouse will detect Hive-style partitioning in path (`/name=value/`) in file-like table engines [File](../../engines/table-engines/special/file.md#hive-style-partitioning)/[S3](../../engines/table-engines/integrations/s3.md#hive-style-partitioning)/[URL](../../engines/table-engines/special/url.md#hive-style-partitioning)/[HDFS](../../engines/table-engines/integrations/hdfs.md#hive-style-partitioning)/[AzureBlobStorage](../../engines/table-engines/integrations/azureBlobStorage.md#hive-style-partitioning) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`.
)", 0)\
\
DECLARE(Bool, allow_statistics_optimize, false, R"(
Allows using statistics to optimize queries
)", 0) ALIAS(allow_statistic_optimize) \
DECLARE(Bool, allow_experimental_statistics, false, R"(
Allows defining columns with [statistics](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) and [manipulate statistics](../../engines/table-engines/mergetree-family/mergetree.md#column-statistics).
)", 0) ALIAS(allow_experimental_statistic) \
\
/* Parallel replicas */ \
DECLARE(UInt64, allow_experimental_parallel_reading_from_replicas, 0, R"(
Use up to `max_parallel_replicas` the number of replicas from each shard for SELECT query execution. Reading is parallelized and coordinated dynamically. 0 - disabled, 1 - enabled, silently disable them in case of failure, 2 - enabled, throw an exception in case of failure
)", 0) ALIAS(enable_parallel_replicas) \
DECLARE(NonZeroUInt64, max_parallel_replicas, 1, R"(
The maximum number of replicas for each shard when executing a query.
/* ####################################################### */ \
/* ########### START OF EXPERIMENTAL FEATURES ############ */ \
/* ## ADD PRODUCTION / BETA FEATURES BEFORE THIS BLOCK ## */ \
/* ####################################################### */ \
\
DECLARE(Bool, allow_experimental_materialized_postgresql_table, false, R"(
Allows to use the MaterializedPostgreSQL table engine. Disabled by default, because this feature is experimental
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_funnel_functions, false, R"(
Enable experimental functions for funnel analysis.
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_nlp_functions, false, R"(
Enable experimental functions for natural language processing.
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_hash_functions, false, R"(
Enable experimental hash functions
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_object_type, false, R"(
Allow Object and JSON data types
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_time_series_table, false, R"(
Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine.

Possible values:

- Positive integer.

**Additional Info**

This option will produce different results depending on the settings used.

:::note
This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md/#max_parallel_replica-subqueries) for more details.
:::

### Parallel processing using `SAMPLE` key

A query may be processed faster if it is executed on several servers in parallel. But the query performance may degrade in the following cases:

- The position of the sampling key in the partitioning key does not allow efficient range scans.
- Adding a sampling key to the table makes filtering by other columns less efficient.
- The sampling key is an expression that is expensive to calculate.
- The cluster latency distribution has a long tail, so that querying more servers increases the query overall latency.

### Parallel processing using [parallel_replicas_custom_key](#parallel_replicas_custom_key)

This setting is useful for any replicated table.
)", 0) \
DECLARE(ParallelReplicasMode, parallel_replicas_mode, ParallelReplicasMode::READ_TASKS, R"(
Type of filter to use with custom key for parallel replicas. default - use modulo operation on the custom key, range - use range filter on custom key using all possible values for the value type of custom key.
)", 0) \
DECLARE(UInt64, parallel_replicas_count, 0, R"(
This is internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the number of parallel replicas participating in query processing.
)", 0) \
DECLARE(UInt64, parallel_replica_offset, 0, R"(
This is internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.
)", 0) \
DECLARE(String, parallel_replicas_custom_key, "", R"(
An arbitrary integer expression that can be used to split work between replicas for a specific table.
The value can be any integer expression.

Simple expressions using primary keys are preferred.

If the setting is used on a cluster that consists of a single shard with multiple replicas, those replicas will be converted into virtual shards.
Otherwise, it will behave same as for `SAMPLE` key, it will use multiple replicas of each shard.
)", 0) \
DECLARE(UInt64, parallel_replicas_custom_key_range_lower, 0, R"(
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.

When used in conjunction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.

Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
)", 0) \
DECLARE(UInt64, parallel_replicas_custom_key_range_upper, 0, R"(
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[0, parallel_replicas_custom_key_range_upper]`. A value of 0 disables the upper bound, setting it to the max value of the custom key expression.

When used in conjunction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.

Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing
)", 0) \
DECLARE(String, cluster_for_parallel_replicas, "", R"(
Cluster for a shard in which current server is located
)", 0) \
DECLARE(Bool, parallel_replicas_allow_in_with_subquery, true, R"(
If true, subquery for IN will be executed on every follower replica.
)", 0) \
DECLARE(Float, parallel_replicas_single_task_marks_count_multiplier, 2, R"(
A multiplier which will be added during calculation for minimal number of marks to retrieve from coordinator. This will be applied only for remote replicas.
)", 0) \
DECLARE(Bool, parallel_replicas_for_non_replicated_merge_tree, false, R"(
If true, ClickHouse will use parallel replicas algorithm also for non-replicated MergeTree tables
)", 0) \
DECLARE(UInt64, parallel_replicas_min_number_of_rows_per_replica, 0, R"(
Limit the number of replicas used in a query to (estimated rows to read / min_number_of_rows_per_replica). The max is still limited by 'max_parallel_replicas'
)", 0) \
DECLARE(Bool, parallel_replicas_prefer_local_join, true, R"(
If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.
)", 0) \
DECLARE(UInt64, parallel_replicas_mark_segment_size, 0, R"(
Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing. Value should be in range [128; 16384]
- 0 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is disabled.
- 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled.
)", 0) \
DECLARE(Bool, allow_experimental_vector_similarity_index, false, R"(
Allow experimental vector similarity index
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_variant_type, false, R"(
Allows creation of experimental [Variant](../../sql-reference/data-types/variant.md).
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_dynamic_type, false, R"(
Allow Dynamic data type
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_json_type, false, R"(
Allow JSON data type
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_codecs, false, R"(
If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_shared_set_join, true, R"(
Only in ClickHouse Cloud. Allow to create ShareSet and SharedJoin
)", EXPERIMENTAL) \
DECLARE(UInt64, max_limit_for_ann_queries, 1'000'000, R"(
SELECT queries with LIMIT bigger than this setting cannot use vector similarity indexes. Helps to prevent memory overflows in vector similarity indexes.
)", EXPERIMENTAL) \
DECLARE(UInt64, hnsw_candidate_list_size_for_search, 256, R"(
The size of the dynamic candidate list when searching the vector similarity index, also known as 'ef_search'.
)", EXPERIMENTAL) \
DECLARE(Bool, throw_on_unsupported_query_inside_transaction, true, R"(
Throw exception if unsupported query is used inside transaction
)", EXPERIMENTAL) \
DECLARE(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, R"(
Wait for committed changes to become actually visible in the latest snapshot
)", EXPERIMENTAL) \
DECLARE(Bool, implicit_transaction, false, R"(
If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)
)", EXPERIMENTAL) \
DECLARE(UInt64, grace_hash_join_initial_buckets, 1, R"(
Initial number of grace hash join buckets
)", EXPERIMENTAL) \
DECLARE(UInt64, grace_hash_join_max_buckets, 1024, R"(
Limit on the number of grace hash join buckets
)", EXPERIMENTAL) \
DECLARE(UInt64, join_to_sort_minimum_perkey_rows, 40, R"(
The lower limit of per-key average rows in the right table to determine whether to rerange the right table by key in left or inner join. This setting ensures that the optimization is not applied for sparse table keys
)", EXPERIMENTAL) \
DECLARE(UInt64, join_to_sort_maximum_table_rows, 10000, R"(
The maximum number of rows in the right table to determine whether to rerange the right table by key in left or inner join.
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_join_right_table_sorting, false, R"(
If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join.
)", EXPERIMENTAL) \
DECLARE(Bool, use_hive_partitioning, false, R"(
When enabled, ClickHouse will detect Hive-style partitioning in path (`/name=value/`) in file-like table engines [File](../../engines/table-engines/special/file.md#hive-style-partitioning)/[S3](../../engines/table-engines/integrations/s3.md#hive-style-partitioning)/[URL](../../engines/table-engines/special/url.md#hive-style-partitioning)/[HDFS](../../engines/table-engines/integrations/hdfs.md#hive-style-partitioning)/[AzureBlobStorage](../../engines/table-engines/integrations/azureBlobStorage.md#hive-style-partitioning) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`.
)", EXPERIMENTAL)\
\
DECLARE(Bool, allow_statistics_optimize, false, R"(
Allows using statistics to optimize queries
)", EXPERIMENTAL) ALIAS(allow_statistic_optimize) \
DECLARE(Bool, allow_experimental_statistics, false, R"(
Allows defining columns with [statistics](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) and [manipulate statistics](../../engines/table-engines/mergetree-family/mergetree.md#column-statistics).
)", EXPERIMENTAL) ALIAS(allow_experimental_statistic) \
\
DECLARE(Bool, allow_archive_path_syntax, true, R"(
File/S3 engines/table function will parse paths with '::' as '\\<archive\\> :: \\<file\\>' if archive has correct extension
)", 0) \
DECLARE(Bool, parallel_replicas_local_plan, false, R"(
Build local plan for local replica
)", 0) \
)", EXPERIMENTAL) \
\
DECLARE(Bool, allow_experimental_inverted_index, false, R"(
If it is set to true, allow to use experimental inverted index.
)", 0) \
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_full_text_index, false, R"(
If it is set to true, allow to use experimental full-text index.
)", 0) \
)", EXPERIMENTAL) \
\
DECLARE(Bool, allow_experimental_join_condition, false, R"(
Support join with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y.
)", 0) \
\
DECLARE(Bool, allow_experimental_analyzer, true, R"(
Allow new query analyzer.
)", IMPORTANT) ALIAS(enable_analyzer) \
DECLARE(Bool, analyzer_compatibility_join_using_top_level_identifier, false, R"(
Force to resolve identifier in JOIN USING from projection (for example, in `SELECT a + 1 AS b FROM t1 JOIN t2 USING (b)` join will be performed by `t1.a + 1 = t2.b`, rather than `t1.b = t2.b`).
)", 0) \
\
DECLARE(Bool, allow_experimental_live_view, false, R"(
@@ -5780,43 +5795,43 @@ Possible values:
)", 0) \
DECLARE(Seconds, live_view_heartbeat_interval, 15, R"(
The heartbeat interval in seconds to indicate live query is alive.
)", 0) \
)", EXPERIMENTAL) \
DECLARE(UInt64, max_live_view_insert_blocks_before_refresh, 64, R"(
Limit maximum number of inserted blocks after which mergeable blocks are dropped and query is re-executed.
)", 0) \
)", EXPERIMENTAL) \
\
DECLARE(Bool, allow_experimental_window_view, false, R"(
Enable WINDOW VIEW. Not mature enough.
)", 0) \
)", EXPERIMENTAL) \
DECLARE(Seconds, window_view_clean_interval, 60, R"(
The clean interval of window view in seconds to free outdated data.
)", 0) \
)", EXPERIMENTAL) \
DECLARE(Seconds, window_view_heartbeat_interval, 15, R"(
The heartbeat interval in seconds to indicate watch query is alive.
)", 0) \
)", EXPERIMENTAL) \
DECLARE(Seconds, wait_for_window_view_fire_signal_timeout, 10, R"(
Timeout for waiting for window view fire signal in event time processing
)", 0) \
)", EXPERIMENTAL) \
\
DECLARE(Bool, stop_refreshable_materialized_views_on_startup, false, R"(
On server startup, prevent scheduling of refreshable materialized views, as if with SYSTEM STOP VIEWS. You can manually start them with SYSTEM START VIEWS or SYSTEM START VIEW \\<name\\> afterwards. Also applies to newly created views. Has no effect on non-refreshable materialized views.
)", 0) \
)", EXPERIMENTAL) \
\
DECLARE(Bool, allow_experimental_database_materialized_mysql, false, R"(
Allow to create database with Engine=MaterializedMySQL(...).
)", 0) \
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_database_materialized_postgresql, false, R"(
Allow to create database with Engine=MaterializedPostgreSQL(...).
)", 0) \
)", EXPERIMENTAL) \
\
/** Experimental feature for moving data between shards. */ \
DECLARE(Bool, allow_experimental_query_deduplication, false, R"(
Experimental data deduplication for SELECT queries based on part UUIDs
)", 0) \
DECLARE(Bool, implicit_select, false, R"(
Allow writing simple SELECT queries without the leading SELECT keyword, which makes it simple for calculator-style usage, e.g. `1 + 2` becomes a valid query.
)", 0)

)", EXPERIMENTAL) \
\
/* ####################################################### */ \
/* ############ END OF EXPERIMENTAL FEATURES ############# */ \
/* ####################################################### */ \

// End of COMMON_SETTINGS
// Please add settings related to formats in Core/FormatFactorySettings.h, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS.
@@ -5895,13 +5910,14 @@ Allow writing simple SELECT queries without the leading SELECT keyword, which ma
/** The section above is for obsolete settings. Do not add anything there. */
#endif /// __CLION_IDE__


#define LIST_OF_SETTINGS(M, ALIAS) \
COMMON_SETTINGS(M, ALIAS) \
OBSOLETE_SETTINGS(M, ALIAS) \
FORMAT_FACTORY_SETTINGS(M, ALIAS) \
OBSOLETE_FORMAT_SETTINGS(M, ALIAS) \

// clang-format on

DECLARE_SETTINGS_TRAITS_ALLOW_CUSTOM_SETTINGS(SettingsTraits, LIST_OF_SETTINGS)
IMPLEMENT_SETTINGS_TRAITS(SettingsTraits, LIST_OF_SETTINGS)

@@ -6009,7 +6025,7 @@ void SettingsImpl::checkNoSettingNamesAtTopLevel(const Poco::Util::AbstractConfi
{
const auto & name = setting.getName();
bool should_skip_check = name == "max_table_size_to_drop" || name == "max_partition_size_to_drop";
if (config.has(name) && !setting.isObsolete() && !should_skip_check)
if (config.has(name) && (setting.getTier() != SettingsTierType::OBSOLETE) && !should_skip_check)
{
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "A setting '{}' appeared at top level in config {}."
" But it is user-level setting that should be located in users.xml inside <profiles> section for specific profile."
@@ -6185,7 +6201,7 @@ std::vector<std::string_view> Settings::getChangedAndObsoleteNames() const
std::vector<std::string_view> setting_names;
for (const auto & setting : impl->allChanged())
{
if (setting.isObsolete())
if (setting.getTier() == SettingsTierType::OBSOLETE)
setting_names.emplace_back(setting.getName());
}
return setting_names;
@@ -6234,7 +6250,8 @@ void Settings::dumpToSystemSettingsColumns(MutableColumnsAndConstraints & params
res_columns[6]->insert(writability == SettingConstraintWritability::CONST);
res_columns[7]->insert(setting.getTypeName());
res_columns[8]->insert(setting.getDefaultValueString());
res_columns[10]->insert(setting.isObsolete());
res_columns[10]->insert(setting.getTier() == SettingsTierType::OBSOLETE);
res_columns[11]->insert(setting.getTier());
};

const auto & settings_to_aliases = SettingsImpl::Traits::settingsToAliases();
@ -64,6 +64,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
|
||||
},
|
||||
{"24.11",
|
||||
{
|
||||
{"distributed_cache_discard_connection_if_unread_data", true, true, "New setting"},
|
||||
}
|
||||
},
|
||||
{"24.10",
|
||||
|
@ -2,8 +2,8 @@
|
||||
|
||||
// clang-format off
|
||||
#define MAKE_OBSOLETE(M, TYPE, NAME, DEFAULT) \
|
||||
M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE)
|
||||
M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", SettingsTierType::OBSOLETE)
|
||||
|
||||
/// NOTE: ServerSettings::loadSettingsFromConfig() should be updated to include this settings
|
||||
#define MAKE_DEPRECATED_BY_SERVER_CONFIG(M, TYPE, NAME, DEFAULT) \
|
||||
M(TYPE, NAME, DEFAULT, "User-level setting is deprecated, and it must be defined in the server configuration instead.", BaseSettingsHelpers::Flags::OBSOLETE)
|
||||
M(TYPE, NAME, DEFAULT, "User-level setting is deprecated, and it must be defined in the server configuration instead.", SettingsTierType::OBSOLETE)
|
||||
|
19
src/Core/SettingsTierType.cpp
Normal file
@ -0,0 +1,19 @@
#include <Core/SettingsTierType.h>
#include <DataTypes/DataTypeEnum.h>

namespace DB
{

std::shared_ptr<DataTypeEnum8> getSettingsTierEnum()
{
    return std::make_shared<DataTypeEnum8>(
        DataTypeEnum8::Values
        {
            {"Production", static_cast<Int8>(SettingsTierType::PRODUCTION)},
            {"Obsolete", static_cast<Int8>(SettingsTierType::OBSOLETE)},
            {"Experimental", static_cast<Int8>(SettingsTierType::EXPERIMENTAL)},
            {"Beta", static_cast<Int8>(SettingsTierType::BETA)}
        });
}

}
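This helper is what later gives the `tier` column its Enum8 type in the system tables touched by this diff. One way to confirm the mapping from SQL, sketched under the assumption that the server already carries this change:

```sql
SELECT toTypeName(tier) FROM system.settings LIMIT 1;
-- Enum8('Production' = 0, 'Obsolete' = 4, 'Experimental' = 8, 'Beta' = 12)
```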
26
src/Core/SettingsTierType.h
Normal file
@ -0,0 +1,26 @@
#pragma once

#include <Core/Types.h>

#include <cstdint>
#include <memory>

namespace DB
{

template <typename Type>
class DataTypeEnum;
using DataTypeEnum8 = DataTypeEnum<Int8>;

// Make it signed for compatibility with DataTypeEnum8
enum SettingsTierType : int8_t
{
    PRODUCTION = 0b0000,
    OBSOLETE = 0b0100,
    EXPERIMENTAL = 0b1000,
    BETA = 0b1100
};

std::shared_ptr<DataTypeEnum8> getSettingsTierEnum();

}
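The enum constants above are intentionally non-consecutive (0, 4, 8, 12), which is why the numeric codes of the SQL Enum8 are spaced the same way. A small illustrative query, assuming the new `tier` column is present:

```sql
SELECT tier, CAST(tier AS Int8) AS code, count() AS n
FROM system.settings
GROUP BY tier, code
ORDER BY code;
-- codes follow the header: Production = 0, Obsolete = 4, Experimental = 8, Beta = 12
```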
@ -161,7 +161,7 @@ String getNameForSubstreamPath(
|
||||
String stream_name,
|
||||
SubstreamIterator begin,
|
||||
SubstreamIterator end,
|
||||
bool escape_tuple_delimiter)
|
||||
bool escape_for_file_name)
|
||||
{
|
||||
using Substream = ISerialization::Substream;
|
||||
|
||||
@ -186,7 +186,7 @@ String getNameForSubstreamPath(
|
||||
/// Because nested data may be represented not by Array of Tuple,
|
||||
/// but by separate Array columns with names in a form of a.b,
|
||||
/// and name is encoded as a whole.
|
||||
if (it->type == Substream::TupleElement && escape_tuple_delimiter)
|
||||
if (it->type == Substream::TupleElement && escape_for_file_name)
|
||||
stream_name += escapeForFileName(substream_name);
|
||||
else
|
||||
stream_name += substream_name;
|
||||
@ -206,7 +206,7 @@ String getNameForSubstreamPath(
|
||||
else if (it->type == SubstreamType::ObjectSharedData)
|
||||
stream_name += ".object_shared_data";
|
||||
else if (it->type == SubstreamType::ObjectTypedPath || it->type == SubstreamType::ObjectDynamicPath)
|
||||
stream_name += "." + it->object_path_name;
|
||||
stream_name += "." + (escape_for_file_name ? escapeForFileName(it->object_path_name) : it->object_path_name);
|
||||
}
|
||||
|
||||
return stream_name;
|
||||
@ -434,6 +434,14 @@ bool ISerialization::isDynamicSubcolumn(const DB::ISerialization::SubstreamPath
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ISerialization::isLowCardinalityDictionarySubcolumn(const DB::ISerialization::SubstreamPath & path)
|
||||
{
|
||||
if (path.empty())
|
||||
return false;
|
||||
|
||||
return path[path.size() - 1].type == SubstreamType::DictionaryKeys;
|
||||
}
|
||||
|
||||
ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len)
|
||||
{
|
||||
assert(prefix_len <= path.size());
|
||||
|
@ -463,6 +463,8 @@ public:
|
||||
/// Returns true if stream with specified path corresponds to dynamic subcolumn.
|
||||
static bool isDynamicSubcolumn(const SubstreamPath & path, size_t prefix_len);
|
||||
|
||||
static bool isLowCardinalityDictionarySubcolumn(const SubstreamPath & path);
|
||||
|
||||
protected:
|
||||
template <typename State, typename StatePtr>
|
||||
State * checkAndGetState(const StatePtr & state) const;
|
||||
|
@ -54,7 +54,7 @@ void SerializationLowCardinality::enumerateStreams(
|
||||
.withSerializationInfo(data.serialization_info);
|
||||
|
||||
settings.path.back().data = dict_data;
|
||||
dict_inner_serialization->enumerateStreams(settings, callback, dict_data);
|
||||
callback(settings.path);
|
||||
|
||||
settings.path.back() = Substream::DictionaryIndexes;
|
||||
settings.path.back().data = data;
|
||||
|
@ -103,15 +103,15 @@ std::unique_ptr<WriteBufferFromFileBase> HDFSObjectStorage::writeObject( /// NOL
|
||||
ErrorCodes::UNSUPPORTED_METHOD,
|
||||
"HDFS API doesn't support custom attributes/metadata for stored objects");
|
||||
|
||||
std::string path = object.remote_path;
|
||||
if (path.starts_with("/"))
|
||||
path = path.substr(1);
|
||||
if (!path.starts_with(url))
|
||||
path = fs::path(url) / path;
|
||||
|
||||
auto path = extractObjectKeyFromURL(object);
|
||||
/// Single O_WRONLY in libhdfs adds O_TRUNC
|
||||
return std::make_unique<WriteBufferFromHDFS>(
|
||||
path, config, settings->replication, patchSettings(write_settings), buf_size,
|
||||
url_without_path,
|
||||
fs::path(data_directory) / path,
|
||||
config,
|
||||
settings->replication,
|
||||
patchSettings(write_settings),
|
||||
buf_size,
|
||||
mode == WriteMode::Rewrite ? O_WRONLY : O_WRONLY | O_APPEND);
|
||||
}
|
||||
|
||||
|
@ -1171,7 +1171,7 @@ public:
|
||||
|
||||
if (left_tuple && right_tuple)
|
||||
{
|
||||
auto func = FunctionToOverloadResolverAdaptor(std::make_shared<FunctionComparison<Op, Name>>(check_decimal_overflow));
|
||||
auto func = std::make_shared<FunctionToOverloadResolverAdaptor>(std::make_shared<FunctionComparison<Op, Name>>(check_decimal_overflow));
|
||||
|
||||
bool has_nullable = false;
|
||||
bool has_null = false;
|
||||
@ -1181,7 +1181,7 @@ public:
|
||||
{
|
||||
ColumnsWithTypeAndName args = {{nullptr, left_tuple->getElements()[i], ""},
|
||||
{nullptr, right_tuple->getElements()[i], ""}};
|
||||
auto element_type = func.build(args)->getResultType();
|
||||
auto element_type = func->build(args)->getResultType();
|
||||
has_nullable = has_nullable || element_type->isNullable();
|
||||
has_null = has_null || element_type->onlyNull();
|
||||
}
|
||||
|
@ -211,7 +211,7 @@ namespace
|
||||
ColumnsWithTypeAndName args = arguments;
|
||||
args[0].column = args[0].column->cloneResized(input_rows_count)->convertToFullColumnIfConst();
|
||||
|
||||
auto impl = FunctionToOverloadResolverAdaptor(std::make_shared<FunctionTransform>()).build(args);
|
||||
auto impl = std::make_shared<FunctionToOverloadResolverAdaptor>(std::make_shared<FunctionTransform>())->build(args);
|
||||
|
||||
return impl->execute(args, result_type, input_rows_count);
|
||||
}
|
||||
|
@ -1,3 +1,5 @@
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/S3/Credentials.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
@ -693,6 +695,7 @@ S3CredentialsProviderChain::S3CredentialsProviderChain(
|
||||
static const char AWS_ECS_CONTAINER_CREDENTIALS_RELATIVE_URI[] = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI";
|
||||
static const char AWS_ECS_CONTAINER_CREDENTIALS_FULL_URI[] = "AWS_CONTAINER_CREDENTIALS_FULL_URI";
|
||||
static const char AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN[] = "AWS_CONTAINER_AUTHORIZATION_TOKEN";
|
||||
static const char AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN_PATH[] = "AWS_CONTAINER_AUTHORIZATION_TOKEN_PATH";
|
||||
static const char AWS_EC2_METADATA_DISABLED[] = "AWS_EC2_METADATA_DISABLED";
|
||||
|
||||
/// The only difference from DefaultAWSCredentialsProviderChain::DefaultAWSCredentialsProviderChain()
|
||||
@ -750,7 +753,22 @@ S3CredentialsProviderChain::S3CredentialsProviderChain(
|
||||
}
|
||||
else if (!absolute_uri.empty())
|
||||
{
|
||||
const auto token = Aws::Environment::GetEnv(AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN);
|
||||
auto token = Aws::Environment::GetEnv(AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN);
|
||||
const auto token_path = Aws::Environment::GetEnv(AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN_PATH);
|
||||
|
||||
if (!token_path.empty())
|
||||
{
|
||||
LOG_INFO(logger, "The environment variable value {} is {}", AWS_ECS_CONTAINER_AUTHORIZATION_TOKEN_PATH, token_path);
|
||||
|
||||
String token_from_file;
|
||||
|
||||
ReadBufferFromFile in(token_path);
|
||||
readStringUntilEOF(token_from_file, in);
|
||||
Poco::trimInPlace(token_from_file);
|
||||
|
||||
token = token_from_file;
|
||||
}
|
||||
|
||||
AddProvider(std::make_shared<Aws::Auth::TaskRoleCredentialsProvider>(absolute_uri.c_str(), token.c_str()));
|
||||
|
||||
/// DO NOT log the value of the authorization token for security purposes.
|
||||
|
@ -1120,6 +1120,13 @@ Chunk AsynchronousInsertQueue::processPreprocessedEntries(
|
||||
"Expected entry with data kind Preprocessed. Got: {}", entry->chunk.getDataKind());
|
||||
|
||||
Block block_to_insert = *block;
|
||||
if (block_to_insert.rows() == 0)
|
||||
{
|
||||
add_to_async_insert_log(entry, /*parsing_exception=*/ "", block_to_insert.rows(), block_to_insert.bytes());
|
||||
entry->resetChunk();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!isCompatibleHeader(block_to_insert, header))
|
||||
convertBlockToHeader(block_to_insert, header);
|
||||
|
||||
|
@ -273,6 +273,13 @@ namespace ServerSetting
|
||||
extern const ServerSettingsUInt64 max_replicated_sends_network_bandwidth_for_server;
|
||||
extern const ServerSettingsUInt64 tables_loader_background_pool_size;
|
||||
extern const ServerSettingsUInt64 tables_loader_foreground_pool_size;
|
||||
extern const ServerSettingsUInt64 prefetch_threadpool_pool_size;
|
||||
extern const ServerSettingsUInt64 prefetch_threadpool_queue_size;
|
||||
extern const ServerSettingsUInt64 load_marks_threadpool_pool_size;
|
||||
extern const ServerSettingsUInt64 load_marks_threadpool_queue_size;
|
||||
extern const ServerSettingsUInt64 threadpool_writer_pool_size;
|
||||
extern const ServerSettingsUInt64 threadpool_writer_queue_size;
|
||||
|
||||
}
|
||||
|
||||
namespace ErrorCodes
|
||||
@ -3215,9 +3222,8 @@ void Context::clearMarkCache() const
|
||||
ThreadPool & Context::getLoadMarksThreadpool() const
|
||||
{
|
||||
callOnce(shared->load_marks_threadpool_initialized, [&] {
|
||||
const auto & config = getConfigRef();
|
||||
auto pool_size = config.getUInt(".load_marks_threadpool_pool_size", 50);
|
||||
auto queue_size = config.getUInt(".load_marks_threadpool_queue_size", 1000000);
|
||||
auto pool_size = shared->server_settings[ServerSetting::load_marks_threadpool_pool_size];
|
||||
auto queue_size = shared->server_settings[ServerSetting::load_marks_threadpool_queue_size];
|
||||
shared->load_marks_threadpool = std::make_unique<ThreadPool>(
|
||||
CurrentMetrics::MarksLoaderThreads, CurrentMetrics::MarksLoaderThreadsActive, CurrentMetrics::MarksLoaderThreadsScheduled, pool_size, pool_size, queue_size);
|
||||
});
|
||||
@ -3410,9 +3416,9 @@ AsynchronousMetrics * Context::getAsynchronousMetrics() const
|
||||
ThreadPool & Context::getPrefetchThreadpool() const
|
||||
{
|
||||
callOnce(shared->prefetch_threadpool_initialized, [&] {
|
||||
const auto & config = getConfigRef();
|
||||
auto pool_size = config.getUInt(".prefetch_threadpool_pool_size", 100);
|
||||
auto queue_size = config.getUInt(".prefetch_threadpool_queue_size", 1000000);
|
||||
auto pool_size = shared->server_settings[ServerSetting::prefetch_threadpool_pool_size];
|
||||
auto queue_size = shared->server_settings[ServerSetting::prefetch_threadpool_queue_size];
|
||||
|
||||
shared->prefetch_threadpool = std::make_unique<ThreadPool>(
|
||||
CurrentMetrics::IOPrefetchThreads, CurrentMetrics::IOPrefetchThreadsActive, CurrentMetrics::IOPrefetchThreadsScheduled, pool_size, pool_size, queue_size);
|
||||
});
|
||||
@ -3422,8 +3428,7 @@ ThreadPool & Context::getPrefetchThreadpool() const
|
||||
|
||||
size_t Context::getPrefetchThreadpoolSize() const
|
||||
{
|
||||
const auto & config = getConfigRef();
|
||||
return config.getUInt(".prefetch_threadpool_pool_size", 100);
|
||||
return shared->server_settings[ServerSetting::prefetch_threadpool_pool_size];
|
||||
}
|
||||
|
||||
ThreadPool & Context::getBuildVectorSimilarityIndexThreadPool() const
|
||||
@ -5696,9 +5701,8 @@ IOUringReader & Context::getIOUringReader() const
|
||||
ThreadPool & Context::getThreadPoolWriter() const
|
||||
{
|
||||
callOnce(shared->threadpool_writer_initialized, [&] {
|
||||
const auto & config = getConfigRef();
|
||||
auto pool_size = config.getUInt(".threadpool_writer_pool_size", 100);
|
||||
auto queue_size = config.getUInt(".threadpool_writer_queue_size", 1000000);
|
||||
auto pool_size = shared->server_settings[ServerSetting::threadpool_writer_pool_size];
|
||||
auto queue_size = shared->server_settings[ServerSetting::threadpool_writer_queue_size];
|
||||
|
||||
shared->threadpool_writer = std::make_unique<ThreadPool>(
|
||||
CurrentMetrics::IOWriterThreads, CurrentMetrics::IOWriterThreadsActive, CurrentMetrics::IOWriterThreadsScheduled, pool_size, pool_size, queue_size);
|
||||
|
@ -1025,7 +1025,7 @@ void InterpreterSystemQuery::dropReplica(ASTSystemQuery & query)
|
||||
{
|
||||
ReplicatedTableStatus status;
|
||||
storage_replicated->getStatus(status);
|
||||
if (status.zookeeper_info.path == query.replica_zk_path)
|
||||
if (status.replica_path == remote_replica_path)
|
||||
throw Exception(ErrorCodes::TABLE_WAS_NOT_DROPPED,
|
||||
"There is a local table {}, which has the same table path in ZooKeeper. "
|
||||
"Please check the path in query. "
|
||||
|
@ -100,7 +100,7 @@ void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, U
|
||||
const auto query_info = process_list.getQueryInfo(query_id, false, true, false);
|
||||
if (!query_info)
|
||||
{
|
||||
LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryInfo", query_id);
|
||||
LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -156,8 +156,8 @@ std::optional<QueryMetricLogElement> QueryMetricLog::createLogMetricElement(cons
|
||||
{
|
||||
/// fmtlib supports subsecond formatting in 10.0.0. We're in 9.1.0, so we need to add the milliseconds ourselves.
|
||||
auto seconds = std::chrono::time_point_cast<std::chrono::seconds>(query_info_time);
|
||||
auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(query_info_time - seconds).count();
|
||||
LOG_DEBUG(logger, "Collecting query_metric_log for query {} with QueryStatusInfo from {:%Y.%m.%d %H:%M:%S}.{:05}. Schedule next: {}", query_id, seconds, milliseconds, schedule_next);
|
||||
auto microseconds = std::chrono::duration_cast<std::chrono::microseconds>(query_info_time - seconds).count();
|
||||
LOG_DEBUG(logger, "Collecting query_metric_log for query {} with QueryStatusInfo from {:%Y.%m.%d %H:%M:%S}.{:06}. Schedule next: {}", query_id, seconds, microseconds, schedule_next);
|
||||
|
||||
std::unique_lock lock(queries_mutex);
|
||||
auto query_status_it = queries.find(query_id);
|
||||
|
@ -15,16 +15,17 @@ namespace DB
|
||||
class NativeInputFormat final : public IInputFormat
|
||||
{
|
||||
public:
|
||||
NativeInputFormat(ReadBuffer & buf, const Block & header_, const FormatSettings & settings)
|
||||
NativeInputFormat(ReadBuffer & buf, const Block & header_, const FormatSettings & settings_)
|
||||
: IInputFormat(header_, &buf)
|
||||
, reader(std::make_unique<NativeReader>(
|
||||
buf,
|
||||
header_,
|
||||
0,
|
||||
settings,
|
||||
settings.defaults_for_omitted_fields ? &block_missing_values : nullptr))
|
||||
settings_,
|
||||
settings_.defaults_for_omitted_fields ? &block_missing_values : nullptr))
|
||||
, header(header_)
|
||||
, block_missing_values(header.columns())
|
||||
, settings(settings_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -55,7 +56,7 @@ public:
|
||||
|
||||
void setReadBuffer(ReadBuffer & in_) override
|
||||
{
|
||||
reader = std::make_unique<NativeReader>(in_, header, 0);
|
||||
reader = std::make_unique<NativeReader>(in_, header, 0, settings, settings.defaults_for_omitted_fields ? &block_missing_values : nullptr);
|
||||
IInputFormat::setReadBuffer(in_);
|
||||
}
|
||||
|
||||
@ -67,6 +68,7 @@ private:
|
||||
std::unique_ptr<NativeReader> reader;
|
||||
Block header;
|
||||
BlockMissingValues block_missing_values;
|
||||
const FormatSettings settings;
|
||||
size_t approx_bytes_read_for_chunk = 0;
|
||||
};
|
||||
|
||||
|
@ -262,7 +262,7 @@ MergeTreeReaderWide::FileStreams::iterator MergeTreeReaderWide::addStream(const
|
||||
/*num_columns_in_mark=*/ 1);
|
||||
|
||||
auto stream_settings = settings;
|
||||
stream_settings.is_low_cardinality_dictionary = substream_path.size() > 1 && substream_path[substream_path.size() - 2].type == ISerialization::Substream::Type::DictionaryKeys;
|
||||
stream_settings.is_low_cardinality_dictionary = ISerialization::isLowCardinalityDictionarySubcolumn(substream_path);
|
||||
|
||||
auto create_stream = [&]<typename Stream>()
|
||||
{
|
||||
|
@ -30,10 +30,11 @@ namespace ErrorCodes
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
// clang-format off
|
||||
|
||||
/** These settings represent fine tunes for internal details of MergeTree storages
|
||||
* and should not be changed by the user without a reason.
|
||||
*/
|
||||
|
||||
#define MERGE_TREE_SETTINGS(DECLARE, ALIAS) \
|
||||
DECLARE(UInt64, min_compress_block_size, 0, "When granule is written, compress the data in buffer if the size of pending uncompressed data is larger or equal than the specified threshold. If this setting is not set, the corresponding global setting is used.", 0) \
|
||||
DECLARE(UInt64, max_compress_block_size, 0, "Compress the pending uncompressed data in buffer if its size is larger or equal than the specified threshold. Block of data will be compressed even if the current granule is not finished. If this setting is not set, the corresponding global setting is used.", 0) \
|
||||
@ -88,7 +89,7 @@ namespace ErrorCodes
|
||||
DECLARE(UInt64, min_age_to_force_merge_seconds, 0, "If all parts in a certain range are older than this value, range will be always eligible for merging. Set to 0 to disable.", 0) \
|
||||
DECLARE(Bool, min_age_to_force_merge_on_partition_only, false, "Whether min_age_to_force_merge_seconds should be applied only on the entire partition and not on subset.", false) \
|
||||
DECLARE(UInt64, number_of_free_entries_in_pool_to_execute_optimize_entire_partition, 25, "When there is less than specified number of free entries in pool, do not try to execute optimize entire partition with a merge (this merge is created when set min_age_to_force_merge_seconds > 0 and min_age_to_force_merge_on_partition_only = true). This is to leave free threads for regular merges and avoid \"Too many parts\"", 0) \
|
||||
DECLARE(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", 0) \
|
||||
DECLARE(Bool, remove_rolled_back_parts_immediately, 1, "Setting for an incomplete experimental feature.", EXPERIMENTAL) \
|
||||
DECLARE(UInt64, replicated_max_mutations_in_one_entry, 10000, "Max number of mutation commands that can be merged together and executed in one MUTATE_PART entry (0 means unlimited)", 0) \
|
||||
DECLARE(UInt64, number_of_mutations_to_delay, 500, "If table has at least that many unfinished mutations, artificially slow down mutations of table. Disabled if set to 0", 0) \
|
||||
DECLARE(UInt64, number_of_mutations_to_throw, 1000, "If table has at least that many unfinished mutations, throw 'Too many mutations' exception. Disabled if set to 0", 0) \
|
||||
@ -98,7 +99,7 @@ namespace ErrorCodes
|
||||
DECLARE(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \
|
||||
DECLARE(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \
|
||||
DECLARE(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \
|
||||
DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", 0) \
|
||||
DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \
|
||||
\
|
||||
/** Inserts settings. */ \
|
||||
DECLARE(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \
|
||||
@ -214,14 +215,14 @@ namespace ErrorCodes
|
||||
DECLARE(Bool, enable_block_offset_column, false, "Enable persisting column _block_offset for each row.", 0) \
|
||||
\
|
||||
/** Experimental/work in progress feature. Unsafe for production. */ \
|
||||
DECLARE(UInt64, part_moves_between_shards_enable, 0, "Experimental/Incomplete feature to move parts between shards. Does not take into account sharding expressions.", 0) \
|
||||
DECLARE(UInt64, part_moves_between_shards_delay_seconds, 30, "Time to wait before/after moving parts between shards.", 0) \
|
||||
DECLARE(Bool, allow_remote_fs_zero_copy_replication, false, "Don't use this setting in production, because it is not ready.", 0) \
|
||||
DECLARE(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for zero-copy table-independent info.", 0) \
|
||||
DECLARE(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", 0) \
|
||||
DECLARE(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", 0) \
|
||||
DECLARE(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", 0) \
|
||||
DECLARE(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", 0) \
|
||||
DECLARE(UInt64, part_moves_between_shards_enable, 0, "Experimental/Incomplete feature to move parts between shards. Does not take into account sharding expressions.", EXPERIMENTAL) \
|
||||
DECLARE(UInt64, part_moves_between_shards_delay_seconds, 30, "Time to wait before/after moving parts between shards.", EXPERIMENTAL) \
|
||||
DECLARE(Bool, allow_remote_fs_zero_copy_replication, false, "Don't use this setting in production, because it is not ready.", BETA) \
|
||||
DECLARE(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for zero-copy table-independent info.", EXPERIMENTAL) \
|
||||
DECLARE(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", EXPERIMENTAL) \
|
||||
DECLARE(Bool, cache_populated_by_fetch, false, "Only available in ClickHouse Cloud", EXPERIMENTAL) \
|
||||
DECLARE(Bool, force_read_through_cache_for_merges, false, "Force read-through filesystem cache for merges", EXPERIMENTAL) \
|
||||
DECLARE(Bool, allow_experimental_replacing_merge_with_cleanup, false, "Allow experimental CLEANUP merges for ReplacingMergeTree with is_deleted column.", EXPERIMENTAL) \
|
||||
\
|
||||
/** Compress marks and primary key. */ \
|
||||
DECLARE(Bool, compress_marks, true, "Marks support compression, reduce mark file size and speed up network transmission.", 0) \
|
||||
@ -240,7 +241,7 @@ namespace ErrorCodes
|
||||
DECLARE(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree. Ignore option is purely for compatibility which might result in incorrect answer. Otherwise, if allowed, what is the action when merge, drop or rebuild.", 0) \
|
||||
|
||||
#define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \
|
||||
M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE)
|
||||
M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", SettingsTierType::OBSOLETE)
|
||||
|
||||
#define OBSOLETE_MERGE_TREE_SETTINGS(M, ALIAS) \
|
||||
/** Obsolete settings that do nothing but left for compatibility reasons. */ \
|
||||
@ -278,8 +279,9 @@ namespace ErrorCodes
|
||||
MERGE_TREE_SETTINGS(M, ALIAS) \
|
||||
OBSOLETE_MERGE_TREE_SETTINGS(M, ALIAS)
|
||||
|
||||
DECLARE_SETTINGS_TRAITS(MergeTreeSettingsTraits, LIST_OF_MERGE_TREE_SETTINGS)
|
||||
// clang-format on
|
||||
|
||||
DECLARE_SETTINGS_TRAITS(MergeTreeSettingsTraits, LIST_OF_MERGE_TREE_SETTINGS)
|
||||
|
||||
/** Settings for the MergeTree family of engines.
|
||||
* Could be loaded from config or from a CREATE TABLE query (SETTINGS clause).
|
||||
@ -650,7 +652,8 @@ void MergeTreeSettings::dumpToSystemMergeTreeSettingsColumns(MutableColumnsAndCo
|
||||
res_columns[5]->insert(max);
|
||||
res_columns[6]->insert(writability == SettingConstraintWritability::CONST);
|
||||
res_columns[7]->insert(setting.getTypeName());
|
||||
res_columns[8]->insert(setting.isObsolete());
|
||||
res_columns[8]->insert(setting.getTier() == SettingsTierType::OBSOLETE);
|
||||
res_columns[9]->insert(setting.getTier());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -29,6 +29,7 @@ extern const int CANNOT_FSYNC;
|
||||
struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
|
||||
{
|
||||
std::string hdfs_uri;
|
||||
std::string hdfs_file_path;
|
||||
hdfsFile fout;
|
||||
HDFSBuilderWrapper builder;
|
||||
HDFSFSPtr fs;
|
||||
@ -36,25 +37,24 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
|
||||
|
||||
WriteBufferFromHDFSImpl(
|
||||
const std::string & hdfs_uri_,
|
||||
const std::string & hdfs_file_path_,
|
||||
const Poco::Util::AbstractConfiguration & config_,
|
||||
int replication_,
|
||||
const WriteSettings & write_settings_,
|
||||
int flags)
|
||||
: hdfs_uri(hdfs_uri_)
|
||||
, hdfs_file_path(hdfs_file_path_)
|
||||
, builder(createHDFSBuilder(hdfs_uri, config_))
|
||||
, fs(createHDFSFS(builder.get()))
|
||||
, write_settings(write_settings_)
|
||||
{
|
||||
const size_t begin_of_path = hdfs_uri.find('/', hdfs_uri.find("//") + 2);
|
||||
const String path = hdfs_uri.substr(begin_of_path);
|
||||
|
||||
/// O_WRONLY meaning create or overwrite i.e., implies O_TRUNCAT here
|
||||
fout = hdfsOpenFile(fs.get(), path.c_str(), flags, 0, replication_, 0);
|
||||
fout = hdfsOpenFile(fs.get(), hdfs_file_path.c_str(), flags, 0, replication_, 0);
|
||||
|
||||
if (fout == nullptr)
|
||||
{
|
||||
throw Exception(ErrorCodes::CANNOT_OPEN_FILE, "Unable to open HDFS file: {} ({}) error: {}",
|
||||
path, hdfs_uri, std::string(hdfsGetLastError()));
|
||||
hdfs_file_path, hdfs_uri, std::string(hdfsGetLastError()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -71,7 +71,7 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
|
||||
rlock.unlock(std::max(0, bytes_written));
|
||||
|
||||
if (bytes_written < 0)
|
||||
throw Exception(ErrorCodes::NETWORK_ERROR, "Fail to write HDFS file: {} {}", hdfs_uri, std::string(hdfsGetLastError()));
|
||||
throw Exception(ErrorCodes::NETWORK_ERROR, "Fail to write HDFS file: {}, hdfs_uri: {}, {}", hdfs_file_path, hdfs_uri, std::string(hdfsGetLastError()));
|
||||
|
||||
if (write_settings.remote_throttler)
|
||||
write_settings.remote_throttler->add(bytes_written, ProfileEvents::RemoteWriteThrottlerBytes, ProfileEvents::RemoteWriteThrottlerSleepMicroseconds);
|
||||
@ -83,20 +83,21 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
|
||||
{
|
||||
int result = hdfsSync(fs.get(), fout);
|
||||
if (result < 0)
|
||||
throw ErrnoException(ErrorCodes::CANNOT_FSYNC, "Cannot HDFS sync {} {}", hdfs_uri, std::string(hdfsGetLastError()));
|
||||
throw ErrnoException(ErrorCodes::CANNOT_FSYNC, "Cannot HDFS sync {}, hdfs_url: {}, {}", hdfs_file_path, hdfs_uri, std::string(hdfsGetLastError()));
|
||||
}
|
||||
};
|
||||
|
||||
WriteBufferFromHDFS::WriteBufferFromHDFS(
|
||||
const std::string & hdfs_name_,
|
||||
const std::string & hdfs_uri_,
|
||||
const std::string & hdfs_file_path_,
|
||||
const Poco::Util::AbstractConfiguration & config_,
|
||||
int replication_,
|
||||
const WriteSettings & write_settings_,
|
||||
size_t buf_size_,
|
||||
int flags_)
|
||||
: WriteBufferFromFileBase(buf_size_, nullptr, 0)
|
||||
, impl(std::make_unique<WriteBufferFromHDFSImpl>(hdfs_name_, config_, replication_, write_settings_, flags_))
|
||||
, filename(hdfs_name_)
|
||||
, impl(std::make_unique<WriteBufferFromHDFSImpl>(hdfs_uri_, hdfs_file_path_, config_, replication_, write_settings_, flags_))
|
||||
, filename(hdfs_file_path_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -22,7 +22,8 @@ class WriteBufferFromHDFS final : public WriteBufferFromFileBase
|
||||
|
||||
public:
|
||||
WriteBufferFromHDFS(
|
||||
const String & hdfs_name_,
|
||||
const String & hdfs_uri_,
|
||||
const String & hdfs_file_path_,
|
||||
const Poco::Util::AbstractConfiguration & config_,
|
||||
int replication_,
|
||||
const WriteSettings & write_settings_ = {},
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Core/Settings.h>
|
||||
#include <Core/SettingsTierType.h>
|
||||
#include <DataTypes/DataTypeEnum.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
@ -30,6 +31,14 @@ ColumnsDescription SystemMergeTreeSettings<replicated>::getColumnsDescription()
|
||||
},
|
||||
{"type", std::make_shared<DataTypeString>(), "Setting type (implementation specific string value)."},
|
||||
{"is_obsolete", std::make_shared<DataTypeUInt8>(), "Shows whether a setting is obsolete."},
|
||||
{"tier", getSettingsTierEnum(), R"(
|
||||
Support level for this feature. ClickHouse features are organized in tiers, varying depending on the current status of their
|
||||
development and the expectations one might have when using them:
|
||||
* PRODUCTION: The feature is stable, safe to use and does not have issues interacting with other PRODUCTION features.
|
||||
* BETA: The feature is stable and safe. The outcome of using it together with other features is unknown and correctness is not guaranteed. Testing and reports are welcome.
|
||||
* EXPERIMENTAL: The feature is under development. Only intended for developers and ClickHouse enthusiasts. The feature might or might not work and could be removed at any time.
|
||||
* OBSOLETE: No longer supported. Either it is already removed or it will be removed in future releases.
|
||||
)"},
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -2,6 +2,8 @@

#include <Access/SettingsConstraintsAndProfileIDs.h>
#include <Core/Settings.h>
#include <Core/SettingsTierType.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
@ -34,6 +36,14 @@ ColumnsDescription StorageSystemSettings::getColumnsDescription()
        {"default", std::make_shared<DataTypeString>(), "Setting default value."},
        {"alias_for", std::make_shared<DataTypeString>(), "Flag that shows whether this name is an alias to another setting."},
        {"is_obsolete", std::make_shared<DataTypeUInt8>(), "Shows whether a setting is obsolete."},
        {"tier", getSettingsTierEnum(), R"(
Support level for this feature. ClickHouse features are organized in tiers, varying depending on the current status of their
development and the expectations one might have when using them:
* PRODUCTION: The feature is stable, safe to use and does not have issues interacting with other PRODUCTION features.
* BETA: The feature is stable and safe. The outcome of using it together with other features is unknown and correctness is not guaranteed. Testing and reports are welcome.
* EXPERIMENTAL: The feature is under development. Only intended for developers and ClickHouse enthusiasts. The feature might or might not work and could be removed at any time.
* OBSOLETE: No longer supported. Either it is already removed or it will be removed in future releases.
)"},
    };
}
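With the column description above in place, `tier` becomes a regular filterable column of `system.settings`; for example, mirroring the stateless test added later in this diff:

```sql
SELECT tier, count() AS n
FROM system.settings
GROUP BY tier
ORDER BY tier;
```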
@ -9,6 +9,7 @@ def fill_nodes(nodes, shard):
|
||||
for node in nodes:
|
||||
node.query(
|
||||
"""
|
||||
DROP DATABASE IF EXISTS test SYNC;
|
||||
CREATE DATABASE test;
|
||||
|
||||
CREATE TABLE test.test_table(date Date, id UInt32)
|
||||
@ -21,6 +22,7 @@ def fill_nodes(nodes, shard):
|
||||
|
||||
node.query(
|
||||
"""
|
||||
DROP DATABASE IF EXISTS test1 SYNC;
|
||||
CREATE DATABASE test1;
|
||||
|
||||
CREATE TABLE test1.test_table(date Date, id UInt32)
|
||||
@ -33,6 +35,7 @@ def fill_nodes(nodes, shard):
|
||||
|
||||
node.query(
|
||||
"""
|
||||
DROP DATABASE IF EXISTS test2 SYNC;
|
||||
CREATE DATABASE test2;
|
||||
|
||||
CREATE TABLE test2.test_table(date Date, id UInt32)
|
||||
@ -45,7 +48,8 @@ def fill_nodes(nodes, shard):
|
||||
|
||||
node.query(
|
||||
"""
|
||||
CREATE DATABASE test3;
|
||||
DROP DATABASE IF EXISTS test3 SYNC;
|
||||
CREATE DATABASE test3;
|
||||
|
||||
CREATE TABLE test3.test_table(date Date, id UInt32)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test3/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date)
|
||||
@ -57,6 +61,7 @@ def fill_nodes(nodes, shard):
|
||||
|
||||
node.query(
|
||||
"""
|
||||
DROP DATABASE IF EXISTS test4 SYNC;
|
||||
CREATE DATABASE test4;
|
||||
|
||||
CREATE TABLE test4.test_table(date Date, id UInt32)
|
||||
@ -84,9 +89,6 @@ node_1_3 = cluster.add_instance(
|
||||
def start_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
fill_nodes([node_1_1, node_1_2], 1)
|
||||
|
||||
yield cluster
|
||||
|
||||
except Exception as ex:
|
||||
@ -102,6 +104,8 @@ def check_exists(zk, path):
|
||||
|
||||
|
||||
def test_drop_replica(start_cluster):
|
||||
fill_nodes([node_1_1, node_1_2], 1)
|
||||
|
||||
node_1_1.query(
|
||||
"INSERT INTO test.test_table SELECT number, toString(number) FROM numbers(100)"
|
||||
)
|
||||
@ -142,11 +146,7 @@ def test_drop_replica(start_cluster):
|
||||
shard=1
|
||||
)
|
||||
)
|
||||
assert "There is a local table" in node_1_2.query_and_get_error(
|
||||
"SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format(
|
||||
shard=1
|
||||
)
|
||||
)
|
||||
|
||||
assert "There is a local table" in node_1_1.query_and_get_error(
|
||||
"SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format(
|
||||
shard=1
|
||||
@ -222,11 +222,22 @@ def test_drop_replica(start_cluster):
|
||||
)
|
||||
assert exists_replica_1_1 == None
|
||||
|
||||
node_1_2.query("SYSTEM DROP REPLICA 'node_1_1'")
|
||||
exists_replica_1_1 = check_exists(
|
||||
node_1_1.query("ATTACH DATABASE test4")
|
||||
|
||||
node_1_2.query("DETACH TABLE test4.test_table")
|
||||
node_1_1.query(
|
||||
"SYSTEM DROP REPLICA 'node_1_2' FROM ZKPATH '/clickhouse/tables/test4/{shard}/replicated/test_table'".format(
|
||||
shard=1
|
||||
)
|
||||
)
|
||||
exists_replica_1_2 = check_exists(
|
||||
zk,
|
||||
"/clickhouse/tables/test4/{shard}/replicated/test_table/replicas/{replica}".format(
|
||||
shard=1, replica="node_1_1"
|
||||
shard=1, replica="node_1_2"
|
||||
),
|
||||
)
|
||||
assert exists_replica_1_1 == None
|
||||
assert exists_replica_1_2 == None
|
||||
|
||||
node_1_1.query("ATTACH DATABASE test")
|
||||
for i in range(1, 4):
|
||||
node_1_1.query("ATTACH DATABASE test{}".format(i))
|
||||
|
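For readers following the integration test above, the statement it exercises is the ZKPATH form of replica removal; a standalone sketch using the test's own path (not a production recommendation):

```sql
-- Drop the ZooKeeper metadata of a detached replica by path, as done in test_drop_replica.
SYSTEM DROP REPLICA 'node_1_2' FROM ZKPATH '/clickhouse/tables/test4/1/replicated/test_table';
```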
@ -396,6 +396,21 @@ def test_read_files_with_spaces(started_cluster):
|
||||
node1.query(f"drop table test")
|
||||
|
||||
|
||||
def test_write_files_with_spaces(started_cluster):
|
||||
fs = HdfsClient(hosts=started_cluster.hdfs_ip)
|
||||
dir = "/itime=2024-10-24 10%3A02%3A04"
|
||||
fs.mkdirs(dir)
|
||||
|
||||
node1.query(
|
||||
f"insert into function hdfs('hdfs://hdfs1:9000{dir}/test.csv', TSVRaw) select 123 settings hdfs_truncate_on_insert=1"
|
||||
)
|
||||
result = node1.query(
|
||||
f"select * from hdfs('hdfs://hdfs1:9000{dir}/test.csv', TSVRaw)"
|
||||
)
|
||||
assert int(result) == 123
|
||||
fs.delete(dir, recursive=True)
|
||||
|
||||
|
||||
def test_truncate_table(started_cluster):
|
||||
hdfs_api = started_cluster.hdfs_api
|
||||
node1.query(
|
||||
|
@ -4193,7 +4193,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator
|
||||
],
|
||||
"expected": {
|
||||
"raw_message": "050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801",
|
||||
"error": "Cannot convert: String to UInt16",
|
||||
"error": "Cannot parse string 'BAD' as UInt16",
|
||||
},
|
||||
"printable": False,
|
||||
},
|
||||
|
@ -1,4 +1,4 @@
|
||||
send_timeout 300 0 Timeout for sending data to the network, in seconds. If a client needs to send some data but is not able to send any bytes in this interval, the exception is thrown. If you set this setting on the client, the \'receive_timeout\' for the socket will also be set on the corresponding connection end on the server. \N \N 0 Seconds 300 0
|
||||
storage_policy default 0 Name of storage disk policy \N \N 0 String 0
|
||||
send_timeout 300 0 Timeout for sending data to the network, in seconds. If a client needs to send some data but is not able to send any bytes in this interval, the exception is thrown. If you set this setting on the client, the \'receive_timeout\' for the socket will also be set on the corresponding connection end on the server. \N \N 0 Seconds 300 0 Production
|
||||
storage_policy default 0 Name of storage disk policy \N \N 0 String 0 Production
|
||||
1
|
||||
1
|
||||
|
@ -342,7 +342,8 @@ CREATE TABLE system.merge_tree_settings
|
||||
`max` Nullable(String),
|
||||
`readonly` UInt8,
|
||||
`type` String,
|
||||
`is_obsolete` UInt8
|
||||
`is_obsolete` UInt8,
|
||||
`tier` Enum8('Production' = 0, 'Obsolete' = 4, 'Experimental' = 8, 'Beta' = 12)
|
||||
)
|
||||
ENGINE = SystemMergeTreeSettings
|
||||
COMMENT 'Contains a list of all MergeTree engine specific settings, their current and default values along with descriptions. You may change any of them in SETTINGS section in CREATE query.'
|
||||
@ -932,7 +933,8 @@ CREATE TABLE system.replicated_merge_tree_settings
|
||||
`max` Nullable(String),
|
||||
`readonly` UInt8,
|
||||
`type` String,
|
||||
`is_obsolete` UInt8
|
||||
`is_obsolete` UInt8,
|
||||
`tier` Enum8('Production' = 0, 'Obsolete' = 4, 'Experimental' = 8, 'Beta' = 12)
|
||||
)
|
||||
ENGINE = SystemReplicatedMergeTreeSettings
|
||||
COMMENT 'Contains a list of all ReplicatedMergeTree engine specific settings, their current and default values along with descriptions. You may change any of them in SETTINGS section in CREATE query. '
|
||||
@ -1009,7 +1011,8 @@ CREATE TABLE system.settings
|
||||
`type` String,
|
||||
`default` String,
|
||||
`alias_for` String,
|
||||
`is_obsolete` UInt8
|
||||
`is_obsolete` UInt8,
|
||||
`tier` Enum8('Production' = 0, 'Obsolete' = 4, 'Experimental' = 8, 'Beta' = 12)
|
||||
)
|
||||
ENGINE = SystemSettings
|
||||
COMMENT 'Contains a list of all user-level settings (which can be modified in a scope of query or session), their current and default values along with descriptions.'
|
||||
|
@ -1,7 +1,7 @@
|
||||
['{ArraySizes}','{ArrayElements, Regular}']
|
||||
['{ArraySizes}','{ArrayElements, TupleElement(keys), Regular}','{ArrayElements, TupleElement(values), Regular}']
|
||||
['{TupleElement(1), Regular}','{TupleElement(2), Regular}','{TupleElement(3), Regular}']
|
||||
['{DictionaryKeys, Regular}','{DictionaryIndexes}']
|
||||
['{DictionaryKeys}','{DictionaryIndexes}']
|
||||
['{NullMap}','{NullableElements, Regular}']
|
||||
['{ArraySizes}','{ArrayElements, Regular}']
|
||||
['{ArraySizes}','{ArrayElements, TupleElement(keys), Regular}','{ArrayElements, TupleElement(values), Regular}']
|
||||
|
@ -24,7 +24,7 @@ function check_log()
|
||||
$CLICKHOUSE_CLIENT -m -q """
|
||||
SELECT '--Interval $interval: check that amount of events is correct';
|
||||
SELECT
|
||||
count() BETWEEN (ceil(2500 / $interval) * 0.8) AND (ceil(2500 / $interval) * 1.2)
|
||||
count() BETWEEN ((ceil(2500 / $interval) - 1) * 0.8) AND ((ceil(2500 / $interval) + 1) * 1.2)
|
||||
FROM system.query_metric_log
|
||||
WHERE event_date >= yesterday() AND query_id = '${query_prefix}_${interval}'
|
||||
"""
|
||||
|
@ -0,0 +1,9 @@
|
||||
1 name1
|
||||
2 name2
|
||||
3
|
||||
4
|
||||
5
|
||||
Ok Preprocessed 2
|
||||
Ok Preprocessed 3
|
||||
Ok Preprocessed 0
|
||||
Ok Preprocessed 0
|
27
tests/queries/0_stateless/03257_async_insert_native_empty_block.sh
Executable file
@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CUR_DIR"/../shell_config.sh
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "
|
||||
DROP TABLE IF EXISTS json_square_brackets;
|
||||
CREATE TABLE json_square_brackets (id UInt32, name String) ENGINE = MergeTree ORDER BY tuple()
|
||||
"
|
||||
|
||||
MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --async_insert 1 --wait_for_async_insert 1"
|
||||
|
||||
echo '[{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]' | $MY_CLICKHOUSE_CLIENT -q "INSERT INTO json_square_brackets FORMAT JSONEachRow"
|
||||
|
||||
echo '[{"id": 3}, {"id": 4}, {"id": 5}]' | $MY_CLICKHOUSE_CLIENT -q "INSERT INTO json_square_brackets FORMAT JSONEachRow"
|
||||
|
||||
echo '[]' | $MY_CLICKHOUSE_CLIENT -q "INSERT INTO json_square_brackets FORMAT JSONEachRow"
|
||||
|
||||
echo '' | $MY_CLICKHOUSE_CLIENT -q "INSERT INTO json_square_brackets FORMAT JSONEachRow"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "
|
||||
SYSTEM FLUSH LOGS;
|
||||
SELECT * FROM json_square_brackets ORDER BY id;
|
||||
SELECT status, data_kind, rows FROM system.asynchronous_insert_log WHERE database = currentDatabase() AND table = 'json_square_brackets' ORDER BY event_time_microseconds;
|
||||
DROP TABLE json_square_brackets;
|
||||
"
|
@ -0,0 +1,3 @@
|
||||
{"a-b-c":"43","a-b\\/c-d\\/e":"44","a\\/b\\/c":"42"}
|
||||
42 43 44
|
||||
42 43 44
|
10
tests/queries/0_stateless/03257_json_escape_file_names.sql
Normal file
@ -0,0 +1,10 @@
|
||||
set allow_experimental_json_type = 1;
|
||||
drop table if exists test;
|
||||
create table test (json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0;
|
||||
insert into test format JSONAsObject {"a/b/c" : 42, "a-b-c" : 43, "a-b/c-d/e" : 44};
|
||||
|
||||
select * from test;
|
||||
select json.`a/b/c`, json.`a-b-c`, json.`a-b/c-d/e` from test;
|
||||
select json.`a/b/c`.:Int64, json.`a-b-c`.:Int64, json.`a-b/c-d/e`.:Int64 from test;
|
||||
drop table test;
|
||||
|
10
tests/queries/0_stateless/03257_setting_tiers.reference
Normal file
@ -0,0 +1,10 @@
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
||||
1
|
11
tests/queries/0_stateless/03257_setting_tiers.sql
Normal file
@ -0,0 +1,11 @@
SELECT count() > 0 FROM system.settings WHERE tier = 'Production';
SELECT count() > 0 FROM system.settings WHERE tier = 'Beta';
SELECT count() > 0 FROM system.settings WHERE tier = 'Experimental';
SELECT count() > 0 FROM system.settings WHERE tier = 'Obsolete';
SELECT count() == countIf(tier IN ['Production', 'Beta', 'Experimental', 'Obsolete']) FROM system.settings;

SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Production';
SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Beta';
SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Experimental';
SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Obsolete';
SELECT count() == countIf(tier IN ['Production', 'Beta', 'Experimental', 'Obsolete']) FROM system.merge_tree_settings;
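Beyond these smoke checks, the same column makes it easy to audit non-production features enabled in a session; an illustrative query (assumes the new `tier` column is available):

```sql
SELECT name, value, tier
FROM system.settings
WHERE changed AND tier != 'Production';
```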
@ -0,0 +1,6 @@
|
||||
SET allow_experimental_dynamic_type = 1;
|
||||
DROP TABLE IF EXISTS t0;
|
||||
CREATE TABLE t0 (c0 Tuple(c1 Int,c2 Dynamic)) ENGINE = Memory();
|
||||
SELECT 1 FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0;
|
||||
DROP TABLE t0;
|
||||
|
@ -0,0 +1 @@
|
||||
0
|
17
tests/queries/0_stateless/03259_native_http_async_insert_settings.sh
Executable file
@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CUR_DIR"/../shell_config.sh
|
||||
|
||||
|
||||
$CLICKHOUSE_CLIENT -q "drop table if exists test"
|
||||
$CLICKHOUSE_CLIENT -q "create table test (x UInt32) engine=Memory";
|
||||
|
||||
url="${CLICKHOUSE_URL}&async_insert=1&wait_for_async_insert=1"
|
||||
|
||||
$CLICKHOUSE_LOCAL -q "select NULL::Nullable(UInt32) as x format Native" | ${CLICKHOUSE_CURL} -sS "$url&query=INSERT%20INTO%20test%20FORMAT%20Native" --data-binary @-
|
||||
|
||||
$CLICKHOUSE_CLIENT -q "select * from test";
|
||||
$CLICKHOUSE_CLIENT -q "drop table test"
|
||||
|
@ -0,0 +1,20 @@
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
||||
12345678
|
@ -0,0 +1,12 @@
|
||||
set allow_experimental_dynamic_type = 1;
|
||||
set min_bytes_to_use_direct_io = 0;
|
||||
|
||||
drop table if exists test;
|
||||
create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity=1, use_adaptive_write_buffer_for_dynamic_subcolumns=0, max_compress_block_size=8, min_compress_block_size=8, use_compact_variant_discriminators_serialization=0;
|
||||
|
||||
insert into test select number, '12345678'::LowCardinality(String) from numbers(20);
|
||||
|
||||
select d.`LowCardinality(String)` from test settings max_threads=1;
|
||||
|
||||
drop table test;
|
||||
|
@ -53,7 +53,7 @@ done
|
||||
if (( STATUS != 0 )); then
|
||||
echo "====== Errors found ======"
|
||||
echo "To exclude some words add them to the dictionary file \"${ASPELL_IGNORE_PATH}/aspell-dict.txt\""
|
||||
echo "You can also run ${0} -i to see the errors interactively and fix them or add to the dictionary file"
|
||||
echo "You can also run '$(realpath --relative-base=${ROOT_PATH} ${0}) -i' to see the errors interactively and fix them or add to the dictionary file"
|
||||
fi
|
||||
|
||||
exit ${STATUS}
|
||||
|