Compare commits

...

52 Commits

Author SHA1 Message Date
李扬
9637a5d32e
Merge a2f9329e18 into 733c57dae7 2024-09-16 06:05:55 +00:00
Robert Schulze
733c57dae7
Merge pull request #69583 from rschu1ze/better-fix-67476
A better fix for #67476
2024-09-15 09:35:37 +00:00
Nikita Taranov
918ead070a
Merge pull request #69602 from ClickHouse/rm_explicit_initial_announce_from_RFMT
Remove explicit announce from local replica in ReadFromMergeTree
2024-09-14 17:58:41 +00:00
Nikita Taranov
8fd9345d2d fix 2024-09-13 21:26:58 +01:00
Michael Stetsyuk
98a2c1c638
Merge pull request #69274 from ClickHouse/fix-metadata-version-in-zookeeper
fix `metadata_version` in ZooKeeper
2024-09-13 17:11:21 +00:00
Nikita Taranov
bab574d674
Merge pull request #69404 from bigo-sg/69135
`have_compressed` is lost in `HashJoin::reuseJoinedData`
2024-09-13 16:39:43 +00:00
Nikita Taranov
57a6a64d8c fix 2024-09-13 17:31:47 +01:00
Nikita Taranov
de78992966
Update tests/integration/test_parallel_replicas_snapshot_from_initiator/test.py
Co-authored-by: Igor Nikonov <954088+devcrafter@users.noreply.github.com>
2024-09-13 18:25:51 +02:00
Nikita Taranov
42670a46d4 impl 2024-09-13 17:00:46 +01:00
Alexander Tokmakov
8111a32c72
Merge pull request #69539 from ClickHouse/evillique-patch-2
Fix attach of ReplicatedMergeTree tables in Replicated databases
2024-09-13 13:06:21 +00:00
Michael Stetsyuk
721e9a7356 empty 2024-09-13 12:38:58 +00:00
Michael Stetsyuk
1b1db0081f do not fix metadata_version if replica is read_only 2024-09-13 12:38:25 +00:00
Robert Schulze
51f3245030
A better fix for #67476
Throw an exception instead of silently ignoring two conflicting settings.
2024-09-13 11:45:15 +00:00
Daniil Ivanik
2fce90ab76
Merge pull request #66782 from a-a-f/input_format_json_empty_as_default
Add support for the `input_format_json_empty_as_default` setting
2024-09-13 10:33:45 +00:00
Yarik Briukhovetskyi
e0c4a88f98
Merge pull request #69514 from aiven-sal/aiven-sal/showcolumns
Correctly handle tables' names with dots in SHOW COLUMNS and SHOW INDEX
2024-09-13 10:12:53 +00:00
János Benjamin Antal
d5c3adabde
Merge pull request #69301 from aohoyd/master
Add xml char escaping to docker's entrypoint
2024-09-13 10:11:30 +00:00
lgbo-ustc
08fd6c8ab6 have_compressed is lost in reuseJoinedData 2024-09-13 17:18:27 +08:00
Nikita Taranov
f330fdb1bf
Merge pull request #69406 from canhld94/parallel_join_limit_threads
Not retaining thread in concurrent hash join threadpool
2024-09-13 09:14:48 +00:00
Michael Kolupaev
e1a206c84d
Merge pull request #64953 from ClickHouse/tomic
Disallow creating refreshable MV on Linux < 3.15
2024-09-13 03:42:28 +00:00
Duc Canh Le
59763a937e Merge branch 'master' into parallel_join_limit_threads
Fix CI
2024-09-13 01:47:20 +00:00
Michael Kolupaev
7e99f05981 Merge remote-tracking branch 'origin/master' into tomic 2024-09-12 21:45:09 +00:00
robot-clickhouse
1da5729f89 Automatic style fix 2024-09-11 21:08:43 +00:00
Nikolay Degterinsky
8e3ba4bd6c
Add test 2024-09-11 22:59:06 +02:00
Nikolay Degterinsky
b30aabf635
Fix attach of ReplicatedMergeTree tables in Replicated databases 2024-09-11 22:38:54 +02:00
Salvatore Mesoraca
dca72d2e0b Add tests for SHOW INDEX from tables with dots 2024-09-11 15:39:02 +02:00
Salvatore Mesoraca
146edca4f7 Add tests for SHOW COLUMNS from tables with dots 2024-09-11 15:35:48 +02:00
Salvatore Mesoraca
3556cf92c3 Allow SHOW INDEXES to work with tables that contain dots in the name 2024-09-11 15:15:31 +02:00
Salvatore Mesoraca
075a85f15c Allow SHOW COLUMNS to work with tables that contain dots in the name 2024-09-11 15:15:08 +02:00
Duc Canh Le
085be6bf59 remove unwanted include
Signed-off-by: Duc Canh Le <duccanh.le@ahrefs.com>
2024-09-11 06:42:48 +00:00
Michael Kolupaev
263cd9e1c3 Fix error message 2024-09-10 19:41:15 +00:00
Michael Kolupaev
5539faa54e Disallow creating refreshable MV on Linux < 3.15 2024-09-10 19:41:15 +00:00
Michael Stetsyuk
be55e1d2e1 better 2024-09-10 13:52:40 +00:00
Duc Canh Le
d089a56272 only set max_free_threads_ = 0 when creating pool
Signed-off-by: Duc Canh Le <duccanh.le@ahrefs.com>
2024-09-10 05:51:48 +00:00
Duc Canh Le
11478b949f limit threadpool size in concurrent hash join
Signed-off-by: Duc Canh Le <duccanh.le@ahrefs.com>
2024-09-10 03:12:44 +00:00
Michael Stetsyuk
c9aedee24f
Merge branch 'master' into fix-metadata-version-in-zookeeper 2024-09-05 16:52:56 +01:00
Michael Stetsyuk
32cfdc98b2 fix metadata_version in keeper 2024-09-05 15:51:37 +00:00
Alexey Olshanskiy
d17b20705a
Add char escaping to docker's entrypoint 2024-09-05 17:55:56 +03:00
Kruglov Pavel
2c0ddd10a0
Fix build 2024-08-28 13:49:48 +02:00
Kruglov Pavel
bf4b53390b
Merge branch 'master' into input_format_json_empty_as_default 2024-08-26 13:40:48 +02:00
Kruglov Pavel
34d02304d3
Merge branch 'master' into input_format_json_empty_as_default 2024-08-21 14:50:36 +02:00
Alexis Arnaud
29bc7cf5d5 post-review changes 2024-08-14 12:03:29 +02:00
Alexis Arnaud
a39a4b1080 better 2024-08-13 18:30:43 +02:00
Alexis Arnaud
229fffcd56 post-review changes 2024-08-13 18:30:43 +02:00
Alexis Arnaud
3c586d80c8 post-rebase fixes 2024-08-13 18:30:43 +02:00
Alexis Arnaud
0ff90e4a5c post-review changes 2024-08-13 18:30:43 +02:00
Alexis Arnaud
5d6d378f24 renumbered tests 2024-08-13 18:30:43 +02:00
Alexis Arnaud
14dad25adc trigger build 2024-08-13 18:30:43 +02:00
Alexis Arnaud
906a181b97 fix for clang-tidy 2024-08-13 18:30:43 +02:00
Alexis Arnaud
7b09ec9ccb added input_format_json_empty_as_default to setting changes history 2024-08-13 18:30:43 +02:00
Alexis Arnaud
d5bea37c96 post-review changes 2024-08-13 18:30:43 +02:00
Alexis Arnaud
ad24989b31 slightly better comment 2024-08-13 18:30:43 +02:00
Alexis Arnaud
d36176ad85 Support for the input_format_json_empty_as_default setting 2024-08-13 18:30:43 +02:00
45 changed files with 712 additions and 186 deletions

View File

@@ -109,7 +109,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
     <networks>
         <ip>::/0</ip>
     </networks>
-    <password>${CLICKHOUSE_PASSWORD}</password>
+    <password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>
     <quota>default</quota>
     <access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
 </${CLICKHOUSE_USER}>
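The substituted password is wrapped in CDATA, and the bash expansion `${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}` splits any `]]>` inside the value so it cannot terminate the CDATA section early. A minimal self-contained C++ sketch of the same escaping rule (the helper name is ours, not part of the patch):

#include <iostream>
#include <string>

// Make an arbitrary string safe to embed in an XML CDATA section by
// splitting every "]]>" occurrence, mirroring the bash expansion above.
std::string escapeForCdata(const std::string & raw)
{
    std::string out = "<![CDATA[";
    std::size_t pos = 0;
    while (true)
    {
        std::size_t hit = raw.find("]]>", pos);
        if (hit == std::string::npos)
        {
            out.append(raw, pos, raw.size() - pos);
            break;
        }
        // Close the CDATA right after "]]", then reopen it and emit the ">".
        out.append(raw, pos, hit + 2 - pos);
        out += "]]><![CDATA[>";
        pos = hit + 3;
    }
    out += "]]>";
    return out;
}

int main()
{
    std::cout << escapeForCdata("pass]]>word") << '\n';
    // prints: <![CDATA[pass]]]]><![CDATA[>word]]>
}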

View File

@@ -1396,6 +1396,7 @@ SELECT * FROM json_each_row_nested
 - [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in json object for named tuples. Default value - `false`.
 - [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
 - [input_format_json_throw_on_bad_escape_sequence](/docs/en/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) - throw an exception if JSON string contains bad escape sequence. If disabled, bad escape sequences will remain as is in the data. Default value - `true`.
+- [input_format_json_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_json_empty_as_default) - treat empty fields in JSON input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
 - [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
 - [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
 - [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.

View File

@@ -752,6 +752,17 @@ Possible values:
 Default value: 0.
+
+### input_format_json_empty_as_default {#input_format_json_empty_as_default}
+
+When enabled, replace empty input fields in JSON with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.
+
+Possible values:
+
+- 0 — Disable.
+- 1 — Enable.
+
+Default value: 0.
 ## TSV format settings {#tsv-format-settings}
 ### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
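Per field, the setting only kicks in when the raw JSON token is exactly `""`; anything else still has to parse as the column type. A rough standalone C++ sketch of that per-field decision, assuming the token has already been cut out of the JSON row (the helper name and int-only parsing are ours):

#include <iostream>
#include <optional>
#include <string>
#include <string_view>

// Empty string literal ("") yields the column default; otherwise the token
// must parse as the expected type (int here, for brevity).
std::optional<int> parseFieldOrDefault(std::string_view token, int default_value, bool empty_as_default)
{
    if (empty_as_default && token == "\"\"")
        return default_value;   // corresponds to insertDefault() in the serializations
    try
    {
        return std::stoi(std::string(token));
    }
    catch (...)
    {
        return std::nullopt;    // the try-variants return false instead of throwing
    }
}

int main()
{
    std::cout << parseFieldOrDefault("42", 0, true).value() << '\n';        // 42
    std::cout << parseFieldOrDefault("\"\"", 7, true).value() << '\n';      // 7 (default)
    std::cout << parseFieldOrDefault("\"\"", 7, false).has_value() << '\n'; // 0 (parse error)
}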

View File

@@ -609,6 +609,7 @@
     M(728, UNEXPECTED_DATA_TYPE) \
     M(729, ILLEGAL_TIME_SERIES_TAGS) \
     M(730, REFRESH_FAILED) \
+    M(731, QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE) \
     \
     M(900, DISTRIBUTED_CACHE_ERROR) \
     M(901, CANNOT_USE_DISTRIBUTED_CACHE) \

View File

@@ -57,11 +57,13 @@ namespace ErrorCodes
 namespace DB
 {
-static bool supportsAtomicRenameImpl()
+static std::optional<std::string> supportsAtomicRenameImpl()
 {
     VersionNumber renameat2_minimal_version(3, 15, 0);
     VersionNumber linux_version(Poco::Environment::osVersion());
-    return linux_version >= renameat2_minimal_version;
+    if (linux_version >= renameat2_minimal_version)
+        return std::nullopt;
+    return fmt::format("Linux kernel 3.15+ is required, got {}", linux_version.toString());
 }
 static bool renameat2(const std::string & old_path, const std::string & new_path, int flags)
@@ -97,10 +99,14 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
         ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path);
 }
-bool supportsAtomicRename()
+bool supportsAtomicRename(std::string * out_message)
 {
-    static bool supports = supportsAtomicRenameImpl();
-    return supports;
+    static auto error = supportsAtomicRenameImpl();
+    if (!error.has_value())
+        return true;
+    if (out_message)
+        *out_message = error.value();
+    return false;
 }
 }
@@ -152,16 +158,22 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
 }
-static bool supportsAtomicRenameImpl()
+static std::optional<std::string> supportsAtomicRenameImpl()
 {
     auto fun = dlsym(RTLD_DEFAULT, "renamex_np");
-    return fun != nullptr;
+    if (fun != nullptr)
+        return std::nullopt;
+    return "macOS 10.12 or later is required";
 }
-bool supportsAtomicRename()
+bool supportsAtomicRename(std::string * out_message)
 {
-    static bool supports = supportsAtomicRenameImpl();
-    return supports;
+    static auto error = supportsAtomicRenameImpl();
+    if (!error.has_value())
+        return true;
+    if (out_message)
+        *out_message = error.value();
+    return false;
 }
 }
@@ -179,8 +191,10 @@ static bool renameat2(const std::string &, const std::string &, int)
     return false;
 }
-bool supportsAtomicRename()
+bool supportsAtomicRename(std::string * out_message)
 {
+    if (out_message)
+        *out_message = "only Linux and macOS are supported";
     return false;
 }

View File

@@ -6,7 +6,7 @@ namespace DB
 {
 /// Returns true, if the following functions supported by the system
-bool supportsAtomicRename();
+bool supportsAtomicRename(std::string * out_message = nullptr);
 /// Atomically rename old_path to new_path. If new_path exists, do not overwrite it and throw exception
 void renameNoReplace(const std::string & old_path, const std::string & new_path);
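The header keeps the old call shape (`supportsAtomicRename()` still compiles) while new callers can pass a `std::string *` to learn why the probe failed. A self-contained sketch of this optional-plus-out-parameter pattern, with our own names rather than the ClickHouse ones:

#include <iostream>
#include <optional>
#include <string>

// Probe the capability once; nullopt means "supported", otherwise the
// value explains why not. Mirrors supportsAtomicRenameImpl() above.
static std::optional<std::string> featureProbe()
{
    return std::optional<std::string>{"kernel too old"}; // pretend the probe failed
}

// Boolean wrapper with an optional out-parameter for the error message,
// mirroring supportsAtomicRename(std::string * out_message = nullptr).
bool featureSupported(std::string * out_message = nullptr)
{
    static auto error = featureProbe(); // probed once, cached for the process
    if (!error.has_value())
        return true;
    if (out_message)
        *out_message = error.value();
    return false;
}

int main()
{
    std::string why;
    if (!featureSupported(&why))
        std::cout << "unsupported: " << why << '\n'; // unsupported: kernel too old
}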

View File

@@ -1144,6 +1144,7 @@ class IColumn;
     M(Bool, input_format_try_infer_variants, false, "Try to infer the Variant type in text formats when there is more than one possible type for column/array elements", 0) \
     M(Bool, type_json_skip_duplicated_paths, false, "When enabled, during parsing JSON object into JSON type duplicated paths will be ignored and only the first one will be inserted instead of an exception", 0) \
     M(UInt64, input_format_json_max_depth, 1000, "Maximum depth of a field in JSON. This is not a strict limit, it does not have to be applied precisely.", 0) \
+    M(Bool, input_format_json_empty_as_default, false, "Treat empty fields in JSON input as default values.", 0) \
     M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
     M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
     M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \

View File

@@ -71,6 +71,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
     },
     {"24.9",
         {
+            {"input_format_json_empty_as_default", false, false, "Added new setting to allow to treat empty fields in JSON input as default values."},
             {"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
             {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
             {"create_if_not_exists", false, false, "New setting."},

View File

@@ -11,6 +11,7 @@
 #include <IO/WriteBufferFromString.h>
 #include <Formats/FormatSettings.h>
+#include <Formats/JSONUtils.h>
 namespace DB
 {
@@ -615,28 +616,49 @@ void SerializationArray::serializeTextJSONPretty(const IColumn & column, size_t
 }
-void SerializationArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+template <typename ReturnType>
+ReturnType SerializationArray::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    deserializeTextImpl(column, istr,
-        [&](IColumn & nested_column)
+    auto deserialize_nested = [&settings, this](IColumn & nested_column, ReadBuffer & buf) -> ReturnType
+    {
+        if constexpr (std::is_same_v<ReturnType, void>)
         {
             if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
-                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, istr, settings, nested);
+                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested);
             else
-                nested->deserializeTextJSON(nested_column, istr, settings);
-        }, false);
+                nested->deserializeTextJSON(nested_column, buf, settings);
+        }
+        else
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
+                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested);
+            return nested->tryDeserializeTextJSON(nested_column, buf, settings);
+        }
+    };
+
+    if (settings.json.empty_as_default)
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested, &istr](IColumn & nested_column) -> ReturnType
+            {
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(nested_column, istr, deserialize_nested);
+            }, false);
+    else
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested, &istr](IColumn & nested_column) -> ReturnType
+            {
+                return deserialize_nested(nested_column, istr);
+            }, false);
+}
+
+void SerializationArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    deserializeTextJSONImpl<void>(column, istr, settings);
 }
 bool SerializationArray::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    auto read_nested = [&](IColumn & nested_column)
-    {
-        if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
-            return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, istr, settings, nested);
-        return nested->tryDeserializeTextJSON(nested_column, istr, settings);
-    };
-    return deserializeTextImpl<bool>(column, istr, std::move(read_nested), false);
+    return deserializeTextJSONImpl<bool>(column, istr, settings);
 }

View File

@@ -82,6 +82,10 @@ public:
     SerializationPtr create(const SerializationPtr & prev) const override;
     ColumnPtr create(const ColumnPtr & prev) const override;
 };
+
+private:
+    template <typename ReturnType>
+    ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
 };
 }
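Across the Array, Map, and Tuple serializations below, one implementation now serves both entry points: instantiated with `ReturnType = void` it throws on malformed input, with `ReturnType = bool` it reports failure. A standalone sketch of that `if constexpr` dispatch (simplified, with our own names):

#include <iostream>
#include <stdexcept>
#include <string_view>
#include <type_traits>

// One parser body serves two APIs: ReturnType = void throws on failure,
// ReturnType = bool reports it. Mirrors deserializeTextJSONImpl<ReturnType>.
template <typename ReturnType>
ReturnType parseDigit(std::string_view s, int & out)
{
    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
    if (s.size() == 1 && s[0] >= '0' && s[0] <= '9')
    {
        out = s[0] - '0';
        return ReturnType(true); // for void this is a functional cast that discards the value
    }
    if constexpr (throw_exception)
        throw std::runtime_error("not a digit");
    else
        return false;
}

int main()
{
    int v = 0;
    parseDigit<void>("7", v);           // throwing variant
    bool ok = parseDigit<bool>("x", v); // try-variant returns false instead
    std::cout << v << ' ' << ok << '\n'; // 7 0
}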

View File

@@ -6,6 +6,7 @@
 #include <Columns/ColumnMap.h>
 #include <Core/Field.h>
 #include <Formats/FormatSettings.h>
+#include <Formats/JSONUtils.h>
 #include <Common/assert_cast.h>
 #include <Common/quoteString.h>
 #include <IO/WriteHelpers.h>
@@ -316,28 +317,52 @@ void SerializationMap::serializeTextJSONPretty(const IColumn & column, size_t ro
 }
-void SerializationMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+template <typename ReturnType>
+ReturnType SerializationMap::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    deserializeTextImpl(column, istr,
-        [&settings](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn)
+    auto deserialize_nested = [&settings](IColumn & subcolumn, ReadBuffer & buf, const SerializationPtr & subcolumn_serialization) -> ReturnType
+    {
+        if constexpr (std::is_same_v<ReturnType, void>)
         {
             if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(subcolumn))
                 SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(subcolumn, buf, settings, subcolumn_serialization);
             else
                 subcolumn_serialization->deserializeTextJSON(subcolumn, buf, settings);
-        });
+        }
+        else
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(subcolumn))
+                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(subcolumn, buf, settings, subcolumn_serialization);
+            return subcolumn_serialization->tryDeserializeTextJSON(subcolumn, buf, settings);
+        }
+    };
+
+    if (settings.json.empty_as_default)
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) -> ReturnType
+            {
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(subcolumn, buf,
+                    [&deserialize_nested, &subcolumn_serialization](IColumn & subcolumn_, ReadBuffer & buf_) -> ReturnType
+                    {
+                        return deserialize_nested(subcolumn_, buf_, subcolumn_serialization);
+                    });
+            });
+    else
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) -> ReturnType
+            {
+                return deserialize_nested(subcolumn, buf, subcolumn_serialization);
+            });
+}
+
+void SerializationMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    deserializeTextJSONImpl<void>(column, istr, settings);
 }
 bool SerializationMap::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    auto reader = [&settings](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn)
-    {
-        if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(subcolumn))
-            return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(subcolumn, buf, settings, subcolumn_serialization);
-        return subcolumn_serialization->tryDeserializeTextJSON(subcolumn, buf, settings);
-    };
-    return deserializeTextImpl<bool>(column, istr, reader);
+    return deserializeTextJSONImpl<bool>(column, istr, settings);
 }
 void SerializationMap::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const

View File

@@ -74,6 +74,9 @@ private:
     template <typename ReturnType = void, typename Reader>
     ReturnType deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && reader) const;
+
+    template <typename ReturnType>
+    ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
 };
 }

View File

@@ -5,6 +5,7 @@
 #include <Core/Field.h>
 #include <Columns/ColumnTuple.h>
 #include <Common/assert_cast.h>
+#include <Formats/JSONUtils.h>
 #include <IO/WriteHelpers.h>
 #include <IO/ReadHelpers.h>
 #include <IO/ReadBufferFromString.h>
@@ -313,27 +314,9 @@ void SerializationTuple::serializeTextJSONPretty(const IColumn & column, size_t
 }
 template <typename ReturnType>
-ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+ReturnType SerializationTuple::deserializeTupleJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, auto && deserialize_element) const
 {
-    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
-
-    auto deserialize_element = [&](IColumn & element_column, size_t element_pos)
-    {
-        if constexpr (throw_exception)
-        {
-            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(element_column))
-                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(element_column, istr, settings, elems[element_pos]);
-            else
-                elems[element_pos]->deserializeTextJSON(element_column, istr, settings);
-            return true;
-        }
-        else
-        {
-            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(element_column))
-                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(element_column, istr, settings, elems[element_pos]);
-            return elems[element_pos]->tryDeserializeTextJSON(element_column, istr, settings);
-        }
-    };
+    static constexpr auto throw_exception = std::is_same_v<ReturnType, void>;
 
     if (settings.json.read_named_tuples_as_objects
         && have_explicit_names)
@@ -506,12 +489,51 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
     }
 }
-void SerializationTuple::deserializeTextJSON(DB::IColumn & column, DB::ReadBuffer & istr, const DB::FormatSettings & settings) const
+template <typename ReturnType>
+ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    deserializeTextJSONImpl(column, istr, settings);
+    auto deserialize_nested = [&settings](IColumn & nested_column, ReadBuffer & buf, const SerializationPtr & nested_column_serialization) -> ReturnType
+    {
+        if constexpr (std::is_same_v<ReturnType, void>)
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
+                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested_column_serialization);
+            else
+                nested_column_serialization->deserializeTextJSON(nested_column, buf, settings);
+        }
+        else
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
+                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested_column_serialization);
+            else
+                return nested_column_serialization->tryDeserializeTextJSON(nested_column, buf, settings);
+        }
+    };
+
+    if (settings.json.empty_as_default)
+        return deserializeTupleJSONImpl<ReturnType>(column, istr, settings,
+            [&deserialize_nested, &istr, this](IColumn & nested_column, size_t element_pos) -> ReturnType
+            {
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(nested_column, istr,
+                    [&deserialize_nested, element_pos, this](IColumn & nested_column_, ReadBuffer & buf) -> ReturnType
+                    {
+                        return deserialize_nested(nested_column_, buf, elems[element_pos]);
+                    });
+            });
+    else
+        return deserializeTupleJSONImpl<ReturnType>(column, istr, settings,
+            [&deserialize_nested, &istr, this](IColumn & nested_column, size_t element_pos) -> ReturnType
+            {
+                return deserialize_nested(nested_column, istr, elems[element_pos]);
+            });
+}
+
+void SerializationTuple::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    deserializeTextJSONImpl<void>(column, istr, settings);
 }
-bool SerializationTuple::tryDeserializeTextJSON(DB::IColumn & column, DB::ReadBuffer & istr, const DB::FormatSettings & settings) const
+bool SerializationTuple::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
     return deserializeTextJSONImpl<bool>(column, istr, settings);
 }

View File

@@ -81,7 +81,10 @@ private:
     template <typename ReturnType = void>
     ReturnType deserializeTextImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, bool whole) const;
-    template <typename ReturnType = void>
+    template <typename ReturnType>
+    ReturnType deserializeTupleJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, auto && deserialize_element) const;
+
+    template <typename ReturnType>
     ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
     template <typename ReturnType = void>

View File

@@ -197,8 +197,9 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported");
     }
-    if (exchange && !supportsAtomicRename())
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported");
+    std::string message;
+    if (exchange && !supportsAtomicRename(&message))
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported because exchanging files is not supported by the OS ({})", message);
     waitDatabaseStarted();

View File

@@ -152,6 +152,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
     format_settings.json.try_infer_objects_as_tuples = settings.input_format_json_try_infer_named_tuples_from_objects;
     format_settings.json.throw_on_bad_escape_sequence = settings.input_format_json_throw_on_bad_escape_sequence;
     format_settings.json.ignore_unnecessary_fields = settings.input_format_json_ignore_unnecessary_fields;
+    format_settings.json.empty_as_default = settings.input_format_json_empty_as_default;
     format_settings.json.type_json_skip_duplicated_paths = settings.type_json_skip_duplicated_paths;
     format_settings.null_as_default = settings.input_format_null_as_default;
     format_settings.force_null_for_omitted_fields = settings.input_format_force_null_for_omitted_fields;

View File

@@ -237,6 +237,7 @@ struct FormatSettings
         bool infer_incomplete_types_as_strings = true;
         bool throw_on_bad_escape_sequence = true;
         bool ignore_unnecessary_fields = true;
+        bool empty_as_default = false;
         bool type_json_skip_duplicated_paths = false;
     } json{};

View File

@@ -2,12 +2,14 @@
 #include <Formats/JSONUtils.h>
 #include <Formats/ReadSchemaUtils.h>
 #include <Formats/EscapingRuleUtils.h>
+#include <IO/PeekableReadBuffer.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/WriteBufferValidUTF8.h>
 #include <DataTypes/Serializations/SerializationNullable.h>
 #include <DataTypes/DataTypeNullable.h>
 #include <DataTypes/DataTypeObjectDeprecated.h>
 #include <DataTypes/DataTypeFactory.h>
+#include <Common/assert_cast.h>
 #include <base/find_symbols.h>
@@ -286,11 +288,19 @@ namespace JSONUtils
             return true;
         }
-        if (as_nullable)
-            return SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(column, in, format_settings, serialization);
+        auto deserialize = [as_nullable, &format_settings, &serialization](IColumn & column_, ReadBuffer & buf) -> bool
+        {
+            if (as_nullable)
+                return SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(column_, buf, format_settings, serialization);
-        serialization->deserializeTextJSON(column, in, format_settings);
-        return true;
+            serialization->deserializeTextJSON(column_, buf, format_settings);
+            return true;
+        };
+
+        if (format_settings.json.empty_as_default)
+            return JSONUtils::deserializeEmpyStringAsDefaultOrNested<bool, false>(column, in, deserialize);
+        else
+            return deserialize(column, in);
     }
     catch (Exception & e)
     {
@@ -920,6 +930,78 @@ namespace JSONUtils
     }
 }
+    template <typename ReturnType, bool default_column_return_value>
+    ReturnType deserializeEmpyStringAsDefaultOrNested(IColumn & column, ReadBuffer & istr, const NestedDeserialize<ReturnType> & deserialize_nested)
+    {
+        static constexpr auto throw_exception = std::is_same_v<ReturnType, void>;
+        static constexpr auto EMPTY_STRING = "\"\"";
+        static constexpr auto EMPTY_STRING_LENGTH = std::string_view(EMPTY_STRING).length();
+
+        if (istr.eof() || *istr.position() != EMPTY_STRING[0])
+            return deserialize_nested(column, istr);
+
+        auto do_deserialize = [](IColumn & column_, ReadBuffer & buf, auto && check_for_empty_string, auto && deserialize) -> ReturnType
+        {
+            if (check_for_empty_string(buf))
+            {
+                column_.insertDefault();
+                return ReturnType(default_column_return_value);
+            }
+            return deserialize(column_, buf);
+        };
+
+        if (istr.available() >= EMPTY_STRING_LENGTH)
+        {
+            /// We have enough data in buffer to check if we have an empty string.
+            auto check_for_empty_string = [](ReadBuffer & buf) -> bool
+            {
+                auto * pos = buf.position();
+                if (checkString(EMPTY_STRING, buf))
+                    return true;
+                buf.position() = pos;
+                return false;
+            };
+
+            return do_deserialize(column, istr, check_for_empty_string, deserialize_nested);
+        }
+
+        /// We don't have enough data in buffer to check if we have an empty string.
+        /// Use PeekableReadBuffer to make a checkpoint before checking for an
+        /// empty string and rollback if check was failed.
+        auto check_for_empty_string = [](ReadBuffer & buf) -> bool
+        {
+            auto & peekable_buf = assert_cast<PeekableReadBuffer &>(buf);
+            peekable_buf.setCheckpoint();
+            SCOPE_EXIT(peekable_buf.dropCheckpoint());
+            if (checkString(EMPTY_STRING, peekable_buf))
+                return true;
+            peekable_buf.rollbackToCheckpoint();
+            return false;
+        };
+
+        auto deserialize_nested_with_check = [&deserialize_nested](IColumn & column_, ReadBuffer & buf) -> ReturnType
+        {
+            auto & peekable_buf = assert_cast<PeekableReadBuffer &>(buf);
+            if constexpr (throw_exception)
+                deserialize_nested(column_, peekable_buf);
+            else if (!deserialize_nested(column_, peekable_buf))
+                return ReturnType(false);
+
+            if (unlikely(peekable_buf.hasUnreadData()))
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect state while parsing JSON: PeekableReadBuffer has unread data in own memory: {}", String(peekable_buf.position(), peekable_buf.available()));
+
+            return ReturnType(true);
+        };
+
+        PeekableReadBuffer peekable_buf(istr, true);
+        return do_deserialize(column, peekable_buf, check_for_empty_string, deserialize_nested_with_check);
+    }
+
+    template void deserializeEmpyStringAsDefaultOrNested<void, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<void> & deserialize_nested);
+    template bool deserializeEmpyStringAsDefaultOrNested<bool, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
+    template bool deserializeEmpyStringAsDefaultOrNested<bool, false>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
 }
 }
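The fast path above peeks directly at the buffer; only when fewer than two bytes are available does the code wrap the stream in a PeekableReadBuffer, set a checkpoint, and roll back if the field turns out not to be `""`. A toy analog of that checkpoint/rollback dance over an in-memory cursor (all names are ours, not ClickHouse's):

#include <iostream>
#include <string>
#include <string_view>

// Standalone stand-in for the checkpoint logic, assuming a simple in-memory
// cursor instead of ClickHouse's PeekableReadBuffer.
struct Cursor
{
    std::string_view data;
    std::size_t pos = 0;

    bool checkString(std::string_view expected)
    {
        std::size_t checkpoint = pos;            // setCheckpoint()
        if (data.substr(pos, expected.size()) == expected)
        {
            pos += expected.size();              // consume on success
            return true;
        }
        pos = checkpoint;                        // rollbackToCheckpoint()
        return false;
    }
};

// Insert a default when the field is exactly "" (two quote characters),
// otherwise hand the untouched input to the nested parser.
int parseOrDefault(Cursor & in, int default_value)
{
    if (in.checkString("\"\""))
        return default_value;                    // column.insertDefault()
    return std::stoi(std::string(in.data.substr(in.pos)));
}

int main()
{
    Cursor a{"\"\""};
    Cursor b{"123"};
    std::cout << parseOrDefault(a, 7) << ' ' << parseOrDefault(b, 7) << '\n'; // 7 123
}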

View File

@@ -8,6 +8,7 @@
 #include <IO/Progress.h>
 #include <Core/NamesAndTypes.h>
 #include <Common/Stopwatch.h>
+#include <functional>
 #include <utility>
 namespace DB
@@ -146,6 +147,16 @@ namespace JSONUtils
     bool skipUntilFieldInObject(ReadBuffer & in, const String & desired_field_name, const FormatSettings::JSON & settings);
     void skipTheRestOfObject(ReadBuffer & in, const FormatSettings::JSON & settings);
+
+    template <typename ReturnType>
+    using NestedDeserialize = std::function<ReturnType(IColumn &, ReadBuffer &)>;
+
+    template <typename ReturnType, bool default_column_return_value = true>
+    ReturnType deserializeEmpyStringAsDefaultOrNested(IColumn & column, ReadBuffer & istr, const NestedDeserialize<ReturnType> & deserialize_nested);
+
+    extern template void deserializeEmpyStringAsDefaultOrNested<void, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<void> & deserialize_nested);
+    extern template bool deserializeEmpyStringAsDefaultOrNested<bool, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
+    extern template bool deserializeEmpyStringAsDefaultOrNested<bool, false>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
 }
 }

View File

@@ -85,7 +85,9 @@ ConcurrentHashJoin::ConcurrentHashJoin(
         CurrentMetrics::ConcurrentHashJoinPoolThreads,
         CurrentMetrics::ConcurrentHashJoinPoolThreadsActive,
         CurrentMetrics::ConcurrentHashJoinPoolThreadsScheduled,
-        slots))
+        /*max_threads_*/ slots,
+        /*max_free_threads_*/ 0,
+        /*queue_size_*/ slots))
     , stats_collecting_params(stats_collecting_params_)
 {
     hash_joins.resize(slots);

View File

@@ -1236,6 +1236,7 @@ IBlocksStreamPtr HashJoin::getNonJoinedBlocks(const Block & left_sample_block,
 void HashJoin::reuseJoinedData(const HashJoin & join)
 {
+    have_compressed = join.have_compressed;
     data = join.data;
     from_storage_join = true;

View File

@@ -968,6 +968,11 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC
         if (database && database->getEngineName() != "Atomic")
             throw Exception(ErrorCodes::INCORRECT_QUERY,
                 "Refreshable materialized views (except with APPEND) only support Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName());
+
+        std::string message;
+        if (!supportsAtomicRename(&message))
+            throw Exception(ErrorCodes::NOT_IMPLEMENTED,
+                "Can't create refreshable materialized view because exchanging files is not supported by the OS ({})", message);
     }
     Block input_block;

View File

@@ -99,6 +99,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS;
+    extern const int QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE;
     extern const int QUERY_CACHE_USED_WITH_SYSTEM_TABLE;
    extern const int INTO_OUTFILE_NOT_ALLOWED;
     extern const int INVALID_TRANSACTION;
@@ -1118,22 +1119,24 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         && settings.use_query_cache
         && !internal
         && client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY
-        /// Bug 67476: Avoid that the query cache stores truncated results if the query ran with a non-THROW overflow mode and hit a limit.
-        /// This is more workaround than a fix ... unfortunately it is hard to detect from the perspective of the query cache that the
-        /// query result is truncated.
-        && (settings.read_overflow_mode == OverflowMode::THROW
-            && settings.read_overflow_mode_leaf == OverflowMode::THROW
-            && settings.group_by_overflow_mode == OverflowMode::THROW
-            && settings.sort_overflow_mode == OverflowMode::THROW
-            && settings.result_overflow_mode == OverflowMode::THROW
-            && settings.timeout_overflow_mode == OverflowMode::THROW
-            && settings.set_overflow_mode == OverflowMode::THROW
-            && settings.join_overflow_mode == OverflowMode::THROW
-            && settings.transfer_overflow_mode == OverflowMode::THROW
-            && settings.distinct_overflow_mode == OverflowMode::THROW)
         && (ast->as<ASTSelectQuery>() || ast->as<ASTSelectWithUnionQuery>());
     QueryCache::Usage query_cache_usage = QueryCache::Usage::None;
+    /// Bug 67476: If the query runs with a non-THROW overflow mode and hits a limit, the query cache will store a truncated result (if
+    /// enabled). This is incorrect. Unfortunately it is hard to detect from the perspective of the query cache that the query result
+    /// is truncated. Therefore throw an exception, to notify the user to disable either the query cache or use another overflow mode.
+    if (settings.use_query_cache && (settings.read_overflow_mode != OverflowMode::THROW
+        || settings.read_overflow_mode_leaf != OverflowMode::THROW
+        || settings.group_by_overflow_mode != OverflowMode::THROW
+        || settings.sort_overflow_mode != OverflowMode::THROW
+        || settings.result_overflow_mode != OverflowMode::THROW
+        || settings.timeout_overflow_mode != OverflowMode::THROW
+        || settings.set_overflow_mode != OverflowMode::THROW
+        || settings.join_overflow_mode != OverflowMode::THROW
+        || settings.transfer_overflow_mode != OverflowMode::THROW
+        || settings.distinct_overflow_mode != OverflowMode::THROW))
+        throw Exception(ErrorCodes::QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE, "use_query_cache and overflow_mode != 'throw' cannot be used together");
+
     /// If the query runs with "use_query_cache = 1", we first probe if the query cache already contains the query result (if yes:
     /// return result from cache). If doesn't, we execute the query normally and write the result into the query cache. Both steps use a
     /// hash of the AST, the current database and the settings as cache key. Unfortunately, the settings are in some places internally
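The guard rejects the query cache whenever any of the ten overflow modes is not THROW, because a tripped non-THROW limit silently truncates the result that would otherwise be cached. A hypothetical condensed form of the same predicate (the real code spells out each setting; the helper below is ours):

#include <algorithm>
#include <initializer_list>
#include <stdexcept>

enum class OverflowMode { THROW, BREAK, ANY };

// The query cache is only safe when every overflow mode is THROW,
// so reject any other combination up front.
void checkOverflowModesForQueryCache(std::initializer_list<OverflowMode> modes)
{
    bool all_throw = std::all_of(modes.begin(), modes.end(),
        [](OverflowMode m) { return m == OverflowMode::THROW; });
    if (!all_throw)
        throw std::runtime_error("use_query_cache and overflow_mode != 'throw' cannot be used together");
}

int main()
{
    checkOverflowModesForQueryCache({OverflowMode::THROW, OverflowMode::THROW}); // ok
    try
    {
        checkOverflowModesForQueryCache({OverflowMode::THROW, OverflowMode::BREAK});
    }
    catch (const std::runtime_error &) { /* rejected, as intended */ }
}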

View File

@@ -1,6 +1,6 @@
 #include <Parsers/ParserShowColumnsQuery.h>
-#include <Parsers/ASTIdentifier_fwd.h>
+#include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTShowColumnsQuery.h>
 #include <Parsers/CommonParsers.h>
@@ -18,7 +18,6 @@ bool ParserShowColumnsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     ASTPtr from1;
     ASTPtr from2;
-    String from1_str;
     String from2_str;
     auto query = std::make_shared<ASTShowColumnsQuery>();
@@ -43,25 +42,18 @@ bool ParserShowColumnsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     else
         return false;
-    tryGetIdentifierNameInto(from1, from1_str);
-
-    bool abbreviated_form = from1_str.contains("."); // FROM database.table
-    if (abbreviated_form)
-    {
-        std::vector<String> split;
-        boost::split(split, from1_str, boost::is_any_of("."));
-        query->database = split[0];
-        query->table = split[1];
-    }
+    const auto * table_id = from1->as<ASTIdentifier>();
+    if (!table_id)
+        return false;
+    query->table = table_id->shortName();
+    if (table_id->compound())
+        query->database = table_id->name_parts[0];
     else
     {
         if (ParserKeyword(Keyword::FROM).ignore(pos, expected) || ParserKeyword(Keyword::IN).ignore(pos, expected))
             if (!ParserIdentifier().parse(pos, from2, expected))
                 return false;
         tryGetIdentifierNameInto(from2, from2_str);
-        query->table = from1_str;
         query->database = from2_str;
     }
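The old parser rendered the identifier to a string and split it on every dot, so a table literally named `tab.le` was mis-parsed; the rewrite asks the already-parsed compound identifier for its parts. A toy illustration (our own `Identifier` stand-in, not ClickHouse's `ASTIdentifier`):

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for a compound identifier: the parser already produced
// proper name parts, so `db`.`tab.le` keeps its inner dot intact.
struct Identifier
{
    std::vector<std::string> name_parts;          // e.g. {"db", "tab.le"}
    bool compound() const { return name_parts.size() > 1; }
    std::string shortName() const { return name_parts.back(); }
};

int main()
{
    // Splitting the rendered name "db.tab.le" on '.' would yield three
    // pieces and lose the table name; the identifier's own parts do not.
    Identifier id{{"db", "tab.le"}};
    std::string database = id.compound() ? id.name_parts[0] : "";
    std::cout << database << " / " << id.shortName() << '\n'; // db / tab.le
}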

View File

@@ -1,6 +1,6 @@
 #include <Parsers/ParserShowIndexesQuery.h>
-#include <Parsers/ASTIdentifier_fwd.h>
+#include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTShowIndexesQuery.h>
 #include <Parsers/CommonParsers.h>
@@ -17,7 +17,6 @@ bool ParserShowIndexesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     ASTPtr from1;
     ASTPtr from2;
-    String from1_str;
     String from2_str;
     auto query = std::make_shared<ASTShowIndexesQuery>();
@@ -39,25 +38,18 @@ bool ParserShowIndexesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     else
         return false;
-    tryGetIdentifierNameInto(from1, from1_str);
-
-    bool abbreviated_form = from1_str.contains("."); // FROM database.table
-    if (abbreviated_form)
-    {
-        std::vector<String> split;
-        boost::split(split, from1_str, boost::is_any_of("."));
-        query->database = split[0];
-        query->table = split[1];
-    }
+    const auto * table_id = from1->as<ASTIdentifier>();
+    if (!table_id)
+        return false;
+    query->table = table_id->shortName();
+    if (table_id->compound())
+        query->database = table_id->name_parts[0];
     else
     {
         if (ParserKeyword(Keyword::FROM).ignore(pos, expected) || ParserKeyword(Keyword::IN).ignore(pos, expected))
             if (!ParserIdentifier().parse(pos, from2, expected))
                 return false;
         tryGetIdentifierNameInto(from2, from2_str);
-        query->table = from1_str;
         query->database = from2_str;
     }

View File

@@ -2009,33 +2009,6 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons
 {
     auto result = getAnalysisResult();
-    if (is_parallel_reading_from_replicas && context->canUseParallelReplicasOnInitiator()
-        && context->getSettingsRef().parallel_replicas_local_plan)
-    {
-        CoordinationMode mode = CoordinationMode::Default;
-        switch (result.read_type)
-        {
-            case ReadFromMergeTree::ReadType::Default:
-                mode = CoordinationMode::Default;
-                break;
-            case ReadFromMergeTree::ReadType::InOrder:
-                mode = CoordinationMode::WithOrder;
-                break;
-            case ReadFromMergeTree::ReadType::InReverseOrder:
-                mode = CoordinationMode::ReverseOrder;
-                break;
-            case ReadFromMergeTree::ReadType::ParallelReplicas:
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "Read type can't be ParallelReplicas on initiator");
-        }
-
-        chassert(number_of_current_replica.has_value());
-        chassert(all_ranges_callback.has_value());
-
-        /// initialize working set from local replica
-        all_ranges_callback.value()(
-            InitialAllRangesAnnouncement(mode, result.parts_with_ranges.getDescriptions(), number_of_current_replica.value()));
-    }
-
     if (enable_remove_parts_from_snapshot_optimization)
     {
         /// Do not keep data parts in snapshot.

View File

@@ -15,6 +15,7 @@ namespace ErrorCodes
 {
     extern const int SUPPORT_IS_DISABLED;
     extern const int REPLICA_STATUS_CHANGED;
+    extern const int LOGICAL_ERROR;
 }
 ReplicatedMergeTreeAttachThread::ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_)
@@ -117,6 +118,67 @@ void ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(const z
     }
 }
+Int32 ReplicatedMergeTreeAttachThread::fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper)
+{
+    const String & zookeeper_path = storage.zookeeper_path;
+    const String & replica_path = storage.replica_path;
+    const bool replica_readonly = storage.is_readonly;
+
+    for (size_t i = 0; i != 2; ++i)
+    {
+        String replica_metadata_version_str;
+        const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version_str);
+        if (!replica_metadata_version_exists)
+            return -1;
+
+        const Int32 metadata_version = parse<Int32>(replica_metadata_version_str);
+
+        if (metadata_version != 0 || replica_readonly)
+        {
+            /// No need to fix anything
+            return metadata_version;
+        }
+
+        Coordination::Stat stat;
+        zookeeper->get(fs::path(zookeeper_path) / "metadata", &stat);
+        if (stat.version == 0)
+        {
+            /// No need to fix anything
+            return metadata_version;
+        }
+
+        ReplicatedMergeTreeQueue & queue = storage.queue;
+        queue.pullLogsToQueue(zookeeper);
+        if (queue.getStatus().metadata_alters_in_queue != 0)
+        {
+            LOG_DEBUG(log, "No need to update metadata_version as there are ALTER_METADATA entries in the queue");
+            return metadata_version;
+        }
+
+        const Coordination::Requests ops = {
+            zkutil::makeSetRequest(fs::path(replica_path) / "metadata_version", std::to_string(stat.version), 0),
+            zkutil::makeCheckRequest(fs::path(zookeeper_path) / "metadata", stat.version),
+        };
+        Coordination::Responses ops_responses;
+        const auto code = zookeeper->tryMulti(ops, ops_responses);
+        if (code == Coordination::Error::ZOK)
+        {
+            LOG_DEBUG(log, "Successfully set metadata_version to {}", stat.version);
+            return stat.version;
+        }
+        if (code != Coordination::Error::ZBADVERSION)
+        {
+            throw zkutil::KeeperException(code);
+        }
+    }
+
+    /// Second attempt is only possible if metadata_version != 0 or metadata.version changed during the first attempt.
+    /// If metadata_version != 0, on second attempt we will return the new metadata_version.
+    /// If metadata.version changed, on second attempt we will either get metadata_version != 0 and return the new metadata_version or we will get metadata_alters_in_queue != 0 and return 0.
+    /// Either way, on second attempt this method should return.
+    throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to fix replica metadata_version in ZooKeeper after two attempts");
+}
+
 void ReplicatedMergeTreeAttachThread::runImpl()
 {
     storage.setZooKeeper();
@@ -160,11 +222,11 @@ void ReplicatedMergeTreeAttachThread::runImpl()
     /// Just in case it was not removed earlier due to connection loss
     zookeeper->tryRemove(replica_path + "/flags/force_restore_data");
-    String replica_metadata_version;
-    const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version);
+    const Int32 replica_metadata_version = fixReplicaMetadataVersionIfNeeded(zookeeper);
+    const bool replica_metadata_version_exists = replica_metadata_version != -1;
     if (replica_metadata_version_exists)
     {
-        storage.setInMemoryMetadata(metadata_snapshot->withMetadataVersion(parse<int>(replica_metadata_version)));
+        storage.setInMemoryMetadata(metadata_snapshot->withMetadataVersion(replica_metadata_version));
    }
     else
     {
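`fixReplicaMetadataVersionIfNeeded` is a compare-and-set: it writes the parent's `metadata` version into the replica's `metadata_version` only if the parent has not moved in the meantime, retrying once on `ZBADVERSION`. A toy model of that two-attempt loop (names are ours; the real code issues a Set+Check multi-op against ZooKeeper):

#include <functional>
#include <iostream>
#include <stdexcept>

// Set child = parent.version, but only if parent.version has not moved since
// we read it (the analog of ZBADVERSION otherwise).
struct Node { int version = 0; };

bool trySetOnce(Node & parent, int & child, const std::function<void()> & concurrent_writer)
{
    int observed = parent.version;     // zookeeper->get(.../metadata, &stat)
    concurrent_writer();               // another replica may ALTER here
    if (parent.version != observed)
        return false;                  // Coordination::Error::ZBADVERSION
    child = observed;                  // makeSetRequest(.../metadata_version)
    return true;
}

int main()
{
    Node parent{3};
    int child = 0;
    bool raced_once = false;
    auto racer = [&] { if (!raced_once) { raced_once = true; ++parent.version; } };

    for (int attempt = 0; attempt != 2; ++attempt)
        if (trySetOnce(parent, child, racer))
        {
            std::cout << "metadata_version fixed to " << child << '\n'; // 4
            return 0;
        }
    // Mirrors the LOGICAL_ERROR thrown after two failed attempts.
    throw std::runtime_error("failed to fix metadata_version after two attempts");
}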

View File

@@ -48,6 +48,8 @@ private:
     void runImpl();
     void finalizeInitialization();
+
+    Int32 fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper);
 };
 }
} }

View File

@@ -2222,6 +2222,7 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const
     res.inserts_in_queue = 0;
     res.merges_in_queue = 0;
     res.part_mutations_in_queue = 0;
+    res.metadata_alters_in_queue = 0;
     res.queue_oldest_time = 0;
     res.inserts_oldest_time = 0;
     res.merges_oldest_time = 0;
@@ -2264,6 +2265,11 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const
                 res.oldest_part_to_mutate_to = entry->new_part_name;
             }
         }
+
+        if (entry->type == LogEntry::ALTER_METADATA)
+        {
+            ++res.metadata_alters_in_queue;
+        }
     }
     return res;

View File

@@ -473,6 +473,7 @@ public:
         UInt32 inserts_in_queue;
         UInt32 merges_in_queue;
         UInt32 part_mutations_in_queue;
+        UInt32 metadata_alters_in_queue;
         UInt32 queue_oldest_time;
         UInt32 inserts_oldest_time;
         UInt32 merges_oldest_time;

View File

@@ -277,7 +277,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
     if (has_valid_arguments)
     {
-        if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0)
+        if (!query.attach && is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0)
         {
             throw Exception(ErrorCodes::BAD_ARGUMENTS,
                 "It's not allowed to specify explicit zookeeper_path and replica_name "
@@ -285,7 +285,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
                 "specify them explicitly, enable setting "
                 "database_replicated_allow_replicated_engine_arguments.");
         }
-        else if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1)
+        else if (!query.attach && is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1)
         {
             LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "It's not recommended to explicitly specify "
                 "zookeeper_path and replica_name in ReplicatedMergeTree arguments");
@@ -305,7 +305,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must be a string literal{}", verbose_help_message);
-        if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2)
+        if (!query.attach && is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2)
         {
             LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) "
                 "with default arguments", zookeeper_path, replica_name);

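Note: with the added !query.attach condition, the database_replicated_allow_replicated_engine_arguments checks now fire only for CREATE; an ATTACH whose stored definition already carries explicit zookeeper_path and replica_name proceeds regardless of the setting's value. A behavioral sketch (database and table names are hypothetical):

-- In a Replicated database with database_replicated_allow_replicated_engine_arguments = 0:
CREATE TABLE rdb.t (k UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/t', 'r1')
ORDER BY k;          -- still rejected: explicit engine arguments in CREATE
ATTACH TABLE rdb.t;  -- now allowed: the attached definition keeps its stored arguments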
View File

@@ -0,0 +1,33 @@
<clickhouse>
    <remote_servers>
        <parallel_replicas>
            <shard>
                <internal_replication>false</internal_replication>
                <replica>
                    <host>node0</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node2</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node3</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node4</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node5</host>
                    <port>9000</port>
                </replica>
            </shard>
        </parallel_replicas>
    </remote_servers>
</clickhouse>

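Note: once this file is loaded, the six-replica test cluster should be visible in system.clusters; a quick sanity check:

SELECT cluster, shard_num, replica_num, host_name
FROM system.clusters
WHERE cluster = 'parallel_replicas';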
View File

@@ -0,0 +1,73 @@
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

nodes = [
    cluster.add_instance(
        f"node{num}", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
    )
    for num in range(6)
]


@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def _create_tables(table_name):
    for idx, node in enumerate(nodes):
        node.query(
            f"DROP TABLE IF EXISTS {table_name}",
            settings={"database_atomic_wait_for_drop_and_detach_synchronously": True},
        )
        node.query(
            f"""
            CREATE TABLE {table_name} (value Int64)
            Engine=ReplicatedMergeTree('/test_parallel_replicas/shard/{table_name}', '{idx}')
            ORDER BY ()
            """
        )

    nodes[0].query(
        f"INSERT INTO {table_name} SELECT * FROM numbers(1000)",
        settings={"insert_deduplicate": 0},
    )
    nodes[0].query(f"SYSTEM SYNC REPLICA ON CLUSTER 'parallel_replicas' {table_name}")

    for idx, node in enumerate(nodes):
        node.query("SYSTEM STOP REPLICATED SENDS")
        # the same data on all nodes except for a single value
        node.query(
            f"INSERT INTO {table_name} VALUES ({idx})",
            settings={"insert_deduplicate": 0},
        )


# Check that we use the state of data parts from the initiator node (for some sort
# of determinism of what is being read). Currently this is implemented only when we
# build a local plan for the initiator node (we aim to make this behavior default).
def test_initiator_snapshot_is_used_for_reading(start_cluster):
    table_name = "t"
    _create_tables(table_name)

    for idx, node in enumerate(nodes):
        expected = 499500 + idx  # sum of integers 0..999 plus idx
        assert (
            node.query(
                f"SELECT sum(value) FROM {table_name}",
                settings={
                    "allow_experimental_parallel_reading_from_replicas": 2,
                    "max_parallel_replicas": 100,
                    "cluster_for_parallel_replicas": "parallel_replicas",
                    "parallel_replicas_local_plan": True,
                },
            )
            == f"{expected}\n"
        )

View File

@@ -1549,3 +1549,19 @@ def test_all_groups_cluster(started_cluster):
     assert "bad_settings_node\ndummy_node\n" == bad_settings_node.query(
         "select host_name from system.clusters where name='all_groups.db_cluster' order by host_name"
     )
+
+
+def test_detach_attach_table(started_cluster):
+    main_node.query("DROP DATABASE IF EXISTS detach_attach_db SYNC")
+    main_node.query(
+        "CREATE DATABASE detach_attach_db ENGINE = Replicated('/clickhouse/databases/detach_attach_db');"
+    )
+    main_node.query(
+        "CREATE TABLE detach_attach_db.detach_attach_table (k UInt64) ENGINE=ReplicatedMergeTree ORDER BY k;"
+    )
+    main_node.query("INSERT INTO detach_attach_db.detach_attach_table VALUES (1);")
+    main_node.query("DETACH TABLE detach_attach_db.detach_attach_table PERMANENTLY;")
+    main_node.query("ATTACH TABLE detach_attach_db.detach_attach_table;")
+    assert (
+        main_node.query("SELECT * FROM detach_attach_db.detach_attach_table;") == "1\n"
+    )

View File

@@ -23,23 +23,3 @@ Row 1:
 x: 1
 2
 -- Bug 67476: Queries with overflow mode != throw must not be cached by the query cache
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0
-0

View File

@@ -43,25 +43,15 @@ DROP TABLE IF EXISTS tab;
 CREATE TABLE tab(c UInt64) ENGINE = Memory;

 SYSTEM DROP QUERY CACHE;
-SELECT sum(c) FROM tab SETTINGS read_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS read_overflow_mode_leaf = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS group_by_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS sort_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS result_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS timeout_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS set_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS join_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS transfer_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
-SELECT sum(c) FROM tab SETTINGS distinct_overflow_mode = 'break', use_query_cache = 1;
-SELECT count(*) from system.query_cache;
+SELECT sum(c) FROM tab SETTINGS read_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS read_overflow_mode_leaf = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS group_by_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS sort_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS result_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS timeout_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS set_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS join_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS transfer_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }
+SELECT sum(c) FROM tab SETTINGS distinct_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE }

 SYSTEM DROP QUERY CACHE;

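Note: the test now expects the QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE error instead of asserting that nothing was cached. Queries with limits remain cacheable as long as every overflow mode stays at the default 'throw'; a sketch:

-- Still cacheable: a read limit is present, but the overflow mode is the default 'throw'.
SELECT sum(c) FROM tab
SETTINGS max_rows_to_read = 1000000, read_overflow_mode = 'throw', use_query_cache = 1;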
View File

@@ -45,3 +45,7 @@ uint64 UInt64 NO PRI SOR \N
 int32 Int32 NO \N
 str String NO \N
 uint64 UInt64 NO PRI SOR \N
+--- SHOW COLUMNS FROM table with dots
+int32 Nullable(Int32) YES \N
+str String NO SOR \N
+uint64 UInt64 NO PRI SOR \N

View File

@@ -90,3 +90,18 @@ SHOW COLUMNS FROM database_123456789abcde.tab;
 DROP DATABASE database_123456789abcde;
 DROP TABLE tab;
+
+DROP TABLE IF EXISTS `tab.with.dots`;
+CREATE TABLE `tab.with.dots`
+(
+    `uint64` UInt64,
+    `int32` Nullable(Int32) COMMENT 'example comment',
+    `str` String,
+    INDEX idx str TYPE set(1000)
+)
+ENGINE = MergeTree
+PRIMARY KEY (uint64)
+ORDER BY (uint64, str);
+SELECT '--- SHOW COLUMNS FROM table with dots';
+SHOW COLUMNS FROM `tab.with.dots`;
+DROP TABLE `tab.with.dots`;

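Note: a backtick-quoted dotted name now resolves as a single table identifier rather than being split into database and table parts. Both readings can still be expressed unambiguously (a sketch with hypothetical names):

SHOW COLUMNS FROM `tab.with.dots`;         -- one table named tab.with.dots in the current database
SHOW COLUMNS FROM `with.dots` FROM `tab`;  -- table with.dots inside database tab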
View File

@@ -49,3 +49,10 @@ tbl 1 PRIMARY 1 a A 0 \N \N \N PRIMARY YES
 --- Short form
 tbl 1 mmi_idx 1 \N 0 \N \N \N MINMAX YES b
 tbl 1 PRIMARY 1 a A 0 \N \N \N PRIMARY YES
+--- SHOW INDEX FROM table with dots
+tab.with.dots 1 blf_idx 1 \N 0 \N \N \N BLOOM_FILTER YES d, b
+tab.with.dots 1 mm1_idx 1 \N 0 \N \N \N MINMAX YES a, c, d
+tab.with.dots 1 mm2_idx 1 \N 0 \N \N \N MINMAX YES c, d, e
+tab.with.dots 1 PRIMARY 1 c A 0 \N \N \N PRIMARY YES
+tab.with.dots 1 PRIMARY 2 a A 0 \N \N \N PRIMARY YES
+tab.with.dots 1 set_idx 1 \N 0 \N \N \N SET YES e

View File

@@ -78,3 +78,22 @@ SHOW INDEX FROM database_123456789abcde.tbl;
 DROP DATABASE database_123456789abcde;
 DROP TABLE tbl;
+
+DROP TABLE IF EXISTS `tab.with.dots`;
+CREATE TABLE `tab.with.dots`
+(
+    a UInt64,
+    b UInt64,
+    c UInt64,
+    d UInt64,
+    e UInt64,
+    INDEX mm1_idx (a, c, d) TYPE minmax,
+    INDEX mm2_idx (c, d, e) TYPE minmax,
+    INDEX set_idx (e) TYPE set(100),
+    INDEX blf_idx (d, b) TYPE bloom_filter(0.8)
+)
+ENGINE = MergeTree
+PRIMARY KEY (c, a);
+SELECT '--- SHOW INDEX FROM table with dots';
+SHOW INDEX FROM `tab.with.dots`;
+DROP TABLE `tab.with.dots`;

View File

@@ -0,0 +1,47 @@
-- Simple types
-- { echoOn }
SELECT x FROM format(JSONEachRow, 'x Date', '{"x":""}');
1970-01-01
SELECT x FROM format(JSONEachRow, 'x Date32', '{"x":""}');
1970-01-01
SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime', '{"x":""}');
1970-01-01 00:00:00
SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime64', '{"x":""}');
1970-01-01 00:00:00.000
SELECT x FROM format(JSONEachRow, 'x IPv4', '{"x":""}');
0.0.0.0
SELECT x FROM format(JSONEachRow, 'x IPv6', '{"x":""}');
::
SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}');
00000000-0000-0000-0000-000000000000
-- { echoOn }
SELECT COUNT(DISTINCT col) FROM table1;
1
-- { echoOn }
SELECT * FROM table1 ORDER BY address ASC;
::
2001:db8:3333:4444:5555:6666:7777:8888
-- Nullable
-- { echoOn }
SELECT x FROM format(JSONEachRow, 'x Nullable(IPv6)', '{"x":""}');
\N
-- Compound types
SELECT x FROM format(JSONEachRow, 'x Array(UUID)', '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}');
['00000000-0000-0000-0000-000000000000','b15f852c-c41a-4fd6-9247-1929c841715e','00000000-0000-0000-0000-000000000000']
SELECT x FROM format(JSONEachRow, 'x Array(Nullable(IPv6))', '{"x":["",""]}');
[NULL,NULL]
SELECT x FROM format(JSONEachRow, 'x Tuple(Date, IPv4, String)', '{"x":["", "", "abc"]}');
('1970-01-01','0.0.0.0','abc')
SELECT x FROM format(JSONEachRow, 'x Map(String, IPv6)', '{"x":{"abc": ""}}');
{'abc':'::'}
SELECT x FROM format(JSONEachRow, 'x Variant(Date, UUID)', '{"x":""}');
\N
-- Deep composition
SELECT x FROM format(JSONEachRow, 'x Array(Array(IPv6))', '{"x":[["2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF", ""], ["", "2001:db8:3333:4444:5555:6666:7777:8888"]]}');
[['2001:db8:3333:4444:cccc:dddd:eeee:ffff','::'],['::','2001:db8:3333:4444:5555:6666:7777:8888']]
SELECT x FROM format(JSONEachRow, 'x Variant(Date, Array(UUID))', '{"x":["", "b15f852c-c41a-4fd6-9247-1929c841715e"]}');
['00000000-0000-0000-0000-000000000000','b15f852c-c41a-4fd6-9247-1929c841715e']
SELECT x FROM format(JSONEachRow, 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))', '{"x":[[""], ["",{"abc":""}]]}');
(['00000000-0000-0000-0000-000000000000'],('00000000-0000-0000-0000-000000000000',{'abc':'::'}))
SELECT x FROM format(JSONEachRow, 'x Map(Tuple(Date,IPv4), Variant(UUID,IPv6))', '{"x":{["",""]:""}}');
{('1970-01-01','0.0.0.0'):NULL}

View File

@@ -0,0 +1,60 @@
SET input_format_json_empty_as_default = 1, allow_experimental_variant_type = 1;
-- Simple types
-- { echoOn }
SELECT x FROM format(JSONEachRow, 'x Date', '{"x":""}');
SELECT x FROM format(JSONEachRow, 'x Date32', '{"x":""}');
SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime', '{"x":""}');
SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime64', '{"x":""}');
SELECT x FROM format(JSONEachRow, 'x IPv4', '{"x":""}');
SELECT x FROM format(JSONEachRow, 'x IPv6', '{"x":""}');
SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}');
-- { echoOff }
-- Simple type AggregateFunction
DROP TABLE IF EXISTS table1;
CREATE TABLE table1(col AggregateFunction(uniq, UInt64)) ENGINE=Memory();
DROP TABLE IF EXISTS table2;
CREATE TABLE table2(UserID UInt64) ENGINE=Memory();
INSERT INTO table1 SELECT uniqState(UserID) FROM table2;
INSERT INTO table1 SELECT x FROM format(JSONEachRow, 'x AggregateFunction(uniq, UInt64)' AS T, '{"x":""}');
-- { echoOn }
SELECT COUNT(DISTINCT col) FROM table1;
-- { echoOff }
DROP TABLE table1;
DROP TABLE table2;
-- The setting input_format_defaults_for_omitted_fields determines the default value if enabled.
CREATE TABLE table1(address IPv6 DEFAULT toIPv6('2001:db8:3333:4444:5555:6666:7777:8888')) ENGINE=Memory();
SET input_format_defaults_for_omitted_fields = 0;
INSERT INTO table1 FORMAT JSONEachRow {"address":""};
SET input_format_defaults_for_omitted_fields = 1;
INSERT INTO table1 FORMAT JSONEachRow {"address":""};
-- { echoOn }
SELECT * FROM table1 ORDER BY address ASC;
-- { echoOff }
DROP TABLE table1;
-- Nullable
-- { echoOn }
SELECT x FROM format(JSONEachRow, 'x Nullable(IPv6)', '{"x":""}');
-- Compound types
SELECT x FROM format(JSONEachRow, 'x Array(UUID)', '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}');
SELECT x FROM format(JSONEachRow, 'x Array(Nullable(IPv6))', '{"x":["",""]}');
SELECT x FROM format(JSONEachRow, 'x Tuple(Date, IPv4, String)', '{"x":["", "", "abc"]}');
SELECT x FROM format(JSONEachRow, 'x Map(String, IPv6)', '{"x":{"abc": ""}}');
SELECT x FROM format(JSONEachRow, 'x Variant(Date, UUID)', '{"x":""}');
-- Deep composition
SELECT x FROM format(JSONEachRow, 'x Array(Array(IPv6))', '{"x":[["2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF", ""], ["", "2001:db8:3333:4444:5555:6666:7777:8888"]]}');
SELECT x FROM format(JSONEachRow, 'x Variant(Date, Array(UUID))', '{"x":["", "b15f852c-c41a-4fd6-9247-1929c841715e"]}');
SELECT x FROM format(JSONEachRow, 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))', '{"x":[[""], ["",{"abc":""}]]}');
SELECT x FROM format(JSONEachRow, 'x Map(Tuple(Date,IPv4), Variant(UUID,IPv6))', '{"x":{["",""]:""}}');

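Note: input_format_json_empty_as_default only substitutes the column's default value for an empty JSON string; with the setting disabled, the same input is a parse error. A minimal before/after sketch:

SET input_format_json_empty_as_default = 0;
SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}');  -- fails: empty string is not a valid UUID
SET input_format_json_empty_as_default = 1;
SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}');  -- returns 00000000-0000-0000-0000-000000000000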
View File

@@ -0,0 +1,8 @@
Array(UUID)
{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e","00000000-0000-0000-0000-000000000000"]}
{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e","00000000-0000-0000-0000-000000000000"]}
{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e","00000000-0000-0000-0000-000000000000"]}
Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))
{"x":[["00000000-0000-0000-0000-000000000000"],["00000000-0000-0000-0000-000000000000",{"abc":"::"}]]}
{"x":[["00000000-0000-0000-0000-000000000000"],["00000000-0000-0000-0000-000000000000",{"abc":"::"}]]}
{"x":[["00000000-0000-0000-0000-000000000000"],["00000000-0000-0000-0000-000000000000",{"abc":"::"}]]}

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Tags: no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.json
# Wrapper for clickhouse-local to always output in JSONEachRow format, so that
# format settings will not affect the output.
function clickhouse_local()
{
$CLICKHOUSE_LOCAL --output-format JSONEachRow "$@"
}
echo 'Array(UUID)'
echo '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}' > $DATA_FILE
# Use increasingly smaller read buffers.
clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Array(UUID)') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=4"
clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Array(UUID)') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=2"
clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Array(UUID)') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=1"
echo 'Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))'
echo '{"x":[[""], ["",{"abc":""}]]}' > $DATA_FILE
# Use increasingly smaller read buffers.
clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=16"
clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=8"
clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=1"
rm $DATA_FILE