Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-09 17:14:47 +00:00)

Commit 9102a6f119: Merge remote-tracking branch 'origin/master' into zznode
@@ -109,7 +109,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
             <networks>
                 <ip>::/0</ip>
             </networks>
-            <password>${CLICKHOUSE_PASSWORD}</password>
+            <password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>
             <quota>default</quota>
             <access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
         </${CLICKHOUSE_USER}>

@@ -1396,6 +1396,7 @@ SELECT * FROM json_each_row_nested
 - [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in json object for named tuples. Default value - `false`.
 - [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
 - [input_format_json_throw_on_bad_escape_sequence](/docs/en/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) - throw an exception if JSON string contains bad escape sequence. If disabled, bad escape sequences will remain as is in the data. Default value - `true`.
+- [input_format_json_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_json_empty_as_default) - treat empty fields in JSON input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
 - [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
 - [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
 - [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.

@@ -752,6 +752,17 @@ Possible values:
 
 Default value: 0.
 
+### input_format_json_empty_as_default {#input_format_json_empty_as_default}
+
+When enabled, replace empty input fields in JSON with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.
+
+Possible values:
+
++ 0 — Disable.
++ 1 — Enable.
+
+Default value: 0.
+
 ## TSV format settings {#tsv-format-settings}
 
 ### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}

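For illustration, the behaviour described by the setting above can be reproduced with the format() table function, as the test reference added at the end of this commit does; the per-query SETTINGS clause used here to enable it is an assumption for the example, not part of the diff:

    SELECT x
    FROM format(JSONEachRow, 'x Date', '{"x":""}')
    SETTINGS input_format_json_empty_as_default = 1;

    -- The empty string is read as the column's default value, so this returns
    -- 1970-01-01 instead of failing to parse the field.
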
@@ -57,11 +57,13 @@ namespace ErrorCodes
 namespace DB
 {
 
-static bool supportsAtomicRenameImpl()
+static std::optional<std::string> supportsAtomicRenameImpl()
 {
     VersionNumber renameat2_minimal_version(3, 15, 0);
     VersionNumber linux_version(Poco::Environment::osVersion());
-    return linux_version >= renameat2_minimal_version;
+    if (linux_version >= renameat2_minimal_version)
+        return std::nullopt;
+    return fmt::format("Linux kernel 3.15+ is required, got {}", linux_version.toString());
 }
 
 static bool renameat2(const std::string & old_path, const std::string & new_path, int flags)

@@ -97,10 +99,14 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
         ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path);
 }
 
-bool supportsAtomicRename()
+bool supportsAtomicRename(std::string * out_message)
 {
-    static bool supports = supportsAtomicRenameImpl();
-    return supports;
+    static auto error = supportsAtomicRenameImpl();
+    if (!error.has_value())
+        return true;
+    if (out_message)
+        *out_message = error.value();
+    return false;
 }
 
 }

@@ -152,16 +158,22 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
 }
 
 
-static bool supportsAtomicRenameImpl()
+static std::optional<std::string> supportsAtomicRenameImpl()
 {
     auto fun = dlsym(RTLD_DEFAULT, "renamex_np");
-    return fun != nullptr;
+    if (fun != nullptr)
+        return std::nullopt;
+    return "macOS 10.12 or later is required";
 }
 
-bool supportsAtomicRename()
+bool supportsAtomicRename(std::string * out_message)
 {
-    static bool supports = supportsAtomicRenameImpl();
-    return supports;
+    static auto error = supportsAtomicRenameImpl();
+    if (!error.has_value())
+        return true;
+    if (out_message)
+        *out_message = error.value();
+    return false;
 }
 
 }

@@ -179,8 +191,10 @@ static bool renameat2(const std::string &, const std::string &, int)
     return false;
 }
 
-bool supportsAtomicRename()
+bool supportsAtomicRename(std::string * out_message)
 {
+    if (out_message)
+        *out_message = "only Linux and macOS are supported";
     return false;
 }
 

@@ -6,7 +6,7 @@ namespace DB
 {
 
 /// Returns true, if the following functions supported by the system
-bool supportsAtomicRename();
+bool supportsAtomicRename(std::string * out_message = nullptr);
 
 /// Atomically rename old_path to new_path. If new_path exists, do not overwrite it and throw exception
 void renameNoReplace(const std::string & old_path, const std::string & new_path);

@@ -1144,6 +1144,7 @@ class IColumn;
     M(Bool, input_format_try_infer_variants, false, "Try to infer the Variant type in text formats when there is more than one possible type for column/array elements", 0) \
     M(Bool, type_json_skip_duplicated_paths, false, "When enabled, during parsing JSON object into JSON type duplicated paths will be ignored and only the first one will be inserted instead of an exception", 0) \
     M(UInt64, input_format_json_max_depth, 1000, "Maximum depth of a field in JSON. This is not a strict limit, it does not have to be applied precisely.", 0) \
+    M(Bool, input_format_json_empty_as_default, false, "Treat empty fields in JSON input as default values.", 0) \
     M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
     M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
     M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \

@@ -71,6 +71,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
         },
         {"24.9",
             {
+                {"input_format_json_empty_as_default", false, false, "Added new setting to allow to treat empty fields in JSON input as default values."},
                 {"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
                 {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
                 {"create_if_not_exists", false, false, "New setting."},

@@ -11,6 +11,7 @@
 #include <IO/WriteBufferFromString.h>
 
 #include <Formats/FormatSettings.h>
+#include <Formats/JSONUtils.h>
 
 namespace DB
 {

@@ -615,28 +616,49 @@ void SerializationArray::serializeTextJSONPretty(const IColumn & column, size_t
 }
 
 
-void SerializationArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+template <typename ReturnType>
+ReturnType SerializationArray::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    deserializeTextImpl(column, istr,
-        [&](IColumn & nested_column)
+    auto deserialize_nested = [&settings, this](IColumn & nested_column, ReadBuffer & buf) -> ReturnType
+    {
+        if constexpr (std::is_same_v<ReturnType, void>)
         {
             if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
-                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, istr, settings, nested);
+                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested);
             else
-                nested->deserializeTextJSON(nested_column, istr, settings);
-        }, false);
+                nested->deserializeTextJSON(nested_column, buf, settings);
+        }
+        else
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
+                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested);
+            return nested->tryDeserializeTextJSON(nested_column, buf, settings);
+        }
+    };
+
+    if (settings.json.empty_as_default)
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested, &istr](IColumn & nested_column) -> ReturnType
+            {
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(nested_column, istr, deserialize_nested);
+            }, false);
+    else
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested, &istr](IColumn & nested_column) -> ReturnType
+            {
+                return deserialize_nested(nested_column, istr);
+            }, false);
+}
+
+
+void SerializationArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    deserializeTextJSONImpl<void>(column, istr, settings);
 }
 
 bool SerializationArray::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    auto read_nested = [&](IColumn & nested_column)
-    {
-        if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
-            return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, istr, settings, nested);
-        return nested->tryDeserializeTextJSON(nested_column, istr, settings);
-    };
-
-    return deserializeTextImpl<bool>(column, istr, std::move(read_nested), false);
+    return deserializeTextJSONImpl<bool>(column, istr, settings);
 }
 
 

@@ -82,6 +82,10 @@ public:
         SerializationPtr create(const SerializationPtr & prev) const override;
         ColumnPtr create(const ColumnPtr & prev) const override;
     };
 
+private:
+    template <typename ReturnType>
+    ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
+
 };
 
 }

@@ -6,6 +6,7 @@
 #include <Columns/ColumnMap.h>
 #include <Core/Field.h>
 #include <Formats/FormatSettings.h>
+#include <Formats/JSONUtils.h>
 #include <Common/assert_cast.h>
 #include <Common/quoteString.h>
 #include <IO/WriteHelpers.h>

@@ -316,28 +317,52 @@ void SerializationMap::serializeTextJSONPretty(const IColumn & column, size_t ro
 }
 
 
-void SerializationMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+template <typename ReturnType>
+ReturnType SerializationMap::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    deserializeTextImpl(column, istr,
-        [&settings](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn)
+    auto deserialize_nested = [&settings](IColumn & subcolumn, ReadBuffer & buf, const SerializationPtr & subcolumn_serialization) -> ReturnType
+    {
+        if constexpr (std::is_same_v<ReturnType, void>)
         {
             if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(subcolumn))
                 SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(subcolumn, buf, settings, subcolumn_serialization);
             else
                 subcolumn_serialization->deserializeTextJSON(subcolumn, buf, settings);
-        });
+        }
+        else
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(subcolumn))
+                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(subcolumn, buf, settings, subcolumn_serialization);
+            return subcolumn_serialization->tryDeserializeTextJSON(subcolumn, buf, settings);
+        }
+    };
+
+    if (settings.json.empty_as_default)
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) -> ReturnType
+            {
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(subcolumn, buf,
+                    [&deserialize_nested, &subcolumn_serialization](IColumn & subcolumn_, ReadBuffer & buf_) -> ReturnType
+                    {
+                        return deserialize_nested(subcolumn_, buf_, subcolumn_serialization);
+                    });
+            });
+    else
+        return deserializeTextImpl<ReturnType>(column, istr,
+            [&deserialize_nested](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn) -> ReturnType
+            {
+                return deserialize_nested(subcolumn, buf, subcolumn_serialization);
+            });
+}
+
+void SerializationMap::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    deserializeTextJSONImpl<void>(column, istr, settings);
 }
 
 bool SerializationMap::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    auto reader = [&settings](ReadBuffer & buf, const SerializationPtr & subcolumn_serialization, IColumn & subcolumn)
-    {
-        if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(subcolumn))
-            return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(subcolumn, buf, settings, subcolumn_serialization);
-        return subcolumn_serialization->tryDeserializeTextJSON(subcolumn, buf, settings);
-    };
-
-    return deserializeTextImpl<bool>(column, istr, reader);
+    return deserializeTextJSONImpl<bool>(column, istr, settings);
 }
 
 void SerializationMap::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const

@@ -74,6 +74,9 @@ private:
 
     template <typename ReturnType = void, typename Reader>
     ReturnType deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && reader) const;
+
+    template <typename ReturnType>
+    ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
 };
 
 }

@@ -5,6 +5,7 @@
 #include <Core/Field.h>
 #include <Columns/ColumnTuple.h>
 #include <Common/assert_cast.h>
+#include <Formats/JSONUtils.h>
 #include <IO/WriteHelpers.h>
 #include <IO/ReadHelpers.h>
 #include <IO/ReadBufferFromString.h>

@@ -313,27 +314,9 @@ void SerializationTuple::serializeTextJSONPretty(const IColumn & column, size_t
 }
 
 template <typename ReturnType>
-ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+ReturnType SerializationTuple::deserializeTupleJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, auto && deserialize_element) const
 {
-    static constexpr bool throw_exception = std::is_same_v<ReturnType, void>;
-
-    auto deserialize_element = [&](IColumn & element_column, size_t element_pos)
-    {
-        if constexpr (throw_exception)
-        {
-            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(element_column))
-                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(element_column, istr, settings, elems[element_pos]);
-            else
-                elems[element_pos]->deserializeTextJSON(element_column, istr, settings);
-            return true;
-        }
-        else
-        {
-            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(element_column))
-                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(element_column, istr, settings, elems[element_pos]);
-            return elems[element_pos]->tryDeserializeTextJSON(element_column, istr, settings);
-        }
-    };
+    static constexpr auto throw_exception = std::is_same_v<ReturnType, void>;
 
     if (settings.json.read_named_tuples_as_objects
         && have_explicit_names)

@@ -506,12 +489,51 @@ ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuf
     }
 }
 
-void SerializationTuple::deserializeTextJSON(DB::IColumn & column, DB::ReadBuffer & istr, const DB::FormatSettings & settings) const
+template <typename ReturnType>
+ReturnType SerializationTuple::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
-    deserializeTextJSONImpl(column, istr, settings);
+    auto deserialize_nested = [&settings](IColumn & nested_column, ReadBuffer & buf, const SerializationPtr & nested_column_serialization) -> ReturnType
+    {
+        if constexpr (std::is_same_v<ReturnType, void>)
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
+                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested_column_serialization);
+            else
+                nested_column_serialization->deserializeTextJSON(nested_column, buf, settings);
+        }
+        else
+        {
+            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
+                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested_column_serialization);
+            else
+                return nested_column_serialization->tryDeserializeTextJSON(nested_column, buf, settings);
+        }
+    };
+
+    if (settings.json.empty_as_default)
+        return deserializeTupleJSONImpl<ReturnType>(column, istr, settings,
+            [&deserialize_nested, &istr, this](IColumn & nested_column, size_t element_pos) -> ReturnType
+            {
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(nested_column, istr,
+                    [&deserialize_nested, element_pos, this](IColumn & nested_column_, ReadBuffer & buf) -> ReturnType
+                    {
+                        return deserialize_nested(nested_column_, buf, elems[element_pos]);
+                    });
+            });
+    else
+        return deserializeTupleJSONImpl<ReturnType>(column, istr, settings,
+            [&deserialize_nested, &istr, this](IColumn & nested_column, size_t element_pos) -> ReturnType
+            {
+                return deserialize_nested(nested_column, istr, elems[element_pos]);
+            });
+}
+
+void SerializationTuple::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
+{
+    deserializeTextJSONImpl<void>(column, istr, settings);
 }
 
-bool SerializationTuple::tryDeserializeTextJSON(DB::IColumn & column, DB::ReadBuffer & istr, const DB::FormatSettings & settings) const
+bool SerializationTuple::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
 {
     return deserializeTextJSONImpl<bool>(column, istr, settings);
 }

@@ -81,7 +81,10 @@ private:
     template <typename ReturnType = void>
     ReturnType deserializeTextImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, bool whole) const;
 
-    template <typename ReturnType = void>
+    template <typename ReturnType>
+    ReturnType deserializeTupleJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings, auto && deserialize_element) const;
+
+    template <typename ReturnType>
     ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
 
     template <typename ReturnType = void>

@@ -197,8 +197,9 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported");
     }
 
-    if (exchange && !supportsAtomicRename())
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported");
+    std::string message;
+    if (exchange && !supportsAtomicRename(&message))
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported because exchanging files is not supported by the OS ({})", message);
 
     waitDatabaseStarted();
 

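For context, a hypothetical statement that takes this code path (table names are placeholders) and would now report the more descriptive error on an OS without atomic exchange support:

    EXCHANGE TABLES db.t1 AND db.t2;

    -- Expected failure on an unsupported OS, with the wording introduced above:
    -- RENAME EXCHANGE is not supported because exchanging files is not supported by the OS (...)
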
@@ -152,6 +152,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
     format_settings.json.try_infer_objects_as_tuples = settings.input_format_json_try_infer_named_tuples_from_objects;
     format_settings.json.throw_on_bad_escape_sequence = settings.input_format_json_throw_on_bad_escape_sequence;
     format_settings.json.ignore_unnecessary_fields = settings.input_format_json_ignore_unnecessary_fields;
+    format_settings.json.empty_as_default = settings.input_format_json_empty_as_default;
     format_settings.json.type_json_skip_duplicated_paths = settings.type_json_skip_duplicated_paths;
     format_settings.null_as_default = settings.input_format_null_as_default;
     format_settings.force_null_for_omitted_fields = settings.input_format_force_null_for_omitted_fields;

@@ -237,6 +237,7 @@ struct FormatSettings
         bool infer_incomplete_types_as_strings = true;
         bool throw_on_bad_escape_sequence = true;
         bool ignore_unnecessary_fields = true;
+        bool empty_as_default = false;
         bool type_json_skip_duplicated_paths = false;
     } json{};
 

@@ -2,12 +2,14 @@
 #include <Formats/JSONUtils.h>
 #include <Formats/ReadSchemaUtils.h>
 #include <Formats/EscapingRuleUtils.h>
+#include <IO/PeekableReadBuffer.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/WriteBufferValidUTF8.h>
 #include <DataTypes/Serializations/SerializationNullable.h>
 #include <DataTypes/DataTypeNullable.h>
 #include <DataTypes/DataTypeObjectDeprecated.h>
 #include <DataTypes/DataTypeFactory.h>
+#include <Common/assert_cast.h>
 
 #include <base/find_symbols.h>
 

@@ -286,11 +288,19 @@ namespace JSONUtils
                 return true;
             }
 
-            if (as_nullable)
-                return SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(column, in, format_settings, serialization);
+            auto deserialize = [as_nullable, &format_settings, &serialization](IColumn & column_, ReadBuffer & buf) -> bool
+            {
+                if (as_nullable)
+                    return SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(column_, buf, format_settings, serialization);
 
-            serialization->deserializeTextJSON(column, in, format_settings);
+                serialization->deserializeTextJSON(column_, buf, format_settings);
                 return true;
+            };
+
+            if (format_settings.json.empty_as_default)
+                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<bool, false>(column, in, deserialize);
+            else
+                return deserialize(column, in);
         }
         catch (Exception & e)
         {

@@ -920,6 +930,78 @@
         }
     }
 
+    template <typename ReturnType, bool default_column_return_value>
+    ReturnType deserializeEmpyStringAsDefaultOrNested(IColumn & column, ReadBuffer & istr, const NestedDeserialize<ReturnType> & deserialize_nested)
+    {
+        static constexpr auto throw_exception = std::is_same_v<ReturnType, void>;
+
+        static constexpr auto EMPTY_STRING = "\"\"";
+        static constexpr auto EMPTY_STRING_LENGTH = std::string_view(EMPTY_STRING).length();
+
+        if (istr.eof() || *istr.position() != EMPTY_STRING[0])
+            return deserialize_nested(column, istr);
+
+        auto do_deserialize = [](IColumn & column_, ReadBuffer & buf, auto && check_for_empty_string, auto && deserialize) -> ReturnType
+        {
+            if (check_for_empty_string(buf))
+            {
+                column_.insertDefault();
+                return ReturnType(default_column_return_value);
+            }
+            return deserialize(column_, buf);
+        };
+
+        if (istr.available() >= EMPTY_STRING_LENGTH)
+        {
+            /// We have enough data in buffer to check if we have an empty string.
+            auto check_for_empty_string = [](ReadBuffer & buf) -> bool
+            {
+                auto * pos = buf.position();
+                if (checkString(EMPTY_STRING, buf))
+                    return true;
+                buf.position() = pos;
+                return false;
+            };
+
+            return do_deserialize(column, istr, check_for_empty_string, deserialize_nested);
+        }
+
+        /// We don't have enough data in buffer to check if we have an empty string.
+        /// Use PeekableReadBuffer to make a checkpoint before checking for an
+        /// empty string and rollback if check was failed.
+
+        auto check_for_empty_string = [](ReadBuffer & buf) -> bool
+        {
+            auto & peekable_buf = assert_cast<PeekableReadBuffer &>(buf);
+            peekable_buf.setCheckpoint();
+            SCOPE_EXIT(peekable_buf.dropCheckpoint());
+            if (checkString(EMPTY_STRING, peekable_buf))
+                return true;
+            peekable_buf.rollbackToCheckpoint();
+            return false;
+        };
+
+        auto deserialize_nested_with_check = [&deserialize_nested](IColumn & column_, ReadBuffer & buf) -> ReturnType
+        {
+            auto & peekable_buf = assert_cast<PeekableReadBuffer &>(buf);
+            if constexpr (throw_exception)
+                deserialize_nested(column_, peekable_buf);
+            else if (!deserialize_nested(column_, peekable_buf))
+                return ReturnType(false);
+
+            if (unlikely(peekable_buf.hasUnreadData()))
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect state while parsing JSON: PeekableReadBuffer has unread data in own memory: {}", String(peekable_buf.position(), peekable_buf.available()));
+
+            return ReturnType(true);
+        };
+
+        PeekableReadBuffer peekable_buf(istr, true);
+        return do_deserialize(column, peekable_buf, check_for_empty_string, deserialize_nested_with_check);
+    }
+
+    template void deserializeEmpyStringAsDefaultOrNested<void, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<void> & deserialize_nested);
+    template bool deserializeEmpyStringAsDefaultOrNested<bool, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
+    template bool deserializeEmpyStringAsDefaultOrNested<bool, false>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
 }
 
 }

@@ -8,6 +8,7 @@
 #include <IO/Progress.h>
 #include <Core/NamesAndTypes.h>
 #include <Common/Stopwatch.h>
+#include <functional>
 #include <utility>
 
 namespace DB

@@ -146,6 +147,16 @@ namespace JSONUtils
 
     bool skipUntilFieldInObject(ReadBuffer & in, const String & desired_field_name, const FormatSettings::JSON & settings);
     void skipTheRestOfObject(ReadBuffer & in, const FormatSettings::JSON & settings);
+
+    template <typename ReturnType>
+    using NestedDeserialize = std::function<ReturnType(IColumn &, ReadBuffer &)>;
+
+    template <typename ReturnType, bool default_column_return_value = true>
+    ReturnType deserializeEmpyStringAsDefaultOrNested(IColumn & column, ReadBuffer & istr, const NestedDeserialize<ReturnType> & deserialize_nested);
+
+    extern template void deserializeEmpyStringAsDefaultOrNested<void, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<void> & deserialize_nested);
+    extern template bool deserializeEmpyStringAsDefaultOrNested<bool, true>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
+    extern template bool deserializeEmpyStringAsDefaultOrNested<bool, false>(IColumn & column, ReadBuffer & istr, const NestedDeserialize<bool> & deserialize_nested);
 }
 
 }

@@ -85,7 +85,9 @@ ConcurrentHashJoin::ConcurrentHashJoin(
           CurrentMetrics::ConcurrentHashJoinPoolThreads,
           CurrentMetrics::ConcurrentHashJoinPoolThreadsActive,
           CurrentMetrics::ConcurrentHashJoinPoolThreadsScheduled,
-          slots))
+          /*max_threads_*/ slots,
+          /*max_free_threads_*/ 0,
+          /*queue_size_*/ slots))
     , stats_collecting_params(stats_collecting_params_)
 {
     hash_joins.resize(slots);

@@ -1236,6 +1236,7 @@ IBlocksStreamPtr HashJoin::getNonJoinedBlocks(const Block & left_sample_block,
 
 void HashJoin::reuseJoinedData(const HashJoin & join)
 {
+    have_compressed = join.have_compressed;
     data = join.data;
     from_storage_join = true;
 

@@ -968,6 +968,11 @@ void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const ASTC
         if (database && database->getEngineName() != "Atomic")
             throw Exception(ErrorCodes::INCORRECT_QUERY,
                 "Refreshable materialized views (except with APPEND) only support Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName());
+
+        std::string message;
+        if (!supportsAtomicRename(&message))
+            throw Exception(ErrorCodes::NOT_IMPLEMENTED,
+                "Can't create refreshable materialized view because exchanging files is not supported by the OS ({})", message);
     }
 
     Block input_block;

@@ -1,6 +1,6 @@
 #include <Parsers/ParserShowColumnsQuery.h>
 
-#include <Parsers/ASTIdentifier_fwd.h>
+#include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTShowColumnsQuery.h>
 #include <Parsers/CommonParsers.h>

@@ -18,7 +18,6 @@ bool ParserShowColumnsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     ASTPtr from1;
     ASTPtr from2;
 
-    String from1_str;
     String from2_str;
 
     auto query = std::make_shared<ASTShowColumnsQuery>();

@@ -43,25 +42,18 @@ bool ParserShowColumnsQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     else
         return false;
 
-    tryGetIdentifierNameInto(from1, from1_str);
-
-    bool abbreviated_form = from1_str.contains("."); // FROM database.table
-    if (abbreviated_form)
-    {
-        std::vector<String> split;
-        boost::split(split, from1_str, boost::is_any_of("."));
-        query->database = split[0];
-        query->table = split[1];
-    }
+    const auto * table_id = from1->as<ASTIdentifier>();
+    if (!table_id)
+        return false;
+    query->table = table_id->shortName();
+    if (table_id->compound())
+        query->database = table_id->name_parts[0];
     else
     {
         if (ParserKeyword(Keyword::FROM).ignore(pos, expected) || ParserKeyword(Keyword::IN).ignore(pos, expected))
             if (!ParserIdentifier().parse(pos, from2, expected))
                 return false;
 
         tryGetIdentifierNameInto(from2, from2_str);
-
-        query->table = from1_str;
         query->database = from2_str;
     }

@@ -1,6 +1,6 @@
 #include <Parsers/ParserShowIndexesQuery.h>
 
-#include <Parsers/ASTIdentifier_fwd.h>
+#include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTShowIndexesQuery.h>
 #include <Parsers/CommonParsers.h>

@@ -17,7 +17,6 @@ bool ParserShowIndexesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     ASTPtr from1;
     ASTPtr from2;
 
-    String from1_str;
     String from2_str;
 
     auto query = std::make_shared<ASTShowIndexesQuery>();

@@ -39,25 +38,18 @@ bool ParserShowIndexesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
     else
         return false;
 
-    tryGetIdentifierNameInto(from1, from1_str);
-
-    bool abbreviated_form = from1_str.contains("."); // FROM database.table
-    if (abbreviated_form)
-    {
-        std::vector<String> split;
-        boost::split(split, from1_str, boost::is_any_of("."));
-        query->database = split[0];
-        query->table = split[1];
-    }
+    const auto * table_id = from1->as<ASTIdentifier>();
+    if (!table_id)
+        return false;
+    query->table = table_id->shortName();
+    if (table_id->compound())
+        query->database = table_id->name_parts[0];
     else
     {
         if (ParserKeyword(Keyword::FROM).ignore(pos, expected) || ParserKeyword(Keyword::IN).ignore(pos, expected))
             if (!ParserIdentifier().parse(pos, from2, expected))
                 return false;
 
         tryGetIdentifierNameInto(from2, from2_str);
-
-        query->table = from1_str;
         query->database = from2_str;
     }

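With the identifier-based parsing above, a quoted table name that contains dots is no longer split into database and table on the dot; a short sketch of the resulting behaviour, mirroring the tab.with.dots tests added further down in this commit:

    SHOW COLUMNS FROM `tab.with.dots`;  -- a single table named tab.with.dots in the current database
    SHOW COLUMNS FROM db.tab;           -- a compound identifier: database db, table tab
    SHOW INDEX FROM `tab.with.dots`;    -- the same rule applies to SHOW INDEX
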
@@ -15,6 +15,7 @@ namespace ErrorCodes
 {
     extern const int SUPPORT_IS_DISABLED;
     extern const int REPLICA_STATUS_CHANGED;
+    extern const int LOGICAL_ERROR;
 }
 
 ReplicatedMergeTreeAttachThread::ReplicatedMergeTreeAttachThread(StorageReplicatedMergeTree & storage_)

@@ -117,6 +118,67 @@ void ReplicatedMergeTreeAttachThread::checkHasReplicaMetadataInZooKeeper(const z
     }
 }
 
+Int32 ReplicatedMergeTreeAttachThread::fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper)
+{
+    const String & zookeeper_path = storage.zookeeper_path;
+    const String & replica_path = storage.replica_path;
+    const bool replica_readonly = storage.is_readonly;
+
+    for (size_t i = 0; i != 2; ++i)
+    {
+        String replica_metadata_version_str;
+        const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version_str);
+        if (!replica_metadata_version_exists)
+            return -1;
+
+        const Int32 metadata_version = parse<Int32>(replica_metadata_version_str);
+
+        if (metadata_version != 0 || replica_readonly)
+        {
+            /// No need to fix anything
+            return metadata_version;
+        }
+
+        Coordination::Stat stat;
+        zookeeper->get(fs::path(zookeeper_path) / "metadata", &stat);
+        if (stat.version == 0)
+        {
+            /// No need to fix anything
+            return metadata_version;
+        }
+
+        ReplicatedMergeTreeQueue & queue = storage.queue;
+        queue.pullLogsToQueue(zookeeper);
+        if (queue.getStatus().metadata_alters_in_queue != 0)
+        {
+            LOG_DEBUG(log, "No need to update metadata_version as there are ALTER_METADATA entries in the queue");
+            return metadata_version;
+        }
+
+        const Coordination::Requests ops = {
+            zkutil::makeSetRequest(fs::path(replica_path) / "metadata_version", std::to_string(stat.version), 0),
+            zkutil::makeCheckRequest(fs::path(zookeeper_path) / "metadata", stat.version),
+        };
+        Coordination::Responses ops_responses;
+        const auto code = zookeeper->tryMulti(ops, ops_responses);
+        if (code == Coordination::Error::ZOK)
+        {
+            LOG_DEBUG(log, "Successfully set metadata_version to {}", stat.version);
+            return stat.version;
+        }
+        if (code != Coordination::Error::ZBADVERSION)
+        {
+            throw zkutil::KeeperException(code);
+        }
+    }
+
+    /// Second attempt is only possible if metadata_version != 0 or metadata.version changed during the first attempt.
+    /// If metadata_version != 0, on second attempt we will return the new metadata_version.
+    /// If metadata.version changed, on second attempt we will either get metadata_version != 0 and return the new metadata_version or we will get metadata_alters_in_queue != 0 and return 0.
+    /// Either way, on second attempt this method should return.
+    throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to fix replica metadata_version in ZooKeeper after two attempts");
+}
+
 void ReplicatedMergeTreeAttachThread::runImpl()
 {
     storage.setZooKeeper();

@@ -160,11 +222,11 @@ void ReplicatedMergeTreeAttachThread::runImpl()
     /// Just in case it was not removed earlier due to connection loss
     zookeeper->tryRemove(replica_path + "/flags/force_restore_data");
 
-    String replica_metadata_version;
-    const bool replica_metadata_version_exists = zookeeper->tryGet(replica_path + "/metadata_version", replica_metadata_version);
+    const Int32 replica_metadata_version = fixReplicaMetadataVersionIfNeeded(zookeeper);
+    const bool replica_metadata_version_exists = replica_metadata_version != -1;
     if (replica_metadata_version_exists)
     {
-        storage.setInMemoryMetadata(metadata_snapshot->withMetadataVersion(parse<int>(replica_metadata_version)));
+        storage.setInMemoryMetadata(metadata_snapshot->withMetadataVersion(replica_metadata_version));
     }
     else
     {

@@ -48,6 +48,8 @@ private:
     void runImpl();
 
     void finalizeInitialization();
+
+    Int32 fixReplicaMetadataVersionIfNeeded(zkutil::ZooKeeperPtr zookeeper);
 };
 
 }

@@ -2222,6 +2222,7 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const
     res.inserts_in_queue = 0;
     res.merges_in_queue = 0;
     res.part_mutations_in_queue = 0;
+    res.metadata_alters_in_queue = 0;
     res.queue_oldest_time = 0;
     res.inserts_oldest_time = 0;
     res.merges_oldest_time = 0;

@@ -2264,6 +2265,11 @@ ReplicatedMergeTreeQueue::Status ReplicatedMergeTreeQueue::getStatus() const
                 res.oldest_part_to_mutate_to = entry->new_part_name;
             }
         }
+
+        if (entry->type == LogEntry::ALTER_METADATA)
+        {
+            ++res.metadata_alters_in_queue;
+        }
     }
 
     return res;

@@ -473,6 +473,7 @@ public:
         UInt32 inserts_in_queue;
         UInt32 merges_in_queue;
         UInt32 part_mutations_in_queue;
+        UInt32 metadata_alters_in_queue;
         UInt32 queue_oldest_time;
         UInt32 inserts_oldest_time;
         UInt32 merges_oldest_time;

@@ -215,7 +215,7 @@ static TableZnodeInfo extractZooKeeperPathAndReplicaNameFromEngineArgs(
     bool is_replicated_database = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY &&
         DatabaseCatalog::instance().getDatabase(table_id.database_name)->getEngineName() == "Replicated";
 
-    if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0)
+    if (!query.attach && is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0)
     {
         throw Exception(ErrorCodes::BAD_ARGUMENTS,
             "It's not allowed to specify explicit zookeeper_path and replica_name "

@@ -223,7 +223,7 @@ static TableZnodeInfo extractZooKeeperPathAndReplicaNameFromEngineArgs(
             "specify them explicitly, enable setting "
             "database_replicated_allow_replicated_engine_arguments.");
     }
-    else if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1)
+    else if (!query.attach && is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1)
     {
         LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "It's not recommended to explicitly specify "
             "zookeeper_path and replica_name in ReplicatedMergeTree arguments");

@@ -238,7 +238,7 @@ static TableZnodeInfo extractZooKeeperPathAndReplicaNameFromEngineArgs(
     if (!ast_replica_name || ast_replica_name->value.getType() != Field::Types::String)
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must be a string literal{}", verbose_help_message);
 
-    if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2)
+    if (!query.attach && is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2)
     {
         LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) "
             "with default arguments", ast_zk_path->value.safeGet<String>(), ast_replica_name->value.safeGet<String>());

@@ -1549,3 +1549,19 @@ def test_all_groups_cluster(started_cluster):
     assert "bad_settings_node\ndummy_node\n" == bad_settings_node.query(
         "select host_name from system.clusters where name='all_groups.db_cluster' order by host_name"
     )
+
+
+def test_detach_attach_table(started_cluster):
+    main_node.query("DROP DATABASE IF EXISTS detach_attach_db SYNC")
+    main_node.query(
+        "CREATE DATABASE detach_attach_db ENGINE = Replicated('/clickhouse/databases/detach_attach_db');"
+    )
+    main_node.query(
+        "CREATE TABLE detach_attach_db.detach_attach_table (k UInt64) ENGINE=ReplicatedMergeTree ORDER BY k;"
+    )
+    main_node.query("INSERT INTO detach_attach_db.detach_attach_table VALUES (1);")
+    main_node.query("DETACH TABLE detach_attach_db.detach_attach_table PERMANENTLY;")
+    main_node.query("ATTACH TABLE detach_attach_db.detach_attach_table;")
+    assert (
+        main_node.query("SELECT * FROM detach_attach_db.detach_attach_table;") == "1\n"
+    )
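
The same scenario can be reproduced by hand with plain SQL against a single server (a sketch for manual verification built from the statements in the test above; it is not part of the change itself):

    CREATE DATABASE detach_attach_db ENGINE = Replicated('/clickhouse/databases/detach_attach_db');
    CREATE TABLE detach_attach_db.detach_attach_table (k UInt64) ENGINE = ReplicatedMergeTree ORDER BY k;
    INSERT INTO detach_attach_db.detach_attach_table VALUES (1);
    DETACH TABLE detach_attach_db.detach_attach_table PERMANENTLY;
    ATTACH TABLE detach_attach_db.detach_attach_table;
    SELECT * FROM detach_attach_db.detach_attach_table;  -- expected output: 1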
@@ -45,3 +45,7 @@ uint64 UInt64 NO PRI SOR \N
 int32 Int32 NO \N
 str String NO \N
 uint64 UInt64 NO PRI SOR \N
+--- SHOW COLUMNS FROM table with dots
+int32 Nullable(Int32) YES \N
+str String NO SOR \N
+uint64 UInt64 NO PRI SOR \N
@@ -90,3 +90,18 @@ SHOW COLUMNS FROM database_123456789abcde.tab;
 DROP DATABASE database_123456789abcde;
 
 DROP TABLE tab;
+
+DROP TABLE IF EXISTS `tab.with.dots`;
+CREATE TABLE `tab.with.dots`
+(
+    `uint64` UInt64,
+    `int32` Nullable(Int32) COMMENT 'example comment',
+    `str` String,
+    INDEX idx str TYPE set(1000)
+)
+ENGINE = MergeTree
+PRIMARY KEY (uint64)
+ORDER BY (uint64, str);
+SELECT '--- SHOW COLUMNS FROM table with dots';
+SHOW COLUMNS FROM `tab.with.dots`;
+DROP TABLE `tab.with.dots`;
@@ -49,3 +49,10 @@ tbl 1 PRIMARY 1 a A 0 \N \N \N PRIMARY YES
 --- Short form
 tbl 1 mmi_idx 1 \N 0 \N \N \N MINMAX YES b
 tbl 1 PRIMARY 1 a A 0 \N \N \N PRIMARY YES
+--- SHOW INDEX FROM table with dots
+tab.with.dots 1 blf_idx 1 \N 0 \N \N \N BLOOM_FILTER YES d, b
+tab.with.dots 1 mm1_idx 1 \N 0 \N \N \N MINMAX YES a, c, d
+tab.with.dots 1 mm2_idx 1 \N 0 \N \N \N MINMAX YES c, d, e
+tab.with.dots 1 PRIMARY 1 c A 0 \N \N \N PRIMARY YES
+tab.with.dots 1 PRIMARY 2 a A 0 \N \N \N PRIMARY YES
+tab.with.dots 1 set_idx 1 \N 0 \N \N \N SET YES e
@@ -78,3 +78,22 @@ SHOW INDEX FROM database_123456789abcde.tbl;
 DROP DATABASE database_123456789abcde;
 
 DROP TABLE tbl;
+
+DROP TABLE IF EXISTS `tab.with.dots`;
+CREATE TABLE `tab.with.dots`
+(
+    a UInt64,
+    b UInt64,
+    c UInt64,
+    d UInt64,
+    e UInt64,
+    INDEX mm1_idx (a, c, d) TYPE minmax,
+    INDEX mm2_idx (c, d, e) TYPE minmax,
+    INDEX set_idx (e) TYPE set(100),
+    INDEX blf_idx (d, b) TYPE bloom_filter(0.8)
+)
+ENGINE = MergeTree
+PRIMARY KEY (c, a);
+SELECT '--- SHOW INDEX FROM table with dots';
+SHOW INDEX FROM `tab.with.dots`;
+DROP TABLE `tab.with.dots`;
@@ -0,0 +1,47 @@
+-- Simple types
+-- { echoOn }
+SELECT x FROM format(JSONEachRow, 'x Date', '{"x":""}');
+1970-01-01
+SELECT x FROM format(JSONEachRow, 'x Date32', '{"x":""}');
+1970-01-01
+SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime', '{"x":""}');
+1970-01-01 00:00:00
+SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime64', '{"x":""}');
+1970-01-01 00:00:00.000
+SELECT x FROM format(JSONEachRow, 'x IPv4', '{"x":""}');
+0.0.0.0
+SELECT x FROM format(JSONEachRow, 'x IPv6', '{"x":""}');
+::
+SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}');
+00000000-0000-0000-0000-000000000000
+-- { echoOn }
+SELECT COUNT(DISTINCT col) FROM table1;
+1
+-- { echoOn }
+SELECT * FROM table1 ORDER BY address ASC;
+::
+2001:db8:3333:4444:5555:6666:7777:8888
+-- Nullable
+-- { echoOn }
+SELECT x FROM format(JSONEachRow, 'x Nullable(IPv6)', '{"x":""}');
+\N
+-- Compound types
+SELECT x FROM format(JSONEachRow, 'x Array(UUID)', '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}');
+['00000000-0000-0000-0000-000000000000','b15f852c-c41a-4fd6-9247-1929c841715e','00000000-0000-0000-0000-000000000000']
+SELECT x FROM format(JSONEachRow, 'x Array(Nullable(IPv6))', '{"x":["",""]}');
+[NULL,NULL]
+SELECT x FROM format(JSONEachRow, 'x Tuple(Date, IPv4, String)', '{"x":["", "", "abc"]}');
+('1970-01-01','0.0.0.0','abc')
+SELECT x FROM format(JSONEachRow, 'x Map(String, IPv6)', '{"x":{"abc": ""}}');
+{'abc':'::'}
+SELECT x FROM format(JSONEachRow, 'x Variant(Date, UUID)', '{"x":""}');
+\N
+-- Deep composition
+SELECT x FROM format(JSONEachRow, 'x Array(Array(IPv6))', '{"x":[["2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF", ""], ["", "2001:db8:3333:4444:5555:6666:7777:8888"]]}');
+[['2001:db8:3333:4444:cccc:dddd:eeee:ffff','::'],['::','2001:db8:3333:4444:5555:6666:7777:8888']]
+SELECT x FROM format(JSONEachRow, 'x Variant(Date, Array(UUID))', '{"x":["", "b15f852c-c41a-4fd6-9247-1929c841715e"]}');
+['00000000-0000-0000-0000-000000000000','b15f852c-c41a-4fd6-9247-1929c841715e']
+SELECT x FROM format(JSONEachRow, 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))', '{"x":[[""], ["",{"abc":""}]]}');
+(['00000000-0000-0000-0000-000000000000'],('00000000-0000-0000-0000-000000000000',{'abc':'::'}))
+SELECT x FROM format(JSONEachRow, 'x Map(Tuple(Date,IPv4), Variant(UUID,IPv6))', '{"x":{["",""]:""}}');
+{('1970-01-01','0.0.0.0'):NULL}
tests/queries/0_stateless/03222_json_empty_as_default.sql (new file, 60 lines)
@@ -0,0 +1,60 @@
+SET input_format_json_empty_as_default = 1, allow_experimental_variant_type = 1;
+
+-- Simple types
+-- { echoOn }
+SELECT x FROM format(JSONEachRow, 'x Date', '{"x":""}');
+SELECT x FROM format(JSONEachRow, 'x Date32', '{"x":""}');
+SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime', '{"x":""}');
+SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime64', '{"x":""}');
+SELECT x FROM format(JSONEachRow, 'x IPv4', '{"x":""}');
+SELECT x FROM format(JSONEachRow, 'x IPv6', '{"x":""}');
+SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}');
+-- { echoOff }
+
+-- Simple type AggregateFunction
+DROP TABLE IF EXISTS table1;
+CREATE TABLE table1(col AggregateFunction(uniq, UInt64)) ENGINE=Memory();
+DROP TABLE IF EXISTS table2;
+CREATE TABLE table2(UserID UInt64) ENGINE=Memory();
+
+INSERT INTO table1 SELECT uniqState(UserID) FROM table2;
+INSERT INTO table1 SELECT x FROM format(JSONEachRow, 'x AggregateFunction(uniq, UInt64)' AS T, '{"x":""}');
+
+-- { echoOn }
+SELECT COUNT(DISTINCT col) FROM table1;
+-- { echoOff }
+
+DROP TABLE table1;
+DROP TABLE table2;
+
+-- The setting input_format_defaults_for_omitted_fields determines the default value if enabled.
+CREATE TABLE table1(address IPv6 DEFAULT toIPv6('2001:db8:3333:4444:5555:6666:7777:8888')) ENGINE=Memory();
+
+SET input_format_defaults_for_omitted_fields = 0;
+INSERT INTO table1 FORMAT JSONEachRow {"address":""};
+
+SET input_format_defaults_for_omitted_fields = 1;
+INSERT INTO table1 FORMAT JSONEachRow {"address":""};
+
+-- { echoOn }
+SELECT * FROM table1 ORDER BY address ASC;
+-- { echoOff }
+
+DROP TABLE table1;
+
+-- Nullable
+-- { echoOn }
+SELECT x FROM format(JSONEachRow, 'x Nullable(IPv6)', '{"x":""}');
+
+-- Compound types
+SELECT x FROM format(JSONEachRow, 'x Array(UUID)', '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}');
+SELECT x FROM format(JSONEachRow, 'x Array(Nullable(IPv6))', '{"x":["",""]}');
+SELECT x FROM format(JSONEachRow, 'x Tuple(Date, IPv4, String)', '{"x":["", "", "abc"]}');
+SELECT x FROM format(JSONEachRow, 'x Map(String, IPv6)', '{"x":{"abc": ""}}');
+SELECT x FROM format(JSONEachRow, 'x Variant(Date, UUID)', '{"x":""}');
+
+-- Deep composition
+SELECT x FROM format(JSONEachRow, 'x Array(Array(IPv6))', '{"x":[["2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF", ""], ["", "2001:db8:3333:4444:5555:6666:7777:8888"]]}');
+SELECT x FROM format(JSONEachRow, 'x Variant(Date, Array(UUID))', '{"x":["", "b15f852c-c41a-4fd6-9247-1929c841715e"]}');
+SELECT x FROM format(JSONEachRow, 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))', '{"x":[[""], ["",{"abc":""}]]}');
+SELECT x FROM format(JSONEachRow, 'x Map(Tuple(Date,IPv4), Variant(UUID,IPv6))', '{"x":{["",""]:""}}');
@@ -0,0 +1,8 @@
+Array(UUID)
+{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e","00000000-0000-0000-0000-000000000000"]}
+{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e","00000000-0000-0000-0000-000000000000"]}
+{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e","00000000-0000-0000-0000-000000000000"]}
+Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))
+{"x":[["00000000-0000-0000-0000-000000000000"],["00000000-0000-0000-0000-000000000000",{"abc":"::"}]]}
+{"x":[["00000000-0000-0000-0000-000000000000"],["00000000-0000-0000-0000-000000000000",{"abc":"::"}]]}
+{"x":[["00000000-0000-0000-0000-000000000000"],["00000000-0000-0000-0000-000000000000",{"abc":"::"}]]}
tests/queries/0_stateless/03222_json_empty_as_default_small_read_buffer.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+# Tags: no-parallel
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.json
+
+# Wrapper for clickhouse-local to always output in JSONEachRow format, that
+# way format settings will not affect output.
+function clickhouse_local()
+{
+    $CLICKHOUSE_LOCAL --output-format JSONEachRow "$@"
+}
+
+echo 'Array(UUID)'
+echo '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}' > $DATA_FILE
+# Use increasingly smaller read buffers.
+clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Array(UUID)') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=4"
+clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Array(UUID)') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=2"
+clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Array(UUID)') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=1"
+
+echo 'Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))'
+echo '{"x":[[""], ["",{"abc":""}]]}' > $DATA_FILE
+# Use increasingly smaller read buffers.
+clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=16"
+clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=8"
+clickhouse_local -q "SELECT x FROM file('$DATA_FILE', 'JSONEachRow', 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))') SETTINGS input_format_json_empty_as_default=1, input_format_parallel_parsing=0, storage_file_read_method='read', max_read_buffer_size=1"
+
+rm $DATA_FILE