Merge branch 'master' into fix-global-trace-collector

This commit is contained in:
Antonio Andelic 2024-06-06 14:36:02 +02:00
commit 27fe0439fa
37 changed files with 683 additions and 368 deletions

View File

@ -25,7 +25,8 @@ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment
echo "Get previous release tag"
previous_release_tag=$(dpkg --info package_folder/clickhouse-client*.deb | grep "Version: " | awk '{print $2}' | cut -f1 -d'+' | get_previous_release_tag)
# shellcheck disable=SC2016
previous_release_tag=$(dpkg-deb --showformat='${Version}' --show package_folder/clickhouse-client*.deb | get_previous_release_tag)
echo $previous_release_tag
echo "Clone previous release repository"

View File

@ -139,7 +139,7 @@ For the query to run successfully, the following conditions must be met:
ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM table1
```
This query copies the data partition from the `table1` to `table2` and replaces existing partition in the `table2`.
This query copies the data partition from `table1` to `table2` and replaces the existing partition in `table2`. The operation is atomic.
Note that:

View File

@ -83,7 +83,7 @@ namespace SettingsChangesHistory
/// For newly added setting choose the most appropriate previous_value (for example, if new setting
/// controls new feature and it's 'true' by default, use 'false' as previous_value).
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
static const std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
{"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
{"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},

View File

@ -543,6 +543,7 @@ template <typename DataType> constexpr bool IsDataTypeNumber = false;
template <typename DataType> constexpr bool IsDataTypeDateOrDateTime = false;
template <typename DataType> constexpr bool IsDataTypeDate = false;
template <typename DataType> constexpr bool IsDataTypeEnum = false;
template <typename DataType> constexpr bool IsDataTypeStringOrFixedString = false;
template <typename DataType> constexpr bool IsDataTypeDecimalOrNumber = IsDataTypeDecimal<DataType> || IsDataTypeNumber<DataType>;
@ -556,6 +557,8 @@ class DataTypeDate;
class DataTypeDate32;
class DataTypeDateTime;
class DataTypeDateTime64;
class DataTypeString;
class DataTypeFixedString;
template <is_decimal T> constexpr bool IsDataTypeDecimal<DataTypeDecimal<T>> = true;
@ -572,6 +575,9 @@ template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDate32> = tru
template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime> = true;
template <> inline constexpr bool IsDataTypeDateOrDateTime<DataTypeDateTime64> = true;
template <> inline constexpr bool IsDataTypeStringOrFixedString<DataTypeString> = true;
template <> inline constexpr bool IsDataTypeStringOrFixedString<DataTypeFixedString> = true;
template <typename T>
class DataTypeEnum;

View File

@ -709,7 +709,7 @@ bool tryParseImpl(typename DataType::FieldType & x, ReadBuffer & rb, const DateL
else
return tryReadFloatTextFast(x, rb);
}
else /*if constexpr (is_integer_v<typename DataType::FieldType>)*/
else /*if constexpr (is_integral_v<typename DataType::FieldType>)*/
return tryReadIntText(x, rb);
}
@ -814,6 +814,16 @@ enum class ConvertFromStringParsingMode : uint8_t
BestEffortUS
};
struct AccurateConvertStrategyAdditions
{
UInt32 scale { 0 };
};
struct AccurateOrNullConvertStrategyAdditions
{
UInt32 scale { 0 };
};
template <typename FromDataType, typename ToDataType, typename Name,
ConvertFromStringExceptionMode exception_mode, ConvertFromStringParsingMode parsing_mode>
struct ConvertThroughParsing
@ -1020,7 +1030,13 @@ struct ConvertThroughParsing
break;
}
}
parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing);
if constexpr (std::is_same_v<Additions, AccurateConvertStrategyAdditions>)
{
if (!tryParseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing))
throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse string to type {}", TypeName<typename ToDataType::FieldType>);
}
else
parseImpl<ToDataType>(vec_to[i], read_buffer, local_time_zone, precise_float_parsing);
} while (false);
}
}
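
For illustration only, a minimal standalone sketch of the compile-time strategy dispatch used in the hunk above: when the additions type is the "accurate" one, a failed parse becomes an error instead of silently producing a default value. All names in it (`AccurateStrategy`, `DefaultStrategy`, `tryParseInt64`, `convertFromString`) are hypothetical stand-ins, and `std::runtime_error` stands in for `DB::Exception` with `ErrorCodes::CANNOT_PARSE_TEXT`; it does not reproduce ClickHouse internals.

```cpp
// Hypothetical sketch of dispatching on the Additions type, as in ConvertThroughParsing.
#include <charconv>
#include <cstdint>
#include <stdexcept>
#include <string_view>
#include <type_traits>

struct AccurateStrategy {};   // stand-in for AccurateConvertStrategyAdditions
struct DefaultStrategy {};    // stand-in for the default (best-effort) path

/// Toy equivalent of tryParseImpl: returns false instead of throwing.
static bool tryParseInt64(std::string_view s, int64_t & out)
{
    auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), out);
    return ec == std::errc() && ptr == s.data() + s.size();
}

template <typename Additions>
int64_t convertFromString(std::string_view s)
{
    int64_t value = 0;
    if constexpr (std::is_same_v<Additions, AccurateStrategy>)
    {
        /// accurateCast-style behaviour: any unparsed input is an error.
        if (!tryParseInt64(s, value))
            throw std::runtime_error("Cannot parse string");
    }
    else
    {
        /// Best-effort path: keep the default (0) on failure.
        tryParseInt64(s, value);
    }
    return value;
}

int main()
{
    int64_t ok = convertFromString<AccurateStrategy>("42");    // 42
    int64_t lax = convertFromString<DefaultStrategy>("oops");  // 0, no throw
    bool threw = false;
    try
    {
        convertFromString<AccurateStrategy>("oops");            // throws
    }
    catch (const std::runtime_error &)
    {
        threw = true;
    }
    return (ok == 42 && lax == 0 && threw) ? 0 : 1;
}
```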
@ -1120,16 +1136,6 @@ struct ConvertThroughParsing
/// Function toUnixTimestamp has exactly the same implementation as toDateTime of String type.
struct NameToUnixTimestamp { static constexpr auto name = "toUnixTimestamp"; };
struct AccurateConvertStrategyAdditions
{
UInt32 scale { 0 };
};
struct AccurateOrNullConvertStrategyAdditions
{
UInt32 scale { 0 };
};
enum class BehaviourOnErrorFromString : uint8_t
{
ConvertDefaultBehaviorTag,
@ -3174,8 +3180,11 @@ private:
{
TypeIndex from_type_index = from_type->getTypeId();
WhichDataType which(from_type_index);
TypeIndex to_type_index = to_type->getTypeId();
WhichDataType to(to_type_index);
bool can_apply_accurate_cast = (cast_type == CastType::accurate || cast_type == CastType::accurateOrNull)
&& (which.isInt() || which.isUInt() || which.isFloat());
can_apply_accurate_cast |= cast_type == CastType::accurate && which.isStringOrFixedString() && to.isNativeInteger();
FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior = default_date_time_overflow_behavior;
if (context)
@ -3260,6 +3269,20 @@ private:
return true;
}
}
else if constexpr (IsDataTypeStringOrFixedString<LeftDataType>)
{
if constexpr (IsDataTypeNumber<RightDataType>)
{
chassert(wrapper_cast_type == CastType::accurate);
result_column = ConvertImpl<LeftDataType, RightDataType, FunctionCastName>::execute(
arguments,
result_type,
input_rows_count,
BehaviourOnErrorFromString::ConvertDefaultBehaviorTag,
AccurateConvertStrategyAdditions());
}
return true;
}
return false;
});

View File

@ -240,4 +240,34 @@ bool SplitTokenExtractor::nextInStringLike(const char * data, size_t length, siz
return !bad_token && !token.empty();
}
void SplitTokenExtractor::substringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter, bool is_prefix, bool is_suffix) const
{
size_t cur = 0;
size_t token_start = 0;
size_t token_len = 0;
while (cur < length && nextInString(data, length, &cur, &token_start, &token_len))
// To avoid updating the filter with incomplete tokens,
// the first token is ignored unless the substring is a prefix, and
// the last token is ignored unless the substring is a suffix.
if ((token_start > 0 || is_prefix) && (token_start + token_len < length || is_suffix))
bloom_filter.add(data + token_start, token_len);
}
void SplitTokenExtractor::substringToGinFilter(const char * data, size_t length, GinFilter & gin_filter, bool is_prefix, bool is_suffix) const
{
gin_filter.setQueryString(data, length);
size_t cur = 0;
size_t token_start = 0;
size_t token_len = 0;
while (cur < length && nextInString(data, length, &cur, &token_start, &token_len))
// To avoid updating the filter with incomplete tokens,
// the first token is ignored unless the substring is a prefix, and
// the last token is ignored unless the substring is a suffix.
if ((token_start > 0 || is_prefix) && (token_start + token_len < length || is_suffix))
gin_filter.addTerm(data + token_start, token_len);
}
}
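
As a rough, self-contained illustration of the skipping rule described in the comments above (not ClickHouse code): a toy whitespace tokenizer that drops the first token unless the needle is a prefix and drops the last token unless it is a suffix, so partial tokens at the cut edges never reach the filter. The helper name `toySubstringTokens` and the use of a `std::vector` in place of a `BloomFilter`/`GinFilter` are assumptions made purely for the sketch.

```cpp
// Hypothetical illustration of the prefix/suffix skipping condition used by
// substringToBloomFilter/substringToGinFilter, with a plain whitespace tokenizer.
#include <cstddef>
#include <iostream>
#include <string>
#include <string_view>
#include <vector>

std::vector<std::string> toySubstringTokens(std::string_view needle, bool is_prefix, bool is_suffix)
{
    std::vector<std::string> kept;
    std::size_t cur = 0;
    while (cur < needle.size())
    {
        std::size_t token_start = needle.find_first_not_of(' ', cur);
        if (token_start == std::string_view::npos)
            break;
        std::size_t token_end = needle.find(' ', token_start);
        if (token_end == std::string_view::npos)
            token_end = needle.size();
        cur = token_end;

        /// Same condition as in the functions above:
        /// a token touching the left edge may be the tail of a longer word (skip unless prefix),
        /// a token touching the right edge may be the head of a longer word (skip unless suffix).
        if ((token_start > 0 || is_prefix) && (token_end < needle.size() || is_suffix))
            kept.emplace_back(std::string(needle.substr(token_start, token_end - token_start)));
    }
    return kept;
}

int main()
{
    /// startsWith-style search for "Hello Click": is_prefix = true, is_suffix = false.
    /// Keeps "Hello", drops the possibly-incomplete trailing "Click" (e.g. "ClickHouse").
    for (const auto & token : toySubstringTokens("Hello Click", /*is_prefix=*/ true, /*is_suffix=*/ false))
        std::cout << token << '\n';
}
```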

View File

@ -28,8 +28,22 @@ struct ITokenExtractor
/// It skips unescaped `%` and `_` and supports escaping symbols, but it is less lightweight.
virtual bool nextInStringLike(const char * data, size_t length, size_t * pos, String & out) const = 0;
/// Updates Bloom filter from exact-match string filter value
virtual void stringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0;
/// Updates Bloom filter from substring-match string filter value.
/// An `ITokenExtractor` implementation may decide to skip certain
/// tokens depending on whether the substring is a prefix or a suffix.
virtual void substringToBloomFilter(
const char * data,
size_t length,
BloomFilter & bloom_filter,
bool is_prefix [[maybe_unused]],
bool is_suffix [[maybe_unused]]) const
{
stringToBloomFilter(data, length, bloom_filter);
}
virtual void stringPaddedToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const
{
stringToBloomFilter(data, length, bloom_filter);
@ -37,8 +51,22 @@ struct ITokenExtractor
virtual void stringLikeToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter) const = 0;
/// Updates GIN filter from exact-match string filter value
virtual void stringToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const = 0;
/// Updates GIN filter from substring-match string filter value.
/// An `ITokenExtractor` implementation may decide to skip certain
/// tokens depending on whether the substring is a prefix or a suffix.
virtual void substringToGinFilter(
const char * data,
size_t length,
GinFilter & gin_filter,
bool is_prefix [[maybe_unused]],
bool is_suffix [[maybe_unused]]) const
{
stringToGinFilter(data, length, gin_filter);
}
virtual void stringPaddedToGinFilter(const char * data, size_t length, GinFilter & gin_filter) const
{
stringToGinFilter(data, length, gin_filter);
@ -148,6 +176,11 @@ struct SplitTokenExtractor final : public ITokenExtractorHelper<SplitTokenExtrac
bool nextInStringLike(const char * data, size_t length, size_t * __restrict pos, String & token) const override;
void substringToBloomFilter(const char * data, size_t length, BloomFilter & bloom_filter, bool is_prefix, bool is_suffix) const override;
void substringToGinFilter(const char * data, size_t length, GinFilter & gin_filter, bool is_prefix, bool is_suffix) const override;
};
}

View File

@ -59,16 +59,6 @@ std::string DataPartStorageOnDiskBase::getRelativePath() const
return fs::path(root_path) / part_dir / "";
}
std::string DataPartStorageOnDiskBase::getParentDirectory() const
{
/// Cut last "/" if it exists (it shouldn't). Otherwise fs::path behave differently.
fs::path part_dir_without_slash = part_dir.ends_with("/") ? part_dir.substr(0, part_dir.size() - 1) : part_dir;
if (part_dir_without_slash.has_parent_path())
return part_dir_without_slash.parent_path();
return "";
}
std::optional<String> DataPartStorageOnDiskBase::getRelativePathForPrefix(LoggerPtr log, const String & prefix, bool detached, bool broken) const
{
assert(!broken || detached);
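
A small standalone demonstration (not ClickHouse code) of the trailing-slash behaviour the removed comment refers to: `std::filesystem::path` treats a path ending in "/" as having an empty last element, so `parent_path()` and `filename()` give different answers with and without the trailing separator. The example paths are made up.

```cpp
// Why the trailing "/" is cut before calling parent_path().
#include <filesystem>
#include <iostream>

int main()
{
    namespace fs = std::filesystem;

    fs::path with_slash = "detached/all_1_5_1/";
    fs::path without_slash = "detached/all_1_5_1";

    std::cout << with_slash.parent_path() << '\n';     // prints "detached/all_1_5_1"
    std::cout << without_slash.parent_path() << '\n';  // prints "detached"
    std::cout << with_slash.filename() << '\n';        // prints "" (empty)
    std::cout << without_slash.filename() << '\n';     // prints "all_1_5_1"
}
```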
@ -684,9 +674,9 @@ void DataPartStorageOnDiskBase::remove(
if (!has_delete_prefix)
{
auto parent_path = getParentDirectory();
if (!parent_path.empty())
if (part_dir_without_slash.has_parent_path())
{
auto parent_path = part_dir_without_slash.parent_path();
if (parent_path == MergeTreeData::DETACHED_DIR_NAME)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
@ -694,7 +684,7 @@ void DataPartStorageOnDiskBase::remove(
part_dir,
root_path);
part_dir_without_slash = fs::path(parent_path) / ("delete_tmp_" + std::string{part_dir_without_slash.filename()});
part_dir_without_slash = parent_path / ("delete_tmp_" + std::string{part_dir_without_slash.filename()});
}
else
{

View File

@ -20,7 +20,6 @@ public:
std::string getRelativePath() const override;
std::string getPartDirectory() const override;
std::string getFullRootPath() const override;
std::string getParentDirectory() const override;
Poco::Timestamp getLastModified() const override;
UInt64 calculateTotalSizeOnDisk() const override;

View File

@ -96,12 +96,11 @@ public:
virtual MergeTreeDataPartStorageType getType() const = 0;
/// Methods to get path components of a data part.
virtual std::string getFullPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving/all_1_5_1'
virtual std::string getRelativePath() const = 0; /// 'database/table/moving/all_1_5_1'
virtual std::string getPartDirectory() const = 0; /// 'all_1_5_1'
virtual std::string getFullRootPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving'
virtual std::string getParentDirectory() const = 0; /// '' (or 'detached' for 'detached/all_1_5_1')
/// Can add it if needed /// 'database/table/moving'
virtual std::string getFullPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving/all_1_5_1'
virtual std::string getRelativePath() const = 0; /// 'database/table/moving/all_1_5_1'
virtual std::string getPartDirectory() const = 0; /// 'all_1_5_1'
virtual std::string getFullRootPath() const = 0; /// '/var/lib/clickhouse/data/database/table/moving'
/// Can add it if needed /// 'database/table/moving'
/// virtual std::string getRelativeRootPath() const = 0;
/// Get a storage for projection.

View File

@ -737,11 +737,7 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks
{
/// Don't scare people with broken part error
if (!isRetryableException(std::current_exception()))
{
auto message = getCurrentExceptionMessage(true);
LOG_ERROR(storage.log, "Part {} is broken and need manual correction. Reason: {}",
getDataPartStorage().getFullPath(), message);
}
LOG_ERROR(storage.log, "Part {} is broken and need manual correction", getDataPartStorage().getFullPath());
// There could be conditions that data part to be loaded is broken, but some of meta infos are already written
// into meta data before exception, need to clean them all.

View File

@ -3894,7 +3894,7 @@ void MergeTreeData::checkPartDynamicColumns(MutableDataPartPtr & part, DataParts
}
}
void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename, bool rename_in_transaction)
void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename)
{
part->is_temp = false;
part->setState(DataPartState::PreActive);
@ -3906,15 +3906,12 @@ void MergeTreeData::preparePartForCommit(MutableDataPartPtr & part, Transaction
return !may_be_cleaned_up || temporary_parts.contains(dir_name);
}());
if (need_rename && !rename_in_transaction)
if (need_rename)
part->renameTo(part->name, true);
LOG_TEST(log, "preparePartForCommit: inserting {} into data_parts_indexes", part->getNameWithState());
data_parts_indexes.insert(part);
if (rename_in_transaction)
out_transaction.addPart(part, need_rename);
else
out_transaction.addPart(part, /* need_rename= */ false);
out_transaction.addPart(part);
}
bool MergeTreeData::addTempPart(
@ -3963,8 +3960,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl(
MutableDataPartPtr & part,
Transaction & out_transaction,
DataPartsLock & lock,
DataPartsVector * out_covered_parts,
bool rename_in_transaction)
DataPartsVector * out_covered_parts)
{
LOG_TRACE(log, "Renaming temporary part {} to {} with tid {}.", part->getDataPartStorage().getPartDirectory(), part->name, out_transaction.getTID());
@ -4003,7 +3999,7 @@ bool MergeTreeData::renameTempPartAndReplaceImpl(
/// All checks are passed. Now we can rename the part on disk.
/// So, we maintain invariant: if a non-temporary part in filesystem then it is in data_parts
preparePartForCommit(part, out_transaction, /* need_rename= */ true, rename_in_transaction);
preparePartForCommit(part, out_transaction, /* need_rename */ true);
if (out_covered_parts)
{
@ -4018,31 +4014,29 @@ bool MergeTreeData::renameTempPartAndReplaceUnlocked(
MutableDataPartPtr & part,
Transaction & out_transaction,
DataPartsLock & lock,
bool rename_in_transaction)
DataPartsVector * out_covered_parts)
{
return renameTempPartAndReplaceImpl(part, out_transaction, lock, /*out_covered_parts=*/ nullptr, rename_in_transaction);
return renameTempPartAndReplaceImpl(part, out_transaction, lock, out_covered_parts);
}
MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(
MutableDataPartPtr & part,
Transaction & out_transaction,
bool rename_in_transaction)
Transaction & out_transaction)
{
auto part_lock = lockParts();
DataPartsVector covered_parts;
renameTempPartAndReplaceImpl(part, out_transaction, part_lock, &covered_parts, rename_in_transaction);
renameTempPartAndReplaceImpl(part, out_transaction, part_lock, &covered_parts);
return covered_parts;
}
bool MergeTreeData::renameTempPartAndAdd(
MutableDataPartPtr & part,
Transaction & out_transaction,
DataPartsLock & lock,
bool rename_in_transaction)
DataPartsLock & lock)
{
DataPartsVector covered_parts;
if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts, rename_in_transaction))
if (!renameTempPartAndReplaceImpl(part, out_transaction, lock, &covered_parts))
return false;
if (!covered_parts.empty())
@ -4083,9 +4077,9 @@ void MergeTreeData::removePartsFromWorkingSet(MergeTreeTransaction * txn, const
resetObjectColumnsFromActiveParts(acquired_lock);
}
void MergeTreeData::removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove, DataPartsLock * acquired_lock)
void MergeTreeData::removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove)
{
auto lock = (acquired_lock) ? DataPartsLock() : lockParts();
auto lock = lockParts();
for (const auto & part : remove)
{
@ -4251,9 +4245,8 @@ MergeTreeData::PartsToRemoveFromZooKeeper MergeTreeData::removePartsInRangeFromW
auto [new_data_part, tmp_dir_holder] = createEmptyPart(empty_info, partition, empty_part_name, NO_TRANSACTION_PTR);
MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW);
renameTempPartAndAdd(new_data_part, transaction, lock, /*rename_in_transaction=*/ true); /// All covered parts must be already removed
renameTempPartAndAdd(new_data_part, transaction, lock); /// All covered parts must be already removed
transaction.renameParts();
/// It will add the empty part to the set of Outdated parts without making it Active (exactly what we need)
transaction.rollback(&lock);
new_data_part->remove_time.store(0, std::memory_order_relaxed);
@ -6624,54 +6617,25 @@ TransactionID MergeTreeData::Transaction::getTID() const
return Tx::PrehistoricTID;
}
void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part, bool need_rename)
void MergeTreeData::Transaction::addPart(MutableDataPartPtr & part)
{
precommitted_parts.insert(part);
if (need_rename)
precommitted_parts_need_rename.insert(part);
}
void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
{
if (!isEmpty())
{
for (const auto & part : precommitted_parts)
part->version.creation_csn.store(Tx::RolledBackCSN);
auto non_detached_precommitted_parts = precommitted_parts;
/// Remove detached parts from working set.
///
/// It is possible to have detached parts here, only when rename (in
/// commit()) of detached parts had been broken (i.e. during ATTACH),
/// i.e. the part itself is broken.
DataPartsVector detached_precommitted_parts;
for (auto it = non_detached_precommitted_parts.begin(); it != non_detached_precommitted_parts.end();)
{
const auto & part = *it;
if (part->getDataPartStorage().getParentDirectory() == DETACHED_DIR_NAME)
{
detached_precommitted_parts.push_back(part);
it = non_detached_precommitted_parts.erase(it);
}
else
++it;
}
WriteBufferFromOwnString buf;
buf << "Removing parts:";
for (const auto & part : non_detached_precommitted_parts)
for (const auto & part : precommitted_parts)
buf << " " << part->getDataPartStorage().getPartDirectory();
buf << ".";
if (!detached_precommitted_parts.empty())
{
buf << " Rollbacking parts state to temporary and removing from working set:";
for (const auto & part : detached_precommitted_parts)
buf << " " << part->getDataPartStorage().getPartDirectory();
buf << ".";
}
LOG_DEBUG(data.log, "Undoing transaction {}. {}", getTID(), buf.str());
for (const auto & part : precommitted_parts)
part->version.creation_csn.store(Tx::RolledBackCSN);
/// It would be much better with TSA...
auto our_lock = (lock) ? DataPartsLock() : data.lockParts();
@ -6681,7 +6645,7 @@ void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
if (!data.all_data_dropped)
{
Strings part_names;
for (const auto & part : non_detached_precommitted_parts)
for (const auto & part : precommitted_parts)
part_names.emplace_back(part->name);
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are some PreActive parts ({}) to rollback, "
"but data parts set is empty and table {} was not dropped. It's a bug",
@ -6690,12 +6654,8 @@ void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
}
else
{
data.removePartsFromWorkingSetImmediatelyAndSetTemporaryState(
detached_precommitted_parts,
&our_lock);
data.removePartsFromWorkingSet(txn,
DataPartsVector(non_detached_precommitted_parts.begin(), non_detached_precommitted_parts.end()),
DataPartsVector(precommitted_parts.begin(), precommitted_parts.end()),
/* clear_without_timeout = */ true, &our_lock);
}
}
@ -6705,16 +6665,7 @@ void MergeTreeData::Transaction::rollback(DataPartsLock * lock)
void MergeTreeData::Transaction::clear()
{
chassert(precommitted_parts.size() >= precommitted_parts_need_rename.size());
precommitted_parts.clear();
precommitted_parts_need_rename.clear();
}
void MergeTreeData::Transaction::renameParts()
{
for (const auto & part_need_rename : precommitted_parts_need_rename)
part_need_rename->renameTo(part_need_rename->name, true);
precommitted_parts_need_rename.clear();
}
MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(DataPartsLock * acquired_parts_lock)
@ -6723,9 +6674,6 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(DataPartsLock
if (!isEmpty())
{
if (!precommitted_parts_need_rename.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Parts not renamed");
auto settings = data.getSettings();
auto parts_lock = acquired_parts_lock ? DataPartsLock() : data.lockParts();
auto * owing_parts_lock = acquired_parts_lock ? acquired_parts_lock : &parts_lock;
@ -6734,8 +6682,6 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(DataPartsLock
if (part->getDataPartStorage().hasActiveTransaction())
part->getDataPartStorage().commitTransaction();
renameParts();
if (txn)
{
for (const auto & part : precommitted_parts)

View File

@ -255,9 +255,7 @@ public:
DataPartsVector commit(DataPartsLock * acquired_parts_lock = nullptr);
void renameParts();
void addPart(MutableDataPartPtr & part, bool need_rename);
void addPart(MutableDataPartPtr & part);
void rollback(DataPartsLock * lock = nullptr);
@ -288,9 +286,9 @@ public:
MergeTreeData & data;
MergeTreeTransaction * txn;
MutableDataParts precommitted_parts;
MutableDataParts precommitted_parts_need_rename;
MutableDataParts locked_parts;
};
using TransactionUniquePtr = std::unique_ptr<Transaction>;
@ -590,27 +588,25 @@ public:
bool renameTempPartAndAdd(
MutableDataPartPtr & part,
Transaction & transaction,
DataPartsLock & lock,
bool rename_in_transaction);
DataPartsLock & lock);
/// The same as renameTempPartAndAdd but the block range of the part can contain existing parts.
/// Returns all parts covered by the added part (in ascending order).
DataPartsVector renameTempPartAndReplace(
MutableDataPartPtr & part,
Transaction & out_transaction,
bool rename_in_transaction);
Transaction & out_transaction);
/// Unlocked version of previous one. Useful when added multiple parts with a single lock.
bool renameTempPartAndReplaceUnlocked(
MutableDataPartPtr & part,
Transaction & out_transaction,
DataPartsLock & lock,
bool rename_in_transaction);
DataPartsVector * out_covered_parts = nullptr);
/// Remove parts from working set immediately (without wait for background
/// process). Transfer part state to temporary. Have very limited usage only
/// for new parts which aren't already present in table.
void removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove, DataPartsLock * acquired_lock = nullptr);
void removePartsFromWorkingSetImmediatelyAndSetTemporaryState(const DataPartsVector & remove);
/// Removes parts from the working set parts.
/// Parts in add must already be in data_parts with PreActive, Active, or Outdated states.
@ -1606,10 +1602,7 @@ private:
/// Preparing itself to be committed in memory: fill some fields inside part, add it to data_parts_indexes
/// in precommitted state and to transaction
///
/// @param need_rename - rename the part
/// @param rename_in_transaction - if set, the rename will be done as part of transaction (without holding DataPartsLock), otherwise inplace (when it does not make sense).
void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename, bool rename_in_transaction = false);
void preparePartForCommit(MutableDataPartPtr & part, Transaction & out_transaction, bool need_rename);
/// Low-level method for preparing parts for commit (in-memory).
/// FIXME Merge MergeTreeTransaction and Transaction
@ -1617,8 +1610,7 @@ private:
MutableDataPartPtr & part,
Transaction & out_transaction,
DataPartsLock & lock,
DataPartsVector * out_covered_parts,
bool rename_in_transaction);
DataPartsVector * out_covered_parts);
/// RAII Wrapper for atomic work with currently moving parts
/// Acquire them in constructor and remove them in destructor

View File

@ -748,10 +748,7 @@ MergeTreeData::DataPartPtr MergeTreeDataMergerMutator::renameMergedTemporaryPart
"but transactions were enabled for this table");
/// Rename new part, add to the set and remove original parts.
auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction, /*rename_in_transaction=*/ true);
/// Explicitly rename part while still holding the lock for tmp folder to avoid cleanup
out_transaction.renameParts();
auto replaced_parts = data.renameTempPartAndReplace(new_data_part, out_transaction);
/// Let's check that all original parts have been deleted and only them.
if (replaced_parts.size() != parts.size())

View File

@ -566,7 +566,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
out.function = RPNElement::FUNCTION_EQUALS;
out.bloom_filter = std::make_unique<BloomFilter>(params);
const auto & value = const_value.get<String>();
token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, true, false);
return true;
}
else if (function_name == "endsWith")
@ -575,7 +575,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
out.function = RPNElement::FUNCTION_EQUALS;
out.bloom_filter = std::make_unique<BloomFilter>(params);
const auto & value = const_value.get<String>();
token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter);
token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, false, true);
return true;
}
else if (function_name == "multiSearchAny"
@ -596,7 +596,15 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
bloom_filters.back().emplace_back(params);
const auto & value = element.get<String>();
token_extractor->stringToBloomFilter(value.data(), value.size(), bloom_filters.back().back());
if (function_name == "multiSearchAny")
{
token_extractor->substringToBloomFilter(value.data(), value.size(), bloom_filters.back().back(), false, false);
}
else
{
token_extractor->stringToBloomFilter(value.data(), value.size(), bloom_filters.back().back());
}
}
out.set_bloom_filters = std::move(bloom_filters);
return true;
@ -625,12 +633,12 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals(
for (const auto & alternative : alternatives)
{
bloom_filters.back().emplace_back(params);
token_extractor->stringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back());
token_extractor->substringToBloomFilter(alternative.data(), alternative.size(), bloom_filters.back().back(), false, false);
}
out.set_bloom_filters = std::move(bloom_filters);
}
else
token_extractor->stringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter);
token_extractor->substringToBloomFilter(required_substring.data(), required_substring.size(), *out.bloom_filter, false, false);
return true;
}

View File

@ -595,7 +595,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
out.function = RPNElement::FUNCTION_EQUALS;
out.gin_filter = std::make_unique<GinFilter>(params);
const auto & value = const_value.get<String>();
token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter);
token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, true, false);
return true;
}
else if (function_name == "endsWith")
@ -604,7 +604,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
out.function = RPNElement::FUNCTION_EQUALS;
out.gin_filter = std::make_unique<GinFilter>(params);
const auto & value = const_value.get<String>();
token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter);
token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, false, true);
return true;
}
else if (function_name == "multiSearchAny")
@ -622,7 +622,7 @@ bool MergeTreeConditionFullText::traverseASTEquals(
gin_filters.back().emplace_back(params);
const auto & value = element.get<String>();
token_extractor->stringToGinFilter(value.data(), value.size(), gin_filters.back().back());
token_extractor->substringToGinFilter(value.data(), value.size(), gin_filters.back().back(), false, false);
}
out.set_gin_filters = std::move(gin_filters);
return true;
@ -650,14 +650,14 @@ bool MergeTreeConditionFullText::traverseASTEquals(
for (const auto & alternative : alternatives)
{
gin_filters.back().emplace_back(params);
token_extractor->stringToGinFilter(alternative.data(), alternative.size(), gin_filters.back().back());
token_extractor->substringToGinFilter(alternative.data(), alternative.size(), gin_filters.back().back(), false, false);
}
out.set_gin_filters = std::move(gin_filters);
}
else
{
out.gin_filter = std::make_unique<GinFilter>(params);
token_extractor->stringToGinFilter(required_substring.data(), required_substring.size(), *out.gin_filter);
token_extractor->substringToGinFilter(required_substring.data(), required_substring.size(), *out.gin_filter, false, false);
}
return true;

View File

@ -186,8 +186,7 @@ void MergeTreeSink::finishDelayedChunk()
}
}
/// FIXME
added = storage.renameTempPartAndAdd(part, transaction, lock, /*rename_in_transaction=*/ false);
added = storage.renameTempPartAndAdd(part, transaction, lock);
transaction.commit(&lock);
}

View File

@ -236,11 +236,10 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit
if (data_part_storage.hasActiveTransaction())
data_part_storage.precommitTransaction();
storage.renameTempPartAndReplace(new_part, *transaction_ptr, /*rename_in_transaction=*/ true);
storage.renameTempPartAndReplace(new_part, *transaction_ptr);
try
{
transaction_ptr->renameParts();
storage.checkPartChecksumsAndCommit(*transaction_ptr, new_part, mutate_task->getHardlinkedFiles());
}
catch (const Exception & e)

View File

@ -97,8 +97,7 @@ bool MutatePlainMergeTreeTask::executeStep()
MergeTreeData::Transaction transaction(storage, merge_mutate_entry->txn.get());
/// FIXME Transactions: it's too optimistic, better to lock parts before starting transaction
storage.renameTempPartAndReplace(new_part, transaction, /*rename_in_transaction=*/ true);
transaction.renameParts();
storage.renameTempPartAndReplace(new_part, transaction);
transaction.commit();
storage.updateMutationEntriesErrors(future_part, true, "");

View File

@ -888,7 +888,7 @@ std::pair<std::vector<String>, bool> ReplicatedMergeTreeSinkImpl<async_insert>::
try
{
auto lock = storage.lockParts();
storage.renameTempPartAndAdd(part, transaction, lock, /*rename_in_transaction=*/ false);
storage.renameTempPartAndAdd(part, transaction, lock);
}
catch (const Exception & e)
{
@ -903,9 +903,6 @@ std::pair<std::vector<String>, bool> ReplicatedMergeTreeSinkImpl<async_insert>::
throw;
}
/// Rename parts before committing to ZooKeeper without holding DataPartsLock.
transaction.renameParts();
ThreadFuzzer::maybeInjectSleep();
fiu_do_on(FailPoints::replicated_merge_tree_commit_zk_fail_after_op, { zookeeper->forceFailureAfterOperation(); });

View File

@ -1788,7 +1788,7 @@ void StorageMergeTree::renameAndCommitEmptyParts(MutableDataPartsVector & new_pa
for (auto & part: new_parts)
{
DataPartsVector covered_parts_by_one_part = renameTempPartAndReplace(part, transaction, /*rename_in_transaction=*/ true);
DataPartsVector covered_parts_by_one_part = renameTempPartAndReplace(part, transaction);
if (covered_parts_by_one_part.size() > 1)
throw Exception(ErrorCodes::LOGICAL_ERROR,
@ -1798,10 +1798,10 @@ void StorageMergeTree::renameAndCommitEmptyParts(MutableDataPartsVector & new_pa
std::move(covered_parts_by_one_part.begin(), covered_parts_by_one_part.end(), std::back_inserter(covered_parts));
}
LOG_INFO(log, "Remove {} parts by covering them with empty {} parts. With txn {}.",
covered_parts.size(), new_parts.size(), transaction.getTID());
transaction.renameParts();
transaction.commit();
/// Remove covered parts without waiting for old_parts_lifetime seconds.
@ -2064,7 +2064,7 @@ PartitionCommandsResultInfo StorageMergeTree::attachPartition(
{
auto lock = lockParts();
fillNewPartNameAndResetLevel(loaded_parts[i], lock);
renameTempPartAndAdd(loaded_parts[i], transaction, lock, /*rename_in_transaction=*/ false);
renameTempPartAndAdd(loaded_parts[i], transaction, lock);
transaction.commit(&lock);
}
@ -2180,9 +2180,8 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con
for (auto part : dst_parts)
{
fillNewPartName(part, data_parts_lock);
renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock, /*rename_in_transaction=*/ true);
renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock);
}
transaction.renameParts();
/// Populate transaction
transaction.commit(&data_parts_lock);
@ -2285,9 +2284,10 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const
for (auto & part : dst_parts)
{
dest_table_storage->fillNewPartName(part, dest_data_parts_lock);
dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock, /*rename_in_transaction=*/ false);
dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock);
}
removePartsFromWorkingSet(local_context->getCurrentTransaction().get(), src_parts, true, src_data_parts_lock);
transaction.commit(&src_data_parts_lock);
}
@ -2447,7 +2447,7 @@ void StorageMergeTree::attachRestoredParts(MutableDataPartsVector && parts)
{
auto lock = lockParts();
fillNewPartName(part, lock);
renameTempPartAndAdd(part, transaction, lock, /*rename_in_transaction=*/ false);
renameTempPartAndAdd(part, transaction, lock);
transaction.commit(&lock);
}
}

View File

@ -2093,8 +2093,7 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry)
Transaction transaction(*this, NO_TRANSACTION_RAW);
part->version.setCreationTID(Tx::PrehistoricTID, nullptr);
renameTempPartAndReplace(part, transaction, /*rename_in_transaction=*/ true);
transaction.renameParts();
renameTempPartAndReplace(part, transaction);
checkPartChecksumsAndCommit(transaction, part);
writePartLog(PartLogElement::Type::NEW_PART, {}, 0 /** log entry is fake so we don't measure the time */,
@ -2883,11 +2882,11 @@ bool StorageReplicatedMergeTree::executeReplaceRange(LogEntry & entry)
Coordination::Requests ops;
for (PartDescriptionPtr & part_desc : final_parts)
{
renameTempPartAndReplace(part_desc->res_part, transaction, /*rename_in_transaction=*/ true);
renameTempPartAndReplace(part_desc->res_part, transaction);
getCommitPartOps(ops, part_desc->res_part);
lockSharedData(*part_desc->res_part, /*replace_existing_lock=*/ true, part_desc->hardlinked_files);
lockSharedData(*part_desc->res_part, /* replace_existing_lock */ true, part_desc->hardlinked_files);
}
transaction.renameParts();
if (!ops.empty())
@ -4959,8 +4958,7 @@ bool StorageReplicatedMergeTree::fetchPart(
if (!to_detached)
{
Transaction transaction(*this, NO_TRANSACTION_RAW);
renameTempPartAndReplace(part, transaction, /*rename_in_transaction=*/ true);
transaction.renameParts();
renameTempPartAndReplace(part, transaction);
chassert(!part_to_clone || !is_zero_copy_part(part));
replaced_parts = checkPartChecksumsAndCommit(transaction, part, /*hardlinked_files*/ {}, /*replace_zero_copy_lock*/ true);
@ -8204,9 +8202,8 @@ void StorageReplicatedMergeTree::replacePartitionFrom(
{
auto data_parts_lock = lockParts();
for (auto & part : dst_parts)
renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock, /*rename_in_transaction=*/ true);
renameTempPartAndReplaceUnlocked(part, transaction, data_parts_lock);
}
transaction.renameParts();
for (const auto & dst_part : dst_parts)
lockSharedData(*dst_part, false, /*hardlinked_files*/ {});
@ -8481,7 +8478,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
auto dest_data_parts_lock = dest_table_storage->lockParts();
for (auto & part : dst_parts)
dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock, /*rename_in_transaction=*/ false);
dest_table_storage->renameTempPartAndReplaceUnlocked(part, transaction, dest_data_parts_lock);
for (const auto & dst_part : dst_parts)
dest_table_storage->lockSharedData(*dst_part, false, /*hardlinked_files*/ {});
@ -10114,8 +10111,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP
try
{
MergeTreeData::Transaction transaction(*this, NO_TRANSACTION_RAW);
auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction, /*rename_in_transaction=*/ true);
transaction.renameParts();
auto replaced_parts = renameTempPartAndReplace(new_data_part, transaction);
if (!replaced_parts.empty())
{

View File

@ -10,9 +10,21 @@ from typing import Any, Callable, List, Optional, Union
import requests
import get_robot_token as grt # we need an updated ROBOT_TOKEN
from ci_config import CI_CONFIG
try:
# A workaround for scripts using this downloading module without required deps
import get_robot_token as grt # we need an updated ROBOT_TOKEN
except ImportError:
class grt: # type: ignore
ROBOT_TOKEN = None
@staticmethod
def get_best_robot_token() -> str:
return ""
DOWNLOAD_RETRIES_COUNT = 5
@ -63,15 +75,10 @@ def get_gh_api(
"""
def set_auth_header():
if "headers" in kwargs:
if "Authorization" not in kwargs["headers"]:
kwargs["headers"][
"Authorization"
] = f"Bearer {grt.get_best_robot_token()}"
else:
kwargs["headers"] = {
"Authorization": f"Bearer {grt.get_best_robot_token()}"
}
headers = kwargs.get("headers", {})
if "Authorization" not in headers:
headers["Authorization"] = f"Bearer {grt.get_best_robot_token()}"
kwargs["headers"] = headers
if grt.ROBOT_TOKEN is not None:
set_auth_header()

View File

@ -1,79 +1,38 @@
#!/usr/bin/env python3
import logging
import os
from pathlib import Path
import requests
from requests.adapters import HTTPAdapter # type: ignore
from urllib3.util.retry import Retry # type: ignore
from get_previous_release_tag import ReleaseInfo, get_previous_release
CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/tags"
DOWNLOAD_PREFIX = (
"https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/"
from build_download_helper import DownloadException, download_build_with_progress
from get_previous_release_tag import (
ReleaseInfo,
get_previous_release,
get_release_by_tag,
)
CLICKHOUSE_COMMON_STATIC_PACKAGE_NAME = "clickhouse-common-static_{version}_amd64.deb"
CLICKHOUSE_COMMON_STATIC_DBG_PACKAGE_NAME = (
"clickhouse-common-static-dbg_{version}_amd64.deb"
)
CLICKHOUSE_CLIENT_PACKAGE_NAME = "clickhouse-client_{version}_amd64.deb"
CLICKHOUSE_LIBRARY_BRIDGE_PACKAGE_NAME = "clickhouse-library-bridge_{version}_amd64.deb"
CLICKHOUSE_ODBC_BRIDGE_PACKAGE_NAME = "clickhouse-odbc-bridge_{version}_amd64.deb"
CLICKHOUSE_SERVER_PACKAGE_NAME = "clickhouse-server_{version}_amd64.deb"
PACKAGES_DIR = "previous_release_package_folder/"
VERSION_PATTERN = r"((?:\d+\.)?(?:\d+\.)?(?:\d+\.)?\d+-[a-zA-Z]*)"
PACKAGES_DIR = Path("previous_release_package_folder")
def download_package(url, out_path, retries=10, backoff_factor=0.3):
session = requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=[500, 502, 503, 504],
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
response = session.get(url)
response.raise_for_status()
print(f"Download {url} to {out_path}")
with open(out_path, "wb") as fd:
fd.write(response.content)
def download_packages(release, dest_path=PACKAGES_DIR):
if not os.path.exists(dest_path):
os.makedirs(dest_path)
def download_packages(release: ReleaseInfo, dest_path: Path = PACKAGES_DIR) -> None:
dest_path.mkdir(parents=True, exist_ok=True)
logging.info("Will download %s", release)
def get_dest_path(pkg_name):
return os.path.join(dest_path, pkg_name)
for pkg in (
CLICKHOUSE_COMMON_STATIC_PACKAGE_NAME,
CLICKHOUSE_COMMON_STATIC_DBG_PACKAGE_NAME,
CLICKHOUSE_CLIENT_PACKAGE_NAME,
CLICKHOUSE_LIBRARY_BRIDGE_PACKAGE_NAME,
CLICKHOUSE_ODBC_BRIDGE_PACKAGE_NAME,
CLICKHOUSE_SERVER_PACKAGE_NAME,
):
url = (DOWNLOAD_PREFIX + pkg).format(version=release.version, type=release.type)
pkg_name = get_dest_path(pkg.format(version=release.version))
download_package(url, pkg_name)
for pkg, url in release.assets.items():
if not pkg.endswith("_amd64.deb") or "-dbg_" in pkg:
continue
pkg_name = dest_path / pkg
download_build_with_progress(url, pkg_name)
def download_last_release(dest_path):
def download_last_release(dest_path: Path) -> None:
current_release = get_previous_release(None)
if current_release is None:
raise DownloadException("The current release is not found")
download_packages(current_release, dest_path=dest_path)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
release = ReleaseInfo(input())
release = get_release_by_tag(input())
download_packages(release)

View File

@ -2,47 +2,37 @@
import logging
import re
from typing import List, Optional, Tuple
from typing import Dict, List, Optional, Tuple
import requests
CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/tags"
CLICKHOUSE_PACKAGE_URL = (
"https://github.com/ClickHouse/ClickHouse/releases/download/"
"v{version}-{type}/clickhouse-common-static_{version}_amd64.deb"
from build_download_helper import get_gh_api
from git_helper import TAG_REGEXP
from version_helper import (
ClickHouseVersion,
get_version_from_string,
get_version_from_tag,
)
VERSION_PATTERN = r"(v(?:\d+\.)?(?:\d+\.)?(?:\d+\.)?\d+-[a-zA-Z]*)"
CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/releases"
PACKAGE_REGEXP = r"\Aclickhouse-common-static_.+[.]deb"
logger = logging.getLogger(__name__)
class Version:
def __init__(self, version: str):
self.version = version
def __lt__(self, other: "Version") -> bool:
return list(map(int, self.version.split("."))) < list(
map(int, other.version.split("."))
)
def __str__(self):
return self.version
class ReleaseInfo:
def __init__(self, release_tag: str):
self.version = Version(release_tag[1:].split("-")[0])
self.type = release_tag[1:].split("-")[1]
def __init__(self, release_tag: str, assets: Dict[str, str]):
self.version = get_version_from_tag(release_tag)
self.type = self.version.description
self.assets = assets
def __str__(self):
return f"v{self.version}-{self.type}"
return self.version.describe
def __repr__(self):
return f"ReleaseInfo: {self.version}-{self.type}"
return f"ReleaseInfo: {self.version.describe}"
def find_previous_release(
server_version: Optional[Version], releases: List[ReleaseInfo]
server_version: Optional[ClickHouseVersion], releases: List[ReleaseInfo]
) -> Tuple[bool, Optional[ReleaseInfo]]:
releases.sort(key=lambda x: x.version, reverse=True)
@ -54,15 +44,7 @@ def find_previous_release(
# Check if the artifact exists on GitHub.
# It can be not true for a short period of time
# after creating a tag for a new release before uploading the packages.
if (
requests.head(
CLICKHOUSE_PACKAGE_URL.format(
version=release.version, type=release.type
),
timeout=10,
).status_code
!= 404
):
if any(re.match(PACKAGE_REGEXP, name) for name in release.assets.keys()):
return True, release
logger.debug(
@ -74,12 +56,14 @@ def find_previous_release(
return False, None
def get_previous_release(server_version: Optional[Version]) -> Optional[ReleaseInfo]:
def get_previous_release(
server_version: Optional[ClickHouseVersion],
) -> Optional[ReleaseInfo]:
page = 1
found = False
while not found:
response = requests.get(
CLICKHOUSE_TAGS_URL, {"page": page, "per_page": 100}, timeout=10
response = get_gh_api(
CLICKHOUSE_TAGS_URL, params={"page": page, "per_page": 100}, timeout=10
)
if not response.ok:
logger.error(
@ -87,24 +71,42 @@ def get_previous_release(server_version: Optional[Version]) -> Optional[ReleaseI
)
response.raise_for_status()
releases_str = set(re.findall(VERSION_PATTERN, response.text))
if len(releases_str) == 0:
raise ValueError(
"Cannot find previous release for "
+ str(server_version)
+ " server version"
)
releases = response.json()
releases = [ReleaseInfo(release) for release in releases_str]
found, previous_release = find_previous_release(server_version, releases)
release_infos = [] # type: List[ReleaseInfo]
for r in releases:
if re.match(TAG_REGEXP, r["tag_name"]):
assets = {
a["name"]: a["browser_download_url"]
for a in r["assets"]
if a["state"] == "uploaded"
}
release_infos.append(ReleaseInfo(r["tag_name"], assets))
found, previous_release = find_previous_release(server_version, release_infos)
page += 1
return previous_release
def get_release_by_tag(tag: str) -> ReleaseInfo:
response = get_gh_api(f"{CLICKHOUSE_TAGS_URL}/tags/{tag}", timeout=10)
release = response.json()
assets = {
a["name"]: a["browser_download_url"]
for a in release["assets"]
if a["state"] == "uploaded"
}
return ReleaseInfo(release["tag_name"], assets)
def main():
logging.basicConfig(level=logging.INFO)
server_version = Version(input())
version_string = input()
version_string = version_string.split("+", maxsplit=1)[0]
try:
server_version = get_version_from_string(version_string)
except ValueError:
server_version = get_version_from_tag(version_string)
print(get_previous_release(server_version))

View File

@ -23,7 +23,7 @@ from lambda_shared_package.lambda_shared.pr import (
check_pr_description,
)
from pr_info import PRInfo
from report import FAILURE, PENDING, SUCCESS
from report import FAILURE, PENDING, SUCCESS, StatusType
TRUSTED_ORG_IDS = {
54801242, # clickhouse
@ -58,7 +58,7 @@ def pr_is_by_trusted_user(pr_user_login, pr_user_orgs):
# Returns can_run, description
def should_run_ci_for_pr(pr_info: PRInfo) -> Tuple[bool, str]:
# Consider the labels and whether the user is trusted.
print("Got labels", pr_info.labels)
logging.info("Got labels: %s", pr_info.labels)
if OK_SKIP_LABELS.intersection(pr_info.labels):
return True, "Don't try new checks for release/backports/cherry-picks"
@ -66,9 +66,10 @@ def should_run_ci_for_pr(pr_info: PRInfo) -> Tuple[bool, str]:
if Labels.CAN_BE_TESTED not in pr_info.labels and not pr_is_by_trusted_user(
pr_info.user_login, pr_info.user_orgs
):
print(
f"PRs by untrusted users need the '{Labels.CAN_BE_TESTED}' label - "
"please contact a member of the core team"
logging.info(
"PRs by untrusted users need the '%s' label - "
"please contact a member of the core team",
Labels.CAN_BE_TESTED,
)
return False, "Needs 'can be tested' label"
@ -93,6 +94,7 @@ def main():
description = format_description(description)
gh = Github(get_best_robot_token(), per_page=100)
commit = get_commit(gh, pr_info.sha)
status = SUCCESS # type: StatusType
description_error, category = check_pr_description(pr_info.body, GITHUB_REPOSITORY)
pr_labels_to_add = []
@ -125,13 +127,16 @@ def main():
f"::notice :: Add backport labels [{backport_labels}] for a given PR category"
)
print(f"Change labels: add {pr_labels_to_add}, remove {pr_labels_to_remove}")
logging.info(
"Change labels: add %s, remove %s", pr_labels_to_add, pr_labels_to_remove
)
if pr_labels_to_add:
post_labels(gh, pr_info, pr_labels_to_add)
if pr_labels_to_remove:
remove_labels(gh, pr_info, pr_labels_to_remove)
# 1. Next three IFs are in a correct order. First - fatal error
if description_error:
print(
"::error ::Cannot run, PR description does not match the template: "
@ -146,9 +151,10 @@ def main():
f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/"
"blob/master/.github/PULL_REQUEST_TEMPLATE.md?plain=1"
)
status = FAILURE
post_commit_status(
commit,
FAILURE,
status,
url,
format_description(description_error),
PR_CHECK,
@ -156,41 +162,38 @@ def main():
)
sys.exit(1)
# 2. Then we check if the documentation is not created to fail the Mergeable check
if (
Labels.PR_FEATURE in pr_info.labels
and not pr_info.has_changes_in_documentation()
):
print(
f"The '{Labels.PR_FEATURE}' in the labels, "
f"::error ::The '{Labels.PR_FEATURE}' in the labels, "
"but there's no changed documentation"
)
post_commit_status(
commit,
FAILURE,
"",
f"expect adding docs for {Labels.PR_FEATURE}",
PR_CHECK,
pr_info,
)
# allow the workflow to continue
status = FAILURE
description = f"expect adding docs for {Labels.PR_FEATURE}"
# 3. But we allow the workflow to continue
# 4. And post only a single commit status on a failure
if not can_run:
post_commit_status(
commit,
FAILURE,
status,
"",
description,
PR_CHECK,
pr_info,
)
print("::notice ::Cannot run")
print("::error ::Cannot run")
sys.exit(1)
# The status for continue can be posted only one time, not more.
post_commit_status(
commit,
SUCCESS,
status,
"",
"ok",
description,
PR_CHECK,
pr_info,
)

View File

@ -260,8 +260,9 @@ def test_create_table():
"CREATE TABLE table16 (`x` int) ENGINE = DeltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')",
"CREATE TABLE table17 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV') settings mode = 'ordered'",
"CREATE TABLE table18 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip') settings mode = 'ordered'",
"CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV') settings mode = 'ordered'",
"CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip') settings mode = 'ordered'",
# due to sensitive data substitution the query will be normalized, so not "settings" but "SETTINGS"
"CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV') SETTINGS mode = 'ordered'",
"CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip') SETTINGS mode = 'ordered'",
],
must_not_contain=[password],
)

View File

@ -4,6 +4,11 @@
5
5
5
5
5
5
5
5
1
12
2023-05-30 14:38:20

View File

@ -16,6 +16,21 @@ SELECT accurateCast(-129, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE }
SELECT accurateCast(5, 'Int8');
SELECT accurateCast(128, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE }
SELECT accurateCast('-1', 'UInt8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt8');
SELECT accurateCast('257', 'UInt8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('-1', 'UInt16'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt16');
SELECT accurateCast('65536', 'UInt16'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('-1', 'UInt32'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt32');
SELECT accurateCast('4294967296', 'UInt32'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('-1', 'UInt64'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'UInt64');
SELECT accurateCast('-129', 'Int8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast('5', 'Int8');
SELECT accurateCast('128', 'Int8'); -- { serverError CANNOT_PARSE_TEXT }
SELECT accurateCast(10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW }
SELECT accurateCast(1, 'Decimal32(9)');
SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW }

View File

@ -1,19 +1,19 @@
1 Hello ClickHouse
2 Hello World
1 Well, Hello ClickHouse !
2 Well, Hello World !
Granules: 6/6
Granules: 2/6
Granules: 6/6
Granules: 2/6
---
1 Hello ClickHouse
2 Hello World
6 World Champion
1 Well, Hello ClickHouse !
2 Well, Hello World !
6 True World Champion
Granules: 6/6
Granules: 3/6
Granules: 6/6
Granules: 3/6
---
5 OLAP Database
5 Its An OLAP Database
Granules: 6/6
Granules: 1/6
Granules: 6/6

View File

@ -14,19 +14,19 @@ ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 1;
INSERT INTO tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion');
INSERT INTO tab VALUES (1, 'Well, Hello ClickHouse !'), (2, 'Well, Hello World !'), (3, 'Good Weather !'), (4, 'Say Hello !'), (5, 'Its An OLAP Database'), (6, 'True World Champion');
SELECT * FROM tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id;
SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id;
-- Read 2/6 granules
-- Required string: 'Hello '
-- Alternatives: 'Hello ClickHouse', 'Hello World'
-- Required string: ' Hello '
-- Alternatives: ' Hello ClickHouse ', ' Hello World '
SELECT *
FROM
(
EXPLAIN PLAN indexes=1
SELECT * FROM tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -37,7 +37,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes=1
SELECT * FROM tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -46,17 +46,17 @@ SETTINGS
SELECT '---';
SELECT * FROM tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id;
SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id;
-- Read 3/6 granules
-- Required string: -
-- Alternatives: 'ClickHouse', 'World'
-- Alternatives: ' ClickHouse ', ' World '
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -67,7 +67,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -76,17 +76,17 @@ SETTINGS
SELECT '---';
SELECT * FROM tab WHERE match(str, 'OLAP.*') ORDER BY id;
SELECT * FROM tab WHERE match(str, ' OLAP .*') ORDER BY id;
-- Read 1/6 granules
-- Required string: 'OLAP'
-- Required string: ' OLAP '
-- Alternatives: -
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
SELECT * FROM tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -97,7 +97,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
SELECT * FROM tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'

View File

@ -13,19 +13,19 @@ af full_text
1
Test full_text()
af full_text
101 Alick a01
106 Alick a06
111 Alick b01
116 Alick b06
101 Alick a01
106 Alick a06
101 x Alick a01 y
106 x Alick a06 y
111 x Alick b01 y
116 x Alick b06 y
101 x Alick a01 y
106 x Alick a06 y
1
101 Alick a01
111 Alick b01
101 x Alick a01 y
111 x Alick b01 y
1
Test on array columns
af full_text
3 ['Click a03','Click b03']
3 ['x Click a03 y','x Click b03 y']
1
Test on map columns
af full_text

View File

@ -67,7 +67,7 @@ CREATE TABLE tab_x(k UInt64, s String, INDEX af(s) TYPE full_text())
ENGINE = MergeTree() ORDER BY k
SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi';
INSERT INTO tab_x VALUES (101, 'Alick a01'), (102, 'Blick a02'), (103, 'Click a03'), (104, 'Dlick a04'), (105, 'Elick a05'), (106, 'Alick a06'), (107, 'Blick a07'), (108, 'Click a08'), (109, 'Dlick a09'), (110, 'Elick a10'), (111, 'Alick b01'), (112, 'Blick b02'), (113, 'Click b03'), (114, 'Dlick b04'), (115, 'Elick b05'), (116, 'Alick b06'), (117, 'Blick b07'), (118, 'Click b08'), (119, 'Dlick b09'), (120, 'Elick b10');
INSERT INTO tab_x VALUES (101, 'x Alick a01 y'), (102, 'x Blick a02 y'), (103, 'x Click a03 y'), (104, 'x Dlick a04 y'), (105, 'x Elick a05 y'), (106, 'x Alick a06 y'), (107, 'x Blick a07 y'), (108, 'x Click a08 y'), (109, 'x Dlick a09 y'), (110, 'x Elick a10 y'), (111, 'x Alick b01 y'), (112, 'x Blick b02 y'), (113, 'x Click b03 y'), (114, 'x Dlick b04 y'), (115, 'x Elick b05 y'), (116, 'x Alick b06 y'), (117, 'x Blick b07 y'), (118, 'x Click b08 y'), (119, 'x Dlick b09 y'), (120, 'x Elick b10 y');
-- check full_text index was created
SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab_x' AND database = currentDatabase() LIMIT 1;
@ -86,27 +86,27 @@ SELECT read_rows==8 from system.query_log
LIMIT 1;
-- search full_text index with IN operator
SELECT * FROM tab_x WHERE s IN ('Alick a01', 'Alick a06') ORDER BY k;
SELECT * FROM tab_x WHERE s IN ('x Alick a01 y', 'x Alick a06 y') ORDER BY k;
-- check the query only read 2 granules (4 rows total; each granule has 2 rows)
SYSTEM FLUSH LOGS;
SELECT read_rows==4 from system.query_log
WHERE query_kind ='Select'
AND current_database = currentDatabase()
AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE s IN (\'Alick a01\', \'Alick a06\') ORDER BY k;')
AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE s IN (\'x Alick a01 y\', \'x Alick a06 y\') ORDER BY k;')
AND type='QueryFinish'
AND result_rows==2
LIMIT 1;
-- search full_text index with multiSearch
SELECT * FROM tab_x WHERE multiSearchAny(s, ['a01', 'b01']) ORDER BY k;
SELECT * FROM tab_x WHERE multiSearchAny(s, [' a01 ', ' b01 ']) ORDER BY k;
-- check the query only read 2 granules (4 rows total; each granule has 2 rows)
SYSTEM FLUSH LOGS;
SELECT read_rows==4 from system.query_log
WHERE query_kind ='Select'
AND current_database = currentDatabase()
AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE multiSearchAny(s, [\'a01\', \'b01\']) ORDER BY k;')
AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE multiSearchAny(s, [\' a01 \', \' b01 \']) ORDER BY k;')
AND type='QueryFinish'
AND result_rows==2
LIMIT 1;
@ -126,14 +126,14 @@ INSERT INTO tab SELECT rowNumberInBlock(), groupArray(s) FROM tab_x GROUP BY k%1
SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1;
-- search full_text index with has
SELECT * FROM tab WHERE has(s, 'Click a03') ORDER BY k;
SELECT * FROM tab WHERE has(s, 'x Click a03 y') ORDER BY k;
-- check the query only reads 1 granule (2 rows total; each granule has 2 rows)
SYSTEM FLUSH LOGS;
SELECT read_rows==2 from system.query_log
WHERE query_kind ='Select'
AND current_database = currentDatabase()
AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE has(s, \'Click a03\') ORDER BY k;')
AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE has(s, \'x Click a03 y\') ORDER BY k;')
AND type='QueryFinish'
AND result_rows==1
LIMIT 1;

View File

@ -1,5 +1,5 @@
1 Hello ClickHouse
2 Hello World
1 Well, Hello ClickHouse !
2 Well, Hello World !
1 Hello ClickHouse
2 Hello World
Granules: 6/6
@ -11,9 +11,9 @@
Granules: 6/6
Granules: 2/6
---
1 Hello ClickHouse
2 Hello World
6 World Champion
1 Well, Hello ClickHouse !
2 Well, Hello World !
6 True World Champion
1 Hello ClickHouse
2 Hello World
6 World Champion
@ -26,7 +26,7 @@
Granules: 6/6
Granules: 3/6
---
5 OLAP Database
5 Its An OLAP Database
5 OLAP Database
Granules: 6/6
Granules: 1/6

View File

@ -21,21 +21,22 @@ ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 1;
INSERT INTO tokenbf_tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion');
INSERT INTO tokenbf_tab VALUES (1, 'Well, Hello ClickHouse !'), (2, 'Well, Hello World !'), (3, 'Good Weather !'), (4, 'Say Hello !'), (5, 'Its An OLAP Database'), (6, 'True World Champion');
INSERT INTO ngrambf_tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion');
SELECT * FROM tokenbf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id;
SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id;
SELECT * FROM ngrambf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id;
-- Read 2/6 granules
-- Required string: 'Hello '
-- Alternatives: 'Hello ClickHouse', 'Hello World'
-- Surrounded by spaces for tokenbf
SELECT *
FROM
(
EXPLAIN PLAN indexes=1
SELECT * FROM tokenbf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -46,7 +47,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes=1
SELECT * FROM tokenbf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id
SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -78,18 +79,19 @@ SETTINGS
SELECT '---';
SELECT * FROM tokenbf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id;
SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id;
SELECT * FROM ngrambf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id;
-- Read 3/6 granules
-- Required string: -
-- Alternatives: 'ClickHouse', 'World'
-- Surrounded by spaces for tokenbf
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tokenbf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -100,7 +102,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tokenbf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id
SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -131,18 +133,19 @@ SETTINGS
SELECT '---';
SELECT * FROM tokenbf_tab WHERE match(str, 'OLAP.*') ORDER BY id;
SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP .*') ORDER BY id;
SELECT * FROM ngrambf_tab WHERE match(str, 'OLAP.*') ORDER BY id;
-- Read 1/6 granules
-- Required string: 'OLAP'
-- Alternatives: -
-- Surrounded by spaces for tokenbf
SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tokenbf_tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
@ -152,7 +155,7 @@ SELECT *
FROM
(
EXPLAIN PLAN indexes = 1
SELECT * FROM tokenbf_tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id
SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id
)
WHERE
explain LIKE '%Granules: %'
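
The space-padded patterns in the hunks above all serve the same purpose: tokenbf_v1 stores complete tokens (runs of characters split at non-alphanumeric boundaries), so the regex analyzer can consult the index only when the required string or every alternative forms a whole token. A minimal sketch of that rule, assuming an illustrative table name and data that are not part of this commit:

```sql
-- Illustrative sketch (not part of this commit): token completeness decides
-- whether the tokenbf_v1 skip index can prune granules.
DROP TABLE IF EXISTS demo_tokenbf;
CREATE TABLE demo_tokenbf
(
    id UInt64,
    str String,
    INDEX idx_str str TYPE tokenbf_v1(512, 3, 0) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 1;

INSERT INTO demo_tokenbf VALUES (1, 'Say Hello World'), (2, 'Good Weather');

-- ' Hello ' contains the complete token 'Hello'; the index is expected to prune the non-matching granule.
SELECT trim(explain)
FROM (EXPLAIN PLAN indexes = 1 SELECT * FROM demo_tokenbf WHERE match(str, ' Hello '))
WHERE explain LIKE '%Granules: %';

-- 'Hell' is only a token prefix; the filter cannot be used and all granules are expected to be read.
SELECT trim(explain)
FROM (EXPLAIN PLAN indexes = 1 SELECT * FROM demo_tokenbf WHERE match(str, 'Hell'))
WHERE explain LIKE '%Granules: %';

DROP TABLE demo_tokenbf;
```

The idx_str line of the first EXPLAIN is expected to report fewer granules than the second, mirroring the "Surrounded by spaces for tokenbf" comments above.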

View File

@ -0,0 +1,83 @@
-------- Bloom filter --------
-- No skip for prefix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for prefix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for suffix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for suffix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for substring
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for substring with complete token
Parts: 1/1
Parts: 0/1
-- No skip for multiple substrings
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for multiple substrings with complete tokens
Parts: 1/1
Parts: 0/1
-- No skip for multiple non-existing substrings, only one with complete token
Parts: 1/1
Parts: 1/1
-------- GIN filter --------
-- No skip for prefix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for prefix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for suffix
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for suffix with complete token
Parts: 1/1
Parts: 0/1
-- No skip for substring
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for substring with complete token
Parts: 1/1
Parts: 0/1
-- No skip for multiple substrings
Parts: 1/1
Parts: 1/1
1 Service is not ready
-- Skip for multiple substrings with complete tokens
Parts: 1/1
Parts: 0/1
-- No skip for multiple non-existing substrings, only one with complete token
Parts: 1/1
Parts: 1/1

View File

@ -0,0 +1,227 @@
SELECT '-------- Bloom filter --------';
SELECT '';
DROP TABLE IF EXISTS 03165_token_bf;
CREATE TABLE 03165_token_bf
(
id Int64,
message String,
INDEX idx_message message TYPE tokenbf_v1(32768, 3, 2) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
INSERT INTO 03165_token_bf VALUES(1, 'Service is not ready');
SELECT '-- No skip for prefix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv');
SELECT '';
SELECT '-- Skip for prefix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv i')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv i');
SELECT '';
SELECT '-- No skip for suffix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE endsWith(message, 'eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE endsWith(message, 'eady');
SELECT '';
SELECT '-- Skip for suffix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE endsWith(message, ' eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE endsWith(message, ' eady');
SELECT '';
SELECT '-- No skip for substring';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE match(message, 'no')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE match(message, 'no');
SELECT '';
SELECT '-- Skip for substring with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE match(message, ' xyz ')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE match(message, ' xyz ');
SELECT '';
SELECT '-- No skip for multiple substrings';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, ['ce', 'no'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, ['ce', 'no']);
SELECT '';
SELECT '-- Skip for multiple substrings with complete tokens';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', ' yz '])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', ' yz ']);
SELECT '';
SELECT '-- No skip for multiple non-existing substrings, only one with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', 'yz'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', 'yz']);
DROP TABLE IF EXISTS 03165_token_bf;
SELECT '';
SELECT '-------- GIN filter --------';
SELECT '';
SET allow_experimental_inverted_index=1;
DROP TABLE IF EXISTS 03165_token_ft;
CREATE TABLE 03165_token_ft
(
id Int64,
message String,
INDEX idx_message message TYPE full_text() GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
INSERT INTO 03165_token_ft VALUES(1, 'Service is not ready');
SELECT '-- No skip for prefix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv');
SELECT '';
SELECT '-- Skip for prefix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv i')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv i');
SELECT '';
SELECT '-- No skip for suffix';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE endsWith(message, 'eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE endsWith(message, 'eady');
SELECT '';
SELECT '-- Skip for suffix with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE endsWith(message, ' eady')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE endsWith(message, ' eady');
SELECT '';
SELECT '-- No skip for substring';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE match(message, 'no')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE match(message, 'no');
SELECT '';
SELECT '-- Skip for substring with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE match(message, ' xyz ')
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE match(message, ' xyz ');
SELECT '';
SELECT '-- No skip for multiple substrings';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, ['ce', 'no'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, ['ce', 'no']);
SELECT '';
SELECT '-- Skip for multiple substrings with complete tokens';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', ' yz '])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', ' yz ']);
SELECT '';
SELECT '-- No skip for multiple non-existing substrings, only one with complete token';
SELECT trim(explain)
FROM (
EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', 'yz'])
)
WHERE explain LIKE '%Parts:%';
SELECT * FROM 03165_token_ft WHERE multiSearchAny(message, [' wx ', 'yz']);
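
The same complete-token rule can be double-checked through system.query_log, the way the earlier full_text tests do; a sketch under the assumption of an illustrative table name, data, and an expected read_rows of 1, none of which are part of this commit:

```sql
-- Illustrative sketch (not part of this commit): verifying granule pruning
-- for a complete-token suffix via system.query_log, as the earlier tests do.
SET allow_experimental_inverted_index = 1;

DROP TABLE IF EXISTS demo_ft;
CREATE TABLE demo_ft
(
    id UInt64,
    message String,
    INDEX idx_message message TYPE full_text() GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 1;

INSERT INTO demo_ft VALUES (1, 'Service is ready'), (2, 'Backup finished');

-- ' ready' ends on the complete token 'ready', so only the matching granule is expected to be read.
SELECT * FROM demo_ft WHERE endsWith(message, ' ready');

SYSTEM FLUSH LOGS;
SELECT read_rows == 1
FROM system.query_log
WHERE query_kind = 'Select'
  AND current_database = currentDatabase()
  AND endsWith(trimRight(query), 'SELECT * FROM demo_ft WHERE endsWith(message, \' ready\');')
  AND type = 'QueryFinish'
LIMIT 1;

DROP TABLE demo_ft;
```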