Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)
Check for punctuation
commit 21382afa2b (parent ecdafeaf83)
@@ -66,7 +66,7 @@ AggregateFunctionPtr createAggregateFunctionSimpleLinearRegression(
 #undef FOR_LEASTSQR_TYPES
 #undef DISPATCH
 
-    throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT ,
+    throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
         "Illegal types ({}, {}) of arguments of aggregate function {}, must "
         "be Native Ints, Native UInts or Floats", x_arg->getName(), y_arg->getName(), name);
 }
@@ -110,7 +110,7 @@ public:
     /// Returns false if queue is finished
     [[nodiscard]] bool pushFront(const T & x)
     {
-        return emplaceImpl</* back= */ false>(/* timeout_milliseconds= */ std::nullopt , x);
+        return emplaceImpl</* back= */ false>(/* timeout_milliseconds= */ std::nullopt, x);
     }
 
     /// Returns false if queue is finished
@@ -52,20 +52,8 @@ static bool parseNumber(const String & description, size_t l, size_t r, size_t &
 }
 
 
-/* Parse a string that generates shards and replicas. Separator - one of two characters | or ,
- * depending on whether shards or replicas are generated.
- * For example:
- * host1,host2,... - generates set of shards from host1, host2, ...
- * host1|host2|... - generates set of replicas from host1, host2, ...
- * abc{8..10}def - generates set of shards abc8def, abc9def, abc10def.
- * abc{08..10}def - generates set of shards abc08def, abc09def, abc10def.
- * abc{x,yy,z}def - generates set of shards abcxdef, abcyydef, abczdef.
- * abc{x|yy|z} def - generates set of replicas abcxdef, abcyydef, abczdef.
- * abc{1..9}de{f,g,h} - is a direct product, 27 shards.
- * abc{1..9}de{0|1} - is a direct product, 9 shards, in each 2 replicas.
- */
-std::vector<String>
-parseRemoteDescription(const String & description, size_t l, size_t r, char separator, size_t max_addresses, const String & func_name)
+std::vector<String> parseRemoteDescription(
+    const String & description, size_t l, size_t r, char separator, size_t max_addresses, const String & func_name)
 {
     std::vector<String> res;
     std::vector<String> cur;
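As an aside, the comment removed here documents the abc{1..9} range syntax for generating shards. A minimal standalone sketch of that numeric-range expansion, assuming a simplified grammar (this is not ClickHouse's actual parseRemoteDescription):

    // Rough sketch: "abc{1..3}def" expands to abc1def, abc2def, abc3def.
    #include <iostream>
    #include <string>
    #include <vector>

    std::vector<std::string> expandRange(const std::string & prefix, int from, int to, const std::string & suffix)
    {
        std::vector<std::string> result;
        for (int i = from; i <= to; ++i)
            result.push_back(prefix + std::to_string(i) + suffix);
        return result;
    }

    int main()
    {
        for (const auto & shard : expandRange("abc", 1, 3, "def"))
            std::cout << shard << '\n';  // abc1def, abc2def, abc3def
    }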
@@ -3,7 +3,7 @@
 #include <vector>
 namespace DB
 {
-/* Parse a string that generates shards and replicas. Separator - one of two characters | or ,
+/* Parse a string that generates shards and replicas. Separator - one of two characters '|' or ','
  * depending on whether shards or replicas are generated.
  * For example:
  * host1,host2,... - generates set of shards from host1, host2, ...
@@ -27,7 +27,7 @@ TEST(Common, SensitiveDataMasker)
 {
 
     Poco::AutoPtr<Poco::Util::XMLConfiguration> empty_xml_config = new Poco::Util::XMLConfiguration();
-    DB::SensitiveDataMasker masker(*empty_xml_config , "");
+    DB::SensitiveDataMasker masker(*empty_xml_config, "");
     masker.addMaskingRule("all a letters", "a+", "--a--");
     masker.addMaskingRule("all b letters", "b+", "--b--");
     masker.addMaskingRule("all d letters", "d+", "--d--");
@@ -45,7 +45,7 @@ TEST(Common, SensitiveDataMasker)
     masker.printStats();
 #endif
 
-    DB::SensitiveDataMasker masker2(*empty_xml_config , "");
+    DB::SensitiveDataMasker masker2(*empty_xml_config, "");
     masker2.addMaskingRule("hide root password", "qwerty123", "******");
     masker2.addMaskingRule("hide SSN", "[0-9]{3}-[0-9]{2}-[0-9]{4}", "000-00-0000");
     masker2.addMaskingRule("hide email", "[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}", "hidden@hidden.test");
@@ -58,7 +58,7 @@ TEST(Common, SensitiveDataMasker)
         "SELECT id FROM mysql('localhost:3308', 'database', 'table', 'root', '******') WHERE "
         "ssn='000-00-0000' or email='hidden@hidden.test'");
 
-    DB::SensitiveDataMasker maskerbad(*empty_xml_config , "");
+    DB::SensitiveDataMasker maskerbad(*empty_xml_config, "");
 
     // gtest has not good way to check exception content, so just do it manually (see https://github.com/google/googletest/issues/952 )
     try
@@ -40,7 +40,7 @@ void deserializeSnapshotMagic(ReadBuffer & in)
     Coordination::read(dbid, in);
     static constexpr int32_t SNP_HEADER = 1514885966; /// "ZKSN"
     if (magic_header != SNP_HEADER)
-        throw Exception(ErrorCodes::CORRUPTED_DATA ,"Incorrect magic header in file, expected {}, got {}", SNP_HEADER, magic_header);
+        throw Exception(ErrorCodes::CORRUPTED_DATA, "Incorrect magic header in file, expected {}, got {}", SNP_HEADER, magic_header);
 }
 
 int64_t deserializeSessionAndTimeout(KeeperStorage & storage, ReadBuffer & in)
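A side note on the constant in this hunk: 1514885966 is the four ASCII bytes "ZKSN" packed into an int32 (assuming big-endian packing, per the /// "ZKSN" note). A standalone check confirms it:

    // Verify that SNP_HEADER == 'Z','K','S','N' packed big-endian.
    #include <cstdint>

    constexpr int32_t packed_magic = ('Z' << 24) | ('K' << 16) | ('S' << 8) | 'N';
    static_assert(packed_magic == 1514885966, "matches SNP_HEADER above");

    int main() {}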
@@ -121,7 +121,7 @@ GTEST_TEST(SettingMySQLDataTypesSupport, SetString)
     ASSERT_EQ(Field("decimal,datetime64"), setting);
 
     // comma with spaces
-    setting = " datetime64 , decimal ";
+    setting = " datetime64 , decimal "; /// bad punctuation is ok here
     ASSERT_TRUE(setting.changed);
     ASSERT_TRUE(setting.value.isSet(MySQLDataTypesSupport::DECIMAL));
     ASSERT_TRUE(setting.value.isSet(MySQLDataTypesSupport::DATETIME64));
@@ -166,4 +166,3 @@ GTEST_TEST(SettingMySQLDataTypesSupport, SetInvalidString)
     ASSERT_TRUE(setting.changed);
     ASSERT_EQ(0, setting.value.getValue());
 }
-
@@ -174,7 +174,7 @@ template <typename A> struct ResultOfBitNot
  * Float<x>, [U]Int<y> -> Float<max(x, y*2)>
  * Decimal<x>, Decimal<y> -> Decimal<max(x,y)>
  * UUID, UUID -> UUID
- * UInt64 , Int<x> -> Error
+ * UInt64, Int<x> -> Error
  * Float<x>, [U]Int64 -> Error
 */
 template <typename A, typename B>
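The promotion table quoted above can be modeled compactly. The following is a toy sketch, not ClickHouse's NumberTraits: it encodes only the Float<x>, [U]Int<y> -> Float<max(x, y*2)> rule, with void standing in for Error:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <type_traits>

    template <typename Float, typename Int>
    struct ResultOfFloatInt
    {
        // max(x, y*2) from the comment, measured in bytes instead of bits.
        static constexpr std::size_t bytes = std::max(sizeof(Float), sizeof(Int) * 2);
        using Type = std::conditional_t<bytes <= 4, float,
                     std::conditional_t<bytes <= 8, double, void>>;  // void models "Error"
    };

    static_assert(std::is_same_v<ResultOfFloatInt<float, int16_t>::Type, float>);   // Float32, Int16 -> Float32
    static_assert(std::is_same_v<ResultOfFloatInt<float, int32_t>::Type, double>);  // Float32, Int32 -> Float64
    static_assert(std::is_same_v<ResultOfFloatInt<double, int64_t>::Type, void>);   // Float64, Int64 -> Error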
@@ -322,7 +322,7 @@ void buildSingleAttribute(
 
 
 /** Transforms
- * PRIMARY KEY Attr1 ,..., AttrN
+ * PRIMARY KEY Attr1, ..., AttrN
  * to the next configuration
  * <id><name>Attr1</name></id>
 * or
@@ -292,8 +292,8 @@ struct SimHashImpl
                 continue;
 
             // we need to store the new word hash value to the oldest location.
-            // for example, N = 5, array |a0|a1|a2|a3|a4|, now , a0 is the oldest location,
-            // so we need to store new word hash into location of a0, then ,this array become
+            // for example, N = 5, array |a0|a1|a2|a3|a4|, now, a0 is the oldest location,
+            // so we need to store new word hash into location of a0, then this array become
             // |a5|a1|a2|a3|a4|, next time, a1 become the oldest location, we need to store new
             // word hash value into location of a1, then array become |a5|a6|a2|a3|a4|
             words[offset] = BytesRef{word_start, length};
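The corrected comment describes a ring buffer over the last N word hashes, where the i-th hash overwrites slot i % N. A minimal standalone sketch of that indexing scheme (hypothetical names, not the SimHashImpl code):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        constexpr std::size_t N = 5;
        std::array<uint64_t, N> words{};       // |a0|a1|a2|a3|a4|
        for (uint64_t hash = 0; hash < 7; ++hash)
        {
            std::size_t offset = hash % N;     // the 6th hash lands in slot 0, the oldest
            words[offset] = hash;
        }
        // After 7 insertions: |5|6|2|3|4| - slots 0 and 1 were recycled.
        for (uint64_t w : words)
            std::cout << w << ' ';
    }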
@@ -793,4 +793,3 @@ REGISTER_FUNCTION(StringHash)
     factory.registerFunction<FunctionWordShingleMinHashArgCaseInsensitiveUTF8>();
 }
 }
-
@@ -375,14 +375,14 @@ bool sliceHasImplAnyAllImplInt16(
             _mm256_or_si256(
                 _mm256_andnot_si256(
                     _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_nm_mask, first_nm_mask, 1), _mm256_set_epi8(7,6,5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8)),
-                    _mm256_cmpeq_epi16(second_data, _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_data ,first_data, 1), _mm256_set_epi8(7,6,5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8)))),
+                    _mm256_cmpeq_epi16(second_data, _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_data, first_data, 1), _mm256_set_epi8(7,6,5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8)))),
                 _mm256_andnot_si256(
                     _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_nm_mask, first_nm_mask, 1), _mm256_set_epi8(5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6)),
                     _mm256_cmpeq_epi16(second_data, _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_data, first_data, 1), _mm256_set_epi8(5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6))))),
             _mm256_or_si256(
                 _mm256_andnot_si256(
                     _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_nm_mask, first_nm_mask, 1), _mm256_set_epi8(3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4)),
-                    _mm256_cmpeq_epi16(second_data, _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_data ,first_data ,1), _mm256_set_epi8(3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4)))),
+                    _mm256_cmpeq_epi16(second_data, _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_data, first_data, 1), _mm256_set_epi8(3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4)))),
                 _mm256_andnot_si256(
                     _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_nm_mask, first_nm_mask, 1), _mm256_set_epi8(1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2)),
                     _mm256_cmpeq_epi16(second_data, _mm256_shuffle_epi8(_mm256_permute2x128_si256(first_data, first_data, 1), _mm256_set_epi8(1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2))))))
@@ -258,7 +258,7 @@ void PocoHTTPClient::addMetric(const Aws::Http::HttpRequest & request, S3MetricT
 void PocoHTTPClient::makeRequestInternal(
     Aws::Http::HttpRequest & request,
     std::shared_ptr<PocoHTTPResponse> & response,
-    Aws::Utils::RateLimits::RateLimiterInterface * readLimiter ,
+    Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
     Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const
 {
     /// Most sessions in pool are already connected and it is not possible to set proxy host/port to a connected session.
@@ -292,7 +292,7 @@ struct AggregationMethodStringNoCache
     {
     }
 
-    using State = ColumnsHashing::HashMethodString<typename Data::value_type, Mapped, true, false, false ,nullable>;
+    using State = ColumnsHashing::HashMethodString<typename Data::value_type, Mapped, true, false, false, nullable>;
 
     static const bool low_cardinality_optimization = false;
     static const bool one_key_nullable_optimization = nullable;
@@ -551,7 +551,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper)
     chassert(!task.completely_processed);
 
     /// Setup tracing context on current thread for current DDL
-    OpenTelemetry::TracingContextHolder tracing_ctx_holder(__PRETTY_FUNCTION__ ,
+    OpenTelemetry::TracingContextHolder tracing_ctx_holder(__PRETTY_FUNCTION__,
         task.entry.tracing_context,
         this->context->getOpenTelemetrySpanLog());
     tracing_ctx_holder.root_span.kind = OpenTelemetry::CONSUMER;
@@ -193,7 +193,7 @@ AccessRightsElements InterpreterRenameQuery::getRequiredAccess(InterpreterRename
         required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT, elem.to.getDatabase(), elem.to.getTable());
         if (rename.exchange)
         {
-            required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT , elem.from.getDatabase(), elem.from.getTable());
+            required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT, elem.from.getDatabase(), elem.from.getTable());
             required_access.emplace_back(AccessType::SELECT | AccessType::DROP_TABLE, elem.to.getDatabase(), elem.to.getTable());
         }
     }
@@ -482,7 +482,7 @@ CSN TransactionLog::finalizeCommittedTransaction(MergeTreeTransaction * txn, CSN
         bool removed = running_list.erase(txn->tid.getHash());
         if (!removed)
         {
-            LOG_ERROR(log , "I's a bug: TID {} {} doesn't exist", txn->tid.getHash(), txn->tid);
+            LOG_ERROR(log, "It's a bug: TID {} {} doesn't exist", txn->tid.getHash(), txn->tid);
             abort();
         }
     }
@@ -31,10 +31,10 @@ protected:
         not_endswith,
         endswith_cs,
         not_endswith_cs,
-        equal, //=~
-        not_equal,//!~
-        equal_cs, //=
-        not_equal_cs,//!=
+        equal, /// =~
+        not_equal, /// !~
+        equal_cs, /// =
+        not_equal_cs, /// !=
         has,
         not_has,
         has_all,
@@ -49,10 +49,10 @@ protected:
         not_hassuffix,
         hassuffix_cs,
         not_hassuffix_cs,
-        in_cs, //in
-        not_in_cs, //!in
-        in, //in~
-        not_in ,//!in~
+        in_cs, /// in
+        not_in_cs, /// !in
+        in, /// in~
+        not_in, /// !in~
         matches_regex,
         startswith,
         not_startswith,
@@ -359,11 +359,11 @@ INSTANTIATE_TEST_SUITE_P(ParserKQLQuery, ParserTest,
         "SELECT *\nFROM Customers\nORDER BY LastName DESC"
     },
     {
-        "Customers | order by Age desc , FirstName asc ",
+        "Customers | order by Age desc, FirstName asc ",
         "SELECT *\nFROM Customers\nORDER BY\n    Age DESC,\n    FirstName ASC"
     },
     {
-        "Customers | order by Age asc , FirstName desc",
+        "Customers | order by Age asc, FirstName desc",
         "SELECT *\nFROM Customers\nORDER BY\n    Age ASC,\n    FirstName DESC"
     },
     {
@@ -35,7 +35,7 @@ public:
     /// - key: field name with full path. eg. a struct field's name is like a.x.i
     /// - value: a pair, first value refers to this field's start index, second value refers to how many
     ///   indices this field take. eg.
-    ///   For a parquet schema {x: int , y: {i: int, j: int}}, the return will be
+    ///   For a parquet schema {x: int, y: {i: int, j: int}}, the return will be
     ///   - x: (0, 1)
     ///   - y: (1, 2)
     ///   - y.i: (1, 1)
@@ -236,10 +236,10 @@ bool JSONEachRowRowInputFormat::readRow(MutableColumns & columns, RowReadExtensi
 
 bool JSONEachRowRowInputFormat::checkEndOfData(bool is_first_row)
 {
-    /// We consume , or \n before scanning a new row, instead scanning to next row at the end.
+    /// We consume ',' or '\n' before scanning a new row, instead scanning to next row at the end.
     /// The reason is that if we want an exact number of rows read with LIMIT x
     /// from a streaming table engine with text data format, like File or Kafka
-    /// then seeking to next ;, or \n would trigger reading of an extra row at the end.
+    /// then seeking to next ';,' or '\n' would trigger reading of an extra row at the end.
 
     /// Semicolon is added for convenience as it could be used at end of INSERT query.
     if (!in->eof())
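The comment's strategy - consume the inter-row delimiter before the next row rather than after the current one - is what keeps LIMIT from reading an extra row. A simplified stand-in (not the real JSONEachRowRowInputFormat) illustrating the idea:

    #include <iostream>
    #include <sstream>
    #include <string>

    bool readRow(std::istream & in, std::string & row)
    {
        int c;
        // Skip ',', ';', '\n' *before* the row, as the comment above describes.
        while ((c = in.peek()) == ',' || c == ';' || c == '\n' || c == ' ')
            in.get();
        if (in.peek() == EOF)
            return false;
        row.clear();
        while ((c = in.peek()) != EOF && c != ',' && c != ';' && c != '\n')
            row += static_cast<char>(in.get());
        return true;  // the trailing delimiter is deliberately left unread
    }

    int main()
    {
        std::istringstream in("{\"a\":1},\n{\"a\":2},\n{\"a\":3}");
        std::string row;
        for (int limit = 2; limit > 0 && readRow(in, row); --limit)
            std::cout << row << '\n';  // reads exactly two rows; the third is untouched
    }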
@@ -30,7 +30,7 @@ static Block checkHeaders(const DataStreams & input_streams_)
 }
 
 IntersectOrExceptStep::IntersectOrExceptStep(
-    DataStreams input_streams_ , Operator operator_ , size_t max_threads_)
+    DataStreams input_streams_, Operator operator_, size_t max_threads_)
     : header(checkHeaders(input_streams_))
     , current_operator(operator_)
     , max_threads(max_threads_)
@@ -72,7 +72,7 @@ struct ViewsData
     std::atomic_bool has_exception = false;
     std::exception_ptr first_exception;
 
-    ViewsData(ThreadStatusesHolderPtr thread_status_holder_, ContextPtr context_, StorageID source_storage_id_, StorageMetadataPtr source_metadata_snapshot_ , StoragePtr source_storage_)
+    ViewsData(ThreadStatusesHolderPtr thread_status_holder_, ContextPtr context_, StorageID source_storage_id_, StorageMetadataPtr source_metadata_snapshot_, StoragePtr source_storage_)
         : thread_status_holder(std::move(thread_status_holder_))
         , context(std::move(context_))
         , source_storage_id(std::move(source_storage_id_))
@@ -638,7 +638,7 @@ void HTTPHandler::processQuery(
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected MemoryWriteBuffer");
 
         auto rdbuf = prev_memory_buffer->tryGetReadBuffer();
-        copyData(*rdbuf , *next_buffer);
+        copyData(*rdbuf, *next_buffer);
 
         return next_buffer;
     };
@@ -242,8 +242,8 @@ void listFilesWithRegexpMatchingImpl(
     {
         if (recursive)
         {
-            listFilesWithRegexpMatchingImpl(fs::path(full_path).append(it->path().string()) / "" ,
-                looking_for_directory ? suffix_with_globs.substr(next_slash_after_glob_pos) : current_glob ,
+            listFilesWithRegexpMatchingImpl(fs::path(full_path).append(it->path().string()) / "",
+                looking_for_directory ? suffix_with_globs.substr(next_slash_after_glob_pos) : current_glob,
                 total_bytes_to_read, result, recursive);
         }
         else if (looking_for_directory && re2::RE2::FullMatch(file_name, matcher))
@@ -149,7 +149,7 @@ public:
         return getNested()->mayBenefitFromIndexForIn(left_in_operand, query_context, metadata_snapshot);
     }
 
-    CheckResults checkData(const ASTPtr & query , ContextPtr context) override { return getNested()->checkData(query, context); }
+    CheckResults checkData(const ASTPtr & query, ContextPtr context) override { return getNested()->checkData(query, context); }
     void checkTableCanBeDropped() const override { getNested()->checkTableCanBeDropped(); }
     bool storesDataOnDisk() const override { return getNested()->storesDataOnDisk(); }
     Strings getDataPaths() const override { return getNested()->getDataPaths(); }
@@ -6569,7 +6569,7 @@ void StorageReplicatedMergeTree::fetchPartition(
 
         try
         {
-            /// part name , metadata, part_path , true, 0, zookeeper
+            /// part name, metadata, part_path, true, 0, zookeeper
             if (!fetchPart(part_name, metadata_snapshot, from_zookeeper_name, part_path, true, 0, zookeeper, /* try_fetch_shared = */ false))
                 throw Exception(ErrorCodes::UNFINISHED, "Failed to fetch part {} from {}", part_name, from_);
         }
@@ -41,7 +41,7 @@ TableFunctionPtr TableFunctionFactory::get(
     {
         auto hints = getHints(table_function->name);
         if (!hints.empty())
-            throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}. Maybe you meant: {}", table_function->name , toString(hints));
+            throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}. Maybe you meant: {}", table_function->name, toString(hints));
         else
             throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}", table_function->name);
     }
@@ -410,3 +410,6 @@ find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep
 
 # The stateful directory should only contain the tests that depend on the test dataset (hits or visits).
 find $ROOT_PATH/tests/queries/1_stateful -name '*.sql' -or -name '*.sh' | grep -v '00076_system_columns_bytes' | xargs -I{} bash -c 'grep -q -P "hits|visits" "{}" || echo "The test {} does not depend on the test dataset (hits or visits table) and should be located in the 0_stateless directory. You can also add an exception to the check-style script."'
+
+# Check for bad punctuation: whitespace before comma.
+find $ROOT_PATH/{src,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P --line-number '\w ,' | grep -v 'bad punctuation is ok here' && echo "^ There is bad punctuation: whitespace before comma. You should write it like this: 'Hello, world!'"
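For illustration, a hypothetical source file the new check would act on: the pattern '\w ,' matches a word character followed by a space and a comma, and the grep -v stage lets intentional cases opt out with the marker comment (exactly as the SettingMySQLDataTypesSupport test above does):

    #include <cstdio>

    int add(int a, int b) { return a + b; }

    int main()
    {
        // add(1 , 2);                  <- '1 ,' matches '\w ,'; the check would flag this line
        std::printf("%d\n", add(1, 2)); // no whitespace before comma: passes the check
        const char * s = "a , b";       /// bad punctuation is ok here (marker suppresses the check)
        std::printf("%s\n", s);
    }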