Merge (commit 39a8563a69)
Mirror of https://github.com/ClickHouse/ClickHouse.git
@@ -16,12 +16,12 @@ inline Field toField(const T & x)
     return Field(typename NearestFieldType<T>::Type(x));
 }

-inline Field toField(const mysqlxx::Date & x)
+inline Field toField(const LocalDate & x)
 {
     return toField(static_cast<UInt16>(x.getDayNum()));
 }

-inline Field toField(const mysqlxx::DateTime & x)
+inline Field toField(const LocalDateTime & x)
 {
     return toField(static_cast<UInt32>(static_cast<time_t>(x)));
 }
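Both replacement overloads funnel into the generic integral toField above: a LocalDate is stored in a Field as a UInt16 day number, a LocalDateTime as a UInt32 unix timestamp. A minimal standalone sketch of that mapping, assuming LocalDate exposes getDayNum() and LocalDateTime converts to time_t (simplified types for illustration):

    #include <ctime>
    #include <cstdint>

    // Date -> day number since epoch (fits in UInt16 until roughly year 2149).
    uint16_t dateToField(uint16_t day_num) { return day_num; }

    // DateTime -> seconds since epoch, truncated to 32 bits as in the code above.
    uint32_t dateTimeToField(std::time_t t) { return static_cast<uint32_t>(t); }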
@@ -14,6 +14,7 @@ class CompressedWriteBuffer;
  * Intended for communication between servers.
  *
  * A stream for writing an index may be specified. The index contains offsets to every chunk of every column.
+ * If appending to an already existing file and the index must be written, specify initial_size_of_file.
  */
 class NativeBlockOutputStream : public IBlockOutputStream
 {
@@ -23,7 +24,7 @@ public:
      */
     NativeBlockOutputStream(
         WriteBuffer & ostr_, UInt64 client_revision_ = 0,
-        WriteBuffer * index_ostr_ = nullptr);
+        WriteBuffer * index_ostr_ = nullptr, size_t initial_size_of_file_ = 0);

     void write(const Block & block) override;
     void flush() override { ostr.next(); }
@@ -37,6 +38,7 @@ private:
     UInt64 client_revision;

     WriteBuffer * index_ostr;
+    size_t initial_size_of_file;    /// Initial size of the data file if appending. Used for the index.
     /// If the index must be written, ostr is required to be a CompressedWriteBuffer.
     CompressedWriteBuffer * ostr_concrete = nullptr;
 };
@@ -125,13 +125,6 @@ public:

     bool behavesAsNumber() const override { return true; }

-    /// Returns length of textual name for an enum element (used in FunctionVisibleWidth)
-    std::size_t getNameLength(const FieldType & value) const
-    {
-        /// @todo length of escaped string should be calculated here
-        return getNameForValue(value).size;
-    }
-
     const StringRef & getNameForValue(const FieldType & value) const
     {
         const auto it = value_to_name_map.find(value);
@@ -144,14 +137,13 @@ public:
         return it->second;
     }

-    FieldType getValue(const std::string & name) const
+    FieldType getValue(StringRef name) const
     {
-        const auto it = name_to_value_map.find(StringRef{name});
+        const auto it = name_to_value_map.find(name);
         if (it == std::end(name_to_value_map))
             throw Exception{
-                "Unknown element '" + name + "' for type " + getName(),
-                ErrorCodes::LOGICAL_ERROR
-            };
+                "Unknown element '" + name.toString() + "' for type " + getName(),
+                ErrorCodes::LOGICAL_ERROR};

         return it->second;
     }
@@ -182,7 +174,7 @@ public:
     {
         std::string name;
         readString(name, istr);
-        field = nearestFieldType(getValue(name));
+        field = nearestFieldType(getValue(StringRef(name)));
     }

     void serializeTextEscaped(const Field & field, WriteBuffer & ostr) const override
@@ -194,7 +186,7 @@ public:
     {
         std::string name;
         readEscapedString(name, istr);
-        field = nearestFieldType(getValue(name));
+        field = nearestFieldType(getValue(StringRef(name)));
     }

     void serializeTextQuoted(const Field & field, WriteBuffer & ostr) const override
@@ -206,7 +198,7 @@ public:
     {
         std::string name;
         readQuotedString(name, istr);
-        field = nearestFieldType(getValue(name));
+        field = nearestFieldType(getValue(StringRef(name)));
     }

     void serializeTextJSON(const Field & field, WriteBuffer & ostr) const override
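Taking StringRef by value lets callers look up a name without building a std::string first; the only copy happens on the error path (name.toString()). A minimal sketch of the idea, assuming a map keyed directly on a string reference with a suitable hash (hypothetical standalone types, not the real ClickHouse StringRef):

    #include <cstring>
    #include <string>
    #include <string_view>
    #include <unordered_map>

    struct StringRefLite
    {
        const char * data;
        size_t size;
        std::string toString() const { return {data, size}; }
        bool operator==(const StringRefLite & rhs) const
        { return size == rhs.size && 0 == std::memcmp(data, rhs.data, size); }
    };

    struct StringRefHash
    {
        size_t operator()(StringRefLite s) const { return std::hash<std::string_view>{}({s.data, s.size}); }
    };

    // The lookup itself allocates nothing; only the failure message does.
    using NameToValue = std::unordered_map<StringRefLite, int, StringRefHash>;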
@@ -54,8 +54,10 @@ private:
     RegionParents area;
     /// region -> district containing it, or 0 if there is none
     RegionParents district;
-    /// region -> continent containing it, or 0 if there is none
+    /// region -> continent (the first one when ascending the region hierarchy) containing it, or 0 if there is none
     RegionParents continent;
+    /// region -> continent (the last one when ascending the region hierarchy) containing it, or 0 if there is none
+    RegionParents top_continent;

     /// region -> population, or 0 if unknown.
     RegionPopulations populations;
@@ -88,6 +90,7 @@ public:
     RegionParents new_area(initial_size);
     RegionParents new_district(initial_size);
     RegionParents new_continent(initial_size);
+    RegionParents new_top_continent(initial_size);
     RegionPopulations new_populations(initial_size);
     RegionDepths new_depths(initial_size);
     RegionTypes types(initial_size);
@@ -151,6 +154,7 @@ public:
     new_area         .resize(max_region_id + 1);
     new_district     .resize(max_region_id + 1);
     new_continent    .resize(max_region_id + 1);
+    new_top_continent.resize(max_region_id + 1);
     new_populations  .resize(max_region_id + 1);
     new_depths       .resize(max_region_id + 1);
     types            .resize(max_region_id + 1);
@@ -173,7 +177,7 @@ public:
     if (types[i] == REGION_TYPE_CONTINENT)
     {
         new_continent[i] = i;
-        continue;
+        new_top_continent[i] = i;
     }

     RegionDepth depth = 0;
@@ -203,8 +207,9 @@ public:

     if (types[current] == REGION_TYPE_CONTINENT)
     {
-        new_continent[i] = current;
-        break;
+        if (!new_continent[i])
+            new_continent[i] = current;
+        new_top_continent[i] = current;
     }
 }

@@ -217,6 +222,7 @@ public:
     area.swap(new_area);
     district.swap(new_district);
     continent.swap(new_continent);
+    top_continent.swap(new_top_continent);
     populations.swap(new_populations);
     depths.swap(new_depths);
 }
@@ -268,6 +274,13 @@ public:
     return continent[region];
 }

+RegionID toTopContinent(RegionID region) const
+{
+    if (static_cast<size_t>(region) >= top_continent.size())
+        return 0;
+    return top_continent[region];
+}
+
 RegionID toParent(RegionID region) const
 {
     if (static_cast<size_t>(region) >= parents.size())
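Removing the continue/break means the walk no longer stops at the first continent: new_continent keeps the first continent found while ascending, while new_top_continent is overwritten at every continent and ends up holding the last (topmost) one. A minimal sketch of that ascent, assuming a parents array indexed by region id and output vectors that are pre-sized and zero-filled (simplified standalone version):

    #include <cstdint>
    #include <vector>

    using RegionID = uint32_t;

    // For each region, walk towards the root recording the first and the last continent seen.
    void fillContinents(const std::vector<RegionID> & parents, const std::vector<bool> & is_continent,
                        std::vector<RegionID> & continent, std::vector<RegionID> & top_continent)
    {
        for (RegionID i = 1; i < parents.size(); ++i)
        {
            for (RegionID current = i; current != 0; current = parents[current])
            {
                if (is_continent[current])
                {
                    if (!continent[i])
                        continent[i] = current;   // first continent on the way up
                    top_continent[i] = current;   // keeps being overwritten; ends as the topmost
                }
            }
        }
    }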
@@ -113,9 +113,9 @@ private:
     }


-    mysqlxx::DateTime getLastModification() const
+    LocalDateTime getLastModification() const
     {
-        mysqlxx::DateTime update_time{std::time(nullptr)};
+        LocalDateTime update_time{std::time(nullptr)};

         if (dont_check_update_time)
             return update_time;
@@ -417,7 +417,7 @@ private:
     Block sample_block;
     mutable mysqlxx::PoolWithFailover pool;
     const std::string load_all_query;
-    mysqlxx::DateTime last_modification;
+    LocalDateTime last_modification;
 };

 }
@@ -347,7 +347,6 @@ template <typename FieldType> struct FormatImpl<DataTypeEnum<FieldType>>
 {
     static void execute(const FieldType x, WriteBuffer & wb, const DataTypeEnum<FieldType> & type)
     {
-        /// @todo should we escape the string here? Presumably no as it will be escaped twice otherwise
         writeString(type.getNameForValue(x), wb);
     }
 };
@@ -1724,23 +1723,6 @@ class FunctionCast final : public IFunction
     using NameValuePair = std::pair<std::string, ValueType>;
     using EnumValues = std::vector<NameValuePair>;

-    // EnumValues value_intersection;
-    // std::set_intersection(std::begin(from_values), std::end(from_values),
-    //     std::begin(to_values), std::end(to_values), std::back_inserter(value_intersection),
-    //     [] (auto && from, auto && to) { return from.second < to.second; });
-    //
-    // for (const auto & name_value : value_intersection)
-    // {
-    //     const auto & old_name = name_value.first;
-    //     const auto & new_name = to_type->getNameForValue(name_value.second).toString();
-    //     if (old_name != new_name)
-    //         throw Exception{
-    //             "Enum conversion changes name for value " + toString(name_value.second) +
-    //             " from '" + old_name + "' to '" + new_name + "'",
-    //             ErrorCodes::CANNOT_CONVERT_TYPE
-    //         };
-    // }
-
     EnumValues name_intersection;
     std::set_intersection(std::begin(from_values), std::end(from_values),
         std::begin(to_values), std::end(to_values), std::back_inserter(name_intersection),
@@ -1762,7 +1744,8 @@ class FunctionCast final : public IFunction
     template <typename ColumnStringType, typename EnumType>
     auto createStringToEnumWrapper()
     {
-        return [] (Block & block, const ColumnNumbers & arguments, const size_t result) {
+        return [] (Block & block, const ColumnNumbers & arguments, const size_t result)
+        {
             const auto first_col = block.getByPosition(arguments.front()).column.get();

             auto & col_with_type_and_name = block.getByPosition(result);
@@ -1773,12 +1756,12 @@ class FunctionCast final : public IFunction
     {
         const auto size = col->size();

-        const auto res = result_type.createColumn();
-        auto & out_data = static_cast<typename EnumType::ColumnType &>(*result_col).getData();
+        auto res = result_type.createColumn();
+        auto & out_data = static_cast<typename EnumType::ColumnType &>(*res).getData();
         out_data.resize(size);

         for (const auto i : ext::range(0, size))
-            out_data[i] = result_type.getValue(col->getDataAt(i).toString());
+            out_data[i] = result_type.getValue(col->getDataAt(i));

         result_col = res;
     }
@@ -38,7 +38,7 @@ namespace ErrorCodes
  * - dictionaries of regions, operating systems, search engines.
  *
  * Ascend the tree to a certain level.
- *  regionToCity, regionToArea, regionToCountry,
+ *  regionToCity, regionToArea, regionToCountry, ...
  *  OSToRoot,
  *  SEToRoot,
  *
@@ -91,6 +91,11 @@ struct RegionToContinentImpl
     static UInt32 apply(UInt32 x, const RegionsHierarchy & hierarchy) { return hierarchy.toContinent(x); }
 };

+struct RegionToTopContinentImpl
+{
+    static UInt32 apply(UInt32 x, const RegionsHierarchy & hierarchy) { return hierarchy.toTopContinent(x); }
+};
+
 struct RegionToPopulationImpl
 {
     static UInt32 apply(UInt32 x, const RegionsHierarchy & hierarchy) { return hierarchy.getPopulation(x); }
@@ -513,6 +518,7 @@ struct NameRegionToArea { static constexpr auto name = "regionToArea"; };
 struct NameRegionToDistrict { static constexpr auto name = "regionToDistrict"; };
 struct NameRegionToCountry { static constexpr auto name = "regionToCountry"; };
 struct NameRegionToContinent { static constexpr auto name = "regionToContinent"; };
+struct NameRegionToTopContinent { static constexpr auto name = "regionToTopContinent"; };
 struct NameRegionToPopulation { static constexpr auto name = "regionToPopulation"; };
 struct NameOSToRoot { static constexpr auto name = "OSToRoot"; };
 struct NameSEToRoot { static constexpr auto name = "SEToRoot"; };
@@ -571,6 +577,15 @@ struct FunctionRegionToContinent :
     }
 };

+struct FunctionRegionToTopContinent :
+    public FunctionTransformWithDictionary<UInt32, RegionToTopContinentImpl, RegionsHierarchyGetter, NameRegionToTopContinent>
+{
+    static IFunction * create(const Context & context)
+    {
+        return new base_type{context.getDictionaries().getRegionsHierarchies()};
+    }
+};
+
 struct FunctionRegionToPopulation :
     public FunctionTransformWithDictionary<UInt32, RegionToPopulationImpl, RegionsHierarchyGetter, NameRegionToPopulation>
 {
@@ -9,8 +9,8 @@
 #include <common/Common.h>
 #include <common/DateLUT.h>

-#include <mysqlxx/Date.h>
-#include <mysqlxx/DateTime.h>
+#include <common/LocalDate.h>
+#include <common/LocalDateTime.h>

 #include <DB/Core/Types.h>
 #include <DB/Common/Exception.h>
@@ -429,7 +429,7 @@ inline void readDateText(DayNum_t & date, ReadBuffer & buf)
     date = DateLUT::instance().makeDayNum(year, month, day);
 }

-inline void readDateText(mysqlxx::Date & date, ReadBuffer & buf)
+inline void readDateText(LocalDate & date, ReadBuffer & buf)
 {
     char s[10];
     size_t size = buf.read(s, 10);
@@ -491,7 +491,7 @@ inline void readDateTimeText(time_t & datetime, ReadBuffer & buf)
     readDateTimeTextFallback(datetime, buf);
 }

-inline void readDateTimeText(mysqlxx::DateTime & datetime, ReadBuffer & buf)
+inline void readDateTimeText(LocalDateTime & datetime, ReadBuffer & buf)
 {
     char s[19];
     size_t size = buf.read(s, 19);
@@ -527,8 +527,8 @@ inline void readBinary(bool & x, ReadBuffer & buf) { readPODBinary(x, buf); }
 inline void readBinary(uint128 & x, ReadBuffer & buf) { readPODBinary(x, buf); }

 inline void readBinary(VisitID_t & x, ReadBuffer & buf) { readPODBinary(x, buf); }
-inline void readBinary(mysqlxx::Date & x, ReadBuffer & buf) { readPODBinary(x, buf); }
-inline void readBinary(mysqlxx::DateTime & x, ReadBuffer & buf) { readPODBinary(x, buf); }
+inline void readBinary(LocalDate & x, ReadBuffer & buf) { readPODBinary(x, buf); }
+inline void readBinary(LocalDateTime & x, ReadBuffer & buf) { readPODBinary(x, buf); }


 /// Generic methods for reading a value in text form from the tab-separated format.
@@ -546,8 +546,8 @@ inline void readText(String & x, ReadBuffer & buf) { readEscapedString(x, buf); }
 inline void readText(bool & x, ReadBuffer & buf) { readBoolText(x, buf); }

 inline void readText(VisitID_t & x, ReadBuffer & buf) { readIntText(x, buf); }
-inline void readText(mysqlxx::Date & x, ReadBuffer & buf) { readDateText(x, buf); }
-inline void readText(mysqlxx::DateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); }
+inline void readText(LocalDate & x, ReadBuffer & buf) { readDateText(x, buf); }
+inline void readText(LocalDateTime & x, ReadBuffer & buf) { readDateTimeText(x, buf); }


 /// Generic methods for reading a value in text form, quoted when necessary.
@@ -566,14 +566,14 @@ inline void readQuoted(bool & x, ReadBuffer & buf) { readBoolText(x, buf); }

 inline void readQuoted(VisitID_t & x, ReadBuffer & buf) { readIntText(x, buf); }

-inline void readQuoted(mysqlxx::Date & x, ReadBuffer & buf)
+inline void readQuoted(LocalDate & x, ReadBuffer & buf)
 {
     assertString("'", buf);
     readDateText(x, buf);
     assertString("'", buf);
 }

-inline void readQuoted(mysqlxx::DateTime & x, ReadBuffer & buf)
+inline void readQuoted(LocalDateTime & x, ReadBuffer & buf)
 {
     assertString("'", buf);
     readDateTimeText(x, buf);
@@ -597,14 +597,14 @@ inline void readDoubleQuoted(bool & x, ReadBuffer & buf) { readBoolText(x, buf); }

 inline void readDoubleQuoted(VisitID_t & x, ReadBuffer & buf) { readIntText(x, buf); }

-inline void readDoubleQuoted(mysqlxx::Date & x, ReadBuffer & buf)
+inline void readDoubleQuoted(LocalDate & x, ReadBuffer & buf)
 {
     assertString("\"", buf);
     readDateText(x, buf);
     assertString("\"", buf);
 }

-inline void readDoubleQuoted(mysqlxx::DateTime & x, ReadBuffer & buf)
+inline void readDoubleQuoted(LocalDateTime & x, ReadBuffer & buf)
 {
     assertString("\"", buf);
     readDateTimeText(x, buf);
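readDateText for LocalDate parses the fixed "YYYY-MM-DD" layout straight from the buffer, ten bytes at a time. A minimal sketch of that fixed-width parse, assuming the ten bytes are already read and validated (simplified standalone version):

    #include <cstdint>

    struct Date { uint16_t year; uint8_t month, day; };

    // s points at "YYYY-MM-DD"; digits are converted positionally, no locale involved.
    Date parseDate10(const char * s)
    {
        Date d;
        d.year  = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0');
        d.month = (s[5] - '0') * 10 + (s[6] - '0');
        d.day   = (s[8] - '0') * 10 + (s[9] - '0');
        return d;
    }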
@@ -393,7 +393,7 @@ inline void writeDateText(DayNum_t date, WriteBuffer & buf)
     buf.write(s, 10);
 }

-inline void writeDateText(mysqlxx::Date date, WriteBuffer & buf)
+inline void writeDateText(LocalDate date, WriteBuffer & buf)
 {
     char s[10] = {'0', '0', '0', '0', '-', '0', '0', '-', '0', '0'};

@@ -449,7 +449,7 @@ inline void writeDateTimeText(time_t datetime, WriteBuffer & buf)
 }

 template <char date_delimeter = '-', char time_delimeter = ':'>
-inline void writeDateTimeText(mysqlxx::DateTime datetime, WriteBuffer & buf)
+inline void writeDateTimeText(LocalDateTime datetime, WriteBuffer & buf)
 {
     char s[19] = {'0', '0', '0', '0', date_delimeter, '0', '0', date_delimeter, '0', '0', ' ', '0', '0', time_delimeter, '0', '0', time_delimeter, '0', '0'};

@@ -508,8 +508,8 @@ inline void writeBinary(const bool & x, WriteBuffer & buf) { writePODBinary(x, buf); }
 inline void writeBinary(const uint128 & x, WriteBuffer & buf) { writePODBinary(x, buf); }

 inline void writeBinary(const VisitID_t & x, WriteBuffer & buf) { writePODBinary(static_cast<const UInt64 &>(x), buf); }
-inline void writeBinary(const mysqlxx::Date & x, WriteBuffer & buf) { writePODBinary(x, buf); }
-inline void writeBinary(const mysqlxx::DateTime & x, WriteBuffer & buf) { writePODBinary(x, buf); }
+inline void writeBinary(const LocalDate & x, WriteBuffer & buf) { writePODBinary(x, buf); }
+inline void writeBinary(const LocalDateTime & x, WriteBuffer & buf) { writePODBinary(x, buf); }


 /// Methods for writing a value in text form for the tab-separated format.
@@ -531,8 +531,8 @@ inline void writeText(const char * x, WriteBuffer & buf) { writeEscapedString(x, buf); }
 inline void writeText(const char * x, size_t size, WriteBuffer & buf) { writeEscapedString(x, size, buf); }

 inline void writeText(const VisitID_t & x, WriteBuffer & buf) { writeIntText(static_cast<const UInt64 &>(x), buf); }
-inline void writeText(const mysqlxx::Date & x, WriteBuffer & buf) { writeDateText(x, buf); }
-inline void writeText(const mysqlxx::DateTime & x, WriteBuffer & buf) { writeDateTimeText(x, buf); }
+inline void writeText(const LocalDate & x, WriteBuffer & buf) { writeDateText(x, buf); }
+inline void writeText(const LocalDateTime & x, WriteBuffer & buf) { writeDateTimeText(x, buf); }

 template<typename T>
 inline void writeText(const mysqlxx::Null<T> & x, WriteBuffer & buf)
@@ -563,14 +563,14 @@ inline void writeQuoted(const VisitID_t & x, WriteBuffer & buf)
     writeIntText(static_cast<const UInt64 &>(x), buf);
 }

-inline void writeQuoted(const mysqlxx::Date & x, WriteBuffer & buf)
+inline void writeQuoted(const LocalDate & x, WriteBuffer & buf)
 {
     writeChar('\'', buf);
     writeDateText(x, buf);
     writeChar('\'', buf);
 }

-inline void writeQuoted(const mysqlxx::DateTime & x, WriteBuffer & buf)
+inline void writeQuoted(const LocalDateTime & x, WriteBuffer & buf)
 {
     writeChar('\'', buf);
     writeDateTimeText(x, buf);
@@ -606,14 +606,14 @@ inline void writeDoubleQuoted(const VisitID_t & x, WriteBuffer & buf)
     writeIntText(static_cast<const UInt64 &>(x), buf);
 }

-inline void writeDoubleQuoted(const mysqlxx::Date & x, WriteBuffer & buf)
+inline void writeDoubleQuoted(const LocalDate & x, WriteBuffer & buf)
 {
     writeChar('"', buf);
     writeDateText(x, buf);
     writeChar('"', buf);
 }

-inline void writeDoubleQuoted(const mysqlxx::DateTime & x, WriteBuffer & buf)
+inline void writeDoubleQuoted(const LocalDateTime & x, WriteBuffer & buf)
 {
     writeChar('"', buf);
     writeDateTimeText(x, buf);
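The pre-filled buffer trick above writes a date by overwriting only the digit positions of a "0000-00-00" template and emitting all ten bytes at once. A minimal sketch of the digit fill, assuming year/month/day values are already extracted (simplified standalone version):

    // Overwrite digit slots in place; the '-' separators are already in the template.
    void formatDate10(char s[10], int year, int month, int day)
    {
        s[0] = '0' + year / 1000;    s[1] = '0' + year / 100 % 10;
        s[2] = '0' + year / 10 % 10; s[3] = '0' + year % 10;
        s[5] = '0' + month / 10;     s[6] = '0' + month % 10;
        s[8] = '0' + day / 10;       s[9] = '0' + day % 10;
    }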
@@ -141,13 +141,7 @@ public:
     if (select_query->attributes & IAST::IsPreprocessedForInJoinSubqueries)
         return;

-    if (select_query->table.isNull())
-    {
-        select_query->setAttributes(IAST::IsPreprocessedForInJoinSubqueries);
-        return;
-    }
-
-    if (typeid_cast<const ASTFunction *>(select_query->table.get()) != nullptr)
+    if (!isQueryFromTable(*select_query))
     {
         select_query->setAttributes(IAST::IsPreprocessedForInJoinSubqueries);
         return;
@@ -195,8 +189,8 @@ public:
     else if ((node != static_cast<IAST *>(select_query))
         && ((sub_select_query = typeid_cast<ASTSelectQuery *>(node)) != nullptr))
     {
-        if (isQueryFromTable(*sub_select_query))
-            ++node->select_query_depth;
+        ++node->select_query_depth;

         if (sub_select_query->enclosing_in_or_join != nullptr)
         {
             /// Found a subquery inside an IN or JOIN section.
@@ -204,18 +198,21 @@ public:
             }
         }

-        for (auto & child : node->children)
+        if (!(node->attributes & IAST::IsPreprocessedForInJoinSubqueries))
         {
-            if (!(child->attributes & IAST::IsPreprocessedForInJoinSubqueries))
+            for (auto & child : node->children)
             {
-                auto n = child.get();
-                n->enclosing_in_or_join = node->enclosing_in_or_join;
-                n->select_query_depth = node->select_query_depth;
-                to_preprocess.push_back(n);
+                if (!(child->attributes & IAST::IsPreprocessedForInJoinSubqueries))
+                {
+                    auto n = child.get();
+                    n->enclosing_in_or_join = node->enclosing_in_or_join;
+                    n->select_query_depth = node->select_query_depth;
+                    to_preprocess.push_back(n);
+                }
             }
+        }

         node->attributes |= IAST::IsPreprocessedForInJoinSubqueries;
     }
 }
@@ -228,7 +225,10 @@ private:
     /// If a subquery inside an IN or JOIN section is a direct child of the
     /// main query and the GLOBAL keyword is specified, the subquery is skipped.
     if ((sub_select_query.select_query_depth == 1) && is_global)
+    {
+        sub_select_query.attributes |= IAST::IsPreprocessedForInJoinSubqueries;
         return;
+    }

     auto subquery_table_storage = getDistributedSubqueryStorage(sub_select_query);
     if (!subquery_table_storage)
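The reworked loop only enqueues children of nodes that have not been preprocessed yet, and a skipped GLOBAL subquery is now explicitly marked so it is never revisited. A minimal sketch of the marking traversal, assuming a simple node with an attribute bitmask (simplified standalone version, not the real AST):

    #include <cstdint>
    #include <vector>

    struct Node
    {
        uint32_t attributes = 0;
        std::vector<Node *> children;
    };

    constexpr uint32_t IsPreprocessed = 1U << 0;

    // Iterative pre-order walk; marking nodes guarantees each subtree is processed once.
    void preprocess(Node * root)
    {
        std::vector<Node *> to_preprocess{root};
        while (!to_preprocess.empty())
        {
            Node * node = to_preprocess.back();
            to_preprocess.pop_back();

            if (!(node->attributes & IsPreprocessed))
            {
                for (Node * child : node->children)
                    if (!(child->attributes & IsPreprocessed))
                        to_preprocess.push_back(child);

                node->attributes |= IsPreprocessed;
            }
        }
    }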
@@ -62,9 +62,9 @@ public:
     /// Does the IN/NOT IN/JOIN section have the GLOBAL attribute?
     static constexpr Attributes IsGlobal = 1U << 5;

-    /** The depth of a node N is the depth of the query that N belongs to.
-      * The depth of a query is then defined as follows:
-      * - if a query Q is the main one, then select_query_depth(Q) = 0
+    /** The depth of a node N is the depth of the SELECT query that N belongs to.
+      * The depth of a SELECT query is then defined as follows:
+      * - if a query Q is the root one, then select_query_depth(Q) = 0
       * - if a query S is a direct subquery of a query R,
       *   then select_query_depth(S) = select_query_depth(R) + 1
       */
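A concrete reading of the definition: in SELECT a FROM t WHERE x IN (SELECT b FROM u WHERE y IN (SELECT c FROM v)), the outer query has depth 0, the query over u depth 1, and the query over v depth 2. A minimal sketch of the recursion, assuming each query node points at its enclosing query (hypothetical field name, for illustration only):

    struct SelectQuery
    {
        const SelectQuery * enclosing = nullptr;    // null for the root query
    };

    // Depth counts how many enclosing SELECTs wrap this one.
    int selectQueryDepth(const SelectQuery & q)
    {
        return q.enclosing ? selectQueryDepth(*q.enclosing) + 1 : 0;
    }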
@@ -278,7 +278,7 @@ public:
      * Taking this mutex means that we want to lock columns_lock for reading, with the intention
      * of later, without releasing the lock, locking it for writing.
      */
-    mutable Poco::FastMutex alter_mutex;
+    mutable std::mutex alter_mutex;

     ~DataPart()
     {
@@ -565,7 +565,7 @@ public:

     /// For resharding.
     using MutableDataParts = std::set<MutableDataPartPtr, DataPartPtrLess>;
-    using PerShardDataParts = std::unordered_map<size_t, MutableDataParts>;
+    using PerShardDataParts = std::unordered_map<size_t, MutableDataPartPtr>;

     /// Some operations on the set of parts may return such an object.
     /// If neither commit nor rollback was called, the destructor rolls the operation back.
@@ -644,7 +644,7 @@ public:
     }

     DataPartPtr data_part;
-    Poco::ScopedLockWithUnlock<Poco::FastMutex> alter_lock;
+    std::unique_lock<std::mutex> alter_lock;

     DataPart::Checksums new_checksums;
     NamesAndTypesList new_columns;
@@ -752,6 +752,15 @@ public:
      */
     size_t getMaxPartsCountForMonth() const;

+    /** The minimum block number in the specified month.
+      * Also returns a bool: whether there is at least one part.
+      */
+    std::pair<Int64, bool> getMinBlockNumberForMonth(DayNum_t month) const;
+
+    /** Whether the specified block number is present in any part of the specified month.
+      */
+    bool hasBlockNumberInMonth(Int64 block_number, DayNum_t month) const;
+
     /** If the table has too many active parts, sleeps for a while to give them a chance to merge.
      * If until is passed, wake up earlier if the event has occurred.
      */
@@ -792,7 +801,7 @@ public:
      */
     void renameAndDetachPart(const DataPartPtr & part, const String & prefix = "", bool restore_covered = false, bool move_to_detached = true);

-    /** Removes a part from the list of parts (including all_data_parts) but does not movve the directory.
+    /** Removes a part from the list of parts (including all_data_parts) but does not move the directory.
      */
     void detachPartInPlace(const DataPartPtr & part);

@@ -860,7 +869,7 @@ public:

     size_t getColumnSize(const std::string & name) const
     {
-        Poco::ScopedLock<Poco::FastMutex> lock{data_parts_mutex};
+        std::lock_guard<std::mutex> lock{data_parts_mutex};

         const auto it = column_sizes.find(name);
         return it == std::end(column_sizes) ? 0 : it->second;
@@ -869,7 +878,7 @@ public:
     using ColumnSizes = std::unordered_map<std::string, size_t>;
     ColumnSizes getColumnSizes() const
     {
-        Poco::ScopedLock<Poco::FastMutex> lock{data_parts_mutex};
+        std::lock_guard<std::mutex> lock{data_parts_mutex};
         return column_sizes;
     }

@@ -878,6 +887,8 @@ public:
     static String getMonthName(DayNum_t month);
     static DayNum_t getMonthDayNum(const Field & partition);
     static DayNum_t getMonthFromName(const String & month_name);
+    /// Get the month from a part name or a sufficient prefix of it.
+    static DayNum_t getMonthFromPartPrefix(const String & part_prefix);

     Context & context;
     const String date_column_name;
@@ -916,14 +927,14 @@ private:

     /** The current set of data parts. */
     DataParts data_parts;
-    mutable Poco::FastMutex data_parts_mutex;
+    mutable std::mutex data_parts_mutex;

     /** The set of all data parts, including those already merged into larger ones but not yet deleted. It is usually small (tens of elements).
      * There are references to a part from here, from the list of current parts, and from every read thread that is currently using it.
      * That is, if the reference count equals 1, the part is not current and not in use right now, and it can be deleted.
      */
     DataParts all_data_parts;
-    mutable Poco::FastMutex all_data_parts_mutex;
+    mutable std::mutex all_data_parts_mutex;

     /** For each shard, the set of sharded parts.
      */
@@ -940,7 +951,7 @@ private:
     void createConvertExpression(const DataPartPtr & part, const NamesAndTypesList & old_columns, const NamesAndTypesList & new_columns,
         ExpressionActionsPtr & out_expression, NameToNameMap & out_rename_map, bool & out_force_update_metadata);

-    /// Calculates compressed column sizes for the current state of data_parts
+    /// Calculates compressed column sizes for the current state of data_parts. Called under a lock.
     void calculateColumnSizes();
     /// Adds or subtracts the contribution of part to the compressed column sizes
     void addPartContributionToColumnSizes(const DataPartPtr & part);
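Several Poco synchronization types are swapped for their standard-library equivalents here: Poco::FastMutex becomes std::mutex, Poco::ScopedLock becomes std::lock_guard, and Poco::ScopedLockWithUnlock becomes std::unique_lock (which can be unlocked early and moved). A minimal sketch of the two idioms, assuming nothing beyond the standard library:

    #include <mutex>

    std::mutex m;
    int shared_value = 0;

    int readValue()
    {
        std::lock_guard<std::mutex> lock{m};    // locked for the whole scope
        return shared_value;
    }

    void updateValue()
    {
        std::unique_lock<std::mutex> lock{m};   // can be released before scope end
        ++shared_value;
        lock.unlock();                          // the equivalent of ScopedLockWithUnlock's early unlock
        // ... work that does not need the mutex ...
    }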
@@ -40,19 +40,22 @@ public:
         bool only_small,
         const AllowedMergingPredicate & can_merge);

-    /** Select all parts belonging to one partition.
-      */
-    MergeTreeData::DataPartsVector selectAllPartsFromPartition(DayNum_t partition);
-
     /** Merges parts.
      * If reservation != nullptr, every now and then reduces the size of the reserved space
      * approximately proportionally to the amount of data already written.
      */
-    MergeTreeData::MutableDataPartPtr mergeParts(
+    MergeTreeData::DataPartPtr mergeParts(
         const MergeTreeData::DataPartsVector & parts, const String & merged_name, MergeListEntry & merge_entry,
         size_t aio_threshold, MergeTreeData::Transaction * out_transaction = nullptr,
         DiskSpaceMonitor::Reservation * disk_reservation = nullptr);

+    /** Reshards the given partition.
+      */
+    MergeTreeData::PerShardDataParts reshardPartition(
+        const ReshardingJob & job,
+        size_t aio_threshold,
+        DiskSpaceMonitor::Reservation * disk_reservation = nullptr);
+
     /// An approximate amount of disk space needed for a merge. With a margin.
     static size_t estimateDiskSpaceForMerge(const MergeTreeData::DataPartsVector & parts);

@@ -63,6 +66,13 @@ public:
     void uncancel() { cancelled = false; }
     bool isCancelled() const { return cancelled; }

+    void abortIfRequested() const;
+
+private:
+    /** Select all parts belonging to one partition.
+      */
+    MergeTreeData::DataPartsVector selectAllPartsFromPartition(DayNum_t partition);
+
 private:
     MergeTreeData & data;

@@ -42,12 +42,6 @@ public:
      */
     ShardedBlocksWithDateIntervals shardBlock(const Block & block);

-    /** All rows must belong to one month.
-      * temp_index is the left and right value for the new part. It can be changed later on rename.
-      * Returns a temporary part with a name starting with tmp_.
-      */
-    MergeTreeData::MutableDataPartPtr writeTempPart(ShardedBlockWithDateInterval & sharded_block_with_dates, Int64 temp_index);
-
 private:
     std::vector<IColumn::Filter> createFilters(Block block);

@@ -282,6 +282,11 @@ public:
         }
     }

+    std::string getPartPath() const
+    {
+        return part_path;
+    }
+
     /// If the data is sorted in advance.
     void write(const Block & block) override
     {
@@ -95,6 +95,22 @@ private:
     /// Update insert times in ZooKeeper.
     void updateTimesInZooKeeper(zkutil::ZooKeeperPtr zookeeper, bool min_unprocessed_insert_time_changed, bool max_processed_insert_time_changed);


+    /// Marks a queue element as currently executing.
+    class CurrentlyExecuting
+    {
+    private:
+        ReplicatedMergeTreeQueue::LogEntryPtr entry;
+        ReplicatedMergeTreeQueue & queue;
+
+        friend class ReplicatedMergeTreeQueue;
+
+        /// Created only in the selectEntryToProcess function. Called under the mutex.
+        CurrentlyExecuting(ReplicatedMergeTreeQueue::LogEntryPtr & entry, ReplicatedMergeTreeQueue & queue);
+    public:
+        ~CurrentlyExecuting();
+    };
+
 public:
     ReplicatedMergeTreeQueue() {}

@@ -132,7 +148,8 @@ public:
     /** Select the next action to process.
      * merger is used only to check whether merges are paused.
      */
-    LogEntryPtr selectEntryToProcess(MergeTreeDataMerger & merger);
+    using SelectedEntry = std::pair<ReplicatedMergeTreeQueue::LogEntryPtr, std::unique_ptr<CurrentlyExecuting>>;
+    SelectedEntry selectEntryToProcess(MergeTreeDataMerger & merger);

     /** Execute the function func to process the action.
      * Meanwhile, for the duration of execution, mark the queue element as currently executing
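CurrentlyExecuting is an RAII marker: constructing it flags the entry as executing, destroying it clears the flag, and returning it inside SelectedEntry ties the flag's lifetime to the caller's use of the entry. A minimal sketch of the pattern, assuming a much simplified queue (standalone illustration, not the real class):

    #include <memory>
    #include <utility>

    struct Entry { bool currently_executing = false; };

    class Queue
    {
    public:
        class CurrentlyExecuting
        {
            Entry & entry;
            friend class Queue;
            explicit CurrentlyExecuting(Entry & entry_) : entry(entry_) { entry.currently_executing = true; }
        public:
            ~CurrentlyExecuting() { entry.currently_executing = false; }
        };

        using SelectedEntry = std::pair<Entry *, std::unique_ptr<CurrentlyExecuting>>;

        // The flag is cleared automatically when the caller drops the SelectedEntry.
        SelectedEntry select(Entry & e)
        {
            return {&e, std::unique_ptr<CurrentlyExecuting>(new CurrentlyExecuting(e))};
        }
    };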
@@ -37,7 +37,7 @@ public:
     Client(const Client &) = delete;
     Client & operator=(const Client &) = delete;
     bool send(const InterserverIOEndpointLocation & to_location, const InterserverIOEndpointLocation & from_location,
-        const std::vector<std::string> & parts, size_t shard_no);
+        const std::string & part, size_t shard_no);
     void cancel() { is_cancelled = true; }

 private:
@@ -498,35 +498,4 @@ private:
     PartitionToMergeLock partition_to_merge_lock;
 };

-/** A recursive lock that protects the given partition from the merge task.
-  */
-class ScopedPartitionMergeLock final
-{
-public:
-    ScopedPartitionMergeLock(StorageReplicatedMergeTree & storage_, const std::string & partition_name_)
-        : storage(storage_), partition_name(partition_name_)
-    {
-        fake_part_name = storage.acquirePartitionMergeLock(partition_name);
-    }
-
-    ScopedPartitionMergeLock(const ScopedPartitionMergeLock &) = delete;
-    ScopedPartitionMergeLock & operator=(const ScopedPartitionMergeLock &) = delete;
-
-    /// Get the unique name of the lock.
-    std::string getId() const
-    {
-        return fake_part_name;
-    }
-
-    ~ScopedPartitionMergeLock()
-    {
-        storage.releasePartitionMergeLock(partition_name);
-    }
-
-private:
-    StorageReplicatedMergeTree & storage;
-    const std::string partition_name;
-    std::string fake_part_name;
-};
-
 }
@@ -263,7 +263,7 @@ private:
     if (current_time % 3 != 0)
         return false;

-    mysqlxx::Date now(current_time);
+    LocalDate now(current_time);
     return (now.month() == 12 && now.day() >= 20)
         || (now.month() == 1 && now.day() <= 5);
 }
@@ -500,7 +500,6 @@ private:
 {
     /** If the query parameter is not specified, the query will be read from stdin.
      * In that case, the query is read not in a streaming fashion (entirely into memory).
      * Only one query in stdin is supported.
      */
-
     ReadBufferFromFileDescriptor in(STDIN_FILENO);
@@ -323,6 +323,7 @@ namespace ErrorCodes
     extern const int INVALID_SHARD_WEIGHT = 317;
     extern const int INVALID_CONFIG_PARAMETER = 318;
     extern const int UNKNOWN_STATUS_OF_INSERT = 319;
+    extern const int DUPLICATE_SHARD_PATHS = 320;

     extern const int KEEPER_EXCEPTION = 999;
     extern const int POCO_EXCEPTION = 1000;
@@ -19,9 +19,9 @@ namespace DB

 NativeBlockOutputStream::NativeBlockOutputStream(
     WriteBuffer & ostr_, UInt64 client_revision_,
-    WriteBuffer * index_ostr_)
+    WriteBuffer * index_ostr_, size_t initial_size_of_file_)
     : ostr(ostr_), client_revision(client_revision_),
-    index_ostr(index_ostr_)
+    index_ostr(index_ostr_), initial_size_of_file(initial_size_of_file_)
 {
     if (index_ostr)
     {
@@ -112,7 +112,7 @@ void NativeBlockOutputStream::write(const Block & block)
     if (index_ostr)
     {
         ostr_concrete->next();    /// Finish the compressed block.
-        mark.offset_in_compressed_file = ostr_concrete->getCompressedBytes();
+        mark.offset_in_compressed_file = initial_size_of_file + ostr_concrete->getCompressedBytes();
        mark.offset_in_decompressed_block = ostr_concrete->getRemainingBytes();
     }
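The fix makes index offsets absolute within the data file: getCompressedBytes() counts only bytes written by this stream, so when appending to a file that already holds, say, 1024 bytes, a mark written after 100 more compressed bytes must record 1024 + 100 = 1124, not 100. A minimal sketch of the bookkeeping, assuming a per-session byte counter (simplified standalone version):

    #include <cstddef>

    struct IndexMark { size_t offset_in_compressed_file; size_t offset_in_decompressed_block; };

    // initial_size_of_file is the size of the file before this append session started.
    IndexMark makeMark(size_t initial_size_of_file, size_t compressed_bytes_this_session, size_t remaining_bytes)
    {
        return {initial_size_of_file + compressed_bytes_this_session, remaining_bytes};
    }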
@@ -1,4 +1,4 @@
-#include <mysqlxx/Value.h>
+#include <DB/IO/ReadHelpers.h>

 #include <DB/DataTypes/DataTypesNumberFixed.h>
 #include <DB/DataTypes/DataTypeDate.h>
@@ -94,7 +94,7 @@ DataTypePtr DataTypeFactory::get(const String & name) const

     Poco::RegularExpression::MatchVec matches;
     if (fixed_string_regexp.match(name, 0, matches) && matches.size() == 2)
-        return new DataTypeFixedString(mysqlxx::Value(name.data() + matches[1].offset, matches[1].length, nullptr).getUInt());
+        return new DataTypeFixedString(parse<size_t>(name.data() + matches[1].offset, matches[1].length));

     if (nested_regexp.match(name, 0, matches) && matches.size() == 3)
     {
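This drops the last mysqlxx dependency in the factory: instead of wrapping the matched substring in a mysqlxx::Value just to call getUInt(), the number is parsed directly from the (pointer, length) range. A minimal sketch of such a bounded unsigned parse (standalone illustration; the real parse<T> lives in DB/IO/ReadHelpers.h):

    #include <cstddef>

    size_t parseUnsigned(const char * data, size_t length)
    {
        size_t res = 0;
        for (size_t i = 0; i < length; ++i)
            res = res * 10 + static_cast<size_t>(data[i] - '0');    // digits only, as the regexp guarantees
        return res;
    }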
@@ -11,6 +11,7 @@ void registerFunctionsDictionaries(FunctionFactory & factory)
     factory.registerFunction<FunctionRegionToDistrict>();
     factory.registerFunction<FunctionRegionToCountry>();
     factory.registerFunction<FunctionRegionToContinent>();
+    factory.registerFunction<FunctionRegionToTopContinent>();
     factory.registerFunction<FunctionRegionToPopulation>();
     factory.registerFunction<FunctionOSToRoot>();
     factory.registerFunction<FunctionSEToRoot>();
@@ -145,34 +145,24 @@ namespace VisibleWidth
     const auto & in = col->getData();
     auto & out = res->getData();

-    String str;
-
     for (const auto & idx_num : ext::enumerate(in))
     {
-        /// escape name to calculate correct length
-        {
-            WriteBufferFromString out{str};
-            writeEscapedString(type->getNameForValue(idx_num.second), out);
-        }
-
-        out[idx_num.first] = str.size();
+        StringRef name = type->getNameForValue(idx_num.second);
+        out[idx_num.first] = stringWidth(
+            reinterpret_cast<const UInt8 *>(name.data),
+            reinterpret_cast<const UInt8 *>(name.data) + name.size);
     }

     return true;
 }
 else if (const auto col = typeid_cast<const typename DataTypeEnum::ConstColumnType *>(column.get()))
 {
-    String str;
-
-    /// escape name to calculate correct length
-    {
-        WriteBufferFromString out{str};
-        writeEscapedString(type->getNameForValue(col->getData()), out);
-    }
+    StringRef name = type->getNameForValue(col->getData());

     block.getByPosition(result).column = new ColumnConstUInt64{
-        col->size(), str.size()
-    };
+        col->size(), stringWidth(
+            reinterpret_cast<const UInt8 *>(name.data),
+            reinterpret_cast<const UInt8 *>(name.data) + name.size)};

     return true;
 }
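The width is now computed by stringWidth directly over the raw enum name instead of materializing an escaped copy and taking its byte size, which avoids a per-row String allocation and measures display width rather than escaped byte length. A rough sketch of the call shape used above (the real stringWidth also accounts for escaping and multi-byte characters; this simplified placeholder just counts bytes):

    #include <cstddef>
    #include <cstdint>

    size_t stringWidthSimple(const uint8_t * begin, const uint8_t * end)
    {
        return static_cast<size_t>(end - begin);    // placeholder metric; the real function is smarter
    }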
@@ -13,8 +13,8 @@ int main(int argc, char ** argv)
         << DB::quote << "Hello, world!" << '\n'
         << DB::double_quote << "Hello, world!" << '\n'
         << DB::binary << "Hello, world!" << '\n'
-        << mysqlxx::DateTime(time(0)) << '\n'
-        << mysqlxx::Date(time(0)) << '\n'
+        << LocalDateTime(time(0)) << '\n'
+        << LocalDate(time(0)) << '\n'
         << 1234567890123456789UL << '\n'
         << DB::flush;
 }
@@ -826,7 +826,7 @@ void Context::setReshardingWorker(std::shared_ptr<ReshardingWorker> resharding_worker)
 {
     Poco::ScopedLock<Poco::Mutex> lock(shared->mutex);
     if (shared->resharding_worker)
-        throw Exception("Resharding background thread has already been set.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Resharding background thread has already been initialized.", ErrorCodes::LOGICAL_ERROR);
     shared->resharding_worker = resharding_worker;
 }

@@ -834,7 +834,8 @@ ReshardingWorker & Context::getReshardingWorker()
 {
     Poco::ScopedLock<Poco::Mutex> lock(shared->mutex);
     if (!shared->resharding_worker)
-        throw Exception("Resharding background thread not set.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("Resharding background thread not initialized: resharding missing in configuration file.",
+            ErrorCodes::LOGICAL_ERROR);
     return *shared->resharding_worker;
 }

@@ -57,7 +57,7 @@ String QuotaForInterval::toString() const
     std::stringstream res;

     res << std::fixed << std::setprecision(3)
-        << "Interval: " << mysqlxx::DateTime(rounded_time) << " - " << mysqlxx::DateTime(rounded_time + duration) << ".\n"
+        << "Interval: " << LocalDateTime(rounded_time) << " - " << LocalDateTime(rounded_time + duration) << ".\n"
         << "Queries: " << used.queries << ".\n"
         << "Errors: " << used.errors << ".\n"
         << "Result rows: " << used.result_rows << ".\n"
@@ -129,7 +129,7 @@ void QuotaForInterval::check(size_t max_amount, size_t used_amount, time_t current_time

     message << " has been exceeded. "
         << resource_name << ": " << used_amount << ", max: " << max_amount << ". "
-        << "Interval will end at " << mysqlxx::DateTime(rounded_time + duration) << ".";
+        << "Interval will end at " << LocalDateTime(rounded_time + duration) << ".";

     throw Exception(message.str(), ErrorCodes::QUOTA_EXPIRED);
 }
@@ -45,6 +45,7 @@ private:

 struct TestEntry
 {
+    unsigned int line_num;
     std::string input;
     std::string expected_output;
     size_t shard_count;
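Each test entry now records the line it was defined on (filled with __LINE__ below), so a failing case can point back at its definition. A minimal sketch of how a test loop might use it, assuming hypothetical reporting code for illustration:

    #include <cstdio>

    struct TestEntry
    {
        unsigned int line_num;
        const char * input;
        const char * expected_output;
    };

    void report(const TestEntry & entry, bool passed)
    {
        if (!passed)
            std::printf("test defined at line %u failed: %s\n", entry.line_num, entry.input);
    }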
@ -69,6 +70,7 @@ void run()
|
||||
/// Тривиальный запрос.
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
0,
|
||||
@ -77,6 +79,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
1,
|
||||
@ -85,6 +88,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
2,
|
||||
@ -93,6 +97,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
0,
|
||||
@ -101,6 +106,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
1,
|
||||
@ -109,6 +115,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
2,
|
||||
@ -117,6 +124,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
0,
|
||||
@ -125,6 +133,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
1,
|
||||
@ -133,6 +142,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
2,
|
||||
@ -141,6 +151,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
0,
|
||||
@ -149,6 +160,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
1,
|
||||
@ -157,6 +169,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT 1",
|
||||
"SELECT 1",
|
||||
2,
|
||||
@ -167,6 +180,7 @@ void run()
|
||||
/// Секция IN / глубина 1
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
1,
|
||||
@ -175,6 +189,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -183,6 +198,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
1,
|
||||
@ -191,6 +207,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -199,6 +216,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -207,6 +225,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
2,
|
||||
@ -215,6 +234,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
2,
|
||||
@ -223,6 +243,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
2,
|
||||
@ -233,6 +254,7 @@ void run()
|
||||
/// Секция NOT IN / глубина 1
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -241,6 +263,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -249,6 +272,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -257,6 +281,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
2,
|
||||
@ -265,6 +290,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
2,
|
||||
@ -273,6 +299,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID NOT IN (SELECT UserID FROM remote_db.remote_visits)",
|
||||
2,
|
||||
@ -283,6 +310,7 @@ void run()
|
||||
/// Секция GLOBAL IN / глубина 1
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -291,6 +319,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -299,6 +328,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -307,6 +337,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -317,6 +348,7 @@ void run()
|
||||
/// Секция GLOBAL NOT IN / глубина 1
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -325,6 +357,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -333,6 +366,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -341,6 +375,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL NOT IN (SELECT UserID FROM test.visits_all)",
|
||||
2,
|
||||
@ -351,6 +386,7 @@ void run()
|
||||
/// Секция JOIN / глубина 1
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -359,6 +395,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -367,6 +404,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -375,6 +413,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits) USING UserID",
|
||||
2,
|
||||
@ -383,6 +422,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits) USING UserID",
|
||||
2,
|
||||
@ -391,6 +431,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits) USING UserID",
|
||||
2,
|
||||
@ -401,6 +442,7 @@ void run()
|
||||
/// Секция GLOBAL JOIN / глубина 1
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -409,6 +451,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -417,6 +460,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -425,6 +469,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all) USING UserID",
|
||||
2,
|
||||
@ -435,6 +480,7 @@ void run()
|
||||
/// Секция JOIN / глубина 1 / 2 подзапроса.
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
2,
|
||||
@ -443,6 +489,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits WHERE RegionID = 2) USING UserID",
|
||||
2,
|
||||
@ -451,24 +498,27 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
2,
|
||||
DB::DistributedProductMode::DENY,
|
||||
false
|
||||
true
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
2,
|
||||
DB::DistributedProductMode::GLOBAL,
|
||||
true
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE RegionID = 2) USING UserID",
|
||||
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits WHERE RegionID = 2) USING UserID",
|
||||
2,
|
||||
DB::DistributedProductMode::LOCAL,
|
||||
true
|
||||
@ -477,6 +527,7 @@ void run()
|
||||
/// Секция IN / глубина 1 / таблица на уровне 2
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
2,
|
||||
@ -485,6 +536,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
2,
|
||||
@ -493,6 +545,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
2,
|
||||
@ -501,6 +554,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits))",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits))",
|
||||
2,
|
||||
@ -509,6 +563,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits))",
|
||||
2,
|
||||
@ -517,6 +572,7 @@ void run()
|
||||
},
|
||||
|
||||
{
|
||||
__LINE__,
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits))",
|
||||
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits))",
|
||||
2,
|
||||
@ -527,6 +583,7 @@ void run()
|
||||
/// Секция GLOBAL IN / глубина 1 / таблица на уровне 2
|
||||
|
||||
{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
2,
@ -535,6 +592,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
2,
@ -543,6 +601,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
2,
@ -551,6 +610,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
"SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all))",
2,
@ -561,6 +621,7 @@ void run()
/// IN section at level 1, GLOBAL IN section at level 2.

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
2,
@ -569,6 +630,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
2,
@ -577,6 +639,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
2,
@ -585,6 +648,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)))",
"SELECT UserID FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote_db.remote_visits WHERE UserID GLOBAL IN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits)))",
2,
@ -595,6 +659,7 @@ void run()
/// JOIN section / depth 1 / table at level 2

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
2,
@ -603,6 +668,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
2,
@ -611,6 +677,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
2,
@ -619,6 +686,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits)) USING UserID",
2,
@ -627,6 +695,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM test.visits_all)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits)) USING UserID",
2,
@ -635,6 +704,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM (SELECT UserID FROM remote_db.remote_visits)) USING UserID",
2,
@ -645,6 +715,7 @@ void run()
/// IN section / depth 2

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
2,
@ -653,6 +724,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
2,
@ -661,6 +733,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
2,
@ -669,14 +742,16 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
2,
DB::DistributedProductMode::DENY,
false
true
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID GLOBAL IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
2,
@ -685,6 +760,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
2,
@ -693,6 +769,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM remote_db.remote_visits WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
2,
@ -701,14 +778,16 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
2,
DB::DistributedProductMode::LOCAL,
true
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM remote_db.remote_visits WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
2,
@ -719,6 +798,7 @@ void run()
/// JOIN section / depth 2

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
2,
@ -727,6 +807,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM remote_db.remote_visits)) USING CounterID)",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM remote_db.remote_visits)) USING CounterID)",
2,
@ -735,14 +816,16 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
2,
DB::DistributedProductMode::DENY,
false
true
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
2,
@ -751,6 +834,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.visits_all ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM test.visits_all)) USING CounterID)",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM remote_db.remote_visits ALL INNER JOIN (SELECT CounterID FROM (SELECT CounterID FROM remote_db.remote_visits)) USING CounterID)",
2,
@ -761,6 +845,7 @@ void run()
/// JOIN section / depth 2

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
2,
@ -769,6 +854,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE OtherID GLOBAL IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 2) USING OtherID)",
"SELECT UserID FROM test.visits_all WHERE OtherID GLOBAL IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 2) USING OtherID)",
2,
@ -777,14 +863,16 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE OtherID GLOBAL IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
"SELECT UserID FROM test.visits_all WHERE OtherID GLOBAL IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
2,
DB::DistributedProductMode::DENY,
false
true
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
2,
@ -793,6 +881,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
"SELECT UserID FROM test.visits_all WHERE OtherID GLOBAL IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) GLOBAL ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
2,
@ -801,6 +890,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM test.visits_all WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2) USING OtherID)",
"SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 1) ALL INNER JOIN (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 2) USING OtherID)",
2,
@ -811,6 +901,7 @@ void run()
/// JOIN section / IN section

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
2,
@ -819,6 +910,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
2,
@ -827,14 +919,16 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
2,
DB::DistributedProductMode::DENY,
false
true
},

{
__LINE__,
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 2)) USING UserID",
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 2)) USING UserID",
2,
@ -843,6 +937,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
"SELECT UserID FROM test.visits_all GLOBAL ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID GLOBAL IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
2,
@ -851,6 +946,7 @@ void run()
},

{
__LINE__,
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM test.visits_all WHERE OtherID IN (SELECT OtherID FROM test.visits_all WHERE RegionID = 2)) USING UserID",
"SELECT UserID FROM test.visits_all ALL INNER JOIN (SELECT UserID FROM remote_db.remote_visits WHERE OtherID IN (SELECT OtherID FROM remote_db.remote_visits WHERE RegionID = 2)) USING UserID",
2,
@ -861,6 +957,7 @@ void run()
/// Table function.

{
__LINE__,
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
2,
@ -869,6 +966,7 @@ void run()
},

{
__LINE__,
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
2,
@ -877,6 +975,7 @@ void run()
},

{
__LINE__,
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote('127.0.0.{1,2}', test, visits_all))",
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote('127.0.0.{1,2}', test, visits_all))",
2,
@ -885,6 +984,7 @@ void run()
},

{
__LINE__,
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
2,
@ -893,6 +993,7 @@ void run()
},

{
__LINE__,
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote('127.0.0.{1,2}', test, visits_all))",
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote('127.0.0.{1,2}', test, visits_all))",
2,
@ -901,6 +1002,7 @@ void run()
},

{
__LINE__,
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
"SELECT count() FROM remote('127.0.0.{1,2}', test, visits_all) WHERE UserID IN (SELECT UserID FROM test.visits_all)",
2,
@ -909,6 +1011,7 @@ void run()
},

{
__LINE__,
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote('127.0.0.{1,2}', test, visits_all))",
"SELECT count() FROM test.visits_all WHERE UserID IN (SELECT UserID FROM remote('127.0.0.{1,2}', test, visits_all))",
2,
@ -919,6 +1022,7 @@ void run()
/// IN section / depth 2 / two distributed tables

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.hits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID GLOBAL IN (SELECT CounterID FROM test.hits_all WHERE BrowserID GLOBAL IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
2,
@ -927,6 +1031,7 @@ void run()
},

{
__LINE__,
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM test.hits_all WHERE BrowserID IN (SELECT BrowserID FROM test.visits_all WHERE OtherID = 1))",
"SELECT UserID, RegionID FROM test.visits_all WHERE CounterID IN (SELECT CounterID FROM distant_db.distant_hits WHERE BrowserID IN (SELECT BrowserID FROM remote_db.remote_visits WHERE OtherID = 1))",
2,
@ -937,6 +1042,7 @@ void run()
/// Aggregate function.

{
__LINE__,
"SELECT sum(RegionID IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
"SELECT sum(RegionID IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
2,
@ -945,6 +1051,7 @@ void run()
},

{
__LINE__,
"SELECT sum(RegionID IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
"SELECT sum(RegionID IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
2,
@ -953,6 +1060,7 @@ void run()
},

{
__LINE__,
"SELECT sum(RegionID GLOBAL IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
"SELECT sum(RegionID GLOBAL IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
2,
@ -961,6 +1069,7 @@ void run()
},

{
__LINE__,
"SELECT sum(RegionID IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
"SELECT sum(RegionID GLOBAL IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
2,
@ -969,6 +1078,7 @@ void run()
},

{
__LINE__,
"SELECT sum(RegionID GLOBAL IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
"SELECT sum(RegionID GLOBAL IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
2,
@ -977,12 +1087,51 @@ void run()
},

{
__LINE__,
"SELECT sum(RegionID IN (SELECT RegionID from test.hits_all)) FROM test.visits_all",
"SELECT sum(RegionID IN (SELECT RegionID from distant_db.distant_hits)) FROM test.visits_all",
2,
DB::DistributedProductMode::LOCAL,
true
},

/// Miscellaneous.

{
__LINE__,
"SELECT count() FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all))",
"SELECT count() FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all))",
2,
DB::DistributedProductMode::DENY,
true
},

{
__LINE__,
"SELECT count() FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all))",
"SELECT count() FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all))",
2,
DB::DistributedProductMode::LOCAL,
true
},

{
__LINE__,
"SELECT count() FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all))",
"SELECT count() FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all WHERE x GLOBAL IN (SELECT x FROM test.visits_all))",
2,
DB::DistributedProductMode::GLOBAL,
true
},

{
__LINE__,
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits_all))",
"SELECT UserID FROM (SELECT UserID FROM test.visits_all WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits_all))",
2,
DB::DistributedProductMode::DENY,
true
}
};

performTests(entries);
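/// For readability: a minimal sketch of the entry type used above, with
/// hypothetical field names inferred from the initializers and from the
/// diagnostics printed in performTests() below; the real definition lives
/// earlier in this test file and may differ.
///
///     struct TestEntry
///     {
///         unsigned line_num;               /// __LINE__ of the entry, reported on failure.
///         std::string input;               /// Query fed to the query rewriter.
///         std::string expected_output;     /// Expected query text after rewriting.
///         size_t shard_count;              /// Number of shards of the distributed table.
///         DB::DistributedProductMode mode; /// distributed_product_mode setting under test.
///         bool expected_success;           /// Whether the rewrite is expected to succeed.
///     };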
@ -1002,7 +1151,8 @@ void performTests(const TestEntries & entries)
std::cout << "Test " << i << " passed.\n";
}
else
std::cout << "Test " << i << " failed. Expected: " << entry.expected_output << ". Received: " << res.second << "\n";
std::cout << "Test " << i << " at line " << entry.line_num << " failed. Expected: "
<< entry.expected_output << ". Received: " << res.second << "\n";

++i;
}
|
@ -1,7 +1,5 @@
#include <iostream>

#include <mysqlxx/mysqlxx.h>

#include <DB/Parsers/ASTCreateQuery.h>
#include <DB/Parsers/ParserCreateQuery.h>
#include <DB/Parsers/formatAST.h>
|
@ -1,7 +1,5 @@
#include <iostream>

#include <mysqlxx/mysqlxx.h>

#include <DB/Parsers/ParserSelectQuery.h>
#include <DB/Parsers/parseQuery.h>
#include <DB/Parsers/formatAST.h>
|
@ -209,7 +209,7 @@ QueryParseResult QueryParser::parse(std::istream & s)
result.limit = DB::parse<unsigned>(limit_nodes->item(0)->innerText());

LOG_DEBUG(log, "CounterID: " << result.CounterID
<< ", dates: " << mysqlxx::Date(result.date_first) << " - " << mysqlxx::Date(result.date_last));
<< ", dates: " << LocalDate(result.date_first) << " - " << LocalDate(result.date_last));

/// get the list of attribute names
Poco::AutoPtr<Poco::XML::NodeList> attributes = result.query->getElementsByTagName("attribute");
|
@ -8,7 +8,7 @@
#include <Poco/File.h>
#include <common/logger_useful.h>
#include <common/Revision.h>
#include <mysqlxx/DateTime.h>
#include <common/LocalDateTime.h>

#include <DB/IO/copyData.h>
#include <DB/IO/ReadBufferFromFile.h>
@ -69,7 +69,7 @@ StatusFile::StatusFile(const std::string & path_)
WriteBufferFromFileDescriptor out(fd, 1024);
out
<< "PID: " << getpid() << "\n"
<< "Started at: " << mysqlxx::DateTime(time(0)) << "\n"
<< "Started at: " << LocalDateTime(time(0)) << "\n"
<< "Revision: " << Revision::get() << "\n";
}
}
|
@ -1,5 +1,3 @@
#include <Poco/Ext/ScopedTry.h>

#include <DB/Storages/MergeTree/MergeTreeData.h>
#include <DB/Interpreters/ExpressionAnalyzer.h>
#include <DB/Storages/MergeTree/MergeTreeBlockInputStream.h>
@ -119,8 +117,10 @@ MergeTreeData::MergeTreeData(

Int64 MergeTreeData::getMaxDataPartIndex()
{
std::lock_guard<std::mutex> lock_all(all_data_parts_mutex);

Int64 max_part_id = 0;
for (const auto & part : data_parts)
for (const auto & part : all_data_parts)
max_part_id = std::max(max_part_id, part->right);

return max_part_id;
@ -146,8 +146,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
{
LOG_DEBUG(log, "Loading data parts");

Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
Poco::ScopedLock<Poco::FastMutex> lock_all(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock_all(all_data_parts_mutex);
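/// Note: Poco::ScopedLock<Poco::FastMutex> and std::lock_guard<std::mutex> are
/// equivalent RAII guards; this hunk and the ones below only swap the Poco
/// threading primitives for their C++ standard library counterparts.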

data_parts.clear();

@ -304,11 +304,11 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)

MergeTreeData::DataPartsVector MergeTreeData::grabOldParts()
{
Poco::ScopedTry<Poco::FastMutex> lock;
std::unique_lock<std::mutex> lock(all_data_parts_mutex, std::defer_lock);
DataPartsVector res;

/// If this method has already been called from another thread (or if all_data_parts is being modified right now), we can do nothing.
if (!lock.lock(&all_data_parts_mutex))
if (!lock.try_lock())
{
LOG_TRACE(log, "grabOldParts: all_data_parts is locked");
return res;
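/// The replacement above uses the standard-library idiom for a non-blocking
/// lock attempt, the equivalent of Poco::ScopedTry. A minimal sketch, outside
/// of this codebase:
///
///     std::unique_lock<std::mutex> lock(m, std::defer_lock); // not locked yet
///     if (!lock.try_lock())  // returns false instead of blocking
///         return;            // someone else holds m; skip the work
///     // ... critical section; unlocked automatically on scope exit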
@ -349,7 +349,7 @@ MergeTreeData::DataPartsVector MergeTreeData::grabOldParts()

void MergeTreeData::addOldParts(const MergeTreeData::DataPartsVector & parts)
{
Poco::ScopedLock<Poco::FastMutex> lock(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(all_data_parts_mutex);
all_data_parts.insert(parts.begin(), parts.end());
}

@ -386,8 +386,8 @@ void MergeTreeData::dropAllData()
{
LOG_TRACE(log, "dropAllData: waiting for locks.");

Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
Poco::ScopedLock<Poco::FastMutex> lock_all(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock_all(all_data_parts_mutex);

LOG_TRACE(log, "dropAllData: removing data from memory.");

@ -719,8 +719,8 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(

LOG_TRACE(log, "Renaming " << part->name << ".");

Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
Poco::ScopedLock<Poco::FastMutex> lock_all(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock_all(all_data_parts_mutex);

String old_name = part->name;
String old_path = getFullPath() + old_name + "/";
@ -813,26 +813,27 @@ MergeTreeData::DataPartsVector MergeTreeData::renameTempPartAndReplace(

void MergeTreeData::replaceParts(const DataPartsVector & remove, const DataPartsVector & add, bool clear_without_timeout)
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);

for (const DataPartPtr & part : remove)
{
part->remove_time = clear_without_timeout ? 0 : time(0);
removePartContributionToColumnSizes(part);
data_parts.erase(part);

if (data_parts.erase(part))
removePartContributionToColumnSizes(part);
}

for (const DataPartPtr & part : add)
{
data_parts.insert(part);
addPartContributionToColumnSizes(part);
if (data_parts.insert(part).second)
addPartContributionToColumnSizes(part);
}
}
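/// The change above ties the size accounting to the container actually
/// changing: std::set::erase returns the number of elements removed, and
/// std::set::insert returns a pair whose .second is true only when a new
/// element was inserted, so removing or adding the same part twice no longer
/// double-counts the per-column size statistics.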

void MergeTreeData::attachPart(const DataPartPtr & part)
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
Poco::ScopedLock<Poco::FastMutex> lock_all(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock_all(all_data_parts_mutex);

if (!all_data_parts.insert(part).second)
throw Exception("Part " + part->name + " is already attached", ErrorCodes::DUPLICATE_DATA_PART);
@ -845,8 +846,8 @@ void MergeTreeData::renameAndDetachPart(const DataPartPtr & part, const String &
{
LOG_INFO(log, "Renaming " << part->name << " to " << prefix << part->name << " and detaching it.");

Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
Poco::ScopedLock<Poco::FastMutex> lock_all(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock_all(all_data_parts_mutex);

if (!all_data_parts.erase(part))
throw Exception("No such data part", ErrorCodes::NO_SUCH_DATA_PART);
@ -915,21 +916,20 @@ void MergeTreeData::detachPartInPlace(const DataPartPtr & part)

MergeTreeData::DataParts MergeTreeData::getDataParts() const
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);

std::lock_guard<std::mutex> lock(data_parts_mutex);
return data_parts;
}

MergeTreeData::DataPartsVector MergeTreeData::getDataPartsVector() const
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);

return DataPartsVector(std::begin(data_parts), std::end(data_parts));
}

size_t MergeTreeData::getTotalActiveSizeInBytes() const
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);

size_t res = 0;
for (auto & part : data_parts)
@ -940,14 +940,13 @@ size_t MergeTreeData::getTotalActiveSizeInBytes() const

MergeTreeData::DataParts MergeTreeData::getAllDataParts() const
{
Poco::ScopedLock<Poco::FastMutex> lock(all_data_parts_mutex);

std::lock_guard<std::mutex> lock(all_data_parts_mutex);
return all_data_parts;
}

size_t MergeTreeData::getMaxPartsCountForMonth() const
{
Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);

size_t res = 0;
size_t cur_count = 0;
@ -971,6 +970,36 @@ size_t MergeTreeData::getMaxPartsCountForMonth() const
return res;
}


std::pair<Int64, bool> MergeTreeData::getMinBlockNumberForMonth(DayNum_t month) const
{
std::lock_guard<std::mutex> lock(data_parts_mutex);

for (const auto & part : data_parts) /// The search could be done better.
if (part->month == month)
return { part->left, true }; /// Blocks in data_parts are ordered by month and left.

return { 0, false };
}


bool MergeTreeData::hasBlockNumberInMonth(Int64 block_number, DayNum_t month) const
{
std::lock_guard<std::mutex> lock(data_parts_mutex);

for (const auto & part : data_parts) /// The search could be done better.
{
if (part->month == month && part->left <= block_number && part->right >= block_number)
return true;

if (part->month > month)
break;
}

return false;
}
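/// Behaviour of the two helpers above on a hypothetical set of parts with
/// (month, left, right) = (201401, 1, 4), (201401, 5, 8), (201402, 9, 9),
/// relying on data_parts being ordered by month and left:
///
///     getMinBlockNumberForMonth(201401)  -> { 1, true }   // first part of that month
///     getMinBlockNumberForMonth(201403)  -> { 0, false }  // no parts for that month
///     hasBlockNumberInMonth(6, 201401)   -> true          // 6 lies within [5, 8]
///     hasBlockNumberInMonth(10, 201402)  -> false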


void MergeTreeData::delayInsertIfNeeded(Poco::Event * until)
{
size_t parts_count = getMaxPartsCountForMonth();
@ -1003,7 +1032,7 @@ MergeTreeData::DataPartPtr MergeTreeData::getActiveContainingPart(const String &
MutableDataPartPtr tmp_part(new DataPart(*this));
ActiveDataPartSet::parsePartName(part_name, *tmp_part);

Poco::ScopedLock<Poco::FastMutex> lock(data_parts_mutex);
std::lock_guard<std::mutex> lock(data_parts_mutex);

/// A part can be covered only by the previous or the next part in data_parts.
DataParts::iterator it = data_parts.lower_bound(tmp_part);
@ -1031,7 +1060,7 @@ MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_na
MutableDataPartPtr tmp_part(new DataPart(*this));
ActiveDataPartSet::parsePartName(part_name, *tmp_part);

Poco::ScopedLock<Poco::FastMutex> lock(all_data_parts_mutex);
std::lock_guard<std::mutex> lock(all_data_parts_mutex);
DataParts::iterator it = all_data_parts.lower_bound(tmp_part);
if (it != all_data_parts.end() && (*it)->name == part_name)
return *it;
@ -1041,15 +1070,12 @@ MergeTreeData::DataPartPtr MergeTreeData::getPartIfExists(const String & part_na

MergeTreeData::DataPartPtr MergeTreeData::getShardedPartIfExists(const String & part_name, size_t shard_no)
{
MutableDataPartPtr tmp_part(new DataPart(*this));
ActiveDataPartSet::parsePartName(part_name, *tmp_part);
const MutableDataPartPtr & part_from_shard = per_shard_data_parts.at(shard_no);

const MutableDataParts & sharded_parts = per_shard_data_parts.at(shard_no);
MutableDataParts::const_iterator it = sharded_parts.lower_bound(tmp_part);
if ((it != sharded_parts.end()) && ((*it)->name == part_name))
return *it;

return nullptr;
if (part_from_shard->name == part_name)
return part_from_shard;
else
return nullptr;
}

MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartAndFixMetadata(const String & relative_path)
@ -1426,4 +1452,9 @@ DayNum_t MergeTreeData::getMonthFromName(const String & month_name)
return date;
}

DayNum_t MergeTreeData::getMonthFromPartPrefix(const String & part_prefix)
{
return getMonthFromName(part_prefix.substr(0, strlen("YYYYMM")));
}

}
|
@ -3,6 +3,7 @@
#include <DB/Storages/MergeTree/MergedBlockOutputStream.h>
#include <DB/Storages/MergeTree/DiskSpaceMonitor.h>
#include <DB/Storages/MergeTree/MergeList.h>
#include <DB/Storages/MergeTree/MergeTreeSharder.h>
#include <DB/Storages/MergeTree/ReshardingJob.h>
#include <DB/DataStreams/ExpressionBlockInputStream.h>
#include <DB/DataStreams/MergingSortedBlockInputStream.h>
@ -11,7 +12,7 @@
#include <DB/DataStreams/AggregatingSortedBlockInputStream.h>
#include <DB/DataStreams/MaterializingBlockInputStream.h>
#include <DB/DataStreams/ConcatBlockInputStream.h>

#include <DB/Common/Increment.h>

namespace DB
{
@ -21,6 +22,27 @@ namespace ErrorCodes
extern const int ABORTED;
}

namespace
{

std::string createMergedPartName(const MergeTreeData::DataPartsVector & parts)
{
DayNum_t left_date = DayNum_t(std::numeric_limits<UInt16>::max());
DayNum_t right_date = DayNum_t(std::numeric_limits<UInt16>::min());
UInt32 level = 0;

for (const MergeTreeData::DataPartPtr & part : parts)
{
level = std::max(level, part->level);
left_date = std::min(left_date, part->left_date);
right_date = std::max(right_date, part->right_date);
}

return ActiveDataPartSet::getPartName(left_date, right_date, parts.front()->left, parts.back()->right, level + 1);
}

}
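/// A worked example of createMergedPartName(), assuming the usual
/// YYYYMMDD_YYYYMMDD_left_right_level naming produced by
/// ActiveDataPartSet::getPartName(): merging 20140101_20140110_1_5_1 and
/// 20140111_20140131_6_9_2 gives left_date 2014-01-01, right_date 2014-01-31,
/// left 1, right 9 and level max(1, 2) + 1 = 3, i.e. 20140101_20140131_1_9_3.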

/// We will not agree to merge parts if the free disk space is less than the total size of the parts multiplied by this ratio.
static const double DISK_USAGE_COEFFICIENT_TO_SELECT = 1.6;

@ -299,39 +321,16 @@ MergeTreeData::DataPartsVector MergeTreeDataMerger::selectAllPartsFromPartition(
}

/// parts must be sorted.
MergeTreeData::MutableDataPartPtr MergeTreeDataMerger::mergeParts(
MergeTreeData::DataPartPtr MergeTreeDataMerger::mergeParts(
const MergeTreeData::DataPartsVector & parts, const String & merged_name, MergeList::Entry & merge_entry,
size_t aio_threshold, MergeTreeData::Transaction * out_transaction,
DiskSpaceMonitor::Reservation * disk_reservation)
{
bool is_sharded = parts[0]->is_sharded;
for (size_t i = 1; i < parts.size(); ++i)
{
if (parts[i]->is_sharded != is_sharded)
throw Exception("Inconsistent set of parts provided for merging", ErrorCodes::LOGICAL_ERROR);
}

size_t shard_no = 0;
if (is_sharded)
{
shard_no = parts[0]->shard_no;
for (size_t i = 1; i < parts.size(); ++i)
{
if (parts[i]->shard_no != shard_no)
throw Exception("Inconsistent set of parts provided for merging", ErrorCodes::LOGICAL_ERROR);
}
}

merge_entry->num_parts = parts.size();

LOG_DEBUG(log, "Merging " << parts.size() << " parts: from " << parts.front()->name << " to " << parts.back()->name << " into " << merged_name);

String merged_dir;
if (is_sharded)
merged_dir = data.getFullPath() + "reshard/" + toString(shard_no) + merged_name;
else
merged_dir = data.getFullPath() + merged_name;

String merged_dir = data.getFullPath() + merged_name;
if (Poco::File(merged_dir).exists())
throw Exception("Directory " + merged_dir + " already exists", ErrorCodes::DIRECTORY_ALREADY_EXISTS);

@ -375,12 +374,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMerger::mergeParts(
{
MarkRanges ranges(1, MarkRange(0, parts[i]->size));

String part_path;
if (is_sharded)
part_path = data.getFullPath() + "reshard/" + toString(shard_no) + "/" + parts[i]->name + '/';
else
part_path = data.getFullPath() + parts[i]->name + '/';

String part_path = data.getFullPath() + parts[i]->name + '/';
auto input = std::make_unique<MergeTreeBlockInputStream>(
part_path, DEFAULT_MERGE_BLOCK_SIZE, union_column_names, data,
parts[i], ranges, false, nullptr, "", true, aio_threshold, DBMS_DEFAULT_BUFFER_SIZE, false);
@ -436,12 +430,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMerger::mergeParts(
throw Exception("Unknown mode of operation for MergeTreeData: " + toString(data.mode), ErrorCodes::LOGICAL_ERROR);
}

String new_part_tmp_path;

if (is_sharded)
new_part_tmp_path = data.getFullPath() + "reshard/" + toString(shard_no) + "/tmp_" + merged_name + "/";
else
new_part_tmp_path = data.getFullPath() + "tmp_" + merged_name + "/";
String new_part_tmp_path = data.getFullPath() + "tmp_" + merged_name + "/";

auto compression_method = data.context.chooseCompressionMethod(
merge_entry->total_size_bytes_compressed,
@ -483,45 +472,41 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMerger::mergeParts(
new_data_part->size = to.marksCount();
new_data_part->modification_time = time(0);
new_data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(new_part_tmp_path);
new_data_part->is_sharded = is_sharded;
new_data_part->shard_no = shard_no;
new_data_part->is_sharded = false;

if (!is_sharded)
/// Rename the new part, add it to the set and remove the source parts.
auto replaced_parts = data.renameTempPartAndReplace(new_data_part, nullptr, out_transaction);

if (new_data_part->name != merged_name)
throw Exception("Unexpected part name: " + new_data_part->name + " instead of " + merged_name, ErrorCodes::LOGICAL_ERROR);

/// Check that all the source parts have been removed, and only them.
if (replaced_parts.size() != parts.size())
{
/// Rename the new part, add it to the set and remove the source parts.
auto replaced_parts = data.renameTempPartAndReplace(new_data_part, nullptr, out_transaction);

if (new_data_part->name != merged_name)
throw Exception("Unexpected part name: " + new_data_part->name + " instead of " + merged_name, ErrorCodes::LOGICAL_ERROR);

/// Check that all the source parts have been removed, and only them.
if (replaced_parts.size() != parts.size())
{
/** This is normal, although it happens rarely.
* The situation where 0 parts were replaced instead of N can arise, for example, in the following case:
* - we had part A, but parts B and C were missing;
* - the merge A, B -> AB was in the queue, but it was not performed because part B was missing;
* - the merge AB, C -> ABC was in the queue, but it was not performed because parts AB and C were missing;
* - we completed the task of downloading part B;
* - we started the merge A, B -> AB, since all the parts had appeared;
* - we decided to download part ABC from another replica, since it was impossible to perform the merge AB, C -> ABC;
* - part ABC appeared, and when it was added, the old parts A, B and C were removed;
* - the merge AB finished. Part AB was added. But it is an obsolete part. The log will contain the message Obsolete part added,
* and then we end up here.
* The situation where M > N parts were replaced is also normal.
*
* Although this should be prevented by the check in the StorageReplicatedMergeTree::shouldExecuteLogEntry method.
*/
LOG_WARNING(log, "Unexpected number of parts removed when adding " << new_data_part->name << ": " << replaced_parts.size()
<< " instead of " << parts.size());
}
else
{
for (size_t i = 0; i < parts.size(); ++i)
if (parts[i]->name != replaced_parts[i]->name)
throw Exception("Unexpected part removed when adding " + new_data_part->name + ": " + replaced_parts[i]->name
+ " instead of " + parts[i]->name, ErrorCodes::LOGICAL_ERROR);
}
/** This is normal, although it happens rarely.
* The situation where 0 parts were replaced instead of N can arise, for example, in the following case:
* - we had part A, but parts B and C were missing;
* - the merge A, B -> AB was in the queue, but it was not performed because part B was missing;
* - the merge AB, C -> ABC was in the queue, but it was not performed because parts AB and C were missing;
* - we completed the task of downloading part B;
* - we started the merge A, B -> AB, since all the parts had appeared;
* - we decided to download part ABC from another replica, since it was impossible to perform the merge AB, C -> ABC;
* - part ABC appeared, and when it was added, the old parts A, B and C were removed;
* - the merge AB finished. Part AB was added. But it is an obsolete part. The log will contain the message Obsolete part added,
* and then we end up here.
* The situation where M > N parts were replaced is also normal.
*
* Although this should be prevented by the check in the StorageReplicatedMergeTree::shouldExecuteLogEntry method.
*/
LOG_WARNING(log, "Unexpected number of parts removed when adding " << new_data_part->name << ": " << replaced_parts.size()
<< " instead of " << parts.size());
}
else
{
for (size_t i = 0; i < parts.size(); ++i)
if (parts[i]->name != replaced_parts[i]->name)
throw Exception("Unexpected part removed when adding " + new_data_part->name + ": " + replaced_parts[i]->name
+ " instead of " + parts[i]->name, ErrorCodes::LOGICAL_ERROR);
}

LOG_TRACE(log, "Merged " << parts.size() << " parts: from " << parts.front()->name << " to " << parts.back()->name);
@ -529,6 +514,257 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMerger::mergeParts(
return new_data_part;
}

MergeTreeData::PerShardDataParts MergeTreeDataMerger::reshardPartition(
const ReshardingJob & job,
size_t aio_threshold, DiskSpaceMonitor::Reservation * disk_reservation)
{
/// Collect all the parts of the partition.
DayNum_t month = MergeTreeData::getMonthFromName(job.partition);
MergeTreeData::DataPartsVector parts = selectAllPartsFromPartition(month);

/// Create a temporary folder name.
std::string merged_name = createMergedPartName(parts);

MergeList::EntryPtr merge_entry_ptr = data.context.getMergeList().insert(job.database_name,
job.table_name, merged_name);
MergeList::Entry & merge_entry = *merge_entry_ptr;
merge_entry->num_parts = parts.size();

LOG_DEBUG(log, "Resharding " << parts.size() << " parts from " << parts.front()->name
<< " to " << parts.back()->name << " which span the partition " << job.partition);

/// Merge all the parts of the partition.

NameSet union_columns_set;
for (const MergeTreeData::DataPartPtr & part : parts)
{
Poco::ScopedReadRWLock part_lock(part->columns_lock);
Names part_columns = part->columns.getNames();
union_columns_set.insert(part_columns.begin(), part_columns.end());

merge_entry->total_size_bytes_compressed += part->size_in_bytes;
merge_entry->total_size_marks += part->size;
}

NamesAndTypesList columns_list = data.getColumnsList();
NamesAndTypesList union_columns = columns_list.filter(union_columns_set);
Names union_column_names = union_columns.getNames();

MergeTreeData::DataPart::ColumnToSize merged_column_to_size;
if (aio_threshold > 0)
{
for (const MergeTreeData::DataPartPtr & part : parts)
part->accumulateColumnSizes(merged_column_to_size);
}

BlockInputStreams src_streams;

size_t sum_rows_approx = 0;

const auto rows_total = merge_entry->total_size_marks * data.index_granularity;

for (size_t i = 0; i < parts.size(); ++i)
{
MarkRanges ranges(1, MarkRange(0, parts[i]->size));

String part_path = data.getFullPath() + parts[i]->name + '/';

auto input = std::make_unique<MergeTreeBlockInputStream>(
part_path, DEFAULT_MERGE_BLOCK_SIZE, union_column_names, data,
parts[i], ranges, false, nullptr, "", true, aio_threshold, DBMS_DEFAULT_BUFFER_SIZE, false);

input->setProgressCallback([&merge_entry, rows_total] (const Progress & value)
{
const auto new_rows_read = __sync_add_and_fetch(&merge_entry->rows_read, value.rows);
merge_entry->progress = static_cast<Float64>(new_rows_read) / rows_total;
__sync_add_and_fetch(&merge_entry->bytes_read_uncompressed, value.bytes);
});
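/// __sync_add_and_fetch is a GCC atomic builtin that returns the new value.
/// Were the counters declared as std::atomic, the same update could be written
/// roughly as (a sketch, not the actual declarations in this codebase):
///
///     const auto new_rows_read = merge_entry->rows_read.fetch_add(value.rows) + value.rows;
///     merge_entry->progress = static_cast<Float64>(new_rows_read) / rows_total;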
|
||||
|
||||
if (data.mode != MergeTreeData::Unsorted)
|
||||
src_streams.push_back(new MaterializingBlockInputStream{
|
||||
new ExpressionBlockInputStream(input.release(), data.getPrimaryExpression())});
|
||||
else
|
||||
src_streams.push_back(input.release());
|
||||
|
||||
sum_rows_approx += parts[i]->size * data.index_granularity;
|
||||
}
|
||||
|
||||
/// Шардирование слитых блоков.
|
||||
|
||||
/// Для нумерации блоков.
|
||||
SimpleIncrement increment(data.getMaxDataPartIndex());
|
||||
|
||||
/// Создать новый кусок для каждого шарда.
|
||||
MergeTreeData::PerShardDataParts per_shard_data_parts;
|
||||
|
||||
per_shard_data_parts.reserve(job.paths.size());
|
||||
for (size_t shard_no = 0; shard_no < job.paths.size(); ++shard_no)
|
||||
{
|
||||
Int64 temp_index = increment.get();
|
||||
|
||||
MergeTreeData::MutableDataPartPtr data_part = std::make_shared<MergeTreeData::DataPart>(data);
|
||||
data_part->name = "tmp_" + merged_name;
|
||||
data_part->is_temp = true;
|
||||
data_part->left_date = std::numeric_limits<UInt16>::max();
|
||||
data_part->right_date = std::numeric_limits<UInt16>::min();
|
||||
data_part->month = month;
|
||||
data_part->left = temp_index;
|
||||
data_part->right = temp_index;
|
||||
data_part->level = 0;
|
||||
per_shard_data_parts.emplace(shard_no, data_part);
|
||||
}
|
||||
|
||||
/// Очень грубая оценка для размера сжатых данных каждой шардированной партиции.
|
||||
/// На самом деле всё зависит от свойств выражения для шардирования.
|
||||
UInt64 per_shard_size_bytes_compressed = merge_entry->total_size_bytes_compressed / static_cast<double>(job.paths.size());
|
||||
|
||||
auto compression_method = data.context.chooseCompressionMethod(
|
||||
per_shard_size_bytes_compressed,
|
||||
static_cast<double>(per_shard_size_bytes_compressed) / data.getTotalActiveSizeInBytes());
|
||||
|
||||
using MergedBlockOutputStreamPtr = std::unique_ptr<MergedBlockOutputStream>;
|
||||
using PerShardOutput = std::unordered_map<size_t, MergedBlockOutputStreamPtr>;
|
||||
|
||||
/// Создать для каждого шарда поток, который записывает соответствующие шардированные блоки.
|
||||
PerShardOutput per_shard_output;
|
||||
|
||||
per_shard_output.reserve(job.paths.size());
|
||||
for (size_t shard_no = 0; shard_no < job.paths.size(); ++shard_no)
|
||||
{
|
||||
std::string new_part_tmp_path = data.getFullPath() + "reshard/" + toString(shard_no) + "/tmp_" + merged_name + "/";
|
||||
Poco::File(new_part_tmp_path).createDirectories();
|
||||
|
||||
MergedBlockOutputStreamPtr output_stream;
|
||||
output_stream = std::make_unique<MergedBlockOutputStream>(data, new_part_tmp_path, union_columns, compression_method, merged_column_to_size, aio_threshold);
|
||||
per_shard_output.emplace(shard_no, std::move(output_stream));
|
||||
}
|
||||
|
||||
/// Порядок потоков важен: при совпадении ключа элементы идут в порядке номера потока-источника.
|
||||
/// В слитом куске строки с одинаковым ключом должны идти в порядке возрастания идентификатора исходного куска,
|
||||
/// то есть (примерного) возрастания времени вставки.
|
||||
std::unique_ptr<IProfilingBlockInputStream> merged_stream;
|
||||
|
||||
switch (data.mode)
|
||||
{
|
||||
case MergeTreeData::Ordinary:
|
||||
merged_stream = std::make_unique<MergingSortedBlockInputStream>(
|
||||
src_streams, data.getSortDescription(), DEFAULT_MERGE_BLOCK_SIZE);
|
||||
break;
|
||||
|
||||
case MergeTreeData::Collapsing:
|
||||
merged_stream = std::make_unique<CollapsingSortedBlockInputStream>(
|
||||
src_streams, data.getSortDescription(), data.sign_column, DEFAULT_MERGE_BLOCK_SIZE);
|
||||
break;
|
||||
|
||||
case MergeTreeData::Summing:
|
||||
merged_stream = std::make_unique<SummingSortedBlockInputStream>(
|
||||
src_streams, data.getSortDescription(), data.columns_to_sum, DEFAULT_MERGE_BLOCK_SIZE);
|
||||
break;
|
||||
|
||||
case MergeTreeData::Aggregating:
|
||||
merged_stream = std::make_unique<AggregatingSortedBlockInputStream>(
|
||||
src_streams, data.getSortDescription(), DEFAULT_MERGE_BLOCK_SIZE);
|
||||
break;
|
||||
|
||||
case MergeTreeData::Unsorted:
|
||||
merged_stream = std::make_unique<ConcatBlockInputStream>(src_streams);
|
||||
break;
|
||||
|
||||
default:
|
||||
throw Exception("Unknown mode of operation for MergeTreeData: " + toString(data.mode), ErrorCodes::LOGICAL_ERROR);
|
||||
}

merged_stream->readPrefix();

for (auto & entry : per_shard_output)
{
MergedBlockOutputStreamPtr & output_stream = entry.second;
output_stream->writePrefix();
}

size_t rows_written = 0;
const size_t initial_reservation = disk_reservation ? disk_reservation->getSize() : 0;

MergeTreeSharder sharder(data, job);

Block block;
while (block = merged_stream->read())
{
abortIfRequested();

ShardedBlocksWithDateIntervals blocks = sharder.shardBlock(block);

for (ShardedBlockWithDateInterval & block_with_dates : blocks)
{
abortIfRequested();

size_t shard_no = block_with_dates.shard_no;
MergeTreeData::MutableDataPartPtr & data_part = per_shard_data_parts.at(shard_no);
MergedBlockOutputStreamPtr & output_stream = per_shard_output.at(shard_no);

rows_written += block_with_dates.block.rows();
output_stream->write(block_with_dates.block);

if (block_with_dates.min_date < data_part->left_date)
data_part->left_date = block_with_dates.min_date;
if (block_with_dates.max_date > data_part->right_date)
data_part->right_date = block_with_dates.max_date;

merge_entry->rows_written = merged_stream->getInfo().rows;
merge_entry->bytes_written_uncompressed = merged_stream->getInfo().bytes;

if (disk_reservation)
disk_reservation->update(static_cast<size_t>((1 - std::min(1., 1. * rows_written / sum_rows_approx)) * initial_reservation));
}
}
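
// A worked example of the reservation update in the loop above (all values
// hypothetical): with initial_reservation = 1'000'000 bytes and
// sum_rows_approx = 10'000 rows, after writing 2'500 rows the reserved disk
// space shrinks in proportion to the work remaining.
#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
    const size_t initial_reservation = 1'000'000;
    const size_t sum_rows_approx = 10'000;
    const size_t rows_written = 2'500;

    size_t updated = static_cast<size_t>(
        (1 - std::min(1., 1. * rows_written / sum_rows_approx)) * initial_reservation);
    std::cout << updated << '\n';  /// 750000: a quarter of the rows written, a quarter of the reservation released
}
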

merged_stream->readSuffix();

/// Finish initializing the parts of the new partitions.
for (size_t shard_no = 0; shard_no < job.paths.size(); ++shard_no)
{
abortIfRequested();

MergedBlockOutputStreamPtr & output_stream = per_shard_output.at(shard_no);
if (0 == output_stream->marksCount())
{
/// No data ended up in this shard. Ignore it.
LOG_WARNING(log, "No data in partition for shard " + job.paths[shard_no].first);
per_shard_data_parts.erase(shard_no);
continue;
}

MergeTreeData::MutableDataPartPtr & data_part = per_shard_data_parts.at(shard_no);

data_part->columns = union_columns;
data_part->checksums = output_stream->writeSuffixAndGetChecksums();
data_part->index.swap(output_stream->getIndex());
data_part->size = output_stream->marksCount();
data_part->modification_time = time(0);
data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(output_stream->getPartPath());
data_part->is_sharded = true;
data_part->shard_no = shard_no;
}

/// Turn the parts of the new partitions into permanent parts.
for (auto & entry : per_shard_data_parts)
{
size_t shard_no = entry.first;
MergeTreeData::MutableDataPartPtr & part_from_shard = entry.second;
part_from_shard->is_temp = false;
std::string prefix = data.getFullPath() + "reshard/" + toString(shard_no) + "/";
std::string old_name = part_from_shard->name;
std::string new_name = ActiveDataPartSet::getPartName(part_from_shard->left_date,
part_from_shard->right_date, part_from_shard->left, part_from_shard->right, part_from_shard->level);
part_from_shard->name = new_name;
Poco::File(prefix + old_name).renameTo(prefix + new_name);
}

LOG_TRACE(log, "Resharded the partition " << job.partition);

return per_shard_data_parts;
}

size_t MergeTreeDataMerger::estimateDiskSpaceForMerge(const MergeTreeData::DataPartsVector & parts)
{
size_t res = 0;
@ -538,4 +774,11 @@ size_t MergeTreeDataMerger::estimateDiskSpaceForMerge(const MergeTreeData::DataP
return static_cast<size_t>(res * DISK_USAGE_COEFFICIENT_TO_RESERVE);
}

void MergeTreeDataMerger::abortIfRequested() const
{
if (cancelled)
throw Exception("Cancelled partition resharding", ErrorCodes::ABORTED);
}

}

@ -84,81 +84,6 @@ ShardedBlocksWithDateIntervals MergeTreeSharder::shardBlock(const Block & block)
return res;
}

MergeTreeData::MutableDataPartPtr MergeTreeSharder::writeTempPart(
ShardedBlockWithDateInterval & sharded_block_with_dates, Int64 temp_index)
{
Block & block = sharded_block_with_dates.block;
UInt16 min_date = sharded_block_with_dates.min_date;
UInt16 max_date = sharded_block_with_dates.max_date;
size_t shard_no = sharded_block_with_dates.shard_no;

const auto & date_lut = DateLUT::instance();

DayNum_t min_month = date_lut.toFirstDayNumOfMonth(DayNum_t(min_date));
DayNum_t max_month = date_lut.toFirstDayNumOfMonth(DayNum_t(max_date));

if (min_month != max_month)
throw Exception("Logical error: part spans more than one month.", ErrorCodes::LOGICAL_ERROR);

size_t part_size = (block.rows() + data.index_granularity - 1) / data.index_granularity;

String tmp_part_name = "tmp_" + ActiveDataPartSet::getPartName(
DayNum_t(min_date), DayNum_t(max_date),
temp_index, temp_index, 0);

String part_tmp_path = data.getFullPath() + "reshard/" + toString(shard_no) + "/" + tmp_part_name + "/";

Poco::File(part_tmp_path).createDirectories();

MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared<MergeTreeData::DataPart>(data);
new_data_part->name = tmp_part_name;
new_data_part->is_temp = true;

/// If some columns need to be computed for sorting, do that.
if (data.mode != MergeTreeData::Unsorted)
data.getPrimaryExpression()->execute(block);

SortDescription sort_descr = data.getSortDescription();

/// Sort.
IColumn::Permutation * perm_ptr = nullptr;
IColumn::Permutation perm;
if (data.mode != MergeTreeData::Unsorted)
{
if (!isAlreadySorted(block, sort_descr))
{
stableGetPermutation(block, sort_descr, perm);
perm_ptr = &perm;
}
}

NamesAndTypesList columns = data.getColumnsList().filter(block.getColumnsList().getNames());
MergedBlockOutputStream out(data, part_tmp_path, columns, CompressionMethod::LZ4);

out.getIndex().reserve(part_size * sort_descr.size());

out.writePrefix();
out.writeWithPermutation(block, perm_ptr);
MergeTreeData::DataPart::Checksums checksums = out.writeSuffixAndGetChecksums();

new_data_part->left_date = DayNum_t(min_date);
new_data_part->right_date = DayNum_t(max_date);
new_data_part->left = temp_index;
new_data_part->right = temp_index;
new_data_part->level = 0;
new_data_part->size = part_size;
new_data_part->modification_time = std::time(0);
new_data_part->month = min_month;
new_data_part->columns = columns;
new_data_part->checksums = checksums;
new_data_part->index.swap(out.getIndex());
new_data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(part_tmp_path);
new_data_part->is_sharded = true;
new_data_part->shard_no = sharded_block_with_dates.shard_no;

return new_data_part;
}
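
// A worked example of the mark-count arithmetic in writeTempPart above:
// part_size is the number of index granules, i.e. the row count divided by
// index_granularity and rounded up (the granularity value is hypothetical).
#include <cstddef>
#include <iostream>

int main()
{
    const size_t index_granularity = 8192;
    for (size_t rows : {size_t(1), size_t(8192), size_t(8193)})
    {
        size_t part_size = (rows + index_granularity - 1) / index_granularity;
        std::cout << rows << " rows -> " << part_size << " mark(s)\n";  /// 1, 1, 2
    }
}
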

std::vector<IColumn::Filter> MergeTreeSharder::createFilters(Block block)
{
using create_filters_sig = std::vector<IColumn::Filter>(size_t, const IColumn *, size_t num_shards, const std::vector<size_t> & slots);

@ -12,7 +12,7 @@ namespace DB
void ReplicatedMergeTreeLogEntry::writeText(WriteBuffer & out) const
{
out << "format version: 3\n"
<< "create_time: " << mysqlxx::DateTime(create_time ? create_time : time(0)) << "\n"
<< "create_time: " << LocalDateTime(create_time ? create_time : time(0)) << "\n"
<< "source replica: " << source_replica << '\n'
<< "block_id: " << escape << block_id << '\n';

@ -68,7 +68,7 @@ void ReplicatedMergeTreeLogEntry::readText(ReadBuffer & in)

if (format_version >= 2)
{
mysqlxx::DateTime create_time_dt;
LocalDateTime create_time_dt;
in >> "create_time: " >> create_time_dt >> "\n";
create_time = create_time_dt;
}

@ -449,7 +449,7 @@ void ReplicatedMergeTreeQueue::removeGetsAndMergesInRange(zkutil::ZooKeeperPtr z

bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(const LogEntry & entry, String & out_postpone_reason, MergeTreeDataMerger & merger)
{
/// queue_mutex is already acquired. The function is called only from selectEntryToProcess.
/// mutex is already acquired. The function is called only from selectEntryToProcess.

if (entry.type == LogEntry::MERGE_PARTS || entry.type == LogEntry::GET_PART || entry.type == LogEntry::ATTACH_PART)
{
@ -522,7 +522,30 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(const LogEntry & entry, Str
}

ReplicatedMergeTreeQueue::LogEntryPtr ReplicatedMergeTreeQueue::selectEntryToProcess(MergeTreeDataMerger & merger)
ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(ReplicatedMergeTreeQueue::LogEntryPtr & entry, ReplicatedMergeTreeQueue & queue)
: entry(entry), queue(queue)
{
entry->currently_executing = true;
++entry->num_tries;
entry->last_attempt_time = time(0);

if (!queue.future_parts.insert(entry->new_part_name).second)
throw Exception("Tagging already tagged future part " + entry->new_part_name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
}

ReplicatedMergeTreeQueue::CurrentlyExecuting::~CurrentlyExecuting()
{
std::lock_guard<std::mutex> lock(queue.mutex);

entry->currently_executing = false;
entry->execution_complete.notify_all();

if (!queue.future_parts.erase(entry->new_part_name))
LOG_ERROR(queue.log, "Untagging already untagged future part " + entry->new_part_name + ". This is a bug.");
}

ReplicatedMergeTreeQueue::SelectedEntry ReplicatedMergeTreeQueue::selectEntryToProcess(MergeTreeDataMerger & merger)
{
std::lock_guard<std::mutex> lock(mutex);

@ -546,47 +569,15 @@ ReplicatedMergeTreeQueue::LogEntryPtr ReplicatedMergeTreeQueue::selectEntryToPro
}
}

return entry;
if (entry)
return { entry, std::unique_ptr<CurrentlyExecuting>{ new CurrentlyExecuting(entry, *this) } };
else
return {};
}

class CurrentlyExecuting
{
private:
ReplicatedMergeTreeQueue::LogEntryPtr & entry;
ReplicatedMergeTreeQueue & queue;

public:
CurrentlyExecuting(ReplicatedMergeTreeQueue::LogEntryPtr & entry, ReplicatedMergeTreeQueue & queue)
: entry(entry), queue(queue)
{
std::lock_guard<std::mutex> lock(queue.mutex);

entry->currently_executing = true;
++entry->num_tries;
entry->last_attempt_time = time(0);

if (!queue.future_parts.insert(entry->new_part_name).second)
throw Exception("Tagging already tagged future part " + entry->new_part_name + ". This is a bug.", ErrorCodes::LOGICAL_ERROR);
}

~CurrentlyExecuting()
{
std::lock_guard<std::mutex> lock(queue.mutex);

entry->currently_executing = false;
entry->execution_complete.notify_all();

if (!queue.future_parts.erase(entry->new_part_name))
LOG_ERROR(queue.log, "Untagging already untagged future part " + entry->new_part_name + ". This is a bug.");
}
};

bool ReplicatedMergeTreeQueue::processEntry(zkutil::ZooKeeperPtr zookeeper, LogEntryPtr & entry, const std::function<bool(LogEntryPtr &)> func)
{
CurrentlyExecuting guard(entry, *this);

std::exception_ptr saved_exception;

try

@ -38,22 +38,6 @@ namespace ErrorCodes
namespace
{

std::string createMergedPartName(const MergeTreeData::DataPartsVector & parts)
{
DayNum_t left_date = DayNum_t(std::numeric_limits<UInt16>::max());
DayNum_t right_date = DayNum_t(std::numeric_limits<UInt16>::min());
UInt32 level = 0;

for (const MergeTreeData::DataPartPtr & part : parts)
{
level = std::max(level, part->level);
left_date = std::min(left_date, part->left_date);
right_date = std::max(right_date, part->right_date);
}

return ActiveDataPartSet::getPartName(left_date, right_date, parts.front()->left, parts.back()->right, level + 1);
}

class Arguments final
{
public:
@ -93,20 +77,10 @@ ReshardingWorker::ReshardingWorker(const Poco::Util::AbstractConfiguration & con
: context(context_), log(&Logger::get("ReshardingWorker"))
{
Arguments arguments(config, config_name);

auto zookeeper = context.getZooKeeper();

host_task_queue_path = "/clickhouse";
zookeeper->createIfNotExists(host_task_queue_path, "");

host_task_queue_path += "/" + arguments.getTaskQueuePath();
zookeeper->createIfNotExists(host_task_queue_path, "");

host_task_queue_path += "/resharding";
zookeeper->createIfNotExists(host_task_queue_path, "");

host_task_queue_path += "/" + getFQDNOrHostName();
zookeeper->createIfNotExists(host_task_queue_path, "");
host_task_queue_path = arguments.getTaskQueuePath() + "resharding/" + getFQDNOrHostName();
zookeeper->createAncestors(host_task_queue_path + "/");
}

ReshardingWorker::~ReshardingWorker()
@ -276,7 +250,7 @@ void ReshardingWorker::perform(const ReshardingJob & job)
auto & storage = typeid_cast<StorageReplicatedMergeTree &>(*(generic_storage.get()));

/// Protect the partition being resharded from the merge task.
ScopedPartitionMergeLock partition_merge_lock(storage, job.partition);
const MergeTreeMergeBlocker merge_blocker{storage.merger};

try
{
@ -309,166 +283,13 @@ void ReshardingWorker::createShardedPartitions(StorageReplicatedMergeTree & stor

LOG_DEBUG(log, "Splitting partition shard-wise.");

/// Parts of a single shard that must be merged.
struct PartsToBeMerged
{
void add(MergeTreeData::MutableDataPartPtr & part)
{
parts.insert(part);
total_size += part->size_in_bytes;
}

void clear()
{
parts.clear();
total_size = 0;
}

MergeTreeData::MutableDataParts parts;
size_t total_size = 0;
};

/// For each shard, the parts that must be merged.
std::unordered_map<size_t, PartsToBeMerged> to_merge;

/// For block numbering.
SimpleIncrement increment(storage.data.getMaxDataPartIndex());

MergeTreeData::PerShardDataParts & per_shard_data_parts = storage.data.per_shard_data_parts;

auto zookeeper = storage.getZooKeeper();
const auto & settings = context.getSettingsRef();

DayNum_t month = MergeTreeData::getMonthFromName(job.partition);

{
std::lock_guard<std::mutex> guard(cancel_mutex);
merger = std::make_unique<MergeTreeDataMerger>(storage.data);
}

auto parts_from_partition = merger->selectAllPartsFromPartition(month);

MergeTreeSharder sharder(storage.data, job);

for (const auto & part : parts_from_partition)
{
MarkRanges ranges(1, MarkRange(0, part->size));

MergeTreeBlockInputStream source(
storage.data.getFullPath() + part->name + '/',
DEFAULT_MERGE_BLOCK_SIZE,
part->columns.getNames(),
storage.data,
part,
ranges,
false,
nullptr,
"",
true,
settings.min_bytes_to_use_direct_io,
DBMS_DEFAULT_BUFFER_SIZE,
true);

Block block;
while (block = source.read())
{
/// Split the block into several, according to the sharding key.
ShardedBlocksWithDateIntervals blocks = sharder.shardBlock(block);

for (ShardedBlockWithDateInterval & block_with_dates : blocks)
{
abortIfRequested();

/// Create a new part corresponding to the new block.
Int64 temp_index = increment.get();
MergeTreeData::MutableDataPartPtr block_part = sharder.writeTempPart(block_with_dates, temp_index);

abortIfRequested();

/// Add the new part to the list of parts of the corresponding shard that must
/// be merged. If it turns out that inserting this part would push the total
/// size of the parts over a certain limit, first merge all the accumulated
/// parts, then move them to the list of parts of the new partition.
PartsToBeMerged & parts_to_be_merged = to_merge[block_with_dates.shard_no];

if ((parts_to_be_merged.total_size + block_part->size_in_bytes) > storage.data.settings.max_bytes_to_merge_parts)
{
MergeTreeData::MutableDataParts & sharded_parts = per_shard_data_parts[block_with_dates.shard_no];

if (parts_to_be_merged.parts.size() >= 2)
{
MergeTreeData::DataPartsVector parts(parts_to_be_merged.parts.begin(), parts_to_be_merged.parts.end());
std::string merged_name = createMergedPartName(parts);

const auto & merge_entry = storage.data.context.getMergeList().insert(job.database_name,
job.table_name, merged_name);

MergeTreeData::MutableDataPartPtr new_part = merger->mergeParts(parts, merged_name, *merge_entry,
storage.data.context.getSettings().min_bytes_to_use_direct_io);

sharded_parts.insert(new_part);
}
else
sharded_parts.insert(block_part);

/// Delete the source parts.
parts_to_be_merged.clear();
}

parts_to_be_merged.add(block_part);
}
}

/// Process all the remaining parts.
for (auto & entry : to_merge)
{
abortIfRequested();

size_t shard_no = entry.first;
PartsToBeMerged & parts_to_be_merged = entry.second;

MergeTreeData::MutableDataParts & sharded_parts = per_shard_data_parts[shard_no];

if (parts_to_be_merged.parts.size() >= 2)
{
MergeTreeData::DataPartsVector parts(parts_to_be_merged.parts.begin(), parts_to_be_merged.parts.end());
std::string merged_name = createMergedPartName(parts);

const auto & merge_entry = storage.data.context.getMergeList().insert(job.database_name,
job.table_name, merged_name);

MergeTreeData::MutableDataPartPtr new_part = merger->mergeParts(parts, merged_name, *merge_entry,
storage.data.context.getSettings().min_bytes_to_use_direct_io);

sharded_parts.insert(new_part);
}
else
{
auto single_part = *(parts_to_be_merged.parts.begin());
sharded_parts.insert(single_part);
}

/// Delete the source parts.
parts_to_be_merged.clear();
}
}

/// Until now, all the parts of the new partitions were temporary.
for (auto & entry : per_shard_data_parts)
{
size_t shard_no = entry.first;
MergeTreeData::MutableDataParts & sharded_parts = entry.second;
for (auto & sharded_part : sharded_parts)
{
sharded_part->is_temp = false;
std::string prefix = storage.full_path + "reshard/" + toString(shard_no) + "/";
std::string old_name = sharded_part->name;
std::string new_name = ActiveDataPartSet::getPartName(sharded_part->left_date,
sharded_part->right_date, sharded_part->left, sharded_part->right, sharded_part->level);
sharded_part->name = new_name;
Poco::File(prefix + old_name).renameTo(prefix + new_name);
}
}
MergeTreeData::PerShardDataParts & per_shard_data_parts = storage.data.per_shard_data_parts;
per_shard_data_parts = merger->reshardPartition(job, storage.data.context.getSettings().min_bytes_to_use_direct_io);
}

void ReshardingWorker::publishShardedPartitions(StorageReplicatedMergeTree & storage, const ReshardingJob & job)
@ -482,17 +303,17 @@ void ReshardingWorker::publishShardedPartitions(StorageReplicatedMergeTree & sto
struct TaskInfo
{
TaskInfo(const std::string & replica_path_,
const std::vector<std::string> & parts_,
const std::string & part_,
const ReplicatedMergeTreeAddress & dest_,
size_t shard_no_)
: replica_path(replica_path_), dest(dest_), parts(parts_),
: replica_path(replica_path_), dest(dest_), part(part_),
shard_no(shard_no_)
{
}

std::string replica_path;
ReplicatedMergeTreeAddress dest;
std::vector<std::string> parts;
std::string part;
size_t shard_no;
};

@ -507,14 +328,10 @@ void ReshardingWorker::publishShardedPartitions(StorageReplicatedMergeTree & sto
for (const auto & entry : storage.data.per_shard_data_parts)
{
size_t shard_no = entry.first;
const MergeTreeData::MutableDataParts & sharded_parts = entry.second;
if (sharded_parts.empty())
const MergeTreeData::MutableDataPartPtr & part_from_shard = entry.second;
if (!part_from_shard)
continue;

std::vector<std::string> part_names;
for (const MergeTreeData::DataPartPtr & sharded_part : sharded_parts)
part_names.push_back(sharded_part->name);

const WeightedZooKeeperPath & weighted_path = job.paths[shard_no];
const std::string & zookeeper_path = weighted_path.first;

@ -524,7 +341,7 @@ void ReshardingWorker::publishShardedPartitions(StorageReplicatedMergeTree & sto
const std::string replica_path = zookeeper_path + "/replicas/" + child;
auto host = zookeeper->get(replica_path + "/host");
ReplicatedMergeTreeAddress host_desc(host);
task_info_list.emplace_back(replica_path, part_names, host_desc, shard_no);
task_info_list.emplace_back(replica_path, part_from_shard->name, host_desc, shard_no);
if (replica_path == storage.replica_path)
{
++local_count;
@ -554,14 +371,14 @@ void ReshardingWorker::publishShardedPartitions(StorageReplicatedMergeTree & sto
const TaskInfo & entry = task_info_list[i];
const auto & replica_path = entry.replica_path;
const auto & dest = entry.dest;
const auto & parts = entry.parts;
const auto & part = entry.part;
size_t shard_no = entry.shard_no;

InterserverIOEndpointLocation to_location(replica_path, dest.host, dest.replication_port);

size_t j = i - local_count;
tasks[j] = Tasks::value_type(std::bind(&ShardedPartitionSender::Client::send,
&storage.sharded_partition_sender_client, to_location, from_location, parts, shard_no));
&storage.sharded_partition_sender_client, to_location, from_location, part, shard_no));
pool.schedule([j, &tasks]{ tasks[j](); });
}
}
@ -586,15 +403,12 @@ void ReshardingWorker::publishShardedPartitions(StorageReplicatedMergeTree & sto
{
/// On the local replica, simply move the sharded partition to the detached/ folder.
const TaskInfo & entry = task_info_list[0];
const auto & parts = entry.parts;
const auto & part = entry.part;
size_t shard_no = entry.shard_no;

for (const auto & part : parts)
{
std::string from_path = storage.full_path + "reshard/" + toString(shard_no) + "/" + part + "/";
std::string to_path = storage.full_path + "detached/";
Poco::File(from_path).moveTo(to_path);
}
std::string from_path = storage.full_path + "reshard/" + toString(shard_no) + "/" + part + "/";
std::string to_path = storage.full_path + "detached/";
Poco::File(from_path).moveTo(to_path);
}
}

@ -628,8 +442,8 @@ void ReshardingWorker::applyChanges(StorageReplicatedMergeTree & storage, const
for (const auto & entry : storage.data.per_shard_data_parts)
{
size_t shard_no = entry.first;
const MergeTreeData::MutableDataParts & sharded_parts = entry.second;
if (sharded_parts.empty())
const MergeTreeData::MutableDataPartPtr & part_from_shard = entry.second;
if (!part_from_shard)
continue;

const WeightedZooKeeperPath & weighted_path = job.paths[shard_no];

@ -5,8 +5,6 @@
#include <DB/IO/ReadHelpers.h>
#include <DB/IO/WriteHelpers.h>

#include <boost/algorithm/string.hpp>

namespace DB
{

@ -15,28 +13,6 @@ namespace ErrorCodes
extern const int ABORTED;
}

namespace
{

std::string glue(const std::vector<std::string> & names, char delim)
{
std::string res;
bool is_first = true;

for (const auto & name : names)
{
if (is_first)
is_first = false;
else
res.append(1, delim);
res.append(name);
}

return res;
}

}

namespace ShardedPartitionSender
{

@ -66,33 +42,27 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, WriteBuffer & out
throw Exception("ShardedPartitionSender service terminated", ErrorCodes::ABORTED);

InterserverIOEndpointLocation from_location(params.get("from_location"));
std::string glued_parts = params.get("parts");
std::string part_name = params.get("part");
size_t shard_no = std::stoul(params.get("shard"));

std::vector<std::string> parts;
boost::split(parts, glued_parts, boost::is_any_of(","));
if (is_cancelled)
throw Exception("ShardedPartitionSender service terminated", ErrorCodes::ABORTED);

for (const auto & part_name : parts)
MergeTreeData::MutableDataPartPtr part = storage.fetcher.fetchShardedPart(from_location, part_name, shard_no);
part->is_temp = false;

const std::string old_part_path = storage.full_path + part->name;
const std::string new_part_path = storage.full_path + "detached/" + part_name;

Poco::File new_part_dir(new_part_path);
if (new_part_dir.exists())
{
if (is_cancelled)
throw Exception("ShardedPartitionSender service terminated", ErrorCodes::ABORTED);

MergeTreeData::MutableDataPartPtr part = storage.fetcher.fetchShardedPart(from_location, part_name, shard_no);
part->is_temp = false;

const std::string old_part_path = storage.full_path + part->name;
const std::string new_part_path = storage.full_path + "detached/" + part_name;

Poco::File new_part_dir(new_part_path);
if (new_part_dir.exists())
{
LOG_WARNING(log, "Directory " + new_part_path + " already exists. Removing.");
new_part_dir.remove(true);
}

Poco::File(old_part_path).renameTo(new_part_path);
LOG_WARNING(log, "Directory " + new_part_path + " already exists. Removing.");
new_part_dir.remove(true);
}

Poco::File(old_part_path).renameTo(new_part_path);

bool flag = true;
writeBinary(flag, out);
out.next();
@ -104,16 +74,14 @@ Client::Client()
}

bool Client::send(const InterserverIOEndpointLocation & to_location, const InterserverIOEndpointLocation & from_location,
const std::vector<std::string> & parts, size_t shard_no)
const std::string & part, size_t shard_no)
{
std::string glued_parts = glue(parts, ',');

ReadBufferFromHTTP::Params params =
{
{"endpoint", getEndpointId(to_location.name)},
{"from_location", from_location.toString()},
{"compress", "false"},
{"parts", glued_parts},
{"part", part},
{"shard", toString(shard_no)}
};

@ -75,6 +75,7 @@ namespace ErrorCodes
extern const int INVALID_PARTITIONS_INTERVAL;
extern const int RESHARDING_INVALID_PARAMETERS;
extern const int INVALID_SHARD_WEIGHT;
extern const int DUPLICATE_SHARD_PATHS;
}

@ -87,6 +88,27 @@ const auto WAIT_FOR_REPLICA_QUEUE_MS = 10 * 1000;

const auto MERGE_SELECTING_SLEEP_MS = 5 * 1000;

/** Data blocks being added are assigned certain numbers: integers.
* For blocks added the usual way (by INSERT), numbers are allocated in ascending order.
* Merges are performed over ranges of block numbers on the number line:
* if block numbers x and z take part in a merge, and there is a block with number y such that x < y < z, then the block with number y takes part in the merge too.
* This is required to preserve the properties of certain operations that may be performed during a merge, for example, in CollapsingMergeTree.
* In particular, this makes it possible to know, during a merge, that in one part all the data was added earlier than all the data in the other part.
*
* Occasionally there is a need to add to the table a piece of data that is known to be old,
* so that it is treated as old by the logic of CollapsingMergeTree.
* Such a piece of data can be added with a special ATTACH query.
* In that case, we must assign that part numbers smaller than the numbers of all other parts.
* For this reason, the numbers of ordinary parts added by INSERT do not start from zero but from a larger number,
* and the smaller numbers are considered "reserved".
*
* Why is this number equal to 200?
* The thing is that negative block numbers were not supported in the past.
* Also, merging is arranged so that as the number of parts grows, insertion of new parts is deliberately slowed down,
* until merges manage to reduce the number of parts; and this was tuned for roughly 200 parts.
* So when inserting all the parts from another table into a table, 200 numbers are surely enough.
* In turn, this number was chosen almost at random.
*/
const Int64 RESERVED_BLOCK_NUMBERS = 200;
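
// A minimal sketch (not ClickHouse code) of the numbering scheme described
// above: blocks added by INSERT are numbered upwards starting at
// RESERVED_BLOCK_NUMBERS, while parts added via ALTER ... ATTACH take the
// largest still-free numbers below it, so they always sort as older.
#include <cstdint>
#include <iostream>

int main()
{
    const int64_t reserved = 200;
    int64_t next_insert_number = reserved;  /// the first INSERT gets 200, then 201, ...
    int64_t min_used_number = reserved;     /// no ATTACHed parts yet

    for (int i = 0; i < 3; ++i)
        std::cout << "ATTACHed part gets number " << --min_used_number << '\n';  /// 199, 198, 197

    std::cout << "next INSERT gets number " << next_insert_number << '\n';  /// 200
}
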

@ -1239,11 +1261,16 @@ void StorageReplicatedMergeTree::executeDropRange(const StorageReplicatedMergeTr
size_t removed_parts = 0;

/// Delete the parts contained in the range being deleted.
auto parts = data.getDataParts();
/// It is important that no old parts (left over after a merge) remain either, because otherwise,
/// after a new replica is added, this new replica will download them but will not delete them.
/// Also, if this is not done, parts will come back to life after a server restart.
/// Therefore, we use getAllDataParts.
auto parts = data.getAllDataParts();
for (const auto & part : parts)
{
if (!ActiveDataPartSet::contains(entry.new_part_name, part->name))
continue;

LOG_DEBUG(log, "Removing part " << part->name);
++removed_parts;

@ -1343,17 +1370,20 @@ void StorageReplicatedMergeTree::queueUpdatingThread()

bool StorageReplicatedMergeTree::queueTask(BackgroundProcessingPool::Context & pool_context)
{
LogEntryPtr entry;
/// This object will mark the queue element as being executed.
ReplicatedMergeTreeQueue::SelectedEntry selected;

try
{
entry = queue.selectEntryToProcess(merger);
selected = queue.selectEntryToProcess(merger);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}

LogEntryPtr & entry = selected.first;

if (!entry)
return false;

@ -1454,15 +1484,30 @@ void StorageReplicatedMergeTree::mergeSelectingThread()
}

/// Parts can be merged if all the numbers between them are abandoned, that is, do not correspond to any blocks.
/// Numbers below RESERVED_BLOCK_NUMBERS never correspond to any blocks.
for (Int64 number = std::max(RESERVED_BLOCK_NUMBERS, left->right + 1); number <= right->left - 1; ++number)
for (Int64 number = left->right + 1; number <= right->left - 1; ++number)
{
String path1 = zookeeper_path + "/block_numbers/" + month_name + "/block-" + padIndex(number);
String path2 = zookeeper_path + "/nonincrement_block_numbers/" + month_name + "/block-" + padIndex(number);
/** For numbers below RESERVED_BLOCK_NUMBERS, AbandonableLock is not used
* - such numbers cannot be "abandoned", that is, left unused for parts.
* These are the numbers of parts that were added with ALTER ... ATTACH.
* They must go without gaps (there must be a part for each number).
* We check that parts exist for all such numbers;
* otherwise it is impossible to merge across the "holes", the missing parts.
*/

if (AbandonableLockInZooKeeper::check(path1, *zookeeper) != AbandonableLockInZooKeeper::ABANDONED &&
AbandonableLockInZooKeeper::check(path2, *zookeeper) != AbandonableLockInZooKeeper::ABANDONED)
return false;
if (number < RESERVED_BLOCK_NUMBERS)
{
if (!data.hasBlockNumberInMonth(number, left->month))
return false;
}
else
{
String path1 = zookeeper_path + "/block_numbers/" + month_name + "/block-" + padIndex(number);
String path2 = zookeeper_path + "/nonincrement_block_numbers/" + month_name + "/block-" + padIndex(number);

if (AbandonableLockInZooKeeper::check(path1, *zookeeper) != AbandonableLockInZooKeeper::ABANDONED &&
AbandonableLockInZooKeeper::check(path2, *zookeeper) != AbandonableLockInZooKeeper::ABANDONED)
return false;
}
}

memoized_parts_that_could_be_merged.insert(key);
@ -2720,8 +2765,47 @@ void StorageReplicatedMergeTree::dropPartition(ASTPtr query, const Field & field
return;
}

ScopedPartitionMergeLock partition_merge_lock(*this, month_name);
std::string fake_part_name = partition_merge_lock.getId();
auto number_and_exists = data.getMinBlockNumberForMonth(data.getMonthFromName(month_name));

/// If there is no data in the partition
if (!number_and_exists.second)
{
LOG_DEBUG(log, "No data in partition " << month_name);
return;
}

Int64 left = number_and_exists.first;

/** Skip one number in block_numbers for the month being deleted, and delete only parts up to this number.
* This prohibits merges of the parts being deleted with newly inserted data.
* Invariant: merges of the parts being deleted with other parts will not appear in the log.
* NOTE: If the DROP PART query ever needs similar support, some new mechanism will have to be devised for it
* in order to guarantee this invariant.
*/
Int64 right;

{
AbandonableLockInZooKeeper block_number_lock = allocateBlockNumber(month_name);
right = block_number_lock.getNumber();
block_number_lock.unlock();
}

/// This should never happen.
if (right == 0)
throw Exception("Logical error: just allocated block number is zero", ErrorCodes::LOGICAL_ERROR);
--right;

String fake_part_name = getFakePartNameForDrop(month_name, left, right);

/** Prohibit selecting the parts being deleted for merges.
* Invariant: after a DROP_RANGE entry appears in the log, no merges of the parts being deleted will appear in the log.
*/
{
std::lock_guard<std::mutex> merge_selecting_lock(merge_selecting_mutex);
queue.disableMergesInRange(fake_part_name);
}

LOG_DEBUG(log, "Disabled merges in range " << left << " - " << right << " for month " << month_name);

/// Finally, having achieved the required invariants, we can put the entry in the log.
LogEntry entry;
@ -2743,66 +2827,6 @@ void StorageReplicatedMergeTree::dropPartition(ASTPtr query, const Field & field
}
}

std::string StorageReplicatedMergeTree::acquirePartitionMergeLock(const std::string & partition_name)
{
std::lock_guard<std::mutex> guard(mutex_partition_to_merge_lock);

auto it = partition_to_merge_lock.find(partition_name);
if (it != partition_to_merge_lock.end())
{
auto & info = it->second;
++info.ref_count;
return info.fake_part_name;
}

/** Skip one number in block_numbers for the month being deleted, and delete only parts up to this number.
* This prohibits merges of the parts being deleted with newly inserted data.
* Invariant: merges of the parts being deleted with other parts will not appear in the log.
* NOTE: If the DROP PART query ever needs similar support, some new mechanism will have to be devised for it
* in order to guarantee this invariant.
*/
Int64 right;

{
AbandonableLockInZooKeeper block_number_lock = allocateBlockNumber(partition_name);
right = block_number_lock.getNumber();
block_number_lock.unlock();
}

/// This should never happen.
if (right == 0)
throw Exception("Logical error: just allocated block number is zero", ErrorCodes::LOGICAL_ERROR);
--right;

std::string fake_part_name = getFakePartNameForDrop(partition_name, 0, right);
partition_to_merge_lock.emplace(partition_name, PartitionMergeLockInfo(fake_part_name));

/** Prohibit selecting the parts being deleted for merges.
* Invariant: after a DROP_RANGE entry appears in the log, no merges of the parts being deleted will appear in the log.
*/
{
std::lock_guard<std::mutex> merge_selecting_lock(merge_selecting_mutex);

queue.disableMergesInRange(fake_part_name);
}

return fake_part_name;
}

void StorageReplicatedMergeTree::releasePartitionMergeLock(const std::string & partition_name)
{
std::lock_guard<std::mutex> guard(mutex_partition_to_merge_lock);

auto it = partition_to_merge_lock.find(partition_name);
if (it == partition_to_merge_lock.end())
throw Exception("StorageReplicatedMergeTree: trying to release a non-existent partition merge lock",
ErrorCodes::LOGICAL_ERROR);

auto & info = it->second;
--info.ref_count;
if (info.ref_count == 0)
partition_to_merge_lock.erase(it);
}

void StorageReplicatedMergeTree::attachPartition(ASTPtr query, const Field & field, bool unreplicated, bool attach_part, const Settings & settings)
{
@ -2828,6 +2852,8 @@ void StorageReplicatedMergeTree::attachPartition(ASTPtr query, const Field & fie
{
LOG_DEBUG(log, "Looking for parts for partition " << partition << " in " << source_dir);
ActiveDataPartSet active_parts;

std::set<String> part_names;
for (Poco::DirectoryIterator it = Poco::DirectoryIterator(full_path + source_dir); it != Poco::DirectoryIterator(); ++it)
{
String name = it.name();
@ -2837,9 +2863,18 @@ void StorageReplicatedMergeTree::attachPartition(ASTPtr query, const Field & fie
continue;
LOG_DEBUG(log, "Found part " << name);
active_parts.add(name);
part_names.insert(name);
}
LOG_DEBUG(log, active_parts.size() << " of them are active");
parts = active_parts.getParts();

/// Rename the inactive parts so that they cannot be attached in case of a repeated ATTACH.
for (const auto & name : part_names)
{
String containing_part = active_parts.getContainingPart(name);
if (!containing_part.empty() && containing_part != name)
Poco::File(full_path + source_dir + name).renameTo(full_path + source_dir + "inactive_" + name);
}
}

/// Synchronously check that the parts being added exist and are not corrupted, at least on this replica. Write checksums.txt if it does not exist.
@ -2853,14 +2888,11 @@ void StorageReplicatedMergeTree::attachPartition(ASTPtr query, const Field & fie
/// Assign the parts being added the maximum free numbers smaller than RESERVED_BLOCK_NUMBERS.
/// NOTE: The check that the numbers are free is not synchronized in any way. Several ATTACH/DETACH/DROP queries must not be executed simultaneously.
Int64 min_used_number = RESERVED_BLOCK_NUMBERS;
DayNum_t month = DateLUT::instance().makeDayNum(parse<UInt16>(partition.substr(0, 4)), parse<UInt8>(partition.substr(4, 2)), 1);
DayNum_t month = MergeTreeData::getMonthFromPartPrefix(partition);

{
auto existing_parts = data.getDataParts();
for (const auto & part : existing_parts)
if (part->month == month)
min_used_number = std::min(min_used_number, part->left);
}
auto num_and_exists = data.getMinBlockNumberForMonth(month);
if (num_and_exists.second && num_and_exists.first < min_used_number)
min_used_number = num_and_exists.first;

/// Add the entries to the log.
std::reverse(parts.begin(), parts.end());
@ -3434,7 +3466,7 @@ void StorageReplicatedMergeTree::reshardPartitions(const String & database_name,
{
auto & resharding_worker = context.getReshardingWorker();
if (!resharding_worker.isStarted())
throw Exception("Resharding background thread is not running.", ErrorCodes::RESHARDING_NO_WORKER);
throw Exception("Resharding background thread is not running", ErrorCodes::RESHARDING_NO_WORKER);

for (const auto & weighted_path : weighted_zookeeper_paths)
{
@ -3443,6 +3475,16 @@ void StorageReplicatedMergeTree::reshardPartitions(const String & database_name,
throw Exception("Shard has invalid weight", ErrorCodes::INVALID_SHARD_WEIGHT);
}

{
std::vector<std::string> all_paths;
all_paths.reserve(weighted_zookeeper_paths.size());
for (const auto & weighted_path : weighted_zookeeper_paths)
all_paths.push_back(weighted_path.first);
std::sort(all_paths.begin(), all_paths.end());
if (std::adjacent_find(all_paths.begin(), all_paths.end()) != all_paths.end())
throw Exception("Shard paths must be distinct", ErrorCodes::DUPLICATE_SHARD_PATHS);
}

DayNum_t first_partition_num = !first_partition.isNull() ? MergeTreeData::getMonthDayNum(first_partition) : DayNum_t();
DayNum_t last_partition_num = !last_partition.isNull() ? MergeTreeData::getMonthDayNum(last_partition) : DayNum_t();

@ -118,11 +118,11 @@ class StripeLogBlockOutputStream : public IBlockOutputStream
public:
StripeLogBlockOutputStream(StorageStripeLog & storage_)
: storage(storage_), lock(storage.rwlock),
data_out_compressed(storage.full_path() + "data.bin"),
data_out_compressed(storage.full_path() + "data.bin", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT),
data_out(data_out_compressed, CompressionMethod::LZ4, storage.max_compress_block_size),
index_out_compressed(storage.full_path() + "index.mrk", INDEX_BUFFER_SIZE),
index_out_compressed(storage.full_path() + "index.mrk", INDEX_BUFFER_SIZE, O_WRONLY | O_APPEND | O_CREAT),
index_out(index_out_compressed),
block_out(data_out, 0, &index_out)
block_out(data_out, 0, &index_out, Poco::File(storage.full_path() + "data.bin").getSize())
{
}
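
// A minimal sketch (not the actual NativeBlockOutputStream) of why the
// constructor above passes the current size of data.bin: when appending,
// offsets recorded in index.mrk must be absolute positions in the data file,
// so counting starts from initial_size_of_file rather than from zero.
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<size_t> indexOffsets(size_t initial_size_of_file, const std::vector<size_t> & chunk_sizes)
{
    std::vector<size_t> offsets;
    size_t pos = initial_size_of_file;  /// resume after the data already in the file
    for (size_t size : chunk_sizes)
    {
        offsets.push_back(pos);
        pos += size;
    }
    return offsets;
}

int main()
{
    /// data.bin already holds 1024 bytes; two chunks of 100 and 200 bytes are appended.
    for (size_t offset : indexOffsets(1024, {100, 200}))
        std::cout << offset << '\n';  /// 1024, 1124
}
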

@ -227,7 +227,7 @@ BlockInputStreams StorageSystemParts::read(
table_column->insert(table);
engine_column->insert(engine);

mysqlxx::Date partition_date {part->month};
LocalDate partition_date {part->month};
String partition = toString(partition_date.year()) + (partition_date.month() < 10 ? "0" : "") + toString(partition_date.month());
partition_column->insert(partition);

@ -1,6 +1,6 @@
#include <DB/IO/ReadHelpers.h>
#include <DB/Storages/MergeTree/ActiveDataPartSet.h>
#include <mysqlxx/DateTime.h>
#include <common/LocalDateTime.h>

int main(int argc, char ** argv)
@ -13,7 +13,7 @@ int main(int argc, char ** argv)
std::cerr << name << '\n';

time_t time = DateLUT::instance().YYYYMMDDToDate(DB::parse<UInt32>(name));
std::cerr << mysqlxx::DateTime(time) << '\n';
std::cerr << LocalDateTime(time) << '\n';
}

return 0;

@ -0,0 +1,2 @@
700
700
@ -0,0 +1,169 @@
DROP TABLE IF EXISTS test.r1;
DROP TABLE IF EXISTS test.r2;

CREATE TABLE test.r1 (d Date DEFAULT '2016-01-01', x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/r/', 'r1', d, x, 111);

SET max_block_size = 1;

INSERT INTO test.r1 (x) SELECT number + 1000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 2000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 3000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 4000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 5000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 6000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 7000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 8000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 9000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 10000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

INSERT INTO test.r1 (x) SELECT number + 11000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 12000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 13000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 14000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 15000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 16000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 17000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 18000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 19000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 20000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

INSERT INTO test.r1 (x) SELECT number + 21000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 22000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 23000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 24000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 25000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 26000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 27000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 28000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 29000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 30000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

INSERT INTO test.r1 (x) SELECT number + 31000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 32000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 33000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 34000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 35000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 36000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 37000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 38000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 39000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 40000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

INSERT INTO test.r1 (x) SELECT number + 41000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 42000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 43000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 44000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 45000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 46000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 47000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 48000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 49000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 50000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

INSERT INTO test.r1 (x) SELECT number + 51000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 52000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 53000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 54000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 55000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 56000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 57000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 58000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 59000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 60000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

INSERT INTO test.r1 (x) SELECT number + 61000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 62000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 63000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 64000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 65000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 66000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 67000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 68000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 69000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;
INSERT INTO test.r1 (x) SELECT number + 70000 AS x FROM system.numbers LIMIT 10;
ALTER TABLE test.r1 DETACH PARTITION 201601;

SELECT count() FROM test.r1;

CREATE TABLE test.r2 (d Date DEFAULT '2016-01-01', x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/r/', 'r2', d, x, 111);

SELECT count() FROM test.r2;

SET replication_alter_partitions_sync = 2;

ALTER TABLE test.r1 ATTACH PARTITION 201601;

SELECT count() FROM test.r1;
SELECT count() FROM test.r2;

DROP TABLE test.r1;
DROP TABLE test.r2;
@ -0,0 +1,17 @@
┌─────x─┬─y─┐
│ Hello │ 0 │
└───────┴───┘
┌─────x─┬─y─┐
│ Hello │ 0 │
│ \\    │ 0 │
└───────┴───┘
┌─────x─┬─y─┐
│ Hello │ 0 │
│ \\    │ 0 │
│ \t\\t │ 0 │
└───────┴───┘
┌─────x─┬─y─┬─toInt8(x)─┬─s─────┬─casted─┐
│ Hello │ 0 │      -100 │ Hello │ Hello  │
│ \\    │ 0 │         0 │ \\    │ \\     │
│ \t\\t │ 0 │       111 │ \t\\t │ \t\\t  │
└───────┴───┴───────────┴───────┴────────┘
dbms/tests/queries/0_stateless/00298_enum_width_and_cast.sql
@ -0,0 +1,12 @@
|
||||
DROP TABLE IF EXISTS test.enum;
|
||||
|
||||
CREATE TABLE test.enum (x Enum8('Hello' = -100, '\\' = 0, '\t\\t' = 111), y UInt8) ENGINE = TinyLog;
|
||||
INSERT INTO test.enum (y) VALUES (0);
|
||||
SELECT * FROM test.enum ORDER BY x, y FORMAT PrettyCompact;
|
||||
INSERT INTO test.enum (x) VALUES ('\\');
|
||||
SELECT * FROM test.enum ORDER BY x, y FORMAT PrettyCompact;
|
||||
INSERT INTO test.enum (x) VALUES ('\t\\t');
|
||||
SELECT * FROM test.enum ORDER BY x, y FORMAT PrettyCompact;
|
||||
SELECT x, y, toInt8(x), toString(x) AS s, CAST(s AS Enum8('Hello' = -100, '\\' = 0, '\t\\t' = 111)) AS casted FROM test.enum ORDER BY x, y FORMAT PrettyCompact;
|
||||
|
||||
DROP TABLE test.enum;
|
@ -0,0 +1,18 @@
|
||||
0
|
||||
0
|
||||
1
|
||||
0
|
||||
1
|
||||
2
|
||||
0
|
||||
0
|
||||
1
|
||||
0
|
||||
1
|
||||
2
|
||||
0
|
||||
0
|
||||
1
|
||||
0
|
||||
1
|
||||
2
|
@ -0,0 +1,37 @@
DROP TABLE IF EXISTS test.log;

CREATE TABLE test.log (x UInt8) ENGINE = StripeLog;

SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (0);
SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (1);
SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (2);
SELECT * FROM test.log ORDER BY x;

DROP TABLE test.log;

CREATE TABLE test.log (x UInt8) ENGINE = TinyLog;

SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (0);
SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (1);
SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (2);
SELECT * FROM test.log ORDER BY x;

DROP TABLE test.log;

CREATE TABLE test.log (x UInt8) ENGINE = Log;

SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (0);
SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (1);
SELECT * FROM test.log ORDER BY x;
INSERT INTO test.log VALUES (2);
SELECT * FROM test.log ORDER BY x;

DROP TABLE test.log;
@ -2,17 +2,14 @@

#include <string.h>
#include <string>
#include <sstream>
#include <exception>
#include <common/DateLUT.h>

#include <mysqlxx/Exception.h>


namespace mysqlxx
{

/** Stores a date in broken-down form.
 * Can be initialized from a date in text form '2011-01-01' and from time_t.
 * Can be initialized from a date in text form '20110101...(first 8 chars are used)
 * Can be initialized from a date in text form '20110101... (the first 8 characters are used)
 * Implicitly converts to time_t.
 * Is serialized to an ostream in text form.
 * Warning: conversion to unix timestamp and back is performed in the current time zone!
@ -20,7 +17,7 @@ namespace mysqlxx
 *
 * packed - for memcmp (because m_year is 2 bytes, little endian, it works correctly only up to the year 2047)
 */
class __attribute__ ((__packed__)) Date
class __attribute__ ((__packed__)) LocalDate
{
private:
	unsigned short m_year;
@ -40,14 +37,14 @@ private:
	void init(const char * s, size_t length)
	{
		if (length < 8)
			throw Exception("Cannot parse Date: " + std::string(s, length));
			throw std::runtime_error("Cannot parse LocalDate: " + std::string(s, length));

		m_year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0');

		if (s[4] == '-')
		{
			if (length < 10)
				throw Exception("Cannot parse Date: " + std::string(s, length));
				throw std::runtime_error("Cannot parse LocalDate: " + std::string(s, length));
			m_month = (s[5] - '0') * 10 + (s[6] - '0');
			m_day = (s[8] - '0') * 10 + (s[9] - '0');
		}
@ -59,12 +56,12 @@ private:
	}

public:
	explicit Date(time_t time)
	explicit LocalDate(time_t time)
	{
		init(time);
	}

	Date(DayNum_t day_num)
	LocalDate(DayNum_t day_num)
	{
		const auto & values = DateLUT::instance().getValues(day_num);
		m_year = values.year;
@ -72,31 +69,31 @@ public:
		m_day = values.day_of_month;
	}

	Date(unsigned short year_, unsigned char month_, unsigned char day_)
	LocalDate(unsigned short year_, unsigned char month_, unsigned char day_)
		: m_year(year_), m_month(month_), m_day(day_)
	{
	}

	explicit Date(const std::string & s)
	explicit LocalDate(const std::string & s)
	{
		init(s.data(), s.size());
	}

	Date(const char * data, size_t length)
	LocalDate(const char * data, size_t length)
	{
		init(data, length);
	}

	Date() : m_year(0), m_month(0), m_day(0)
	LocalDate() : m_year(0), m_month(0), m_day(0)
	{
	}

	Date(const Date & x)
	LocalDate(const LocalDate & x)
	{
		operator=(x);
	}

	Date & operator= (const Date & x)
	LocalDate & operator= (const LocalDate & x)
	{
		m_year = x.m_year;
		m_month = x.m_month;
@ -105,7 +102,7 @@ public:
		return *this;
	}

	Date & operator= (time_t time)
	LocalDate & operator= (time_t time)
	{
		init(time);
		return *this;
@ -134,36 +131,37 @@ public:
	void month(unsigned char x) { m_month = x; }
	void day(unsigned char x) { m_day = x; }

	bool operator< (const Date & other) const
	bool operator< (const LocalDate & other) const
	{
		return 0 > memcmp(this, &other, sizeof(*this));
	}

	bool operator> (const Date & other) const
	bool operator> (const LocalDate & other) const
	{
		return 0 < memcmp(this, &other, sizeof(*this));
	}

	bool operator<= (const Date & other) const
	bool operator<= (const LocalDate & other) const
	{
		return 0 >= memcmp(this, &other, sizeof(*this));
	}

	bool operator>= (const Date & other) const
	bool operator>= (const LocalDate & other) const
	{
		return 0 <= memcmp(this, &other, sizeof(*this));
	}

	bool operator== (const Date & other) const
	bool operator== (const LocalDate & other) const
	{
		return 0 == memcmp(this, &other, sizeof(*this));
	}

	bool operator!= (const Date & other) const
	bool operator!= (const LocalDate & other) const
	{
		return !(*this == other);
	}

	/// NOTE Inefficient.
	std::string toString(char separator = '-') const
	{
		std::stringstream ss;
@ -177,19 +175,17 @@ public:
	}
};

inline std::ostream & operator<< (std::ostream & ostr, const Date & date)
inline std::ostream & operator<< (std::ostream & ostr, const LocalDate & date)
{
	return ostr << date.year()
		<< '-' << (date.month() / 10) << (date.month() % 10)
		<< '-' << (date.day() / 10) << (date.day() % 10);
}

}


namespace std
{
inline string to_string(const mysqlxx::Date & date)
inline string to_string(const LocalDate & date)
{
	return date.toString();
}
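For orientation while reviewing the rename: a minimal usage sketch of the class as it stands after this diff. It assumes the post-rename header path common/LocalDate.h; the main() wrapper and the concrete dates are illustrative only, not part of the commit.

#include <iostream>
#include <common/LocalDate.h>

int main()
{
	LocalDate a("2016-01-02");	/// parsed by init(); now throws std::runtime_error instead of mysqlxx::Exception
	LocalDate b(2016, 1, 3);	/// broken-down constructor

	std::cout << (a < b) << '\n';	/// 1: comparison is a memcmp over the packed year/month/day layout
	std::cout << a.toString() << '\n';	/// 2016-01-02
	std::cout << std::to_string(b) << '\n';	/// via the std::to_string overload above
	return 0;
}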
@ -1,15 +1,12 @@
#pragma once

#include <string>
#include <common/DateLUT.h>

#include <mysqlxx/Date.h>
#include <iomanip>
#include <exception>
#include <common/DateLUT.h>
#include <common/LocalDate.h>


namespace mysqlxx
{

/** Stores a date and time in broken-down form.
 * Can be initialized from a date and time in text form '2011-01-01 00:00:00' and from time_t.
 * Implicitly converts to time_t.
@ -19,7 +16,7 @@ namespace mysqlxx
 *
 * packed - for memcmp (because m_year is 2 bytes, little endian, it works correctly only up to the year 2047)
 */
class __attribute__ ((__packed__)) DateTime
class __attribute__ ((__packed__)) LocalDateTime
{
private:
	unsigned short m_year;
@ -57,7 +54,7 @@ private:
	void init(const char * s, size_t length)
	{
		if (length < 19)
			throw Exception("Cannot parse DateTime: " + std::string(s, length));
			throw std::runtime_error("Cannot parse LocalDateTime: " + std::string(s, length));

		m_year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0');
		m_month = (s[5] - '0') * 10 + (s[6] - '0');
@ -69,40 +66,40 @@ private:
	}

public:
	explicit DateTime(time_t time)
	explicit LocalDateTime(time_t time)
	{
		init(time);
	}

	DateTime(unsigned short year_, unsigned char month_, unsigned char day_,
	LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_,
		unsigned char hour_, unsigned char minute_, unsigned char second_)
		: m_year(year_), m_month(month_), m_day(day_), m_hour(hour_), m_minute(minute_), m_second(second_)
	{
	}

	explicit DateTime(const std::string & s)
	explicit LocalDateTime(const std::string & s)
	{
		if (s.size() < 19)
			throw Exception("Cannot parse DateTime: " + s);
			throw std::runtime_error("Cannot parse LocalDateTime: " + s);

		init(s.data(), s.size());
	}

	DateTime() : m_year(0), m_month(0), m_day(0), m_hour(0), m_minute(0), m_second(0)
	LocalDateTime() : m_year(0), m_month(0), m_day(0), m_hour(0), m_minute(0), m_second(0)
	{
	}

	DateTime(const char * data, size_t length)
	LocalDateTime(const char * data, size_t length)
	{
		init(data, length);
	}

	DateTime(const DateTime & x)
	LocalDateTime(const LocalDateTime & x)
	{
		operator=(x);
	}

	DateTime & operator= (const DateTime & x)
	LocalDateTime & operator= (const LocalDateTime & x)
	{
		m_year = x.m_year;
		m_month = x.m_month;
@ -114,7 +111,7 @@ public:
		return *this;
	}

	DateTime & operator= (time_t time)
	LocalDateTime & operator= (time_t time)
	{
		init(time);
		return *this;
@ -141,42 +138,42 @@ public:
	void minute(unsigned char x) { m_minute = x; }
	void second(unsigned char x) { m_second = x; }

	Date toDate() const { return Date(m_year, m_month, m_day); }
	LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }

	DateTime toStartOfDate() { return DateTime(m_year, m_month, m_day, 0, 0, 0); }
	LocalDateTime toStartOfDate() { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }

	bool operator< (const DateTime & other) const
	bool operator< (const LocalDateTime & other) const
	{
		return 0 > memcmp(this, &other, sizeof(*this));
	}

	bool operator> (const DateTime & other) const
	bool operator> (const LocalDateTime & other) const
	{
		return 0 < memcmp(this, &other, sizeof(*this));
	}

	bool operator<= (const DateTime & other) const
	bool operator<= (const LocalDateTime & other) const
	{
		return 0 >= memcmp(this, &other, sizeof(*this));
	}

	bool operator>= (const DateTime & other) const
	bool operator>= (const LocalDateTime & other) const
	{
		return 0 <= memcmp(this, &other, sizeof(*this));
	}

	bool operator== (const DateTime & other) const
	bool operator== (const LocalDateTime & other) const
	{
		return 0 == memcmp(this, &other, sizeof(*this));
	}

	bool operator!= (const DateTime & other) const
	bool operator!= (const LocalDateTime & other) const
	{
		return !(*this == other);
	}
};

inline std::ostream & operator<< (std::ostream & ostr, const DateTime & datetime)
inline std::ostream & operator<< (std::ostream & ostr, const LocalDateTime & datetime)
{
	ostr << std::setfill('0') << std::setw(4) << datetime.year();

@ -189,12 +186,10 @@ inline std::ostream & operator<< (std::ostream & ostr, const DateTime & datetime
	return ostr;
}

}


namespace std
{
inline string to_string(const mysqlxx::DateTime & datetime)
inline string to_string(const LocalDateTime & datetime)
{
	stringstream str;
	str << datetime;
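The analogous sketch for the date-time class, assuming common/LocalDateTime.h as the new header path per the hunks above; the concrete values are illustrative.

#include <iostream>
#include <common/LocalDateTime.h>

int main()
{
	LocalDateTime dt("2016-01-02 03:04:05");	/// init() requires at least 19 characters, else std::runtime_error
	LocalDate d = dt.toDate();			/// 2016-01-02
	LocalDateTime midnight = dt.toStartOfDate();	/// 2016-01-02 00:00:00

	std::cout << dt << ' ' << d << ' ' << midnight << '\n';
	return 0;
}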
@ -18,7 +18,7 @@ fi

is_it_github=$( git config --get remote.origin.url | grep 'github' )
if [[ "$is_it_github" = "" ]]; then
	revision=53190
	revision=53694
fi

echo $revision >> "${CMAKE_CURRENT_BINARY_DIR}/src/revision.h";

@ -7,6 +7,7 @@
#include <common/singleton.h>

#include <mysqlxx/Query.h>
#include <mysqlxx/Exception.h>

#define MYSQLXX_DEFAULT_TIMEOUT 60
#define MYSQLXX_DEFAULT_RW_TIMEOUT 1800

@ -54,8 +54,8 @@ struct EscapeManipResult
	std::ostream & operator<< (double value) { return ostr << value; }
	std::ostream & operator<< (long long value) { return ostr << value; }
	std::ostream & operator<< (unsigned long long value) { return ostr << value; }
	std::ostream & operator<< (Date value) { return ostr << value; }
	std::ostream & operator<< (DateTime value) { return ostr << value; }
	std::ostream & operator<< (LocalDate value) { return ostr << value; }
	std::ostream & operator<< (LocalDateTime value) { return ostr << value; }

	std::ostream & operator<< (const std::string & value)
	{
@ -102,7 +102,7 @@ struct EscapeManipResult
		{
			if (i != 0)
				ostr << '\t';

			if (row[i].isNull())
			{
				ostr << "\\N";
@ -115,7 +115,7 @@ struct EscapeManipResult
		return ostr;
	}


	template <typename T>
	std::ostream & operator<< (const Null<T> & value)
	{
@ -123,7 +123,7 @@ struct EscapeManipResult
			ostr << "\\N";
		else
			*this << value.data;

		return ostr;
	}

@ -181,8 +181,8 @@ public:
	std::ostream & operator<< (double value) { return ostr << value; }
	std::ostream & operator<< (long long value) { return ostr << value; }
	std::ostream & operator<< (unsigned long long value) { return ostr << value; }
	std::ostream & operator<< (Date value) { return ostr << '\'' << value << '\''; }
	std::ostream & operator<< (DateTime value) { return ostr << '\'' << value << '\''; }
	std::ostream & operator<< (LocalDate value) { return ostr << '\'' << value << '\''; }
	std::ostream & operator<< (LocalDateTime value) { return ostr << '\'' << value << '\''; }

	std::ostream & operator<< (const std::string & value)
	{
@ -201,7 +201,7 @@ public:
		ostr.put('\'');
		return ostr;
	}

	template <typename T>
	std::ostream & operator<< (const Null<T> & value)
	{
@ -395,19 +395,19 @@ struct UnEscapeManipResult
		return istr;
	}

	std::istream & operator>> (Date & value)
	std::istream & operator>> (LocalDate & value)
	{
		std::string s;
		(*this) >> s;
		value = Date(s);
		value = LocalDate(s);
		return istr;
	}

	std::istream & operator>> (DateTime & value)
	std::istream & operator>> (LocalDateTime & value)
	{
		std::string s;
		(*this) >> s;
		value = DateTime(s);
		value = LocalDateTime(s);
		return istr;
	}

@ -499,5 +499,5 @@ inline UnQuoteManipResult operator>> (std::istream & istr, unquote_enum manip)
	return UnQuoteManipResult(istr);
}



}
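A sketch of how the updated overloads are reached through the stream manipulators that these result types back. The spellings mysqlxx::escape and mysqlxx::quote are assumed here from the unquote_enum naming visible in the last hunk; treat them as an assumption, not something this diff shows.

#include <iostream>
#include <sstream>
#include <mysqlxx/Manip.h>
#include <common/LocalDate.h>

int main()
{
	std::stringstream ss;
	ss << mysqlxx::escape << std::string("a\tb") << '\n';	/// TSV-style escaping of the tab
	ss << mysqlxx::escape << LocalDate("2016-01-02") << '\n';	/// dates pass through unchanged
	ss << mysqlxx::quote << LocalDate("2016-01-02") << '\n';	/// written as '2016-01-02', single-quoted
	std::cout << ss.str();
	return 0;
}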
@ -3,6 +3,7 @@
#include <mysqlxx/Types.h>
#include <mysqlxx/Value.h>
#include <mysqlxx/ResultBase.h>
#include <mysqlxx/Exception.h>


namespace mysqlxx

@ -4,8 +4,8 @@
#include <mysql/mysql.h>
#include <Poco/Types.h>

#include <mysqlxx/Date.h>
#include <mysqlxx/DateTime.h>
#include <common/LocalDate.h>
#include <common/LocalDateTime.h>


namespace mysqlxx

@ -60,7 +60,7 @@ public:
	{
		if (unlikely(isNull()))
			throwException("Value is NULL");

		return m_length > 0 && m_data[0] != '0';
	}

@ -69,7 +69,7 @@ public:
	{
		if (unlikely(isNull()))
			throwException("Value is NULL");

		return readUIntText(m_data, m_length);
	}

@ -84,20 +84,20 @@ public:
	{
		if (unlikely(isNull()))
			throwException("Value is NULL");

		return readFloatText(m_data, m_length);
	}

	/// Get date-time (from a value like '2011-01-01 00:00:00').
	DateTime getDateTime() const
	LocalDateTime getDateTime() const
	{
		return DateTime(data(), size());
		return LocalDateTime(data(), size());
	}

	/// Get date (from a value like '2011-01-01' or '2011-01-01 00:00:00').
	Date getDate() const
	LocalDate getDate() const
	{
		return Date(data(), size());
		return LocalDate(data(), size());
	}

	/// Get string.
@ -105,7 +105,7 @@ public:
	{
		if (unlikely(isNull()))
			throwException("Value is NULL");

		return std::string(m_data, m_length);
	}

@ -136,7 +136,7 @@ private:
	size_t m_length;
	const ResultBase * res;


	bool checkDateTime() const
	{
		return (m_length == 10 || m_length == 19) && m_data[4] == '-' && m_data[7] == '-';
@ -392,8 +392,8 @@ template <> inline unsigned long long Value::get<unsigned long long >() const {
template <> inline float Value::get<float >() const { return getDouble(); }
template <> inline double Value::get<double >() const { return getDouble(); }
template <> inline std::string Value::get<std::string >() const { return getString(); }
template <> inline Date Value::get<Date >() const { return getDate(); }
template <> inline DateTime Value::get<DateTime >() const { return getDateTime(); }
template <> inline LocalDate Value::get<LocalDate >() const { return getDate(); }
template <> inline LocalDateTime Value::get<LocalDateTime >() const { return getDateTime(); }

template <> inline VisitID_t Value::get<VisitID_t >() const { return VisitID_t(getUInt()); }

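A sketch of how the renamed accessors are consumed from a fetched row; the function name and the column layout are hypothetical, introduced only to illustrate the specializations above.

#include <iostream>
#include <common/LocalDate.h>
#include <common/LocalDateTime.h>
#include <mysqlxx/Row.h>

/// Hypothetical consumer of a row produced by e.g. "SELECT d, dt FROM t".
void printRow(const mysqlxx::Row & row)
{
	LocalDate d = row[0].getDate();			/// accepts '2011-01-01' or '2011-01-01 00:00:00'
	LocalDateTime dt = row[1].get<LocalDateTime>();	/// template specialization forwarding to getDateTime()
	time_t t = dt;					/// implicit conversion, per the class doc comment
	std::cerr << d << ' ' << dt << ' ' << t << std::endl;
}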
@ -4,8 +4,8 @@
#include <mysqlxx/Transaction.h>
#include <mysqlxx/Manip.h>
#include <mysqlxx/Pool.h>
#include <mysqlxx/Date.h>
#include <mysqlxx/DateTime.h>
#include <common/LocalDate.h>
#include <common/LocalDateTime.h>
#include <mysqlxx/Null.h>


@ -63,5 +63,5 @@
 * following the principle "whatever is not used right now is not implemented",
 * and it also depends on a small amount of code from other places in the Metrica repository
 * (if necessary, these dependencies can be removed).
 * It is assumed that the user will add the missing functional themselves.
 * It is assumed that the user will add the missing functionality themselves.
 */

@ -33,8 +33,8 @@ int main(int argc, char ** argv)

		time_t t1 = row[0];
		time_t t2 = row[1];
		std::cerr << t1 << ", " << mysqlxx::DateTime(t1) << std::endl;
		std::cerr << t2 << ", " << mysqlxx::DateTime(t2) << std::endl;
		std::cerr << t1 << ", " << LocalDateTime(t1) << std::endl;
		std::cerr << t2 << ", " << LocalDateTime(t2) << std::endl;
	}
}
