Merge remote-tracking branch 'origin/master' into HEAD

This commit is contained in:
Alexander Kuzmenkov 2021-03-19 02:11:08 +03:00
commit 1beba597ca
931 changed files with 21379 additions and 8322 deletions

View File

@@ -1,4 +1,4 @@
## ClickHouse release 21.3
## ClickHouse release 21.3 (LTS)
### ClickHouse release v21.3, 2021-03-12

View File

@@ -155,7 +155,6 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests"
if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
# Only for Linux, x86_64.
# Implies ${ENABLE_FASTMEMCPY}
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
@@ -241,9 +240,7 @@ else()
message(STATUS "Disabling compiler -pipe option (have only ${AVAILABLE_PHYSICAL_MEMORY} mb of memory)")
endif()
if(NOT DISABLE_CPU_OPTIMIZE)
include(cmake/cpu_features.cmake)
endif()
include(cmake/cpu_features.cmake)
option(ARCH_NATIVE "Add -march=native compiler flag")
@@ -536,7 +533,7 @@ macro (add_executable target)
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on, then provide the memcpy symbol explicitly to neutralize thinlto's libcall generation.
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:clickhouse_memcpy>)
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
else ()
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
endif ()
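As an aside, a minimal hedged sketch of the interposition idea the comment above refers to (this is not ClickHouse's actual clickhouse_memcpy): if an object linked into the executable defines memcpy, the linker resolves compiler-generated memcpy libcalls, such as the ones (Thin)LTO emits, to that definition rather than to libc.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for an interposed memcpy. A real implementation must keep the
// compiler from turning this loop back into a memcpy libcall (e.g. build with -fno-builtin).
extern "C" void * memcpy(void * dst, const void * src, size_t n)
{
    auto * d = static_cast<unsigned char *>(dst);
    auto * s = static_cast<const unsigned char *>(src);
    for (size_t i = 0; i < n; ++i)
        d[i] = s[i];
    return dst;
}

int main()
{
    char from[] = "hello";
    char to[sizeof(from)];
    memcpy(to, from, sizeof(from));   // resolves to the definition above at link time
    std::puts(to);
}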

View File

@@ -74,7 +74,6 @@ target_link_libraries (common
${CITYHASH_LIBRARIES}
boost::headers_only
boost::system
FastMemcpy
Poco::Net
Poco::Net::SSL
Poco::Util

View File

@@ -152,7 +152,7 @@ const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) co
auto it = impls.emplace(time_zone, nullptr).first;
if (!it->second)
it->second = std::make_unique<DateLUTImpl>(time_zone);
it->second = std::unique_ptr<DateLUTImpl>(new DateLUTImpl(time_zone));
return *it->second;
}

View File

@@ -32,7 +32,6 @@ public:
return date_lut.getImplementation(time_zone);
}
static void setDefaultTimezone(const std::string & time_zone)
{
auto & date_lut = getInstance();

View File

@@ -46,24 +46,41 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
if (&inside_main)
assert(inside_main);
size_t i = 0;
time_t start_of_day = 0;
cctz::time_zone cctz_time_zone;
if (!cctz::load_time_zone(time_zone, &cctz_time_zone))
throw Poco::Exception("Cannot load time zone " + time_zone_);
cctz::time_zone::absolute_lookup start_of_epoch_lookup = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(start_of_day));
offset_at_start_of_epoch = start_of_epoch_lookup.offset;
offset_is_whole_number_of_hours_everytime = true;
constexpr cctz::civil_day epoch{1970, 1, 1};
constexpr cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1};
time_t start_of_day;
cctz::civil_day date{1970, 1, 1};
/// Note: it's validated against all timezones in the system.
static_assert((epoch - lut_start) == daynum_offset_epoch);
offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset;
offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset;
offset_is_whole_number_of_hours_during_epoch = true;
cctz::civil_day date = lut_start;
UInt32 i = 0;
do
{
cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date);
start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); /// Ambiguity is possible.
/// Ambiguity is possible if the time was changed backwards at midnight,
/// or if after midnight the time was changed back to midnight (for example, one hour backwards at 01:00),
/// or if after midnight the time was changed to the previous day (for example, two hours backwards at 01:00).
/// Then midnight appears twice. Usually the time change happens exactly at 00:00 or 01:00.
/// If the transition did not involve the previous day, we should use the first midnight as the start of the day,
/// otherwise it's better to use the second midnight.
std::chrono::time_point start_of_day_time_point = lookup.trans < lookup.post
? lookup.post /* Second midnight appears after transition, so there was a piece of previous day after transition */
: lookup.pre;
start_of_day = std::chrono::system_clock::to_time_t(start_of_day_time_point);
Values & values = lut[i];
values.year = date.year();
@@ -72,7 +89,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
values.day_of_week = getDayOfWeek(date);
values.date = start_of_day;
assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR);
assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR + 1);
assert(values.month >= 1 && values.month <= 12);
assert(values.day_of_month >= 1 && values.day_of_month <= 31);
assert(values.day_of_week >= 1 && values.day_of_week <= 7);
@@ -85,50 +102,42 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
else
values.days_in_month = i != 0 ? lut[i - 1].days_in_month : 31;
values.time_at_offset_change = 0;
values.amount_of_offset_change = 0;
values.time_at_offset_change_value = 0;
values.amount_of_offset_change_value = 0;
if (start_of_day % 3600)
offset_is_whole_number_of_hours_everytime = false;
if (offset_is_whole_number_of_hours_during_epoch && start_of_day > 0 && start_of_day % 3600)
offset_is_whole_number_of_hours_during_epoch = false;
/// If the UTC offset was changed on the previous day.
if (i != 0)
/// If the UTC offset was changed on this day.
/// A change of time zone without a transition is possible, e.g. Moscow, 1991: Sun, 31 Mar, 02:00 MSK to EEST.
cctz::time_zone::civil_transition transition{};
if (cctz_time_zone.next_transition(start_of_day_time_point - std::chrono::seconds(1), &transition)
&& (cctz::civil_day(transition.from) == date || cctz::civil_day(transition.to) == date)
&& transition.from != transition.to)
{
auto amount_of_offset_change_at_prev_day = 86400 - (lut[i].date - lut[i - 1].date);
if (amount_of_offset_change_at_prev_day)
{
lut[i - 1].amount_of_offset_change = amount_of_offset_change_at_prev_day;
values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor;
values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor;
const auto utc_offset_at_beginning_of_day = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(lut[i - 1].date)).offset;
// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n";
// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n";
/// Find a time (timestamp offset from beginning of day),
/// when UTC offset was changed. Search is performed with 15-minute granularity, assuming it is enough.
/// We don't support too large changes.
if (values.amount_of_offset_change_value > 24 * 4)
values.amount_of_offset_change_value = 24 * 4;
else if (values.amount_of_offset_change_value < -24 * 4)
values.amount_of_offset_change_value = -24 * 4;
time_t time_at_offset_change = 900;
while (time_at_offset_change < 86400)
{
auto utc_offset_at_current_time = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(
lut[i - 1].date + time_at_offset_change)).offset;
if (utc_offset_at_current_time != utc_offset_at_beginning_of_day)
break;
time_at_offset_change += 900;
}
lut[i - 1].time_at_offset_change = time_at_offset_change;
/// We don't support cases when a time change results in switching to the previous day.
if (static_cast<int>(lut[i - 1].time_at_offset_change) + static_cast<int>(lut[i - 1].amount_of_offset_change) < 0)
lut[i - 1].time_at_offset_change = -lut[i - 1].amount_of_offset_change;
}
/// We don't support cases when a time change results in switching to the previous day.
/// Shift the point of time change later.
if (values.time_at_offset_change_value + values.amount_of_offset_change_value < 0)
values.time_at_offset_change_value = -values.amount_of_offset_change_value;
}
/// Going to next day.
++date;
++i;
}
while (start_of_day <= DATE_LUT_MAX && i <= DATE_LUT_MAX_DAY_NUM);
while (i < DATE_LUT_SIZE && lut[i - 1].year <= DATE_LUT_MAX_YEAR);
/// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases.
while (i < DATE_LUT_SIZE)
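As an aside, a standalone sketch of the cctz lookup behavior the new constructor relies on may help; the zone and date below are illustrative assumptions, not taken from the diff. cctz::time_zone::lookup() of a civil midnight reports whether that wall-clock time is unique, skipped, or repeated, and pre/trans/post give the candidate absolute times, which is what the expression "lookup.trans < lookup.post ? lookup.post : lookup.pre" above discriminates.

#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <chrono>
#include <cstdio>

int main()
{
    cctz::time_zone tz;
    if (!cctz::load_time_zone("America/Sao_Paulo", &tz))   // zone chosen for its midnight DST shifts (assumption)
        return 1;

    const cctz::civil_day day{2018, 11, 4};                 // illustrative date
    const cctz::time_zone::civil_lookup lookup = tz.lookup(cctz::civil_second(day));

    // In the UNIQUE case pre == trans == post, so both branches agree;
    // otherwise pick the midnight on the correct side of the transition.
    const auto start_of_day = lookup.trans < lookup.post ? lookup.post : lookup.pre;
    std::printf("start of day (unix): %lld\n",
                static_cast<long long>(std::chrono::system_clock::to_time_t(start_of_day)));
}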

File diff suppressed because it is too large.

View File

@@ -7,3 +7,8 @@
* See DateLUTImpl for usage examples.
*/
STRONG_TYPEDEF(UInt16, DayNum)
/** Represents the number of days since 1970-01-01, but in an extended range,
* covering dates before 1970-01-01 and after 2105.
*/
STRONG_TYPEDEF(Int32, ExtendedDayNum)
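To make the range difference concrete, here is a small self-contained illustration with simplified stand-ins for the two typedefs (plain structs, not the real STRONG_TYPEDEF): an unsigned 16-bit day number cannot represent days before the epoch, while the signed 32-bit one can.

#include <cstdint>
#include <cstdio>

struct SimpleDayNum         { uint16_t days; };   // days since 1970-01-01; unsigned, so no pre-epoch dates
struct SimpleExtendedDayNum { int32_t days; };    // signed, so dates before 1970-01-01 are representable

int main()
{
    SimpleDayNum epoch{0};                          // 1970-01-01
    SimpleExtendedDayNum before_epoch{-25567};      // roughly 1900-01-01, only representable in the extended type
    std::printf("%u %d\n", epoch.days, before_epoch.days);
}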

View File

@@ -92,20 +92,10 @@ public:
LocalDate(const LocalDate &) noexcept = default;
LocalDate & operator= (const LocalDate &) noexcept = default;
LocalDate & operator= (time_t time)
{
init(time);
return *this;
}
operator time_t() const
{
return DateLUT::instance().makeDate(m_year, m_month, m_day);
}
DayNum getDayNum() const
{
return DateLUT::instance().makeDayNum(m_year, m_month, m_day);
const auto & lut = DateLUT::instance();
return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
}
operator DayNum() const
@@ -166,12 +156,3 @@ public:
};
static_assert(sizeof(LocalDate) == 4);
namespace std
{
inline string to_string(const LocalDate & date)
{
return date.toString();
}
}

View File

@@ -29,29 +29,16 @@ private:
/// NOTE We may use attribute packed instead, but it is less portable.
unsigned char pad = 0;
void init(time_t time)
void init(time_t time, const DateLUTImpl & time_zone)
{
if (unlikely(time > DATE_LUT_MAX || time == 0))
{
m_year = 0;
m_month = 0;
m_day = 0;
m_hour = 0;
m_minute = 0;
m_second = 0;
DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time);
return;
}
const auto & date_lut = DateLUT::instance();
const auto & values = date_lut.getValues(time);
m_year = values.year;
m_month = values.month;
m_day = values.day_of_month;
m_hour = date_lut.toHour(time);
m_minute = date_lut.toMinute(time);
m_second = date_lut.toSecond(time);
m_year = components.date.year;
m_month = components.date.month;
m_day = components.date.day;
m_hour = components.time.hour;
m_minute = components.time.minute;
m_second = components.time.second;
(void)pad; /// Suppress unused private field warning.
}
@@ -73,9 +60,9 @@ private:
}
public:
explicit LocalDateTime(time_t time)
explicit LocalDateTime(time_t time, const DateLUTImpl & time_zone = DateLUT::instance())
{
init(time);
init(time, time_zone);
}
LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_,
@@ -104,19 +91,6 @@ public:
LocalDateTime(const LocalDateTime &) noexcept = default;
LocalDateTime & operator= (const LocalDateTime &) noexcept = default;
LocalDateTime & operator= (time_t time)
{
init(time);
return *this;
}
operator time_t() const
{
return m_year == 0
? 0
: DateLUT::instance().makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
}
unsigned short year() const { return m_year; }
unsigned char month() const { return m_month; }
unsigned char day() const { return m_day; }
@@ -132,8 +106,30 @@ public:
void second(unsigned char x) { m_second = x; }
LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
LocalDateTime toStartOfDate() { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
std::string toString() const
{
std::string s{"0000-00-00 00:00:00"};
s[0] += m_year / 1000;
s[1] += (m_year / 100) % 10;
s[2] += (m_year / 10) % 10;
s[3] += m_year % 10;
s[5] += m_month / 10;
s[6] += m_month % 10;
s[8] += m_day / 10;
s[9] += m_day % 10;
s[11] += m_hour / 10;
s[12] += m_hour % 10;
s[14] += m_minute / 10;
s[15] += m_minute % 10;
s[17] += m_second / 10;
s[18] += m_second % 10;
return s;
}
bool operator< (const LocalDateTime & other) const
{
@@ -167,14 +163,3 @@ public:
};
static_assert(sizeof(LocalDateTime) == 8);
namespace std
{
inline string to_string(const LocalDateTime & datetime)
{
stringstream str;
str << datetime;
return str.str();
}
}
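A brief usage sketch of the reworked class, assuming the constructor and toString shown above and the usual <common/...> include paths; the printed value depends on the time zone passed in.

#include <common/DateLUT.h>
#include <common/LocalDateTime.h>
#include <iostream>

int main()
{
    // Render the epoch in an explicitly chosen time zone instead of the process default.
    LocalDateTime dt(0, DateLUT::instance("UTC"));
    std::cout << dt.toString() << std::endl;   // 1970-01-01 00:00:00
}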

View File

@@ -12,6 +12,7 @@ private:
T t;
public:
using UnderlyingType = T;
template <class Enable = typename std::is_copy_constructible<T>::type>
explicit StrongTypedef(const T & t_) : t(t_) {}
template <class Enable = typename std::is_move_constructible<T>::type>
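One plausible reading of the Enable default argument above is that it ties each converting constructor to the corresponding trait; since constructor templates are instantiated only when actually called, the typedef stays usable for move-only types. A simplified sketch of the pattern (not the real StrongTypedef):

#include <memory>
#include <type_traits>
#include <utility>

template <typename T>
struct Typedef
{
    T t;

    // Instantiated only when used, so Typedef<T> compiles even for non-copyable T.
    template <class Enable = typename std::is_copy_constructible<T>::type>
    explicit Typedef(const T & t_) : t(t_) {}

    template <class Enable = typename std::is_move_constructible<T>::type>
    explicit Typedef(T && t_) : t(std::move(t_)) {}
};

int main()
{
    Typedef<std::unique_ptr<int>> moved(std::make_unique<int>(42));   // uses the move overload
    // Typedef<std::unique_ptr<int>> copied(moved.t);                 // would not compile: unique_ptr is not copyable
    return *moved.t == 42 ? 0 : 1;
}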

View File

@@ -1,25 +1,2 @@
include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake)
add_executable (date_lut2 date_lut2.cpp)
add_executable (date_lut3 date_lut3.cpp)
add_executable (date_lut_default_timezone date_lut_default_timezone.cpp)
add_executable (local_date_time_comparison local_date_time_comparison.cpp)
add_executable (realloc-perf allocator.cpp)
set(PLATFORM_LIBS ${CMAKE_DL_LIBS})
target_link_libraries (date_lut2 PRIVATE common ${PLATFORM_LIBS})
target_link_libraries (date_lut3 PRIVATE common ${PLATFORM_LIBS})
target_link_libraries (date_lut_default_timezone PRIVATE common ${PLATFORM_LIBS})
target_link_libraries (local_date_time_comparison PRIVATE common)
target_link_libraries (realloc-perf PRIVATE common)
add_check(local_date_time_comparison)
if(USE_GTEST)
add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp)
target_link_libraries(unit_tests_libcommon PRIVATE common ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES})
add_check(unit_tests_libcommon)
endif()
add_executable (dump_variable dump_variable.cpp)
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)

View File

@@ -1,47 +0,0 @@
#include <cstdlib>
#include <cstring>
#include <vector>
#include <thread>
void thread_func()
{
for (size_t i = 0; i < 100; ++i)
{
size_t size = 4096;
void * buf = malloc(size);
if (!buf)
abort();
memset(buf, 0, size);
while (size < 1048576)
{
size_t next_size = size * 4;
void * new_buf = realloc(buf, next_size);
if (!new_buf)
abort();
buf = new_buf;
memset(reinterpret_cast<char*>(buf) + size, 0, next_size - size);
size = next_size;
}
free(buf);
}
}
int main(int, char **)
{
std::vector<std::thread> threads(16);
for (size_t i = 0; i < 1000; ++i)
{
for (auto & thread : threads)
thread = std::thread(thread_func);
for (auto & thread : threads)
thread.join();
}
return 0;
}

View File

@@ -1,53 +0,0 @@
#include <iostream>
#include <cstring>
#include <common/DateLUT.h>
static std::string toString(time_t Value)
{
struct tm tm;
char buf[96];
localtime_r(&Value, &tm);
snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
return buf;
}
static time_t orderedIdentifierToDate(unsigned value)
{
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_year = value / 10000 - 1900;
tm.tm_mon = (value % 10000) / 100 - 1;
tm.tm_mday = value % 100;
tm.tm_isdst = -1;
return mktime(&tm);
}
void loop(time_t begin, time_t end, int step)
{
const auto & date_lut = DateLUT::instance();
for (time_t t = begin; t < end; t += step)
std::cout << toString(t)
<< ", " << toString(date_lut.toTime(t))
<< ", " << date_lut.toHour(t)
<< std::endl;
}
int main(int, char **)
{
loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
loop(orderedIdentifierToDate(20141020), orderedIdentifierToDate(20141106), 15 * 60);
return 0;
}

View File

@@ -1,62 +0,0 @@
#include <iostream>
#include <cstring>
#include <Poco/Exception.h>
#include <common/DateLUT.h>
static std::string toString(time_t Value)
{
struct tm tm;
char buf[96];
localtime_r(&Value, &tm);
snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
return buf;
}
static time_t orderedIdentifierToDate(unsigned value)
{
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_year = value / 10000 - 1900;
tm.tm_mon = (value % 10000) / 100 - 1;
tm.tm_mday = value % 100;
tm.tm_isdst = -1;
return mktime(&tm);
}
void loop(time_t begin, time_t end, int step)
{
const auto & date_lut = DateLUT::instance();
for (time_t t = begin; t < end; t += step)
{
time_t t2 = date_lut.makeDateTime(date_lut.toYear(t), date_lut.toMonth(t), date_lut.toDayOfMonth(t),
date_lut.toHour(t), date_lut.toMinute(t), date_lut.toSecond(t));
std::string s1 = toString(t);
std::string s2 = toString(t2);
std::cerr << s1 << ", " << s2 << std::endl;
if (s1 != s2)
throw Poco::Exception("Test failed.");
}
}
int main(int, char **)
{
loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
return 0;
}

View File

@@ -1,31 +0,0 @@
#include <iostream>
#include <common/DateLUT.h>
#include <Poco/Exception.h>
int main(int, char **)
{
try
{
const auto & date_lut = DateLUT::instance();
std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl;
time_t now = time(nullptr);
std::cout << "Current time: " << date_lut.timeToString(now)
<< ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl;
}
catch (const Poco::Exception & e)
{
std::cerr << e.displayText() << std::endl;
return 1;
}
catch (std::exception & e)
{
std::cerr << "std::exception: " << e.what() << std::endl;
return 2;
}
catch (...)
{
std::cerr << "Some exception" << std::endl;
return 3;
}
return 0;
}

View File

@@ -1,656 +0,0 @@
#include <vector>
#include <string>
#include <exception>
#include <common/JSON.h>
#include <boost/range/irange.hpp>
using namespace std::literals::string_literals;
#include <gtest/gtest.h>
enum class ResultType
{
Return,
Throw
};
struct GetStringTestRecord
{
const char * input;
ResultType result_type;
const char * result;
};
TEST(JSONSuite, SimpleTest)
{
std::vector<GetStringTestRecord> test_data =
{
{ R"("name")", ResultType::Return, "name" },
{ R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("184509")", ResultType::Return, "184509" },
{ R"("category")", ResultType::Return, "category" },
{ R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("В наличии")", ResultType::Return, "В наличии" },
{ R"("price")", ResultType::Return, "price" },
{ R"("2390.00")", ResultType::Return, "2390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("Карточка")", ResultType::Return, "Карточка" },
{ R"("position")", ResultType::Return, "position" },
{ R"("detail")", ResultType::Return, "detail" },
{ R"("actionField")", ResultType::Return, "actionField" },
{ R"("list")", ResultType::Return, "list" },
{ R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")", ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc" },
{ R"("action")", ResultType::Return, "action" },
{ R"("detail")", ResultType::Return, "detail" },
{ R"("products")", ResultType::Return, "products" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
{ R"("id")", ResultType::Return, "id" },
{ R"("184509")", ResultType::Return, "184509" },
{ R"("price")", ResultType::Return, "price" },
{ R"("2390.00")", ResultType::Return, "2390.00" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("Vitek")", ResultType::Return, "Vitek" },
{ R"("category")", ResultType::Return, "category" },
{ R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("В наличии")", ResultType::Return, "В наличии" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("isAuthorized")", ResultType::Return, "isAuthorized" },
{ R"("isSubscriber")", ResultType::Return, "isSubscriber" },
{ R"("postType")", ResultType::Return, "postType" },
{ R"("Новости")", ResultType::Return, "Новости" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")", ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001" },
{ R"("price")", ResultType::Return, "price" },
{ R"("currencyCode")", ResultType::Return, "currencyCode" },
{ R"("RUB")", ResultType::Return, "RUB" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("trash_login")", ResultType::Return, "trash_login" },
{ R"("novikoff")", ResultType::Return, "novikoff" },
{ R"("trash_cat_link")", ResultType::Return, "trash_cat_link" },
{ R"("progs")", ResultType::Return, "progs" },
{ R"("trash_parent_link")", ResultType::Return, "trash_parent_link" },
{ R"("content")", ResultType::Return, "content" },
{ R"("trash_posted_parent")", ResultType::Return, "trash_posted_parent" },
{ R"("content.01.2016")", ResultType::Return, "content.01.2016" },
{ R"("trash_posted_cat")", ResultType::Return, "trash_posted_cat" },
{ R"("progs.01.2016")", ResultType::Return, "progs.01.2016" },
{ R"("trash_virus_count")", ResultType::Return, "trash_virus_count" },
{ R"("trash_is_android")", ResultType::Return, "trash_is_android" },
{ R"("trash_is_wp8")", ResultType::Return, "trash_is_wp8" },
{ R"("trash_is_ios")", ResultType::Return, "trash_is_ios" },
{ R"("trash_posted")", ResultType::Return, "trash_posted" },
{ R"("01.2016")", ResultType::Return, "01.2016" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("merchantId")", ResultType::Return, "merchantId" },
{ R"("13694_49246")", ResultType::Return, "13694_49246" },
{ R"("cps-source")", ResultType::Return, "cps-source" },
{ R"("wargaming")", ResultType::Return, "wargaming" },
{ R"("cps_provider")", ResultType::Return, "cps_provider" },
{ R"("default")", ResultType::Return, "default" },
{ R"("errorReason")", ResultType::Return, "errorReason" },
{ R"("no errors")", ResultType::Return, "no errors" },
{ R"("scid")", ResultType::Return, "scid" },
{ R"("isAuthPayment")", ResultType::Return, "isAuthPayment" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("rubric")", ResultType::Return, "rubric" },
{ R"("")", ResultType::Return, "" },
{ R"("rubric")", ResultType::Return, "rubric" },
{ R"("Мир")", ResultType::Return, "Мир" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("__ym")", ResultType::Return, "__ym" },
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
{ R"("impressions")", ResultType::Return, "impressions" },
{ R"("id")", ResultType::Return, "id" },
{ R"("863813")", ResultType::Return, "863813" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("863839")", ResultType::Return, "863839" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("863847")", ResultType::Return, "863847" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911480")", ResultType::Return, "911480" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911484")", ResultType::Return, "911484" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Little bears, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911489")", ResultType::Return, "911489" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911496")", ResultType::Return, "911496" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911504")", ResultType::Return, "911504" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911508")", ResultType::Return, "911508" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Kittens, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911512")", ResultType::Return, "911512" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911516")", ResultType::Return, "911516" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911520")", ResultType::Return, "911520" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911524")", ResultType::Return, "911524" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911528")", ResultType::Return, "911528" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("888616")", ResultType::Return, "888616" },
{ R"("name")", ResultType::Return, "name" },
{ "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\"", ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Одежда и обувь/Мужская одежда/Футболки/")", ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("406.60")", ResultType::Return, "406.60" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913361")", ResultType::Return, "913361" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913364")", ResultType::Return, "913364" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913367")", ResultType::Return, "913367" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913385")", ResultType::Return, "913385" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913391")", ResultType::Return, "913391" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("usertype")", ResultType::Return, "usertype" },
{ R"("visitor")", ResultType::Return, "visitor" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("__ym")", ResultType::Return, "__ym" },
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
{ R"("impressions")", ResultType::Return, "impressions" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("__ym")", ResultType::Return, "__ym" },
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
{ R"("currencyCode")", ResultType::Return, "currencyCode" },
{ R"("RUR")", ResultType::Return, "RUR" },
{ R"("impressions")", ResultType::Return, "impressions" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Чайник электрический Mystery MEK-1627, белый")", ResultType::Return, "Чайник электрический Mystery MEK-1627, белый" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("Mystery")", ResultType::Return, "Mystery" },
{ R"("id")", ResultType::Return, "id" },
{ R"("187180")", ResultType::Return, "187180" },
{ R"("category")", ResultType::Return, "category" },
{ R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")", ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("В наличии")", ResultType::Return, "В наличии" },
{ R"("price")", ResultType::Return, "price" },
{ R"("1630.00")", ResultType::Return, "1630.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("Карточка")", ResultType::Return, "Карточка" },
{ R"("position")", ResultType::Return, "position" },
{ R"("detail")", ResultType::Return, "detail" },
{ R"("actionField")", ResultType::Return, "actionField" },
{ R"("list")", ResultType::Return, "list" },
{ "\0\"", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/igrushki/konstruktory\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рисование/Инструменты и кра\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0t", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/Хозтовары/Хранение вещей и организа\xD1\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Хозтовары/Товары для стир\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"li\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kosmetika-i-parfyum/parfyumeriya/mu\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/ko\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "", ResultType::Throw, "JSON: begin >= end." },
{ "\"/stroitelstvo-i-remont/stroit\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/s\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Строительство и ремонт/Строительный инструмент/Изм\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/avto/soputstvuy\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/str\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xFF", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Мелкая бытовая техника/Мелки\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Пряжа \\\"Бамбук стрейч\\0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Карандаш чёрнографитны\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Канцтовары/Ежедневники и блокн\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kanctovary/ezhednevniki-i-blok\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Стакан \xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\x80", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Органайзер для хранения аксессуаров, \0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"quantity\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Сменный блок для тетрадей на кольцах А5, 160 листов клетка, офсет \xE2\x84\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Сувениры/Ф\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"\0\"", ResultType::Return, "\0" },
{ "\"\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"va\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"В \0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/letnie-tovary/z\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Посудомоечная машина Ha\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Крупная бытов\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"var\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Телевизоры и видеотехника/Всё для домашних кинотеатр\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Флеш-диск Transcend JetFlash 620 8GB (TS8GJF62\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Табурет Мег\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"variant\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Катал\xD0\0\"", ResultType::Return, "Катал\xD0\0" },
{ "\"К\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"17\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/igrushki/razvivayusc\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Ключница \\\"\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Игр\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Крупная бытовая техника/Стиральные машины/С фронт\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Сантехника/Мебель для ванных комнат/Стол\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0o", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/igrushki/konstruktory\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рисование/Инструменты и кра\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/Хозтовары/Хранение вещей и организа\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Хозтовары/Товары для стир\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"li\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/igrushki/igrus\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kosmetika-i-parfyum/parfyumeriya/mu\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/ko\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/avto/avtomobilnyy\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroit\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/s\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Строительство и ремонт/Строительный инструмент/Изм\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/avto/soputstvuy\0\"", ResultType::Return, "/avto/soputstvuy\0" },
{ "\"/str\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Чайник электрический Vitesse\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Мелкая бытовая техника/Мелки\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Пряжа \\\"Бамбук стрейч\\0о", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Карандаш чёрнографитны\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0\"", ResultType::Return, "/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0" },
{ "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Подаро\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Средство для прочис\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"i\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/p\0\"", ResultType::Return, "/p\0" },
{ "\"/Сувениры/Магниты, н\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Дерев\xD0\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/prazdniki/svadba/svadebnaya-c\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Канцт\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Праздники/То\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"v\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Косметика \xD0\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Спорт и отдых/Настольные игры/Покер, руле\xD1\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"categ\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/retailr\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/retailrocket\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Ежедневник недат А5 140л кл,ляссе,обл пв\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/432809/ezhednevnik-organayzer-sredniy-s-remeshkom-na-knopke-v-oblozhke-kalkulyator-kalendar-do-\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Канцтовары/Ежедневники и блокн\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kanctovary/ezhednevniki-i-blok\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Стакан \xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0\"", ResultType::Return, "c\0" },
};
for (auto i : boost::irange(0, 1/*00000*/))
{
static_cast<void>(i);
for (auto & r : test_data)
{
try
{
JSON j(r.input, r.input + strlen(r.input));
ASSERT_EQ(j.getString(), r.result);
ASSERT_TRUE(r.result_type == ResultType::Return);
}
catch (JSONException & e)
{
ASSERT_TRUE(r.result_type == ResultType::Throw);
ASSERT_EQ(e.message(), r.result);
}
}
}
}

View File

@ -1,5 +1,8 @@
if (GLIBC_COMPATIBILITY)
set (ENABLE_FASTMEMCPY ON)
add_subdirectory(memcpy)
if(TARGET memcpy)
set(MEMCPY_LIBRARY memcpy)
endif()
enable_language(ASM)
include(CheckIncludeFile)
@ -27,13 +30,6 @@ if (GLIBC_COMPATIBILITY)
list(APPEND glibc_compatibility_sources musl/getentropy.c)
endif()
if (NOT ARCH_ARM)
# clickhouse_memcpy don't support ARCH_ARM, see https://github.com/ClickHouse/ClickHouse/issues/18951
add_library (clickhouse_memcpy OBJECT
${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy/memcpy_wrapper.c
)
endif()
# Need to omit frame pointers to match the performance of glibc
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
@ -51,15 +47,16 @@ if (GLIBC_COMPATIBILITY)
target_compile_options(glibc-compatibility PRIVATE -fPIC)
endif ()
target_link_libraries(global-libs INTERFACE glibc-compatibility)
target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY})
install(
TARGETS glibc-compatibility
TARGETS glibc-compatibility ${MEMCPY_LIBRARY}
EXPORT global
ARCHIVE DESTINATION lib
)
message (STATUS "Some symbols from glibc will be replaced for compatibility")
elseif (YANDEX_OFFICIAL_BUILD)
message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
endif ()

View File

@ -0,0 +1,8 @@
if (ARCH_AMD64)
add_library(memcpy STATIC memcpy.cpp)
# We allow including memcpy.h from user code for better inlining.
target_include_directories(memcpy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
target_compile_options(memcpy PRIVATE -fno-builtin-memcpy)
endif ()

View File

@ -0,0 +1,6 @@
#include "memcpy.h"
extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size)
{
return inline_memcpy(dst, src, size);
}
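
Note: besides interposing the global memcpy symbol as in the wrapper above, user code can include memcpy.h and call inline_memcpy directly (benefit 3 in its header comment below). A minimal sketch, assuming a target that links the memcpy library from the CMake file above; the function name copy_small is illustrative and not part of the commit:

#include <cstddef>
#include "memcpy.h"   /// the header shown below; its directory is exported by the memcpy target above

/// For small, non-constant sizes a direct call to inline_memcpy lets the compiler inline the copy,
/// as the header comment recommends.
static void copy_small(char * dst, const char * src, size_t n)
{
    inline_memcpy(dst, src, n);
}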

View File

@ -0,0 +1,217 @@
#include <cstddef>
#include <emmintrin.h>
/** Custom memcpy implementation for ClickHouse.
* It has the following benefits over using glibc's implementation:
 * 1. Avoiding a dependency on a specific version of a glibc symbol, like memcpy@@GLIBC_2.14, for portability.
 * 2. Avoiding an indirect call via the PLT due to shared linking, which can be less efficient.
 * 3. It's possible to include this header and call inline_memcpy directly for better inlining or interprocedural analysis.
 * 4. Better results on our performance tests on current CPUs: up to 25% on some queries and 0.7%..1% on average across all queries.
*
* Writing our own memcpy is extremely difficult for the following reasons:
* 1. The optimal variant depends on the specific CPU model.
* 2. The optimal variant depends on the distribution of size arguments.
* 3. It depends on the number of threads copying data concurrently.
 * 4. It also depends on how the calling code uses the copied data and how the different memcpy calls are related to each other.
 * The vast range of scenarios makes proper testing especially difficult.
 * When writing our own memcpy there is a risk of overoptimizing it
* on non-representative microbenchmarks while making real-world use cases actually worse.
*
* Most of the benchmarks for memcpy on the internet are wrong.
*
* Let's look at the details:
*
 * For small sizes, the order of branches in the code is important.
 * There are variants with a specific order of branches (like here or in glibc)
 * or with a jump table (in asm code, see the example from Cosmopolitan libc:
 * https://github.com/jart/cosmopolitan/blob/de09bec215675e9b0beb722df89c6f794da74f3f/libc/nexgen32e/memcpy.S#L61)
 * or with a Duff device in C (see https://github.com/skywind3000/FastMemcpy/).
*
* It's also important how to copy uneven sizes.
 * Almost every implementation, including this one, uses two overlapping movs.
*
 * It is important to disable -ftree-loop-distribute-patterns when compiling a memcpy implementation,
 * otherwise the compiler can replace its internal loops with a call to memcpy, which will lead to infinite recursion.
*
* For larger sizes it's important to choose the instructions used:
* - SSE or AVX or AVX-512;
* - rep movsb;
* Performance will depend on the size threshold, on the CPU model, on the "erms" flag
* ("Enhansed Rep MovS" - it indicates that performance of "rep movsb" is decent for large sizes)
* https://stackoverflow.com/questions/43343231/enhanced-rep-movsb-for-memcpy
*
* Using AVX-512 can be bad due to throttling.
 * Using AVX can be bad if most of the code is using SSE, due to the switching penalty
 * (it also depends on the usage of the "vzeroupper" instruction).
* But in some cases AVX gives a win.
*
* It also depends on how many times the loop will be unrolled.
 * We are unrolling the loop 8 times (by the number of available registers), but it is not always the best choice.
*
* It also depends on the usage of aligned or unaligned loads/stores.
* We are using unaligned loads and aligned stores.
*
* It also depends on the usage of prefetch instructions. It makes sense on some Intel CPUs but can slow down performance on AMD.
 * Setting up the correct offset for prefetching is non-obvious.
*
 * Non-temporal (cache-bypassing) stores can be used for very large sizes (more than a half of L3 cache).
* But the exact threshold is unclear - when doing memcpy from multiple threads the optimal threshold can be lower,
* because L3 cache is shared (and L2 cache is partially shared).
*
 * A very large memcpy size typically indicates suboptimal (not cache-friendly) algorithms in the code or unrealistic scenarios,
 * so we don't pay attention to using non-temporal stores.
*
 * On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
 * even compared to non-temporal aligned unrolled stores with the widest registers.
*
* memcpy can be written in asm, C or C++. The latter can also use inline asm.
 * The asm implementation can be better for making sure that the compiler won't make the code worse,
 * and for ensuring the order of branches, the code layout, and the usage of all required registers.
 * But if it is located in a separate translation unit, inlining will not be possible
 * (inline asm can be used to overcome this limitation).
 * Sometimes C or C++ code can be further optimized by the compiler.
 * For example, clang is capable of replacing SSE intrinsics with AVX code if -mavx is used.
*
 * Please note that the compiler can replace plain code with a call to memcpy and vice versa:
 * - memcpy with a compile-time known small size is replaced with simple instructions without a call to memcpy;
* it is controlled by -fbuiltin-memcpy and can be manually ensured by calling __builtin_memcpy.
 * This is often used to implement unaligned load/store without undefined behaviour in C++ (see the sketch after this list).
 * - a loop copying bytes can be recognized and replaced with a call to memcpy;
* it is controlled by -ftree-loop-distribute-patterns.
 * - also note that a loop copying bytes can be unrolled, peeled, and vectorized, which will give you
* inline code somewhat similar to a decent implementation of memcpy.
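 *
 *   A sketch of the unaligned-load idiom mentioned above (added for illustration, not part of the original comment):
 *     uint64_t x;
 *     __builtin_memcpy(&x, ptr, sizeof(x));  // ptr may be unaligned; with optimizations this typically compiles to a single mov on x86_64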
*
* This description is up to date as of Mar 2021.
*
* How to test the memcpy implementation for performance:
* 1. Test on real production workload.
* 2. For synthetic test, see utils/memcpy-bench, but make sure you will do the best to exhaust the wide range of scenarios.
*
 * TODO: Add self-tuning memcpy with a Bayesian bandits algorithm for large sizes.
* See https://habr.com/en/company/yandex/blog/457612/
*/
static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size)
{
/// We will use pointer arithmetic, so a char pointer will be used.
/// Note that __restrict makes sense (otherwise the compiler will reload data from memory
/// instead of using the values already in registers, due to possible aliasing).
char * __restrict dst = reinterpret_cast<char * __restrict>(dst_);
const char * __restrict src = reinterpret_cast<const char * __restrict>(src_);
/// Standard memcpy returns the original value of dst. It is rarely used but we have to do it.
/// If you use memcpy with small but non-constant sizes, you can call inline_memcpy directly
/// for inlining and removing this single instruction.
void * ret = dst;
tail:
/// Small sizes and tails after the loop for large sizes.
/// The order of branches is important but in fact the optimal order depends on the distribution of sizes in your application.
/// This order of branches is from the disassembly of glibc's code.
/// We copy chunks of possibly uneven size with two overlapping movs.
/// Example: to copy 5 bytes [0, 1, 2, 3, 4] we will copy tail [1, 2, 3, 4] first and then head [0, 1, 2, 3].
if (size <= 16)
{
if (size >= 8)
{
/// Chunks of 8..16 bytes.
__builtin_memcpy(dst + size - 8, src + size - 8, 8);
__builtin_memcpy(dst, src, 8);
}
else if (size >= 4)
{
/// Chunks of 4..7 bytes.
__builtin_memcpy(dst + size - 4, src + size - 4, 4);
__builtin_memcpy(dst, src, 4);
}
else if (size >= 2)
{
/// Chunks of 2..3 bytes.
__builtin_memcpy(dst + size - 2, src + size - 2, 2);
__builtin_memcpy(dst, src, 2);
}
else if (size >= 1)
{
/// A single byte.
*dst = *src;
}
/// No bytes remaining.
}
else
{
/// Medium and large sizes.
if (size <= 128)
{
/// Medium size, not enough for full loop unrolling.
/// We will copy the last 16 bytes.
_mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src + size - 16)));
/// Then we will copy every 16 bytes from the beginning in a loop.
/// The last loop iteration will possibly overwrite some part of the already copied last 16 bytes.
/// This is Ok, similar to the code for small sizes above.
while (size > 16)
{
_mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src)));
dst += 16;
src += 16;
size -= 16;
}
}
else
{
/// Large size with fully unrolled loop.
/// Align destination to 16 bytes boundary.
size_t padding = (16 - (reinterpret_cast<size_t>(dst) & 15)) & 15;
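/// Worked example (added for illustration): if dst & 15 == 9, then padding = (16 - 9) & 15 = 7, and advancing by 7 bytes
/// makes dst 16-byte aligned; if dst is already aligned, padding = (16 - 0) & 15 = 0 and the head copy below is skipped.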
/// If not aligned - we will copy the first 16 bytes with an unaligned store.
if (padding > 0)
{
__m128i head = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src));
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head);
dst += padding;
src += padding;
size -= padding;
}
/// Aligned unrolled copy. We will use half of the available SSE registers.
/// In general we cannot guarantee that both src and dst are aligned.
/// So, we will use aligned stores and unaligned loads.
__m128i c0, c1, c2, c3, c4, c5, c6, c7;
while (size >= 128)
{
c0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 0);
c1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 1);
c2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 2);
c3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 3);
c4 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 4);
c5 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 5);
c6 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 6);
c7 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 7);
src += 128;
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7);
dst += 128;
size -= 128;
}
/// The last remaining 0..127 bytes will be processed as usual.
goto tail;
}
}
return ret;
}
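
Note: a standalone sketch (not part of the commit) of the "two overlapping movs" trick the header comment above describes for uneven sizes; the function name copy_8_to_16 and the 8-byte granularity are illustrative. The same pattern is used with smaller granularity in the 4..7 and 2..3 byte branches of inline_memcpy.

#include <cstddef>

/// Copy 8..16 bytes with two possibly overlapping 8-byte copies,
/// the same way the 8..16 byte branch of inline_memcpy does it.
static void copy_8_to_16(char * dst, const char * src, size_t size)
{
    __builtin_memcpy(dst, src, 8);                         /// head: bytes [0, 8)
    __builtin_memcpy(dst + size - 8, src + size - 8, 8);   /// tail: bytes [size - 8, size), overlaps the head if size < 16
}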

View File

@ -1,5 +1,2 @@
add_executable (mysqlxx_test mysqlxx_test.cpp)
target_link_libraries (mysqlxx_test PRIVATE mysqlxx)
add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp)
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx)

View File

@ -1,21 +0,0 @@
<?xml version = '1.0' encoding = 'utf-8'?>
<yandex>
<mysql_goals>
<port>3306</port>
<user>root</user>
<db>Metrica</db>
<password>qwerty</password>
<replica>
<host>example02t</host>
<priority>0</priority>
</replica>
<replica>
<host>example02t</host>
<port>3306</port>
<user>root</user>
<password>qwerty</password>
<db>Metrica</db>
<priority>1</priority>
</replica>
</mysql_goals>
</yandex>

View File

@ -1,77 +0,0 @@
#include <iostream>
#include <mysqlxx/mysqlxx.h>
int main(int, char **)
{
try
{
mysqlxx::Connection connection("test", "127.0.0.1", "root", "qwerty", 3306);
std::cerr << "Connected." << std::endl;
{
mysqlxx::Query query = connection.query();
query << "SELECT 1 x, '2010-01-01 01:01:01' d";
mysqlxx::UseQueryResult result = query.use();
std::cerr << "use() called." << std::endl;
while (mysqlxx::Row row = result.fetch())
{
std::cerr << "Fetched row." << std::endl;
std::cerr << row[0] << ", " << row["x"] << std::endl;
std::cerr << row[1] << ", " << row["d"]
<< ", " << row[1].getDate()
<< ", " << row[1].getDateTime()
<< ", " << row[1].getDate()
<< ", " << row[1].getDateTime()
<< std::endl
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
<< row[1].getDate() << ", " << row[1].getDateTime() << std::endl
;
time_t t1 = row[0];
time_t t2 = row[1];
std::cerr << t1 << ", " << LocalDateTime(t1) << std::endl;
std::cerr << t2 << ", " << LocalDateTime(t2) << std::endl;
}
}
{
mysqlxx::UseQueryResult result = connection.query("SELECT 'abc\\\\def' x").use();
mysqlxx::Row row = result.fetch();
std::cerr << row << std::endl;
std::cerr << row << std::endl;
}
{
/// Copying a Query
mysqlxx::Query query1 = connection.query("SELECT");
mysqlxx::Query query2 = query1;
query2 << " 1";
std::cerr << query1.str() << ", " << query2.str() << std::endl;
}
{
/// NULL
mysqlxx::Null<int> x = mysqlxx::null;
std::cerr << (x == mysqlxx::null ? "Ok" : "Fail") << std::endl;
std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl;
std::cerr << (x.isNull() ? "Ok" : "Fail") << std::endl;
x = 1;
std::cerr << (x == mysqlxx::null ? "Fail" : "Ok") << std::endl;
std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl;
std::cerr << (x == 1 ? "Ok" : "Fail") << std::endl;
std::cerr << (x.isNull() ? "Fail" : "Ok") << std::endl;
}
}
catch (const mysqlxx::Exception & e)
{
std::cerr << e.code() << ", " << e.message() << std::endl;
throw;
}
return 0;
}

View File

@ -38,7 +38,6 @@ add_subdirectory (boost-cmake)
add_subdirectory (cctz-cmake)
add_subdirectory (consistent-hashing)
add_subdirectory (dragonbox-cmake)
add_subdirectory (FastMemcpy)
add_subdirectory (hyperscan-cmake)
add_subdirectory (jemalloc-cmake)
add_subdirectory (libcpuid-cmake)

View File

@ -1,28 +0,0 @@
option (ENABLE_FASTMEMCPY "Enable FastMemcpy library (only internal)" ${ENABLE_LIBRARIES})
if (NOT OS_LINUX OR ARCH_AARCH64)
set (ENABLE_FASTMEMCPY OFF)
endif ()
if (ENABLE_FASTMEMCPY)
set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy)
set (SRCS
${LIBRARY_DIR}/FastMemcpy.c
memcpy_wrapper.c
)
add_library (FastMemcpy ${SRCS})
target_include_directories (FastMemcpy PUBLIC ${LIBRARY_DIR})
target_compile_definitions(FastMemcpy PUBLIC USE_FASTMEMCPY=1)
message (STATUS "Using FastMemcpy")
else ()
add_library (FastMemcpy INTERFACE)
target_compile_definitions(FastMemcpy INTERFACE USE_FASTMEMCPY=0)
message (STATUS "Not using FastMemcpy")
endif ()

View File

@ -1,220 +0,0 @@
//=====================================================================
//
// FastMemcpy.c - skywind3000@163.com, 2015
//
// feature:
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
//
//=====================================================================
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#if (defined(_WIN32) || defined(WIN32))
#include <windows.h>
#include <mmsystem.h>
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
#elif defined(__unix)
#include <sys/time.h>
#include <unistd.h>
#else
#error it can only be compiled under windows or unix
#endif
#include "FastMemcpy.h"
unsigned int gettime()
{
#if (defined(_WIN32) || defined(WIN32))
return timeGetTime();
#else
static struct timezone tz={ 0,0 };
struct timeval time;
gettimeofday(&time,&tz);
return (time.tv_sec * 1000 + time.tv_usec / 1000);
#endif
}
void sleepms(unsigned int millisec)
{
#if defined(_WIN32) || defined(WIN32)
Sleep(millisec);
#else
usleep(millisec * 1000);
#endif
}
void benchmark(int dstalign, int srcalign, size_t size, int times)
{
char *DATA1 = (char*)malloc(size + 64);
char *DATA2 = (char*)malloc(size + 64);
size_t LINEAR1 = ((size_t)DATA1);
size_t LINEAR2 = ((size_t)DATA2);
char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1);
char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2);
char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1);
char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3);
unsigned int t1, t2;
int k;
sleepms(100);
t1 = gettime();
for (k = times; k > 0; k--) {
memcpy(dst, src, size);
}
t1 = gettime() - t1;
sleepms(100);
t2 = gettime();
for (k = times; k > 0; k--) {
memcpy_fast(dst, src, size);
}
t2 = gettime() - t2;
free(DATA1);
free(DATA2);
printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n",
dstalign? "aligned" : "unalign",
srcalign? "aligned" : "unalign", (int)t2, (int)t1);
}
void bench(int copysize, int times)
{
printf("benchmark(size=%d bytes, times=%d):\n", copysize, times);
benchmark(1, 1, copysize, times);
benchmark(1, 0, copysize, times);
benchmark(0, 1, copysize, times);
benchmark(0, 0, copysize, times);
printf("\n");
}
void random_bench(int maxsize, int times)
{
static char A[11 * 1024 * 1024 + 2];
static char B[11 * 1024 * 1024 + 2];
static int random_offsets[0x10000];
static int random_sizes[0x8000];
unsigned int i, p1, p2;
unsigned int t1, t2;
for (i = 0; i < 0x10000; i++) { // generate random offsets
random_offsets[i] = rand() % (10 * 1024 * 1024 + 1);
}
for (i = 0; i < 0x8000; i++) { // generate random sizes
random_sizes[i] = 1 + rand() % maxsize;
}
sleepms(100);
t1 = gettime();
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
int offset1 = random_offsets[(p1++) & 0xffff];
int offset2 = random_offsets[(p1++) & 0xffff];
int size = random_sizes[(p2++) & 0x7fff];
memcpy(A + offset1, B + offset2, size);
}
t1 = gettime() - t1;
sleepms(100);
t2 = gettime();
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
int offset1 = random_offsets[(p1++) & 0xffff];
int offset2 = random_offsets[(p1++) & 0xffff];
int size = random_sizes[(p2++) & 0x7fff];
memcpy_fast(A + offset1, B + offset2, size);
}
t2 = gettime() - t2;
printf("benchmark random access:\n");
printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1);
}
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
int main(void)
{
bench(32, 0x1000000);
bench(64, 0x1000000);
bench(512, 0x800000);
bench(1024, 0x400000);
bench(4096, 0x80000);
bench(8192, 0x40000);
bench(1024 * 1024 * 1, 0x800);
bench(1024 * 1024 * 4, 0x200);
bench(1024 * 1024 * 8, 0x100);
random_bench(2048, 8000000);
return 0;
}
/*
benchmark(size=32 bytes, times=16777216):
result(dst aligned, src aligned): memcpy_fast=78ms memcpy=260 ms
result(dst aligned, src unalign): memcpy_fast=78ms memcpy=250 ms
result(dst unalign, src aligned): memcpy_fast=78ms memcpy=266 ms
result(dst unalign, src unalign): memcpy_fast=78ms memcpy=234 ms
benchmark(size=64 bytes, times=16777216):
result(dst aligned, src aligned): memcpy_fast=109ms memcpy=281 ms
result(dst aligned, src unalign): memcpy_fast=109ms memcpy=328 ms
result(dst unalign, src aligned): memcpy_fast=109ms memcpy=343 ms
result(dst unalign, src unalign): memcpy_fast=93ms memcpy=344 ms
benchmark(size=512 bytes, times=8388608):
result(dst aligned, src aligned): memcpy_fast=125ms memcpy=218 ms
result(dst aligned, src unalign): memcpy_fast=156ms memcpy=484 ms
result(dst unalign, src aligned): memcpy_fast=172ms memcpy=546 ms
result(dst unalign, src unalign): memcpy_fast=172ms memcpy=515 ms
benchmark(size=1024 bytes, times=4194304):
result(dst aligned, src aligned): memcpy_fast=109ms memcpy=172 ms
result(dst aligned, src unalign): memcpy_fast=187ms memcpy=453 ms
result(dst unalign, src aligned): memcpy_fast=172ms memcpy=437 ms
result(dst unalign, src unalign): memcpy_fast=156ms memcpy=452 ms
benchmark(size=4096 bytes, times=524288):
result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms
result(dst aligned, src unalign): memcpy_fast=109ms memcpy=202 ms
result(dst unalign, src aligned): memcpy_fast=94ms memcpy=203 ms
result(dst unalign, src unalign): memcpy_fast=110ms memcpy=218 ms
benchmark(size=8192 bytes, times=262144):
result(dst aligned, src aligned): memcpy_fast=62ms memcpy=78 ms
result(dst aligned, src unalign): memcpy_fast=78ms memcpy=202 ms
result(dst unalign, src aligned): memcpy_fast=78ms memcpy=203 ms
result(dst unalign, src unalign): memcpy_fast=94ms memcpy=203 ms
benchmark(size=1048576 bytes, times=2048):
result(dst aligned, src aligned): memcpy_fast=203ms memcpy=191 ms
result(dst aligned, src unalign): memcpy_fast=219ms memcpy=281 ms
result(dst unalign, src aligned): memcpy_fast=218ms memcpy=328 ms
result(dst unalign, src unalign): memcpy_fast=218ms memcpy=312 ms
benchmark(size=4194304 bytes, times=512):
result(dst aligned, src aligned): memcpy_fast=312ms memcpy=406 ms
result(dst aligned, src unalign): memcpy_fast=296ms memcpy=421 ms
result(dst unalign, src aligned): memcpy_fast=312ms memcpy=468 ms
result(dst unalign, src unalign): memcpy_fast=297ms memcpy=452 ms
benchmark(size=8388608 bytes, times=256):
result(dst aligned, src aligned): memcpy_fast=281ms memcpy=452 ms
result(dst aligned, src unalign): memcpy_fast=280ms memcpy=468 ms
result(dst unalign, src aligned): memcpy_fast=298ms memcpy=514 ms
result(dst unalign, src unalign): memcpy_fast=344ms memcpy=472 ms
benchmark random access:
memcpy_fast=515ms memcpy=1014ms
*/

View File

@ -1,694 +0,0 @@
//=====================================================================
//
// FastMemcpy.c - skywind3000@163.com, 2015
//
// feature:
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc5.1)
//
//=====================================================================
#ifndef __FAST_MEMCPY_H__
#define __FAST_MEMCPY_H__
#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>
//---------------------------------------------------------------------
// force inline for compilers
//---------------------------------------------------------------------
#ifndef INLINE
#ifdef __GNUC__
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
#define INLINE __inline__ __attribute__((always_inline))
#else
#define INLINE __inline__
#endif
#elif defined(_MSC_VER)
#define INLINE __forceinline
#elif (defined(__BORLANDC__) || defined(__WATCOMC__))
#define INLINE __inline
#else
#define INLINE
#endif
#endif
typedef __attribute__((__aligned__(1))) uint16_t uint16_unaligned_t;
typedef __attribute__((__aligned__(1))) uint32_t uint32_unaligned_t;
typedef __attribute__((__aligned__(1))) uint64_t uint64_unaligned_t;
//---------------------------------------------------------------------
// fast copy for different sizes
//---------------------------------------------------------------------
static INLINE void memcpy_sse2_16(void *dst, const void *src) {
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
}
static INLINE void memcpy_sse2_32(void *dst, const void *src) {
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
__m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
_mm_storeu_si128(((__m128i*)dst) + 1, m1);
}
static INLINE void memcpy_sse2_64(void *dst, const void *src) {
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
__m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
__m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2);
__m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3);
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
_mm_storeu_si128(((__m128i*)dst) + 1, m1);
_mm_storeu_si128(((__m128i*)dst) + 2, m2);
_mm_storeu_si128(((__m128i*)dst) + 3, m3);
}
static INLINE void memcpy_sse2_128(void *dst, const void *src) {
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
__m128i m1 = _mm_loadu_si128(((const __m128i*)src) + 1);
__m128i m2 = _mm_loadu_si128(((const __m128i*)src) + 2);
__m128i m3 = _mm_loadu_si128(((const __m128i*)src) + 3);
__m128i m4 = _mm_loadu_si128(((const __m128i*)src) + 4);
__m128i m5 = _mm_loadu_si128(((const __m128i*)src) + 5);
__m128i m6 = _mm_loadu_si128(((const __m128i*)src) + 6);
__m128i m7 = _mm_loadu_si128(((const __m128i*)src) + 7);
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
_mm_storeu_si128(((__m128i*)dst) + 1, m1);
_mm_storeu_si128(((__m128i*)dst) + 2, m2);
_mm_storeu_si128(((__m128i*)dst) + 3, m3);
_mm_storeu_si128(((__m128i*)dst) + 4, m4);
_mm_storeu_si128(((__m128i*)dst) + 5, m5);
_mm_storeu_si128(((__m128i*)dst) + 6, m6);
_mm_storeu_si128(((__m128i*)dst) + 7, m7);
}
//---------------------------------------------------------------------
// tiny memory copy with jump table optimized
//---------------------------------------------------------------------
/// Attribute is used to avoid an error with undefined behaviour sanitizer
/// ../contrib/FastMemcpy/FastMemcpy.h:91:56: runtime error: applying zero offset to null pointer
/// Found by 01307_orc_output_format.sh, cause - ORCBlockInputFormat and external ORC library.
__attribute__((__no_sanitize__("undefined"))) static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) {
unsigned char *dd = ((unsigned char*)dst) + size;
const unsigned char *ss = ((const unsigned char*)src) + size;
switch (size) {
case 64:
memcpy_sse2_64(dd - 64, ss - 64);
case 0:
break;
case 65:
memcpy_sse2_64(dd - 65, ss - 65);
case 1:
dd[-1] = ss[-1];
break;
case 66:
memcpy_sse2_64(dd - 66, ss - 66);
case 2:
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 67:
memcpy_sse2_64(dd - 67, ss - 67);
case 3:
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
dd[-1] = ss[-1];
break;
case 68:
memcpy_sse2_64(dd - 68, ss - 68);
case 4:
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 69:
memcpy_sse2_64(dd - 69, ss - 69);
case 5:
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
dd[-1] = ss[-1];
break;
case 70:
memcpy_sse2_64(dd - 70, ss - 70);
case 6:
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 71:
memcpy_sse2_64(dd - 71, ss - 71);
case 7:
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 72:
memcpy_sse2_64(dd - 72, ss - 72);
case 8:
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
break;
case 73:
memcpy_sse2_64(dd - 73, ss - 73);
case 9:
*((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9));
dd[-1] = ss[-1];
break;
case 74:
memcpy_sse2_64(dd - 74, ss - 74);
case 10:
*((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10));
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 75:
memcpy_sse2_64(dd - 75, ss - 75);
case 11:
*((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 76:
memcpy_sse2_64(dd - 76, ss - 76);
case 12:
*((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 77:
memcpy_sse2_64(dd - 77, ss - 77);
case 13:
*((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13));
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
dd[-1] = ss[-1];
break;
case 78:
memcpy_sse2_64(dd - 78, ss - 78);
case 14:
*((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14));
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
break;
case 79:
memcpy_sse2_64(dd - 79, ss - 79);
case 15:
*((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15));
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
break;
case 80:
memcpy_sse2_64(dd - 80, ss - 80);
case 16:
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 81:
memcpy_sse2_64(dd - 81, ss - 81);
case 17:
memcpy_sse2_16(dd - 17, ss - 17);
dd[-1] = ss[-1];
break;
case 82:
memcpy_sse2_64(dd - 82, ss - 82);
case 18:
memcpy_sse2_16(dd - 18, ss - 18);
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 83:
memcpy_sse2_64(dd - 83, ss - 83);
case 19:
memcpy_sse2_16(dd - 19, ss - 19);
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
dd[-1] = ss[-1];
break;
case 84:
memcpy_sse2_64(dd - 84, ss - 84);
case 20:
memcpy_sse2_16(dd - 20, ss - 20);
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 85:
memcpy_sse2_64(dd - 85, ss - 85);
case 21:
memcpy_sse2_16(dd - 21, ss - 21);
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
dd[-1] = ss[-1];
break;
case 86:
memcpy_sse2_64(dd - 86, ss - 86);
case 22:
memcpy_sse2_16(dd - 22, ss - 22);
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 87:
memcpy_sse2_64(dd - 87, ss - 87);
case 23:
memcpy_sse2_16(dd - 23, ss - 23);
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 88:
memcpy_sse2_64(dd - 88, ss - 88);
case 24:
memcpy_sse2_16(dd - 24, ss - 24);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 89:
memcpy_sse2_64(dd - 89, ss - 89);
case 25:
memcpy_sse2_16(dd - 25, ss - 25);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 90:
memcpy_sse2_64(dd - 90, ss - 90);
case 26:
memcpy_sse2_16(dd - 26, ss - 26);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 91:
memcpy_sse2_64(dd - 91, ss - 91);
case 27:
memcpy_sse2_16(dd - 27, ss - 27);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 92:
memcpy_sse2_64(dd - 92, ss - 92);
case 28:
memcpy_sse2_16(dd - 28, ss - 28);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 93:
memcpy_sse2_64(dd - 93, ss - 93);
case 29:
memcpy_sse2_16(dd - 29, ss - 29);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 94:
memcpy_sse2_64(dd - 94, ss - 94);
case 30:
memcpy_sse2_16(dd - 30, ss - 30);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 95:
memcpy_sse2_64(dd - 95, ss - 95);
case 31:
memcpy_sse2_16(dd - 31, ss - 31);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 96:
memcpy_sse2_64(dd - 96, ss - 96);
case 32:
memcpy_sse2_32(dd - 32, ss - 32);
break;
case 97:
memcpy_sse2_64(dd - 97, ss - 97);
case 33:
memcpy_sse2_32(dd - 33, ss - 33);
dd[-1] = ss[-1];
break;
case 98:
memcpy_sse2_64(dd - 98, ss - 98);
case 34:
memcpy_sse2_32(dd - 34, ss - 34);
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 99:
memcpy_sse2_64(dd - 99, ss - 99);
case 35:
memcpy_sse2_32(dd - 35, ss - 35);
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
dd[-1] = ss[-1];
break;
case 100:
memcpy_sse2_64(dd - 100, ss - 100);
case 36:
memcpy_sse2_32(dd - 36, ss - 36);
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 101:
memcpy_sse2_64(dd - 101, ss - 101);
case 37:
memcpy_sse2_32(dd - 37, ss - 37);
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
dd[-1] = ss[-1];
break;
case 102:
memcpy_sse2_64(dd - 102, ss - 102);
case 38:
memcpy_sse2_32(dd - 38, ss - 38);
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 103:
memcpy_sse2_64(dd - 103, ss - 103);
case 39:
memcpy_sse2_32(dd - 39, ss - 39);
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 104:
memcpy_sse2_64(dd - 104, ss - 104);
case 40:
memcpy_sse2_32(dd - 40, ss - 40);
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
break;
case 105:
memcpy_sse2_64(dd - 105, ss - 105);
case 41:
memcpy_sse2_32(dd - 41, ss - 41);
*((uint64_unaligned_t*)(dd - 9)) = *((uint64_unaligned_t*)(ss - 9));
dd[-1] = ss[-1];
break;
case 106:
memcpy_sse2_64(dd - 106, ss - 106);
case 42:
memcpy_sse2_32(dd - 42, ss - 42);
*((uint64_unaligned_t*)(dd - 10)) = *((uint64_unaligned_t*)(ss - 10));
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 107:
memcpy_sse2_64(dd - 107, ss - 107);
case 43:
memcpy_sse2_32(dd - 43, ss - 43);
*((uint64_unaligned_t*)(dd - 11)) = *((uint64_unaligned_t*)(ss - 11));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 108:
memcpy_sse2_64(dd - 108, ss - 108);
case 44:
memcpy_sse2_32(dd - 44, ss - 44);
*((uint64_unaligned_t*)(dd - 12)) = *((uint64_unaligned_t*)(ss - 12));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 109:
memcpy_sse2_64(dd - 109, ss - 109);
case 45:
memcpy_sse2_32(dd - 45, ss - 45);
*((uint64_unaligned_t*)(dd - 13)) = *((uint64_unaligned_t*)(ss - 13));
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
dd[-1] = ss[-1];
break;
case 110:
memcpy_sse2_64(dd - 110, ss - 110);
case 46:
memcpy_sse2_32(dd - 46, ss - 46);
*((uint64_unaligned_t*)(dd - 14)) = *((uint64_unaligned_t*)(ss - 14));
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
break;
case 111:
memcpy_sse2_64(dd - 111, ss - 111);
case 47:
memcpy_sse2_32(dd - 47, ss - 47);
*((uint64_unaligned_t*)(dd - 15)) = *((uint64_unaligned_t*)(ss - 15));
*((uint64_unaligned_t*)(dd - 8)) = *((uint64_unaligned_t*)(ss - 8));
break;
case 112:
memcpy_sse2_64(dd - 112, ss - 112);
case 48:
memcpy_sse2_32(dd - 48, ss - 48);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 113:
memcpy_sse2_64(dd - 113, ss - 113);
case 49:
memcpy_sse2_32(dd - 49, ss - 49);
memcpy_sse2_16(dd - 17, ss - 17);
dd[-1] = ss[-1];
break;
case 114:
memcpy_sse2_64(dd - 114, ss - 114);
case 50:
memcpy_sse2_32(dd - 50, ss - 50);
memcpy_sse2_16(dd - 18, ss - 18);
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 115:
memcpy_sse2_64(dd - 115, ss - 115);
case 51:
memcpy_sse2_32(dd - 51, ss - 51);
memcpy_sse2_16(dd - 19, ss - 19);
*((uint16_unaligned_t*)(dd - 3)) = *((uint16_unaligned_t*)(ss - 3));
dd[-1] = ss[-1];
break;
case 116:
memcpy_sse2_64(dd - 116, ss - 116);
case 52:
memcpy_sse2_32(dd - 52, ss - 52);
memcpy_sse2_16(dd - 20, ss - 20);
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 117:
memcpy_sse2_64(dd - 117, ss - 117);
case 53:
memcpy_sse2_32(dd - 53, ss - 53);
memcpy_sse2_16(dd - 21, ss - 21);
*((uint32_unaligned_t*)(dd - 5)) = *((uint32_unaligned_t*)(ss - 5));
dd[-1] = ss[-1];
break;
case 118:
memcpy_sse2_64(dd - 118, ss - 118);
case 54:
memcpy_sse2_32(dd - 54, ss - 54);
memcpy_sse2_16(dd - 22, ss - 22);
*((uint32_unaligned_t*)(dd - 6)) = *((uint32_unaligned_t*)(ss - 6));
*((uint16_unaligned_t*)(dd - 2)) = *((uint16_unaligned_t*)(ss - 2));
break;
case 119:
memcpy_sse2_64(dd - 119, ss - 119);
case 55:
memcpy_sse2_32(dd - 55, ss - 55);
memcpy_sse2_16(dd - 23, ss - 23);
*((uint32_unaligned_t*)(dd - 7)) = *((uint32_unaligned_t*)(ss - 7));
*((uint32_unaligned_t*)(dd - 4)) = *((uint32_unaligned_t*)(ss - 4));
break;
case 120:
memcpy_sse2_64(dd - 120, ss - 120);
case 56:
memcpy_sse2_32(dd - 56, ss - 56);
memcpy_sse2_16(dd - 24, ss - 24);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 121:
memcpy_sse2_64(dd - 121, ss - 121);
case 57:
memcpy_sse2_32(dd - 57, ss - 57);
memcpy_sse2_16(dd - 25, ss - 25);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 122:
memcpy_sse2_64(dd - 122, ss - 122);
case 58:
memcpy_sse2_32(dd - 58, ss - 58);
memcpy_sse2_16(dd - 26, ss - 26);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 123:
memcpy_sse2_64(dd - 123, ss - 123);
case 59:
memcpy_sse2_32(dd - 59, ss - 59);
memcpy_sse2_16(dd - 27, ss - 27);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 124:
memcpy_sse2_64(dd - 124, ss - 124);
case 60:
memcpy_sse2_32(dd - 60, ss - 60);
memcpy_sse2_16(dd - 28, ss - 28);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 125:
memcpy_sse2_64(dd - 125, ss - 125);
case 61:
memcpy_sse2_32(dd - 61, ss - 61);
memcpy_sse2_16(dd - 29, ss - 29);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 126:
memcpy_sse2_64(dd - 126, ss - 126);
case 62:
memcpy_sse2_32(dd - 62, ss - 62);
memcpy_sse2_16(dd - 30, ss - 30);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 127:
memcpy_sse2_64(dd - 127, ss - 127);
case 63:
memcpy_sse2_32(dd - 63, ss - 63);
memcpy_sse2_16(dd - 31, ss - 31);
memcpy_sse2_16(dd - 16, ss - 16);
break;
case 128:
memcpy_sse2_128(dd - 128, ss - 128);
break;
}
return dst;
}
//---------------------------------------------------------------------
// main routine
//---------------------------------------------------------------------
static void* memcpy_fast(void *destination, const void *source, size_t size)
{
unsigned char *dst = (unsigned char*)destination;
const unsigned char *src = (const unsigned char*)source;
static size_t cachesize = 0x200000; // L2-cache size
size_t padding;
// small memory copy
if (size <= 128) {
return memcpy_tiny(dst, src, size);
}
// align destination to 16 bytes boundary
padding = (16 - (((size_t)dst) & 15)) & 15;
if (padding > 0) {
__m128i head = _mm_loadu_si128((const __m128i*)src);
_mm_storeu_si128((__m128i*)dst, head);
dst += padding;
src += padding;
size -= padding;
}
// medium size copy
if (size <= cachesize) {
__m128i c0, c1, c2, c3, c4, c5, c6, c7;
for (; size >= 128; size -= 128) {
c0 = _mm_loadu_si128(((const __m128i*)src) + 0);
c1 = _mm_loadu_si128(((const __m128i*)src) + 1);
c2 = _mm_loadu_si128(((const __m128i*)src) + 2);
c3 = _mm_loadu_si128(((const __m128i*)src) + 3);
c4 = _mm_loadu_si128(((const __m128i*)src) + 4);
c5 = _mm_loadu_si128(((const __m128i*)src) + 5);
c6 = _mm_loadu_si128(((const __m128i*)src) + 6);
c7 = _mm_loadu_si128(((const __m128i*)src) + 7);
_mm_prefetch((const char*)(src + 256), _MM_HINT_NTA);
src += 128;
_mm_store_si128((((__m128i*)dst) + 0), c0);
_mm_store_si128((((__m128i*)dst) + 1), c1);
_mm_store_si128((((__m128i*)dst) + 2), c2);
_mm_store_si128((((__m128i*)dst) + 3), c3);
_mm_store_si128((((__m128i*)dst) + 4), c4);
_mm_store_si128((((__m128i*)dst) + 5), c5);
_mm_store_si128((((__m128i*)dst) + 6), c6);
_mm_store_si128((((__m128i*)dst) + 7), c7);
dst += 128;
}
}
else { // big memory copy
__m128i c0, c1, c2, c3, c4, c5, c6, c7;
_mm_prefetch((const char*)(src), _MM_HINT_NTA);
if ((((size_t)src) & 15) == 0) { // source aligned
for (; size >= 128; size -= 128) {
c0 = _mm_load_si128(((const __m128i*)src) + 0);
c1 = _mm_load_si128(((const __m128i*)src) + 1);
c2 = _mm_load_si128(((const __m128i*)src) + 2);
c3 = _mm_load_si128(((const __m128i*)src) + 3);
c4 = _mm_load_si128(((const __m128i*)src) + 4);
c5 = _mm_load_si128(((const __m128i*)src) + 5);
c6 = _mm_load_si128(((const __m128i*)src) + 6);
c7 = _mm_load_si128(((const __m128i*)src) + 7);
_mm_prefetch((const char*)(src + 256), _MM_HINT_NTA);
src += 128;
_mm_stream_si128((((__m128i*)dst) + 0), c0);
_mm_stream_si128((((__m128i*)dst) + 1), c1);
_mm_stream_si128((((__m128i*)dst) + 2), c2);
_mm_stream_si128((((__m128i*)dst) + 3), c3);
_mm_stream_si128((((__m128i*)dst) + 4), c4);
_mm_stream_si128((((__m128i*)dst) + 5), c5);
_mm_stream_si128((((__m128i*)dst) + 6), c6);
_mm_stream_si128((((__m128i*)dst) + 7), c7);
dst += 128;
}
}
else { // source unaligned
for (; size >= 128; size -= 128) {
c0 = _mm_loadu_si128(((const __m128i*)src) + 0);
c1 = _mm_loadu_si128(((const __m128i*)src) + 1);
c2 = _mm_loadu_si128(((const __m128i*)src) + 2);
c3 = _mm_loadu_si128(((const __m128i*)src) + 3);
c4 = _mm_loadu_si128(((const __m128i*)src) + 4);
c5 = _mm_loadu_si128(((const __m128i*)src) + 5);
c6 = _mm_loadu_si128(((const __m128i*)src) + 6);
c7 = _mm_loadu_si128(((const __m128i*)src) + 7);
_mm_prefetch((const char*)(src + 256), _MM_HINT_NTA);
src += 128;
_mm_stream_si128((((__m128i*)dst) + 0), c0);
_mm_stream_si128((((__m128i*)dst) + 1), c1);
_mm_stream_si128((((__m128i*)dst) + 2), c2);
_mm_stream_si128((((__m128i*)dst) + 3), c3);
_mm_stream_si128((((__m128i*)dst) + 4), c4);
_mm_stream_si128((((__m128i*)dst) + 5), c5);
_mm_stream_si128((((__m128i*)dst) + 6), c6);
_mm_stream_si128((((__m128i*)dst) + 7), c7);
dst += 128;
}
}
_mm_sfence();
}
memcpy_tiny(dst, src, size);
return destination;
}
#endif

View File

@ -1,171 +0,0 @@
//=====================================================================
//
// FastMemcpy.c - skywind3000@163.com, 2015
//
// feature:
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
//
//=====================================================================
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#if (defined(_WIN32) || defined(WIN32))
#include <windows.h>
#include <mmsystem.h>
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
#elif defined(__unix)
#include <sys/time.h>
#include <unistd.h>
#else
#error it can only be compiled under windows or unix
#endif
#include "FastMemcpy_Avx.h"
unsigned int gettime()
{
#if (defined(_WIN32) || defined(WIN32))
return timeGetTime();
#else
static struct timezone tz={ 0,0 };
struct timeval time;
gettimeofday(&time,&tz);
return (time.tv_sec * 1000 + time.tv_usec / 1000);
#endif
}
void sleepms(unsigned int millisec)
{
#if defined(_WIN32) || defined(WIN32)
Sleep(millisec);
#else
usleep(millisec * 1000);
#endif
}
void benchmark(int dstalign, int srcalign, size_t size, int times)
{
char *DATA1 = (char*)malloc(size + 64);
char *DATA2 = (char*)malloc(size + 64);
size_t LINEAR1 = ((size_t)DATA1);
size_t LINEAR2 = ((size_t)DATA2);
char *ALIGN1 = (char*)(((64 - (LINEAR1 & 63)) & 63) + LINEAR1);
char *ALIGN2 = (char*)(((64 - (LINEAR2 & 63)) & 63) + LINEAR2);
char *dst = (dstalign)? ALIGN1 : (ALIGN1 + 1);
char *src = (srcalign)? ALIGN2 : (ALIGN2 + 3);
unsigned int t1, t2;
int k;
sleepms(100);
t1 = gettime();
for (k = times; k > 0; k--) {
memcpy(dst, src, size);
}
t1 = gettime() - t1;
sleepms(100);
t2 = gettime();
for (k = times; k > 0; k--) {
memcpy_fast(dst, src, size);
}
t2 = gettime() - t2;
free(DATA1);
free(DATA2);
printf("result(dst %s, src %s): memcpy_fast=%dms memcpy=%d ms\n",
dstalign? "aligned" : "unalign",
srcalign? "aligned" : "unalign", (int)t2, (int)t1);
}
void bench(int copysize, int times)
{
printf("benchmark(size=%d bytes, times=%d):\n", copysize, times);
benchmark(1, 1, copysize, times);
benchmark(1, 0, copysize, times);
benchmark(0, 1, copysize, times);
benchmark(0, 0, copysize, times);
printf("\n");
}
void random_bench(int maxsize, int times)
{
static char A[11 * 1024 * 1024 + 2];
static char B[11 * 1024 * 1024 + 2];
static int random_offsets[0x10000];
static int random_sizes[0x8000];
unsigned int i, p1, p2;
unsigned int t1, t2;
for (i = 0; i < 0x10000; i++) { // generate random offsets
random_offsets[i] = rand() % (10 * 1024 * 1024 + 1);
}
for (i = 0; i < 0x8000; i++) { // generate random sizes
random_sizes[i] = 1 + rand() % maxsize;
}
sleepms(100);
t1 = gettime();
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
int offset1 = random_offsets[(p1++) & 0xffff];
int offset2 = random_offsets[(p1++) & 0xffff];
int size = random_sizes[(p2++) & 0x7fff];
memcpy(A + offset1, B + offset2, size);
}
t1 = gettime() - t1;
sleepms(100);
t2 = gettime();
for (p1 = 0, p2 = 0, i = 0; i < times; i++) {
int offset1 = random_offsets[(p1++) & 0xffff];
int offset2 = random_offsets[(p1++) & 0xffff];
int size = random_sizes[(p2++) & 0x7fff];
memcpy_fast(A + offset1, B + offset2, size);
}
t2 = gettime() - t2;
printf("benchmark random access:\n");
printf("memcpy_fast=%dms memcpy=%dms\n\n", (int)t2, (int)t1);
}
#ifdef _MSC_VER
#pragma comment(lib, "winmm.lib")
#endif
int main(void)
{
#if 1
bench(32, 0x1000000);
bench(64, 0x1000000);
bench(512, 0x800000);
bench(1024, 0x400000);
#endif
bench(4096, 0x80000);
bench(8192, 0x40000);
#if 1
bench(1024 * 1024 * 1, 0x800);
bench(1024 * 1024 * 4, 0x200);
#endif
bench(1024 * 1024 * 8, 0x100);
random_bench(2048, 8000000);
return 0;
}
/*
*/

View File

@ -1,492 +0,0 @@
//=====================================================================
//
// FastMemcpy.c - skywind3000@163.com, 2015
//
// feature:
// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc5.1)
//
//=====================================================================
#ifndef __FAST_MEMCPY_H__
#define __FAST_MEMCPY_H__
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>
//---------------------------------------------------------------------
// force inline for compilers
//---------------------------------------------------------------------
#ifndef INLINE
#ifdef __GNUC__
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
#define INLINE __inline__ __attribute__((always_inline))
#else
#define INLINE __inline__
#endif
#elif defined(_MSC_VER)
#define INLINE __forceinline
#elif (defined(__BORLANDC__) || defined(__WATCOMC__))
#define INLINE __inline
#else
#define INLINE
#endif
#endif
//---------------------------------------------------------------------
// fast copy for different sizes
//---------------------------------------------------------------------
static INLINE void memcpy_avx_16(void *dst, const void *src) {
#if 1
__m128i m0 = _mm_loadu_si128(((const __m128i*)src) + 0);
_mm_storeu_si128(((__m128i*)dst) + 0, m0);
#else
*((uint64_t*)((char*)dst + 0)) = *((uint64_t*)((const char*)src + 0));
*((uint64_t*)((char*)dst + 8)) = *((uint64_t*)((const char*)src + 8));
#endif
}
static INLINE void memcpy_avx_32(void *dst, const void *src) {
__m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
_mm256_storeu_si256(((__m256i*)dst) + 0, m0);
}
static INLINE void memcpy_avx_64(void *dst, const void *src) {
__m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
__m256i m1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
_mm256_storeu_si256(((__m256i*)dst) + 0, m0);
_mm256_storeu_si256(((__m256i*)dst) + 1, m1);
}
static INLINE void memcpy_avx_128(void *dst, const void *src) {
__m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
__m256i m1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
__m256i m2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
__m256i m3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
_mm256_storeu_si256(((__m256i*)dst) + 0, m0);
_mm256_storeu_si256(((__m256i*)dst) + 1, m1);
_mm256_storeu_si256(((__m256i*)dst) + 2, m2);
_mm256_storeu_si256(((__m256i*)dst) + 3, m3);
}
static INLINE void memcpy_avx_256(void *dst, const void *src) {
__m256i m0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
__m256i m1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
__m256i m2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
__m256i m3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
__m256i m4 = _mm256_loadu_si256(((const __m256i*)src) + 4);
__m256i m5 = _mm256_loadu_si256(((const __m256i*)src) + 5);
__m256i m6 = _mm256_loadu_si256(((const __m256i*)src) + 6);
__m256i m7 = _mm256_loadu_si256(((const __m256i*)src) + 7);
_mm256_storeu_si256(((__m256i*)dst) + 0, m0);
_mm256_storeu_si256(((__m256i*)dst) + 1, m1);
_mm256_storeu_si256(((__m256i*)dst) + 2, m2);
_mm256_storeu_si256(((__m256i*)dst) + 3, m3);
_mm256_storeu_si256(((__m256i*)dst) + 4, m4);
_mm256_storeu_si256(((__m256i*)dst) + 5, m5);
_mm256_storeu_si256(((__m256i*)dst) + 6, m6);
_mm256_storeu_si256(((__m256i*)dst) + 7, m7);
}
//---------------------------------------------------------------------
// tiny memory copy with jump table optimized
//---------------------------------------------------------------------
static INLINE void *memcpy_tiny(void *dst, const void *src, size_t size) {
unsigned char *dd = ((unsigned char*)dst) + size;
const unsigned char *ss = ((const unsigned char*)src) + size;
switch (size) {
case 128: memcpy_avx_128(dd - 128, ss - 128);
case 0: break;
case 129: memcpy_avx_128(dd - 129, ss - 129);
case 1: dd[-1] = ss[-1]; break;
case 130: memcpy_avx_128(dd - 130, ss - 130);
case 2: *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
case 131: memcpy_avx_128(dd - 131, ss - 131);
case 3: *((uint16_t*)(dd - 3)) = *((uint16_t*)(ss - 3)); dd[-1] = ss[-1]; break;
case 132: memcpy_avx_128(dd - 132, ss - 132);
case 4: *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 133: memcpy_avx_128(dd - 133, ss - 133);
case 5: *((uint32_t*)(dd - 5)) = *((uint32_t*)(ss - 5)); dd[-1] = ss[-1]; break;
case 134: memcpy_avx_128(dd - 134, ss - 134);
case 6: *((uint32_t*)(dd - 6)) = *((uint32_t*)(ss - 6)); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
case 135: memcpy_avx_128(dd - 135, ss - 135);
case 7: *((uint32_t*)(dd - 7)) = *((uint32_t*)(ss - 7)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 136: memcpy_avx_128(dd - 136, ss - 136);
case 8: *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 137: memcpy_avx_128(dd - 137, ss - 137);
case 9: *((uint64_t*)(dd - 9)) = *((uint64_t*)(ss - 9)); dd[-1] = ss[-1]; break;
case 138: memcpy_avx_128(dd - 138, ss - 138);
case 10: *((uint64_t*)(dd - 10)) = *((uint64_t*)(ss - 10)); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
case 139: memcpy_avx_128(dd - 139, ss - 139);
case 11: *((uint64_t*)(dd - 11)) = *((uint64_t*)(ss - 11)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 140: memcpy_avx_128(dd - 140, ss - 140);
case 12: *((uint64_t*)(dd - 12)) = *((uint64_t*)(ss - 12)); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 141: memcpy_avx_128(dd - 141, ss - 141);
case 13: *((uint64_t*)(dd - 13)) = *((uint64_t*)(ss - 13)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 142: memcpy_avx_128(dd - 142, ss - 142);
case 14: *((uint64_t*)(dd - 14)) = *((uint64_t*)(ss - 14)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 143: memcpy_avx_128(dd - 143, ss - 143);
case 15: *((uint64_t*)(dd - 15)) = *((uint64_t*)(ss - 15)); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 144: memcpy_avx_128(dd - 144, ss - 144);
case 16: memcpy_avx_16(dd - 16, ss - 16); break;
case 145: memcpy_avx_128(dd - 145, ss - 145);
case 17: memcpy_avx_16(dd - 17, ss - 17); dd[-1] = ss[-1]; break;
case 146: memcpy_avx_128(dd - 146, ss - 146);
case 18: memcpy_avx_16(dd - 18, ss - 18); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
case 147: memcpy_avx_128(dd - 147, ss - 147);
case 19: memcpy_avx_16(dd - 19, ss - 19); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 148: memcpy_avx_128(dd - 148, ss - 148);
case 20: memcpy_avx_16(dd - 20, ss - 20); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 149: memcpy_avx_128(dd - 149, ss - 149);
case 21: memcpy_avx_16(dd - 21, ss - 21); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 150: memcpy_avx_128(dd - 150, ss - 150);
case 22: memcpy_avx_16(dd - 22, ss - 22); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 151: memcpy_avx_128(dd - 151, ss - 151);
case 23: memcpy_avx_16(dd - 23, ss - 23); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 152: memcpy_avx_128(dd - 152, ss - 152);
case 24: memcpy_avx_16(dd - 24, ss - 24); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 153: memcpy_avx_128(dd - 153, ss - 153);
case 25: memcpy_avx_16(dd - 25, ss - 25); memcpy_avx_16(dd - 16, ss - 16); break;
case 154: memcpy_avx_128(dd - 154, ss - 154);
case 26: memcpy_avx_16(dd - 26, ss - 26); memcpy_avx_16(dd - 16, ss - 16); break;
case 155: memcpy_avx_128(dd - 155, ss - 155);
case 27: memcpy_avx_16(dd - 27, ss - 27); memcpy_avx_16(dd - 16, ss - 16); break;
case 156: memcpy_avx_128(dd - 156, ss - 156);
case 28: memcpy_avx_16(dd - 28, ss - 28); memcpy_avx_16(dd - 16, ss - 16); break;
case 157: memcpy_avx_128(dd - 157, ss - 157);
case 29: memcpy_avx_16(dd - 29, ss - 29); memcpy_avx_16(dd - 16, ss - 16); break;
case 158: memcpy_avx_128(dd - 158, ss - 158);
case 30: memcpy_avx_16(dd - 30, ss - 30); memcpy_avx_16(dd - 16, ss - 16); break;
case 159: memcpy_avx_128(dd - 159, ss - 159);
case 31: memcpy_avx_16(dd - 31, ss - 31); memcpy_avx_16(dd - 16, ss - 16); break;
case 160: memcpy_avx_128(dd - 160, ss - 160);
case 32: memcpy_avx_32(dd - 32, ss - 32); break;
case 161: memcpy_avx_128(dd - 161, ss - 161);
case 33: memcpy_avx_32(dd - 33, ss - 33); dd[-1] = ss[-1]; break;
case 162: memcpy_avx_128(dd - 162, ss - 162);
case 34: memcpy_avx_32(dd - 34, ss - 34); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
case 163: memcpy_avx_128(dd - 163, ss - 163);
case 35: memcpy_avx_32(dd - 35, ss - 35); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 164: memcpy_avx_128(dd - 164, ss - 164);
case 36: memcpy_avx_32(dd - 36, ss - 36); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 165: memcpy_avx_128(dd - 165, ss - 165);
case 37: memcpy_avx_32(dd - 37, ss - 37); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 166: memcpy_avx_128(dd - 166, ss - 166);
case 38: memcpy_avx_32(dd - 38, ss - 38); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 167: memcpy_avx_128(dd - 167, ss - 167);
case 39: memcpy_avx_32(dd - 39, ss - 39); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 168: memcpy_avx_128(dd - 168, ss - 168);
case 40: memcpy_avx_32(dd - 40, ss - 40); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 169: memcpy_avx_128(dd - 169, ss - 169);
case 41: memcpy_avx_32(dd - 41, ss - 41); memcpy_avx_16(dd - 16, ss - 16); break;
case 170: memcpy_avx_128(dd - 170, ss - 170);
case 42: memcpy_avx_32(dd - 42, ss - 42); memcpy_avx_16(dd - 16, ss - 16); break;
case 171: memcpy_avx_128(dd - 171, ss - 171);
case 43: memcpy_avx_32(dd - 43, ss - 43); memcpy_avx_16(dd - 16, ss - 16); break;
case 172: memcpy_avx_128(dd - 172, ss - 172);
case 44: memcpy_avx_32(dd - 44, ss - 44); memcpy_avx_16(dd - 16, ss - 16); break;
case 173: memcpy_avx_128(dd - 173, ss - 173);
case 45: memcpy_avx_32(dd - 45, ss - 45); memcpy_avx_16(dd - 16, ss - 16); break;
case 174: memcpy_avx_128(dd - 174, ss - 174);
case 46: memcpy_avx_32(dd - 46, ss - 46); memcpy_avx_16(dd - 16, ss - 16); break;
case 175: memcpy_avx_128(dd - 175, ss - 175);
case 47: memcpy_avx_32(dd - 47, ss - 47); memcpy_avx_16(dd - 16, ss - 16); break;
case 176: memcpy_avx_128(dd - 176, ss - 176);
case 48: memcpy_avx_32(dd - 48, ss - 48); memcpy_avx_16(dd - 16, ss - 16); break;
case 177: memcpy_avx_128(dd - 177, ss - 177);
case 49: memcpy_avx_32(dd - 49, ss - 49); memcpy_avx_32(dd - 32, ss - 32); break;
case 178: memcpy_avx_128(dd - 178, ss - 178);
case 50: memcpy_avx_32(dd - 50, ss - 50); memcpy_avx_32(dd - 32, ss - 32); break;
case 179: memcpy_avx_128(dd - 179, ss - 179);
case 51: memcpy_avx_32(dd - 51, ss - 51); memcpy_avx_32(dd - 32, ss - 32); break;
case 180: memcpy_avx_128(dd - 180, ss - 180);
case 52: memcpy_avx_32(dd - 52, ss - 52); memcpy_avx_32(dd - 32, ss - 32); break;
case 181: memcpy_avx_128(dd - 181, ss - 181);
case 53: memcpy_avx_32(dd - 53, ss - 53); memcpy_avx_32(dd - 32, ss - 32); break;
case 182: memcpy_avx_128(dd - 182, ss - 182);
case 54: memcpy_avx_32(dd - 54, ss - 54); memcpy_avx_32(dd - 32, ss - 32); break;
case 183: memcpy_avx_128(dd - 183, ss - 183);
case 55: memcpy_avx_32(dd - 55, ss - 55); memcpy_avx_32(dd - 32, ss - 32); break;
case 184: memcpy_avx_128(dd - 184, ss - 184);
case 56: memcpy_avx_32(dd - 56, ss - 56); memcpy_avx_32(dd - 32, ss - 32); break;
case 185: memcpy_avx_128(dd - 185, ss - 185);
case 57: memcpy_avx_32(dd - 57, ss - 57); memcpy_avx_32(dd - 32, ss - 32); break;
case 186: memcpy_avx_128(dd - 186, ss - 186);
case 58: memcpy_avx_32(dd - 58, ss - 58); memcpy_avx_32(dd - 32, ss - 32); break;
case 187: memcpy_avx_128(dd - 187, ss - 187);
case 59: memcpy_avx_32(dd - 59, ss - 59); memcpy_avx_32(dd - 32, ss - 32); break;
case 188: memcpy_avx_128(dd - 188, ss - 188);
case 60: memcpy_avx_32(dd - 60, ss - 60); memcpy_avx_32(dd - 32, ss - 32); break;
case 189: memcpy_avx_128(dd - 189, ss - 189);
case 61: memcpy_avx_32(dd - 61, ss - 61); memcpy_avx_32(dd - 32, ss - 32); break;
case 190: memcpy_avx_128(dd - 190, ss - 190);
case 62: memcpy_avx_32(dd - 62, ss - 62); memcpy_avx_32(dd - 32, ss - 32); break;
case 191: memcpy_avx_128(dd - 191, ss - 191);
case 63: memcpy_avx_32(dd - 63, ss - 63); memcpy_avx_32(dd - 32, ss - 32); break;
case 192: memcpy_avx_128(dd - 192, ss - 192);
case 64: memcpy_avx_64(dd - 64, ss - 64); break;
case 193: memcpy_avx_128(dd - 193, ss - 193);
case 65: memcpy_avx_64(dd - 65, ss - 65); dd[-1] = ss[-1]; break;
case 194: memcpy_avx_128(dd - 194, ss - 194);
case 66: memcpy_avx_64(dd - 66, ss - 66); *((uint16_t*)(dd - 2)) = *((uint16_t*)(ss - 2)); break;
case 195: memcpy_avx_128(dd - 195, ss - 195);
case 67: memcpy_avx_64(dd - 67, ss - 67); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 196: memcpy_avx_128(dd - 196, ss - 196);
case 68: memcpy_avx_64(dd - 68, ss - 68); *((uint32_t*)(dd - 4)) = *((uint32_t*)(ss - 4)); break;
case 197: memcpy_avx_128(dd - 197, ss - 197);
case 69: memcpy_avx_64(dd - 69, ss - 69); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 198: memcpy_avx_128(dd - 198, ss - 198);
case 70: memcpy_avx_64(dd - 70, ss - 70); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 199: memcpy_avx_128(dd - 199, ss - 199);
case 71: memcpy_avx_64(dd - 71, ss - 71); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 200: memcpy_avx_128(dd - 200, ss - 200);
case 72: memcpy_avx_64(dd - 72, ss - 72); *((uint64_t*)(dd - 8)) = *((uint64_t*)(ss - 8)); break;
case 201: memcpy_avx_128(dd - 201, ss - 201);
case 73: memcpy_avx_64(dd - 73, ss - 73); memcpy_avx_16(dd - 16, ss - 16); break;
case 202: memcpy_avx_128(dd - 202, ss - 202);
case 74: memcpy_avx_64(dd - 74, ss - 74); memcpy_avx_16(dd - 16, ss - 16); break;
case 203: memcpy_avx_128(dd - 203, ss - 203);
case 75: memcpy_avx_64(dd - 75, ss - 75); memcpy_avx_16(dd - 16, ss - 16); break;
case 204: memcpy_avx_128(dd - 204, ss - 204);
case 76: memcpy_avx_64(dd - 76, ss - 76); memcpy_avx_16(dd - 16, ss - 16); break;
case 205: memcpy_avx_128(dd - 205, ss - 205);
case 77: memcpy_avx_64(dd - 77, ss - 77); memcpy_avx_16(dd - 16, ss - 16); break;
case 206: memcpy_avx_128(dd - 206, ss - 206);
case 78: memcpy_avx_64(dd - 78, ss - 78); memcpy_avx_16(dd - 16, ss - 16); break;
case 207: memcpy_avx_128(dd - 207, ss - 207);
case 79: memcpy_avx_64(dd - 79, ss - 79); memcpy_avx_16(dd - 16, ss - 16); break;
case 208: memcpy_avx_128(dd - 208, ss - 208);
case 80: memcpy_avx_64(dd - 80, ss - 80); memcpy_avx_16(dd - 16, ss - 16); break;
case 209: memcpy_avx_128(dd - 209, ss - 209);
case 81: memcpy_avx_64(dd - 81, ss - 81); memcpy_avx_32(dd - 32, ss - 32); break;
case 210: memcpy_avx_128(dd - 210, ss - 210);
case 82: memcpy_avx_64(dd - 82, ss - 82); memcpy_avx_32(dd - 32, ss - 32); break;
case 211: memcpy_avx_128(dd - 211, ss - 211);
case 83: memcpy_avx_64(dd - 83, ss - 83); memcpy_avx_32(dd - 32, ss - 32); break;
case 212: memcpy_avx_128(dd - 212, ss - 212);
case 84: memcpy_avx_64(dd - 84, ss - 84); memcpy_avx_32(dd - 32, ss - 32); break;
case 213: memcpy_avx_128(dd - 213, ss - 213);
case 85: memcpy_avx_64(dd - 85, ss - 85); memcpy_avx_32(dd - 32, ss - 32); break;
case 214: memcpy_avx_128(dd - 214, ss - 214);
case 86: memcpy_avx_64(dd - 86, ss - 86); memcpy_avx_32(dd - 32, ss - 32); break;
case 215: memcpy_avx_128(dd - 215, ss - 215);
case 87: memcpy_avx_64(dd - 87, ss - 87); memcpy_avx_32(dd - 32, ss - 32); break;
case 216: memcpy_avx_128(dd - 216, ss - 216);
case 88: memcpy_avx_64(dd - 88, ss - 88); memcpy_avx_32(dd - 32, ss - 32); break;
case 217: memcpy_avx_128(dd - 217, ss - 217);
case 89: memcpy_avx_64(dd - 89, ss - 89); memcpy_avx_32(dd - 32, ss - 32); break;
case 218: memcpy_avx_128(dd - 218, ss - 218);
case 90: memcpy_avx_64(dd - 90, ss - 90); memcpy_avx_32(dd - 32, ss - 32); break;
case 219: memcpy_avx_128(dd - 219, ss - 219);
case 91: memcpy_avx_64(dd - 91, ss - 91); memcpy_avx_32(dd - 32, ss - 32); break;
case 220: memcpy_avx_128(dd - 220, ss - 220);
case 92: memcpy_avx_64(dd - 92, ss - 92); memcpy_avx_32(dd - 32, ss - 32); break;
case 221: memcpy_avx_128(dd - 221, ss - 221);
case 93: memcpy_avx_64(dd - 93, ss - 93); memcpy_avx_32(dd - 32, ss - 32); break;
case 222: memcpy_avx_128(dd - 222, ss - 222);
case 94: memcpy_avx_64(dd - 94, ss - 94); memcpy_avx_32(dd - 32, ss - 32); break;
case 223: memcpy_avx_128(dd - 223, ss - 223);
case 95: memcpy_avx_64(dd - 95, ss - 95); memcpy_avx_32(dd - 32, ss - 32); break;
case 224: memcpy_avx_128(dd - 224, ss - 224);
case 96: memcpy_avx_64(dd - 96, ss - 96); memcpy_avx_32(dd - 32, ss - 32); break;
case 225: memcpy_avx_128(dd - 225, ss - 225);
case 97: memcpy_avx_64(dd - 97, ss - 97); memcpy_avx_64(dd - 64, ss - 64); break;
case 226: memcpy_avx_128(dd - 226, ss - 226);
case 98: memcpy_avx_64(dd - 98, ss - 98); memcpy_avx_64(dd - 64, ss - 64); break;
case 227: memcpy_avx_128(dd - 227, ss - 227);
case 99: memcpy_avx_64(dd - 99, ss - 99); memcpy_avx_64(dd - 64, ss - 64); break;
case 228: memcpy_avx_128(dd - 228, ss - 228);
case 100: memcpy_avx_64(dd - 100, ss - 100); memcpy_avx_64(dd - 64, ss - 64); break;
case 229: memcpy_avx_128(dd - 229, ss - 229);
case 101: memcpy_avx_64(dd - 101, ss - 101); memcpy_avx_64(dd - 64, ss - 64); break;
case 230: memcpy_avx_128(dd - 230, ss - 230);
case 102: memcpy_avx_64(dd - 102, ss - 102); memcpy_avx_64(dd - 64, ss - 64); break;
case 231: memcpy_avx_128(dd - 231, ss - 231);
case 103: memcpy_avx_64(dd - 103, ss - 103); memcpy_avx_64(dd - 64, ss - 64); break;
case 232: memcpy_avx_128(dd - 232, ss - 232);
case 104: memcpy_avx_64(dd - 104, ss - 104); memcpy_avx_64(dd - 64, ss - 64); break;
case 233: memcpy_avx_128(dd - 233, ss - 233);
case 105: memcpy_avx_64(dd - 105, ss - 105); memcpy_avx_64(dd - 64, ss - 64); break;
case 234: memcpy_avx_128(dd - 234, ss - 234);
case 106: memcpy_avx_64(dd - 106, ss - 106); memcpy_avx_64(dd - 64, ss - 64); break;
case 235: memcpy_avx_128(dd - 235, ss - 235);
case 107: memcpy_avx_64(dd - 107, ss - 107); memcpy_avx_64(dd - 64, ss - 64); break;
case 236: memcpy_avx_128(dd - 236, ss - 236);
case 108: memcpy_avx_64(dd - 108, ss - 108); memcpy_avx_64(dd - 64, ss - 64); break;
case 237: memcpy_avx_128(dd - 237, ss - 237);
case 109: memcpy_avx_64(dd - 109, ss - 109); memcpy_avx_64(dd - 64, ss - 64); break;
case 238: memcpy_avx_128(dd - 238, ss - 238);
case 110: memcpy_avx_64(dd - 110, ss - 110); memcpy_avx_64(dd - 64, ss - 64); break;
case 239: memcpy_avx_128(dd - 239, ss - 239);
case 111: memcpy_avx_64(dd - 111, ss - 111); memcpy_avx_64(dd - 64, ss - 64); break;
case 240: memcpy_avx_128(dd - 240, ss - 240);
case 112: memcpy_avx_64(dd - 112, ss - 112); memcpy_avx_64(dd - 64, ss - 64); break;
case 241: memcpy_avx_128(dd - 241, ss - 241);
case 113: memcpy_avx_64(dd - 113, ss - 113); memcpy_avx_64(dd - 64, ss - 64); break;
case 242: memcpy_avx_128(dd - 242, ss - 242);
case 114: memcpy_avx_64(dd - 114, ss - 114); memcpy_avx_64(dd - 64, ss - 64); break;
case 243: memcpy_avx_128(dd - 243, ss - 243);
case 115: memcpy_avx_64(dd - 115, ss - 115); memcpy_avx_64(dd - 64, ss - 64); break;
case 244: memcpy_avx_128(dd - 244, ss - 244);
case 116: memcpy_avx_64(dd - 116, ss - 116); memcpy_avx_64(dd - 64, ss - 64); break;
case 245: memcpy_avx_128(dd - 245, ss - 245);
case 117: memcpy_avx_64(dd - 117, ss - 117); memcpy_avx_64(dd - 64, ss - 64); break;
case 246: memcpy_avx_128(dd - 246, ss - 246);
case 118: memcpy_avx_64(dd - 118, ss - 118); memcpy_avx_64(dd - 64, ss - 64); break;
case 247: memcpy_avx_128(dd - 247, ss - 247);
case 119: memcpy_avx_64(dd - 119, ss - 119); memcpy_avx_64(dd - 64, ss - 64); break;
case 248: memcpy_avx_128(dd - 248, ss - 248);
case 120: memcpy_avx_64(dd - 120, ss - 120); memcpy_avx_64(dd - 64, ss - 64); break;
case 249: memcpy_avx_128(dd - 249, ss - 249);
case 121: memcpy_avx_64(dd - 121, ss - 121); memcpy_avx_64(dd - 64, ss - 64); break;
case 250: memcpy_avx_128(dd - 250, ss - 250);
case 122: memcpy_avx_64(dd - 122, ss - 122); memcpy_avx_64(dd - 64, ss - 64); break;
case 251: memcpy_avx_128(dd - 251, ss - 251);
case 123: memcpy_avx_64(dd - 123, ss - 123); memcpy_avx_64(dd - 64, ss - 64); break;
case 252: memcpy_avx_128(dd - 252, ss - 252);
case 124: memcpy_avx_64(dd - 124, ss - 124); memcpy_avx_64(dd - 64, ss - 64); break;
case 253: memcpy_avx_128(dd - 253, ss - 253);
case 125: memcpy_avx_64(dd - 125, ss - 125); memcpy_avx_64(dd - 64, ss - 64); break;
case 254: memcpy_avx_128(dd - 254, ss - 254);
case 126: memcpy_avx_64(dd - 126, ss - 126); memcpy_avx_64(dd - 64, ss - 64); break;
case 255: memcpy_avx_128(dd - 255, ss - 255);
case 127: memcpy_avx_64(dd - 127, ss - 127); memcpy_avx_64(dd - 64, ss - 64); break;
case 256: memcpy_avx_256(dd - 256, ss - 256); break;
}
return dst;
}
//---------------------------------------------------------------------
// main routine
//---------------------------------------------------------------------
static void* memcpy_fast(void *destination, const void *source, size_t size)
{
unsigned char *dst = (unsigned char*)destination;
const unsigned char *src = (const unsigned char*)source;
static size_t cachesize = 0x200000; // assumed cache-size threshold (2 MB); not detected at runtime
size_t padding;
// small memory copy
if (size <= 256) {
memcpy_tiny(dst, src, size);
_mm256_zeroupper();
return destination;
}
// align destination to a 32-byte boundary
padding = (32 - (((size_t)dst) & 31)) & 31;
#if 0
if (padding > 0) {
__m256i head = _mm256_loadu_si256((const __m256i*)src);
_mm256_storeu_si256((__m256i*)dst, head);
dst += padding;
src += padding;
size -= padding;
}
#else
// Copy a 32-byte head unconditionally; when padding == 0 the aligned loop below simply overlaps it.
__m256i head = _mm256_loadu_si256((const __m256i*)src);
_mm256_storeu_si256((__m256i*)dst, head);
dst += padding;
src += padding;
size -= padding;
#endif
// medium size copy
if (size <= cachesize) {
__m256i c0, c1, c2, c3, c4, c5, c6, c7;
for (; size >= 256; size -= 256) {
c0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
c1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
c2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
c3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
c4 = _mm256_loadu_si256(((const __m256i*)src) + 4);
c5 = _mm256_loadu_si256(((const __m256i*)src) + 5);
c6 = _mm256_loadu_si256(((const __m256i*)src) + 6);
c7 = _mm256_loadu_si256(((const __m256i*)src) + 7);
_mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
src += 256;
_mm256_storeu_si256((((__m256i*)dst) + 0), c0);
_mm256_storeu_si256((((__m256i*)dst) + 1), c1);
_mm256_storeu_si256((((__m256i*)dst) + 2), c2);
_mm256_storeu_si256((((__m256i*)dst) + 3), c3);
_mm256_storeu_si256((((__m256i*)dst) + 4), c4);
_mm256_storeu_si256((((__m256i*)dst) + 5), c5);
_mm256_storeu_si256((((__m256i*)dst) + 6), c6);
_mm256_storeu_si256((((__m256i*)dst) + 7), c7);
dst += 256;
}
}
else { // big memory copy
__m256i c0, c1, c2, c3, c4, c5, c6, c7;
_mm_prefetch((const char*)(src), _MM_HINT_NTA);
if ((((size_t)src) & 31) == 0) { // source aligned
for (; size >= 256; size -= 256) {
c0 = _mm256_load_si256(((const __m256i*)src) + 0);
c1 = _mm256_load_si256(((const __m256i*)src) + 1);
c2 = _mm256_load_si256(((const __m256i*)src) + 2);
c3 = _mm256_load_si256(((const __m256i*)src) + 3);
c4 = _mm256_load_si256(((const __m256i*)src) + 4);
c5 = _mm256_load_si256(((const __m256i*)src) + 5);
c6 = _mm256_load_si256(((const __m256i*)src) + 6);
c7 = _mm256_load_si256(((const __m256i*)src) + 7);
_mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
src += 256;
_mm256_stream_si256((((__m256i*)dst) + 0), c0);
_mm256_stream_si256((((__m256i*)dst) + 1), c1);
_mm256_stream_si256((((__m256i*)dst) + 2), c2);
_mm256_stream_si256((((__m256i*)dst) + 3), c3);
_mm256_stream_si256((((__m256i*)dst) + 4), c4);
_mm256_stream_si256((((__m256i*)dst) + 5), c5);
_mm256_stream_si256((((__m256i*)dst) + 6), c6);
_mm256_stream_si256((((__m256i*)dst) + 7), c7);
dst += 256;
}
}
else { // source unaligned
for (; size >= 256; size -= 256) {
c0 = _mm256_loadu_si256(((const __m256i*)src) + 0);
c1 = _mm256_loadu_si256(((const __m256i*)src) + 1);
c2 = _mm256_loadu_si256(((const __m256i*)src) + 2);
c3 = _mm256_loadu_si256(((const __m256i*)src) + 3);
c4 = _mm256_loadu_si256(((const __m256i*)src) + 4);
c5 = _mm256_loadu_si256(((const __m256i*)src) + 5);
c6 = _mm256_loadu_si256(((const __m256i*)src) + 6);
c7 = _mm256_loadu_si256(((const __m256i*)src) + 7);
_mm_prefetch((const char*)(src + 512), _MM_HINT_NTA);
src += 256;
_mm256_stream_si256((((__m256i*)dst) + 0), c0);
_mm256_stream_si256((((__m256i*)dst) + 1), c1);
_mm256_stream_si256((((__m256i*)dst) + 2), c2);
_mm256_stream_si256((((__m256i*)dst) + 3), c3);
_mm256_stream_si256((((__m256i*)dst) + 4), c4);
_mm256_stream_si256((((__m256i*)dst) + 5), c5);
_mm256_stream_si256((((__m256i*)dst) + 6), c6);
_mm256_stream_si256((((__m256i*)dst) + 7), c7);
dst += 256;
}
}
_mm_sfence();
}
memcpy_tiny(dst, src, size);
_mm256_zeroupper();
return destination;
}
#endif

View File

@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Linwei
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,20 +0,0 @@
Internal implementation of the `memcpy` function.
It has the following advantages over the `libc`-supplied implementation:
- it is linked statically, so the function is called directly, not through a `PLT` (procedure linkage table of a shared library);
- it is linked statically, so the function can have position-dependent code;
- your binaries will not depend on `glibc`'s memcpy, which forces a dependency on a specific symbol version like `memcpy@@GLIBC_2.14` and consequently on a specific version of the `glibc` library;
- you can include `memcpy.h` directly and the function has a chance to be inlined, which is beneficial for memory regions whose sizes are small but unknown at compile time;
- this version of `memcpy` pretends to be faster (in our benchmarks, the difference is within a few percent).
Currently it uses the implementation from **Linwei** (skywind3000@163.com).
Look at https://www.zhihu.com/question/35172305 for discussion.
Drawbacks:
- it only uses SSE 2 and doesn't use wider (AVX, AVX-512) vector registers when available;
- no CPU dispatching; it doesn't take the actual cache size into account.
Also worth looking at:
- simple implementation from Facebook: https://github.com/facebook/folly/blob/master/folly/memcpy.S
- implementation from Agner Fog: http://www.agner.org/optimize/
- glibc source code.

View File

@ -1,6 +0,0 @@
#include "FastMemcpy.h"
void * memcpy(void * __restrict destination, const void * __restrict source, size_t size)
{
return memcpy_fast(destination, source, size);
}

2
contrib/poco vendored

@ -1 +1 @@
Subproject commit c55b91f394efa9c238c33957682501681ef9b716
Subproject commit 83beecccb09eec0c9fd2669cacea03ede1d9f138

View File

@ -4,7 +4,9 @@
set -eux
set -o pipefail
trap "exit" INT TERM
trap 'kill $(jobs -pr) ||:' EXIT
# The watchdog is in a separate process group, so we have to kill it separately
# if the script terminates earlier.
trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT
stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
@ -14,35 +16,28 @@ BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-11_debug_none_bundled_unsplitted
function clone
{
(
# The download() function is dependent on CI binaries anyway, so we can take
# the repo from the CI as well. For local runs, start directly from the "fuzz"
# stage.
rm -rf ch ||:
mkdir ch
cd ch
git init
git remote add origin https://github.com/ClickHouse/ClickHouse
# Network is unreliable. GitHub neither.
for _ in {1..100}; do git fetch --depth=100 origin "$SHA_TO_TEST" && break; sleep 1; done
# Used to obtain the list of modified or added tests
for _ in {1..100}; do git fetch --depth=100 origin master && break; sleep 1; done
# If not master, try to fetch pull/.../{head,merge}
if [ "$PR_TO_TEST" != "0" ]
then
for _ in {1..100}; do git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*" && break; sleep 1; done
fi
git checkout "$SHA_TO_TEST"
)
mkdir ch ||:
wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz
ls -lath ||:
}
function download
{
wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"
wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse" &
wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/ci-changed-files.txt" &
wait
chmod +x clickhouse
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client
# clickhouse-server is in the current dir
export PATH="$PWD:$PATH"
}
function configure
@ -77,22 +72,21 @@ function watchdog
function fuzz
{
# Obtain the list of newly added tests. They will be fuzzed in more extreme way than other tests.
cd ch
NEW_TESTS=$(git diff --name-only "$(git merge-base origin/master "$SHA_TO_TEST"~)" "$SHA_TO_TEST" | grep -P 'tests/queries/0_stateless/.*\.sql' | sed -r -e 's!^!ch/!' | sort -R)
cd ..
# Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment.
NEW_TESTS="$(grep -P 'tests/queries/0_stateless/.*\.sql' ci-changed-files.txt | sed -r -e 's!^!ch/!' | sort -R)"
if [[ -n "$NEW_TESTS" ]]
then
NEW_TESTS_OPT="--interleave-queries-file ${NEW_TESTS}"
NEW_TESTS_OPT="${NEW_TESTS_OPT:---interleave-queries-file ${NEW_TESTS}}"
else
NEW_TESTS_OPT=""
NEW_TESTS_OPT="${NEW_TESTS_OPT:-}"
fi
./clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log &
clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log &
server_pid=$!
kill -0 $server_pid
while ! ./clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
./clickhouse-client --query "select 1"
while ! clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done
clickhouse-client --query "select 1"
kill -0 $server_pid
echo Server started
@ -111,14 +105,14 @@ continue
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
# shellcheck disable=SC2012,SC2046
./clickhouse-client --query-fuzzer-runs=1000 --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) $NEW_TESTS_OPT \
clickhouse-client --query-fuzzer-runs=1000 --queries-file $(ls -1 ch/tests/queries/0_stateless/*.sql | sort -R) $NEW_TESTS_OPT \
> >(tail -n 100000 > fuzzer.log) \
2>&1 \
|| fuzzer_exit_code=$?
echo "Fuzzer exit code is $fuzzer_exit_code"
./clickhouse-client --query "select elapsed, query from system.processes" ||:
clickhouse-client --query "select elapsed, query from system.processes" ||:
killall clickhouse-server ||:
for _ in {1..10}
do

View File

@ -11,3 +11,10 @@ services:
default:
aliases:
- postgre-sql.local
postgres2:
image: postgres
restart: always
environment:
POSTGRES_PASSWORD: mysecretpassword
ports:
- 5441:5432

View File

@ -2,7 +2,9 @@
set -exu
set -o pipefail
trap "exit" INT TERM
trap 'kill $(jobs -pr) ||:' EXIT
# The watchdog is in a separate process group, so we have to kill it separately
# if the script terminates earlier.
trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT
stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

View File

@ -130,6 +130,7 @@ zgrep -Fa "########################################" /test_output/* > /dev/null
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
mv /var/log/clickhouse-server/stderr.log /test_output/
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
# Write check result into check_status.tsv
clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv

View File

@ -67,6 +67,10 @@ def prepare_for_hung_check():
logging.info("Will terminate gdb (if any)")
call("kill -TERM $(pidof gdb)", shell=True, stderr=STDOUT)
# Some tests set the memory limit for the default user too low and forget to reset it back.
# It may cause SYSTEM queries to fail, so let's disable the memory limit.
call("clickhouse client --max_memory_usage_for_user=0 -q 'SELECT 1 FORMAT Null'", shell=True, stderr=STDOUT)
# Some tests execute SYSTEM STOP MERGES or similar queries.
# It may cause some ALTERs to hang.
# Possibly we should fix tests and forbid using such queries without specifying a table.
@ -78,7 +82,13 @@ def prepare_for_hung_check():
call("clickhouse client -q 'SYSTEM START REPLICATED SENDS'", shell=True, stderr=STDOUT)
call("clickhouse client -q 'SYSTEM START REPLICATION QUEUES'", shell=True, stderr=STDOUT)
time.sleep(30)
# Issue #21004, live views are experimental, so let's just suppress it
call("""clickhouse client -q "KILL QUERY WHERE upper(query) LIKE 'WATCH %'" """, shell=True, stderr=STDOUT)
# Wait for last queries to finish if any, not longer than 120 seconds
call("""clickhouse client -q "select sleepEachRow((
select maxOrDefault(120 - elapsed) + 1 from system.processes where query not like '%from system.processes%' and elapsed < 120
) / 120) from numbers(120) format Null" """, shell=True, stderr=STDOUT)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

View File

@ -35,7 +35,7 @@ RUN apt-get update \
ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN pip3 install urllib3 testflows==1.6.72 docker-compose docker dicttoxml kazoo tzlocal
RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal
ENV DOCKER_CHANNEL stable
ENV DOCKER_VERSION 17.09.1-ce

1
docs/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
build

View File

@ -233,7 +233,7 @@ Google OSS-Fuzz can be found at `docker/fuzz`.
We also use a simple fuzz test to generate random SQL queries and to check that the server doesn't die while executing them.
You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer).
We also use sophisticated AST-based query fuzzer that is able to find huge amount of corner cases. It does random permutations and substitutions in queries AST. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests while processing them in random order.
We also use a sophisticated AST-based query fuzzer that is able to find a huge number of corner cases. It does random permutations and substitutions in the query AST. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests while processing them in random order. You can learn more about this fuzzer in [this blog article](https://clickhouse.tech/blog/en/2021/fuzzing-clickhouse/).
## Stress test

View File

@ -39,4 +39,4 @@ ENGINE = EmbeddedRocksDB
PRIMARY KEY key
```
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/embedded-rocksdb/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/embedded-rocksdb/) <!--hide-->

View File

@ -5,7 +5,7 @@ toc_title: HDFS
# HDFS {#table_engines-hdfs}
This engine provides integration with [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)via ClickHouse. This engine is similar
This engine provides integration with [Apache Hadoop](https://en.wikipedia.org/wiki/Apache_Hadoop) ecosystem by allowing to manage data on [HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) via ClickHouse. This engine is similar
to the [File](../../../engines/table-engines/special/file.md#table_engines-file) and [URL](../../../engines/table-engines/special/url.md#table_engines-url) engines, but provides Hadoop-specific features.
## Usage {#usage}
@ -174,7 +174,7 @@ Similar to GraphiteMergeTree, the HDFS engine supports extended configuration us
| dfs\_domain\_socket\_path | "" |
[HDFS Configuration Reference ](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html) might explain some parameters.
[HDFS Configuration Reference](https://hawq.apache.org/docs/userguide/2.3.0.0-incubating/reference/HDFSConfigurationParameterReference.html) might explain some parameters.
#### ClickHouse extras {#clickhouse-extras}
@ -185,7 +185,6 @@ Similar to GraphiteMergeTree, the HDFS engine supports extended configuration us
|hadoop\_kerberos\_kinit\_command | kinit |
#### Limitations {#limitations}
* hadoop\_security\_kerberos\_ticket\_cache\_path can be global only, not user specific
## Kerberos support {#kerberos-support}
@ -207,4 +206,4 @@ If hadoop\_kerberos\_keytab, hadoop\_kerberos\_principal or hadoop\_kerberos\_ki
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/hdfs/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/hdfs/) <!--hide-->

View File

@ -18,3 +18,6 @@ List of supported integrations:
- [Kafka](../../../engines/table-engines/integrations/kafka.md)
- [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md)
- [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md)
- [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md)
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/) <!--hide-->

View File

@ -85,4 +85,4 @@ FROM jdbc_table
- [JDBC table function](../../../sql-reference/table-functions/jdbc.md).
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/jdbc/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/jdbc/) <!--hide-->

View File

@ -194,4 +194,4 @@ Example:
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size)
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/kafka/) <!--hide-->

View File

@ -54,4 +54,4 @@ SELECT COUNT() FROM mongo_table;
└─────────┘
```
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/integrations/mongodb/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/mongodb/) <!--hide-->

View File

@ -24,6 +24,7 @@ The table structure can differ from the original MySQL table structure:
- Column names should be the same as in the original MySQL table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. Default is true; if false, the table function does not create Nullable columns and inserts default values instead of nulls. This also applies to null values inside array data types. A short mapping sketch follows this list.
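As an illustration of these mapping rules, here is a minimal sketch; the remote host, database, table, column names and credentials are placeholders. The ClickHouse table exposes only two of the remote columns and uses a ClickHouse integer type for a remote integer column.
``` sql
CREATE TABLE mysql_customers_subset
(
    `id` UInt32,    -- the remote column may be declared as MySQL INT; values are cast on read
    `name` String
)
ENGINE = MySQL('mysql-host:3306', 'shop', 'customers', 'reader', 'secret');

SELECT id, name FROM mysql_customers_subset LIMIT 10;
```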
**Engine Parameters**
@ -100,4 +101,4 @@ SELECT * FROM mysql_table
- [The mysql table function](../../../sql-reference/table-functions/mysql.md)
- [Using MySQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/mysql/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/mysql/) <!--hide-->

View File

@ -29,6 +29,7 @@ The table structure can differ from the source table structure:
- Column names should be the same as in the source table, but you can use just some of these columns and in any order.
- Column types may differ from those in the source table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. Default is true; if false, the table function does not create Nullable columns and inserts default values instead of nulls. This also applies to null values inside array data types.
**Engine Parameters**
@ -127,4 +128,4 @@ SELECT * FROM odbc_t
- [ODBC external dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-odbc)
- [ODBC table function](../../../sql-reference/table-functions/odbc.md)
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/odbc/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/odbc/) <!--hide-->

View File

@ -0,0 +1,106 @@
---
toc_priority: 8
toc_title: PostgreSQL
---
# PostgreSQL {#postgresql}
The PostgreSQL engine allows you to perform `SELECT` queries on data that is stored on a remote PostgreSQL server.
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password');
```
See a detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query.
The table structure can differ from the original PostgreSQL table structure:
- Column names should be the same as in the original PostgreSQL table, but you can use just some of these columns and in any order.
- Column types may differ from those in the original PostgreSQL table. ClickHouse tries to [cast](../../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) values to the ClickHouse data types.
- The `external_table_functions_use_nulls` setting defines how to handle Nullable columns. Default is 1; if 0, the table function does not create Nullable columns and inserts default values instead of nulls. This also applies to null values inside array data types.
**Engine Parameters**
- `host:port` — PostgreSQL server address.
- `database` — Remote database name.
- `table` — Remote table name.
- `user` — PostgreSQL user.
- `password` — User password.
`SELECT` queries on the PostgreSQL side run as `COPY (SELECT ...) TO STDOUT` inside a read-only PostgreSQL transaction, with a commit after each `SELECT` query.
Simple `WHERE` clauses such as `=, !=, >, >=, <, <=, IN` are executed on the PostgreSQL server.
All joins, aggregations, sorting, `IN [ array ]` conditions and the `LIMIT` sampling constraint are executed in ClickHouse only after the query to PostgreSQL finishes.
`INSERT` queries on the PostgreSQL side run as `COPY "table_name" (field1, field2, ... fieldN) FROM STDIN` inside a PostgreSQL transaction, with auto-commit after each `INSERT` statement.
PostgreSQL `Array` types are converted into ClickHouse arrays.
Be careful: in PostgreSQL, array data created as `type_name[]` may contain multi-dimensional arrays of different dimensions in different table rows of the same column, while in ClickHouse multidimensional arrays must have the same number of dimensions in all table rows of the same column.
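To make this split concrete, here is a minimal sketch that reuses the `postgresql_table` table from the usage example below: the comparison in the `WHERE` clause is sent to the PostgreSQL server, while the aggregation is computed in ClickHouse on the returned rows.
``` sql
SELECT str, count() AS cnt
FROM postgresql_table
WHERE int_id >= 1     -- simple predicate, executed on the PostgreSQL side
GROUP BY str;         -- aggregation, executed in ClickHouse
```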
## Usage Example {#usage-example}
Table in PostgreSQL:
``` text
postgres=# CREATE TABLE "public"."test" (
"int_id" SERIAL,
"int_nullable" INT NULL DEFAULT NULL,
"float" FLOAT NOT NULL,
"str" VARCHAR(100) NOT NULL DEFAULT '',
"float_nullable" FLOAT NULL DEFAULT NULL,
PRIMARY KEY (int_id));
CREATE TABLE
postgres=# insert into test (int_id, str, "float") VALUES (1,'test',2);
INSERT 0 1
postgresql> select * from test;
int_id | int_nullable | float | str | float_nullable
--------+--------------+-------+------+----------------
1 | | 2 | test |
(1 row)
```
Table in ClickHouse, retrieving data from the PostgreSQL table created above:
``` sql
CREATE TABLE default.postgresql_table
(
`float_nullable` Nullable(Float32),
`str` String,
`int_id` Int32
)
ENGINE = PostgreSQL('localhost:5432', 'public', 'test', 'postgres_user', 'postgres_password');
```
``` sql
SELECT * FROM postgresql_table WHERE str IN ('test')
```
``` text
┌─float_nullable─┬─str──┬─int_id─┐
│ ᴺᵁᴸᴸ │ test │ 1 │
└────────────────┴──────┴────────┘
1 rows in set. Elapsed: 0.019 sec.
```
## See Also {#see-also}
- [The postgresql table function](../../../sql-reference/table-functions/postgresql.md)
- [Using PostgreSQL as a source of external dictionary](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/postgresql/) <!--hide-->

View File

@ -163,3 +163,5 @@ Example:
- `_redelivered` - `redelivered` flag of the message.
- `_message_id` - messageID of the received message; non-empty if was set, when message was published.
- `_timestamp` - timestamp of the received message; non-empty if was set, when message was published.
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/rabbitmq/) <!--hide-->

View File

@ -6,11 +6,11 @@ toc_title: S3
# S3 {#table_engines-s3}
This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ecosystem. This engine is similar
to the [HDFS](../../../engines/table-engines/special/file.md#table_engines-hdfs) engine, but provides S3-specific features.
to the [HDFS](../../../engines/table-engines/integrations/hdfs.md#table_engines-hdfs) engine, but provides S3-specific features.
## Usage {#usage}
``` sql
```sql
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression])
```
@ -25,23 +25,23 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, structure,
**1.** Set up the `s3_engine_table` table:
``` sql
```sql
CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
```
**2.** Fill file:
``` sql
```sql
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3)
```
**3.** Query the data:
``` sql
```sql
SELECT * FROM s3_engine_table LIMIT 2
```
``` text
```text
┌─name─┬─value─┐
│ one │ 1 │
│ two │ 2 │
@ -69,7 +69,7 @@ Constructions with `{}` are similar to the [remote](../../../sql-reference/table
**Example**
1. Suppose we have several files in TSV format with the following URIs on HDFS:
1. Suppose we have several files in CSV format with the following URIs on S3:
- https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv
- https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv
@ -82,19 +82,19 @@ Constructions with `{}` are similar to the [remote](../../../sql-reference/table
<!-- -->
``` sql
```sql
CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV')
```
3. Another way:
``` sql
```sql
CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV')
```
4. Table consists of all the files in both directories (all files should satisfy format and schema described in query):
``` sql
```sql
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV')
```
@ -105,7 +105,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https:
Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
``` sql
```sql
CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV')
```
@ -124,7 +124,7 @@ The following settings can be set before query execution or placed into configur
- `s3_max_single_part_upload_size` — Default value is `64Mb`. The maximum size of object to upload using singlepart upload to S3.
- `s3_min_upload_part_size` — Default value is `512Mb`. The minimum size of part to upload during multipart upload to [S3 Multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
- `s3_max_redirects` — Default value is `10`. Max number of S3 redirects hops allowed.
- `s3_max_redirects` — Default value is `10`. Max number of HTTP redirect hops allowed for S3 requests.
Security consideration: if a malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; alternatively, `remote_host_filter` must be specified in the server configuration.
@ -153,4 +153,4 @@ Example:
</s3>
```
[Original article](https://clickhouse.tech/docs/en/operations/table_engines/s3/) <!--hide-->
[Original article](https://clickhouse.tech/docs/en/engines/table-engines/integrations/s3/) <!--hide-->

View File

@ -353,7 +353,7 @@ The `set` index can be used with all functions. Function subsets for other index
| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
|------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, \<\>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
@ -361,10 +361,10 @@ The `set` index can be used with all functions. Function subsets for other index
| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [less (<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greater (>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [lessOrEquals (<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [greaterOrEquals (>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
@ -529,7 +529,7 @@ CREATE TABLE table_for_aggregation
y Int
)
ENGINE = MergeTree
ORDER BY k1, k2
ORDER BY (k1, k2)
TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
```
@ -748,7 +748,6 @@ Configuration markup:
</proxy>
<connect_timeout_ms>10000</connect_timeout_ms>
<request_timeout_ms>5000</request_timeout_ms>
<max_connections>100</max_connections>
<retry_attempts>10</retry_attempts>
<min_bytes_for_seek>1000</min_bytes_for_seek>
<metadata_path>/var/lib/clickhouse/disks/s3/</metadata_path>
@ -771,7 +770,6 @@ Optional parameters:
- `proxy` — Proxy configuration for S3 endpoint. Each `uri` element inside `proxy` block should contain a proxy URL.
- `connect_timeout_ms` — Socket connect timeout in milliseconds. Default value is `10 seconds`.
- `request_timeout_ms` — Request timeout in milliseconds. Default value is `5 seconds`.
- `max_connections` — S3 connections pool size. Default value is `100`.
- `retry_attempts` — Number of retry attempts in case of failed request. Default value is `10`.
- `min_bytes_for_seek` — Minimal number of bytes to use seek operation instead of sequential read. Default value is `1 Mb`.
- `metadata_path` — Path on local FS to store metadata files for S3. Default value is `/var/lib/clickhouse/disks/<disk_name>/`.

View File

@ -38,10 +38,10 @@ The queries are executed as a read-only user. It implies some limitations:
The following settings are also enforced:
- [max_result_bytes=10485760](../operations/settings/query_complexity/#max-result-bytes)
- [max_result_rows=2000](../operations/settings/query_complexity/#setting-max_result_rows)
- [result_overflow_mode=break](../operations/settings/query_complexity/#result-overflow-mode)
- [max_execution_time=60000](../operations/settings/query_complexity/#max-execution-time)
- [max_result_bytes=10485760](../operations/settings/query-complexity/#max-result-bytes)
- [max_result_rows=2000](../operations/settings/query-complexity/#setting-max_result_rows)
- [result_overflow_mode=break](../operations/settings/query-complexity/#result-overflow-mode)
- [max_execution_time=60000](../operations/settings/query-complexity/#max-execution-time)
## Examples {#examples}

View File

@ -1254,7 +1254,7 @@ ClickHouse supports configurable precision of `Decimal` type. The `INSERT` query
Unsupported Parquet data types: `DATE32`, `TIME32`, `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [cast](../query_language/functions/type_conversion_functions/#type_conversion_function-cast) the data to that data type which is set for the ClickHouse table column.
Data types of ClickHouse table columns can differ from the corresponding fields of the Parquet data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](../sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) the data to the data type that is set for the ClickHouse table column.
### Inserting and Selecting Data {#inserting-and-selecting-data}
@ -1359,15 +1359,15 @@ When working with the `Regexp` format, you can use the following settings:
- Escaped (similarly to [TSV](#tabseparated))
- Quoted (similarly to [Values](#data-format-values))
- Raw (extracts subpatterns as a whole, no escaping rules)
- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Defines the need to throw an exeption in case the `format_regexp` expression does not match the imported data. Can be set to `0` or `1`.
- `format_regexp_skip_unmatched` — [UInt8](../sql-reference/data-types/int-uint.md). Defines whether to throw an exception when the `format_regexp` expression does not match the imported data. Can be set to `0` or `1`.
**Usage**
**Usage**
The regular expression from `format_regexp` setting is applied to every line of imported data. The number of subpatterns in the regular expression must be equal to the number of columns in imported dataset.
The regular expression from `format_regexp` setting is applied to every line of imported data. The number of subpatterns in the regular expression must be equal to the number of columns in imported dataset.
Lines of the imported data must be separated by newline character `'\n'` or DOS-style newline `"\r\n"`.
Lines of the imported data must be separated by newline character `'\n'` or DOS-style newline `"\r\n"`.
The content of every matched subpattern is parsed with the method of corresponding data type, according to `format_regexp_escaping_rule` setting.
The content of every matched subpattern is parsed with the method of corresponding data type, according to `format_regexp_escaping_rule` setting.
If the regular expression does not match the line and `format_regexp_skip_unmatched` is set to 1, the line is silently skipped. If `format_regexp_skip_unmatched` is set to 0, exception is thrown.

View File

@ -167,4 +167,21 @@ Features:
[How to configure ClickHouse in Looker.](https://docs.looker.com/setup-and-management/database-config/clickhouse)
### SeekTable {#seektable}
[SeekTable](https://www.seektable.com) is a self-service BI tool for data exploration and operational reporting. SeekTable is available both as a cloud service and a self-hosted version. SeekTable reports may be embedded into any web-app.
Features:
- Business-user-friendly report builder.
- Powerful report parameters for SQL filtering and report-specific query customizations.
- Can connect to ClickHouse both with a native TCP/IP endpoint and an HTTP(S) interface (2 different drivers).
- It is possible to use the full power of the ClickHouse SQL dialect in dimension/measure definitions.
- [Web API](https://www.seektable.com/help/web-api-integration) for automated reports generation.
- Supports a report development flow with account data [backup/restore](https://www.seektable.com/help/self-hosted-backup-restore); data models (cubes) and report configurations are human-readable XML and can be stored under version control.
SeekTable is [free](https://www.seektable.com/help/cloud-pricing) for personal/individual usage.
[How to configure ClickHouse connection in SeekTable.](https://www.seektable.com/help/clickhouse-pivot-table)
[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/gui/) <!--hide-->

View File

@ -11,3 +11,5 @@ ClickHouse supports authenticating and managing users using external services.
The following external authenticators and directories are supported:
- [LDAP](./ldap.md#external-authenticators-ldap) [Authenticator](./ldap.md#ldap-external-authenticator) and [Directory](./ldap.md#ldap-external-user-directory)
[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/index/) <!--hide-->

View File

@ -2,14 +2,16 @@
LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this:
- use LDAP as an external authenticator for existing users, which are defined in `users.xml` or in local access control paths
- use LDAP as an external user directory and allow locally undefined users to be authenticated if they exist on the LDAP server
- Use LDAP as an external authenticator for existing users, which are defined in `users.xml` or in local access control paths.
- Use LDAP as an external user directory and allow locally undefined users to be authenticated if they exist on the LDAP server.
For both of these approaches, an internally named LDAP server must be defined in the ClickHouse config so that other parts of config are able to refer to it.
For both of these approaches, an internally named LDAP server must be defined in the ClickHouse config so that other parts of the config can refer to it.
## LDAP Server Definition {#ldap-server-definition}
To define LDAP server you must add `ldap_servers` section to the `config.xml`. For example,
To define an LDAP server you must add an `ldap_servers` section to `config.xml`.
**Example**
```xml
<yandex>
@ -35,38 +37,35 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`. F
Note that you can define multiple LDAP servers inside the `ldap_servers` section using distinct names.
Parameters:
**Parameters**
- `host` - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
- `port` - LDAP server port, default is `636` if `enable_tls` is set to `true`, `389` otherwise.
- `bind_dn` - template used to construct the DN to bind to.
- The resulting DN will be constructed by replacing all `{user_name}` substrings of the
template with the actual user name during each authentication attempt.
- `verification_cooldown` - a period of time, in seconds, after a successful bind attempt,
during which the user will be assumed to be successfully authenticated for all consecutive
requests without contacting the LDAP server.
- `host` — LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
- `port` — LDAP server port, default is `636` if `enable_tls` is set to `true`, `389` otherwise.
- `bind_dn` — Template used to construct the DN to bind to.
- The resulting DN will be constructed by replacing all `{user_name}` substrings of the template with the actual user name during each authentication attempt.
- `verification_cooldown` — A period of time, in seconds, after a successful bind attempt, during which the user will be assumed to be successfully authenticated for all consecutive requests without contacting the LDAP server.
- Specify `0` (the default) to disable caching and force contacting the LDAP server for each authentication request.
- `enable_tls` - flag to trigger use of secure connection to the LDAP server.
- `enable_tls` — A flag to trigger the use of the secure connection to the LDAP server.
- Specify `no` for plain text `ldap://` protocol (not recommended).
- Specify `yes` for LDAP over SSL/TLS `ldaps://` protocol (recommended, the default).
- Specify `starttls` for legacy StartTLS protocol (plain text `ldap://` protocol, upgraded to TLS).
- `tls_minimum_protocol_version` - the minimum protocol version of SSL/TLS.
- `tls_minimum_protocol_version` — The minimum protocol version of SSL/TLS.
- Accepted values are: `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, `tls1.2` (the default).
- `tls_require_cert` - SSL/TLS peer certificate verification behavior.
- `tls_require_cert` SSL/TLS peer certificate verification behavior.
- Accepted values are: `never`, `allow`, `try`, `demand` (the default).
- `tls_cert_file` - path to certificate file.
- `tls_key_file` - path to certificate key file.
- `tls_ca_cert_file` - path to CA certificate file.
- `tls_ca_cert_dir` - path to the directory containing CA certificates.
- `tls_cipher_suite` - allowed cipher suite (in OpenSSL notation).
- `tls_cert_file` — Path to certificate file.
- `tls_key_file` — Path to certificate key file.
- `tls_ca_cert_file` — Path to CA certificate file.
- `tls_ca_cert_dir` — Path to the directory containing CA certificates.
- `tls_cipher_suite` — Allowed cipher suite (in OpenSSL notation).
## LDAP External Authenticator {#ldap-external-authenticator}
A remote LDAP server can be used as a method for verifying passwords for locally defined users (users defined in `users.xml` or in local access control paths). In order to achieve this, specify previously defined LDAP server name instead of `password` or similar sections in the user definition.
A remote LDAP server can be used as a method for verifying passwords for locally defined users (users defined in `users.xml` or in local access control paths). To achieve this, specify previously defined LDAP server name instead of `password` or similar sections in the user definition.
At each login attempt, ClickHouse will try to "bind" to the specified DN defined by the `bind_dn` parameter in the [LDAP server definition](#ldap-server-definition) using the provided credentials, and if successful, the user will be considered authenticated. This is often called a "simple bind" method.
At each login attempt, ClickHouse tries to "bind" to the specified DN defined by the `bind_dn` parameter in the [LDAP server definition](#ldap-server-definition) using the provided credentials, and if successful, the user is considered authenticated. This is often called a "simple bind" method.
For example,
**Example**
```xml
<yandex>
@ -85,19 +84,24 @@ For example,
Note, that user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously.
When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled in ClickHouse, users that are authenticated by LDAP servers can also be created using the [CRATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement.
When SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled, users that are authenticated by LDAP servers can also be created using the [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement) statement.
Query:
```sql
CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server'
CREATE USER my_user IDENTIFIED WITH ldap SERVER 'my_ldap_server';
```
## LDAP External User Directory {#ldap-external-user-directory}
In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. In order to achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in an `ldap` section inside the `users_directories` section of the `config.xml` file.
In addition to the locally defined users, a remote LDAP server can be used as a source of user definitions. To achieve this, specify previously defined LDAP server name (see [LDAP Server Definition](#ldap-server-definition)) in the `ldap` section inside the `users_directories` section of the `config.xml` file.
At each login attempt, ClickHouse will try to find the user definition locally and authenticate it as usual, but if the user is not defined, ClickHouse will assume it exists in the external LDAP directory, and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement.
At each login attempt, ClickHouse tries to find the user definition locally and authenticate it as usual. If the user is not defined, ClickHouse will assume the definition exists in the external LDAP directory and will try to "bind" to the specified DN at the LDAP server using the provided credentials. If successful, the user will be considered existing and authenticated. The user will be assigned roles from the list specified in the `roles` section. Additionally, LDAP "search" can be performed and results can be transformed and treated as role names and then be assigned to the user if the `role_mapping` section is also configured. All this implies that the SQL-driven [Access Control and Account Management](../access-rights.md#access-control) is enabled and roles are created using the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement.
Example (goes into `config.xml`):
**Example**
Goes into `config.xml`.
```xml
<yandex>
@ -122,33 +126,24 @@ Example (goes into `config.xml`):
</yandex>
```
Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously
defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)).
Note that `my_ldap_server` referred in the `ldap` section inside the `user_directories` section must be a previously defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)).
Parameters:
**Parameters**
- `server` - one of LDAP server names defined in the `ldap_servers` config section above.
This parameter is mandatory and cannot be empty.
- `roles` - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
- If no roles are specified here or assigned during role mapping (below), user will not be able
to perform any actions after authentication.
- `role_mapping` - section with LDAP search parameters and mapping rules.
- When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter`
and the name of the logged in user. For each entry found during that search, the value of the specified
attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed,
and the rest of the value becomes the name of a local role defined in ClickHouse,
which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement.
- `server` — One of LDAP server names defined in the `ldap_servers` config section above. This parameter is mandatory and cannot be empty.
- `roles` — Section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
    - If no roles are specified here or assigned during role mapping (below), the user will not be able to perform any actions after authentication.
- `role_mapping` — Section with LDAP search parameters and mapping rules.
- When a user authenticates, while still bound to LDAP, an LDAP search is performed using `search_filter` and the name of the logged-in user. For each entry found during that search, the value of the specified attribute is extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by the [CREATE ROLE](../../sql-reference/statements/create/role.md#create-role-statement) statement.
- There can be multiple `role_mapping` sections defined inside the same `ldap` section. All of them will be applied.
- `base_dn` - template used to construct the base DN for the LDAP search.
- The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}`
substrings of the template with the actual user name and bind DN during each LDAP search.
- `scope` - scope of the LDAP search.
- `base_dn` — Template used to construct the base DN for the LDAP search.
- The resulting DN will be constructed by replacing all `{user_name}` and `{bind_dn}` substrings of the template with the actual user name and bind DN during each LDAP search.
- `scope` — Scope of the LDAP search.
- Accepted values are: `base`, `one_level`, `children`, `subtree` (the default).
- `search_filter` - template used to construct the search filter for the LDAP search.
- The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}`, and `{base_dn}`
substrings of the template with the actual user name, bind DN, and base DN during each LDAP search.
- `search_filter` — Template used to construct the search filter for the LDAP search.
- The resulting filter will be constructed by replacing all `{user_name}`, `{bind_dn}` and `{base_dn}` substrings of the template with the actual user name, bind DN and base DN during each LDAP search.
- Note, that the special characters must be escaped properly in XML.
- `attribute` - attribute name whose values will be returned by the LDAP search.
- `prefix` - prefix, that will be expected to be in front of each string in the original
list of strings returned by the LDAP search. Prefix will be removed from the original
strings and resulting strings will be treated as local role names. Empty, by default.
- `attribute` — Attribute name whose values will be returned by the LDAP search.
    - `prefix` — Prefix that is expected to be in front of each string in the original list of strings returned by the LDAP search. The prefix will be removed from the original strings and the resulting strings will be treated as local role names. Empty by default.
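Note that roles produced by such a mapping are not created automatically; as described above, they are expected to exist locally beforehand. A minimal sketch, with a purely hypothetical role name:

``` sql
-- Hypothetical role name; it must match what role_mapping yields
-- after the prefix is stripped from the LDAP attribute value.
CREATE ROLE clickhouse_operators;
```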
[Original article](https://clickhouse.tech/docs/en/operations/external-authenticators/ldap/) <!--hide-->

View File

@ -1,6 +1,6 @@
# system.data_type_families {#system_tables-data_type_families}
Contains information about supported [data types](../../sql-reference/data-types/).
Contains information about supported [data types](../../sql-reference/data-types/index.md).
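For orientation, a minimal query against this table might look like the following sketch (the `name` and `alias_to` columns are assumed from the current schema):

``` sql
SELECT name, alias_to
FROM system.data_type_families
WHERE alias_to != ''
LIMIT 5;
```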
Columns:

View File

@ -7,11 +7,15 @@ Columns:
- `name` ([String](../../sql-reference/data-types/string.md)) — name of the error (`errorCodeToName`).
- `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error.
- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has happened.
- `last_error_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — time when the last error happened.
- `last_error_message` ([String](../../sql-reference/data-types/string.md)) — message for the last error.
- `last_error_stacktrace` ([String](../../sql-reference/data-types/string.md)) — stacktrace for the last error.
- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — remote exception (i.e. received during one of the distributed queries).
**Example**
``` sql
SELECT *
SELECT name, code, value
FROM system.errors
WHERE value > 0
ORDER BY code ASC

View File

@ -70,12 +70,12 @@ num_tries: 36
last_exception: Code: 226, e.displayText() = DB::Exception: Marks file '/opt/clickhouse/data/merge/visits_v2/tmp_fetch_20201130_121373_121384_2/CounterID.mrk' doesn't exist (version 20.8.7.15 (official build))
last_attempt_time: 2020-12-08 17:35:54
num_postponed: 0
postpone_reason:
postpone_reason:
last_postpone_time: 1970-01-01 03:00:00
```
**See Also**
- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md/#query-language-system-replicated)
- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated)
[Original article](https://clickhouse.tech/docs/en/operations/system_tables/replication_queue) <!--hide-->

View File

@ -250,4 +250,3 @@ FROM people
```
[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/combinators/) <!--hide-->

View File

@ -59,4 +59,3 @@ SELECT groupArray(y) FROM t_null_big
`groupArray` does not include `NULL` in the resulting array.
[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/) <!--hide-->

View File

@ -254,8 +254,8 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN)
**Parameters**
- `window` — Length of the sliding window. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`.
- `mode` - It is an optional argument.
- `'strict'` - When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values.
- `mode` It is an optional argument.
- `'strict'` When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values.
**Returned value**
@ -336,14 +336,14 @@ retention(cond1, cond2, ..., cond32);
**Arguments**
- `cond`an expression that returns a `UInt8` result (1 or 0).
- `cond`An expression that returns a `UInt8` result (1 or 0).
**Returned value**
The array of 1 or 0.
- 1 — condition was met for the event.
- 0 — condition wasnt met for the event.
- 1 — Condition was met for the event.
- 0 — Condition wasnt met for the event.
Type: `UInt8`.
@ -500,7 +500,6 @@ Problem: Generate a report that shows only keywords that produced at least 5 uni
Solution: Write in the GROUP BY query SearchPhrase HAVING uniqUpTo(4)(UserID) >= 5
```
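Spelled out as a complete query, assuming a hypothetical `hits` table with `SearchPhrase` and `UserID` columns, the solution above would look roughly like this:

``` sql
-- uniqUpTo(4) counts distinct UserIDs exactly up to 4 and returns 5 for anything above,
-- so the HAVING clause keeps only phrases used by at least 5 distinct users.
SELECT SearchPhrase
FROM hits
GROUP BY SearchPhrase
HAVING uniqUpTo(4)(UserID) >= 5;
```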
[Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/parametric_functions/) <!--hide-->
## sumMapFiltered(keys_to_keep)(keys, values) {#summapfilteredkeys-to-keepkeys-values}
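A minimal call sketch (the single input row is illustrative); the function behaves like `sumMap` restricted to the keys listed in `keys_to_keep`:

``` sql
SELECT sumMapFiltered([1, 4])(keys, values)
FROM (SELECT [1, 2, 3, 4] AS keys, [10, 20, 30, 40] AS values);
-- Expected result: ([1,4],[10,40])
```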

View File

@ -7,8 +7,9 @@ toc_priority: 1
Counts the number of rows or not-NULL values.
ClickHouse supports the following syntaxes for `count`:
- `count(expr)` or `COUNT(DISTINCT expr)`.
- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific.
- `count(expr)` or `COUNT(DISTINCT expr)`.
- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific.
**Arguments**

View File

@ -9,7 +9,7 @@ Inserts a value into the array at the specified position.
**Syntax**
``` sql
groupArrayInsertAt(default_x, size)(x, pos);
groupArrayInsertAt(default_x, size)(x, pos)
```
If in one query several values are inserted into the same position, the function behaves in the following ways:
@ -21,8 +21,8 @@ If in one query several values are inserted into the same position, the function
- `x` — Value to be inserted. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in one of the [supported data types](../../../sql-reference/data-types/index.md).
- `pos` — Position at which the specified element `x` is to be inserted. Index numbering in the array starts from zero. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
- `default_x`— Default value for substituting in empty positions. Optional parameter. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create/table.md#create-default-values) are used.
- `size`— Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` must be specified. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
- `default_x` — Default value for substituting in empty positions. Optional parameter. [Expression](../../../sql-reference/syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create/table.md#create-default-values) are used.
- `size` — Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` must be specified. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
**Returned value**

View File

@ -14,7 +14,7 @@ groupBitmapOr(expr)
`expr` An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
**Return value**
**Returned value**
Value of the `UInt64` type.
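A self-contained sketch, building the bitmap states inline instead of reading them from a table:

``` sql
-- The union of {1,2,3} and {3,4,5} has 5 distinct elements, so the result is 5.
SELECT groupBitmapOr(z)
FROM
(
    SELECT bitmapBuild([1, 2, 3]) AS z
    UNION ALL
    SELECT bitmapBuild([3, 4, 5]) AS z
);
```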

View File

@ -14,7 +14,7 @@ groupBitmapOr(expr)
`expr` An expression that results in `AggregateFunction(groupBitmap, UInt*)` type.
**Return value**
**Returned value**
Value of the `UInt64` type.

View File

@ -14,7 +14,7 @@ groupBitOr(expr)
`expr` An expression that results in `UInt*` type.
**Return value**
**Returned value**
Value of the `UInt*` type.
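A minimal sketch over generated numbers:

``` sql
-- Bitwise OR of 0..7 sets the three lowest bits, so the result is 7.
SELECT groupBitOr(number) FROM numbers(8);
```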

View File

@ -10,7 +10,7 @@ Use it for tests or to process columns of types `AggregateFunction` and `Aggrega
**Syntax**
``` sql
initializeAggregation (aggregate_function, column_1, column_2);
initializeAggregation (aggregate_function, column_1, column_2)
```
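As a quick illustration (the aggregate function and input are only examples), the call below produces per-row aggregation states that can later be merged:

``` sql
-- Each row gets a uniq state for (number % 3); merging them yields 3 distinct values.
SELECT uniqMerge(state)
FROM
(
    SELECT initializeAggregation('uniqState', number % 3) AS state
    FROM numbers(1000)
);
```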
**Arguments**

View File

@ -21,5 +21,5 @@ The kurtosis of the given distribution. Type — [Float64](../../../sql-referenc
**Example**
``` sql
SELECT kurtPop(value) FROM series_with_value_column
SELECT kurtPop(value) FROM series_with_value_column;
```

View File

@ -23,5 +23,5 @@ The kurtosis of the given distribution. Type — [Float64](../../../sql-referenc
**Example**
``` sql
SELECT kurtSamp(value) FROM series_with_value_column
SELECT kurtSamp(value) FROM series_with_value_column;
```

View File

@ -27,7 +27,7 @@ The null hypothesis is that two populations are stochastically equal. Also one-s
- `'two-sided'`;
- `'greater'`;
- `'less'`.
- `continuity_correction` - if not 0 then continuity correction in the normal approximation for the p-value is applied. (Optional, default: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md).
- `continuity_correction` if not 0 then continuity correction in the normal approximation for the p-value is applied. (Optional, default: 1.) [UInt64](../../../sql-reference/data-types/int-uint.md).
**Returned values**

View File

@ -21,5 +21,5 @@ The skewness of the given distribution. Type — [Float64](../../../sql-referenc
**Example**
``` sql
SELECT skewPop(value) FROM series_with_value_column
SELECT skewPop(value) FROM series_with_value_column;
```

View File

@ -23,5 +23,5 @@ The skewness of the given distribution. Type — [Float64](../../../sql-referenc
**Example**
``` sql
SELECT skewSamp(value) FROM series_with_value_column
SELECT skewSamp(value) FROM series_with_value_column;
```

View File

@ -18,8 +18,8 @@ The null hypothesis is that means of populations are equal. Normal distribution
**Arguments**
- `sample_data`sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index`sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
- `sample_data`Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index`Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
**Returned values**

View File

@ -18,13 +18,13 @@ We recommend using the `N < 10` value; performance is reduced with large `N` val
**Arguments**
- N is the number of elements to return.
- `N` The number of elements to return.
If the parameter is omitted, default value 10 is used.
**Arguments**
- x The value to calculate frequency.
- `x` The value to calculate frequency.
**Example**

View File

@ -18,7 +18,7 @@ topKWeighted(N)(x, weight)
**Arguments**
- `x` The value.
- `x` The value.
- `weight` — The weight. [UInt8](../../../sql-reference/data-types/int-uint.md).
**Returned value**

View File

@ -18,8 +18,8 @@ The null hypothesis is that means of populations are equal. Normal distribution
**Arguments**
- `sample_data`sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index`sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
- `sample_data`Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
- `sample_index`Sample index. [Integer](../../../sql-reference/data-types/int-uint.md).
**Returned values**

View File

@ -65,4 +65,3 @@ For our example, the structure of dictionary can be the following:
</dictionary>
```
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_hierarchical/) <!--hide-->

View File

@ -445,4 +445,3 @@ Other types are not supported yet. The function returns the attribute for the pr
Data must completely fit into RAM.
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_layout/) <!--hide-->

View File

@ -19,6 +19,8 @@ Example of settings:
</dictionary>
```
or
``` sql
CREATE DICTIONARY (...)
...
@ -58,7 +60,7 @@ When upgrading the dictionaries, the ClickHouse server applies different logic d
- For the MySQL source, the time of modification is checked using a `SHOW TABLE STATUS` query (in the case of MySQL 8 you need to disable meta-information caching in MySQL by `set global information_schema_stats_expiry=0`).
- Dictionaries from other sources are updated every time by default.
For other sources (ODBC, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
For other sources (ODBC, PostgreSQL, ClickHouse, etc), you can set up a query that will update the dictionaries only if they really changed, rather than each time. To do this, follow these steps:
- The dictionary table must have a field that always changes when the source data is updated.
- The settings of the source must specify a query that retrieves the changing field. The ClickHouse server interprets the query result as a row, and if this row has changed relative to its previous state, the dictionary is updated. Specify the query in the `<invalidate_query>` field in the settings for the [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md).
@ -84,4 +86,3 @@ SOURCE(ODBC(... invalidate_query 'SELECT update_time FROM dictionary_source wher
...
```
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_lifetime/) <!--hide-->

View File

@ -65,6 +65,7 @@ Types of sources (`source_type`):
- DBMS
- [ODBC](#dicts-external_dicts_dict_sources-odbc)
- [MySQL](#dicts-external_dicts_dict_sources-mysql)
- [PostgreSQL](#dicts-external_dicts_dict_sources-postgresql)
- [ClickHouse](#dicts-external_dicts_dict_sources-clickhouse)
- [MongoDB](#dicts-external_dicts_dict_sources-mongodb)
- [Redis](#dicts-external_dicts_dict_sources-redis)
@ -659,7 +660,7 @@ Example of settings:
Setting fields:
- `host` The Cassandra host or comma-separated list of hosts.
- `port` The port on the Cassandra servers. If not specified, default port is used.
- `port` The port on the Cassandra servers. If not specified, default port 9042 is used.
- `user` Name of the Cassandra user.
- `password` Password of the Cassandra user.
- `keyspace` Name of the keyspace (database).
@ -673,4 +674,52 @@ Default value is 1 (the first key column is a partition key and other key column
- `where` Optional selection criteria.
- `max_threads` – The maximum number of threads to use for loading data from multiple partitions in composite key dictionaries.
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_sources/) <!--hide-->
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
Example of settings:
``` xml
<source>
<postgresql>
<port>5432</port>
<user>clickhouse</user>
<password>qwerty</password>
<db>db_name</db>
<table>table_name</table>
<where>id=10</where>
<invalidate_query>SQL_QUERY</invalidate_query>
</postgresql>
</source>
```
or
``` sql
SOURCE(POSTGRESQL(
port 5432
host 'postgresql-hostname'
user 'postgres_user'
password 'postgres_password'
db 'db_name'
table 'table_name'
replica(host 'example01-1' port 5432 priority 1)
replica(host 'example01-2' port 5432 priority 2)
where 'id=10'
invalidate_query 'SQL_QUERY'
))
```
Setting fields:
- `host` The host on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `port` The port on the PostgreSQL server. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `user` Name of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `password` Password of the PostgreSQL user. You can specify it for all replicas, or for each one individually (inside `<replica>`).
- `replica` Section of replica configurations. There can be multiple sections.
- `replica/host` The PostgreSQL host.
- `replica/port` The PostgreSQL port.
- `replica/priority` The replica priority. When attempting to connect, ClickHouse traverses the replicas in order of priority. The lower the number, the higher the priority.
- `db` Name of the database.
- `table` Name of the table.
- `where` The selection criteria. The syntax for conditions is the same as for `WHERE` clause in PostgreSQL, for example, `id > 10 AND id < 20`. Optional parameter.
- `invalidate_query` Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md).

View File

@ -170,4 +170,3 @@ Configuration fields:
- [Functions for working with external dictionaries](../../../sql-reference/functions/ext-dict-functions.md).
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict_structure/) <!--hide-->

View File

@ -48,4 +48,3 @@ LIFETIME(...) -- Lifetime of dictionary in memory
- [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md) — Structure of the dictionary . A key and attributes that can be retrieved by this key.
- [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) — Frequency of dictionary updates.
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts_dict/) <!--hide-->

View File

@ -57,4 +57,3 @@ You can [configure](../../../sql-reference/dictionaries/external-dictionaries/ex
- [Dictionary Key and Fields](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md)
- [Functions for Working with External Dictionaries](../../../sql-reference/functions/ext-dict-functions.md)
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/external_dicts/) <!--hide-->

View File

@ -17,4 +17,3 @@ ClickHouse supports:
- [Built-in dictionaries](../../sql-reference/dictionaries/internal-dicts.md#internal_dicts) with a specific [set of functions](../../sql-reference/functions/ym-dict-functions.md).
- [Plug-in (external) dictionaries](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md#dicts-external-dicts) with a [set of functions](../../sql-reference/functions/ext-dict-functions.md).
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/) <!--hide-->

View File

@ -50,4 +50,3 @@ We recommend periodically updating the dictionaries with the geobase. During an
There are also functions for working with OS identifiers and Yandex.Metrica search engines, but they shouldnt be used.
[Original article](https://clickhouse.tech/docs/en/query_language/dicts/internal_dicts/) <!--hide-->

View File

@ -82,4 +82,3 @@ An exception is thrown when dividing by zero or when dividing a minimal negative
Returns the least common multiple of the numbers.
An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
[Original article](https://clickhouse.tech/docs/en/query_language/functions/arithmetic_functions/) <!--hide-->

View File

@ -376,7 +376,7 @@ arrayPopBack(array)
**Example**
``` sql
SELECT arrayPopBack([1, 2, 3]) AS res
SELECT arrayPopBack([1, 2, 3]) AS res;
```
``` text
@ -400,7 +400,7 @@ arrayPopFront(array)
**Example**
``` sql
SELECT arrayPopFront([1, 2, 3]) AS res
SELECT arrayPopFront([1, 2, 3]) AS res;
```
``` text
@ -425,7 +425,7 @@ arrayPushBack(array, single_value)
**Example**
``` sql
SELECT arrayPushBack(['a'], 'b') AS res
SELECT arrayPushBack(['a'], 'b') AS res;
```
``` text
@ -450,7 +450,7 @@ arrayPushFront(array, single_value)
**Example**
``` sql
SELECT arrayPushFront(['b'], 'a') AS res
SELECT arrayPushFront(['b'], 'a') AS res;
```
``` text
@ -482,7 +482,7 @@ An array of length `size`.
**Examples of calls**
``` sql
SELECT arrayResize([1], 3)
SELECT arrayResize([1], 3);
```
``` text
@ -492,7 +492,7 @@ SELECT arrayResize([1], 3)
```
``` sql
SELECT arrayResize([1], 3, NULL)
SELECT arrayResize([1], 3, NULL);
```
``` text
@ -513,12 +513,12 @@ arraySlice(array, offset[, length])
- `array` Array of data.
- `offset` Indent from the edge of the array. A positive value indicates an offset on the left, and a negative value is an indent on the right. Numbering of the array items begins with 1.
- `length` - The length of the required slice. If you specify a negative value, the function returns an open slice `[offset, array_length - length)`. If you omit the value, the function returns the slice `[offset, the_end_of_array]`.
- `length` The length of the required slice. If you specify a negative value, the function returns an open slice `[offset, array_length - length)`. If you omit the value, the function returns the slice `[offset, the_end_of_array]`.
**Example**
``` sql
SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res
SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res;
```
``` text
@ -766,7 +766,7 @@ Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges)
Query:
``` sql
SELECT arrayDifference([1, 2, 3, 4])
SELECT arrayDifference([1, 2, 3, 4]);
```
Result:
@ -782,7 +782,7 @@ Example of the overflow due to result type Int64:
Query:
``` sql
SELECT arrayDifference([0, 10000000000000000000])
SELECT arrayDifference([0, 10000000000000000000]);
```
Result:
@ -816,7 +816,7 @@ Returns an array containing the distinct elements.
Query:
``` sql
SELECT arrayDistinct([1, 2, 2, 3, 1])
SELECT arrayDistinct([1, 2, 2, 3, 1]);
```
Result:
@ -883,7 +883,7 @@ arrayReduce(agg_func, arr1, arr2, ..., arrN)
Query:
``` sql
SELECT arrayReduce('max', [1, 2, 3])
SELECT arrayReduce('max', [1, 2, 3]);
```
Result:
@ -899,7 +899,7 @@ If an aggregate function takes multiple arguments, then this function must be ap
Query:
``` sql
SELECT arrayReduce('maxIf', [3, 5], [1, 0])
SELECT arrayReduce('maxIf', [3, 5], [1, 0]);
```
Result:
@ -915,7 +915,7 @@ Example with a parametric aggregate function:
Query:
``` sql
SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
SELECT arrayReduce('uniqUpTo(3)', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
```
Result:
@ -1014,7 +1014,7 @@ Alias: `flatten`.
**Examples**
``` sql
SELECT flatten([[[1]], [[2], [3]]])
SELECT flatten([[[1]], [[2], [3]]]);
```
``` text
@ -1048,7 +1048,7 @@ Type: `Array`.
Query:
``` sql
SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3])
SELECT arrayCompact([1, 1, nan, nan, 2, 3, 3, 3]);
```
Result:
@ -1086,7 +1086,7 @@ Type: [Array](../../sql-reference/data-types/array.md).
Query:
``` sql
SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1])
SELECT arrayZip(['a', 'b', 'c'], [5, 2, 1]);
```
Result:
@ -1108,17 +1108,20 @@ arrayAUC(arr_scores, arr_labels)
```
**Arguments**
- `arr_scores` — scores the prediction model gives.
- `arr_labels` — labels of samples, usually 1 for a positive sample and 0 for a negative sample.
**Returned value**
Returns AUC value with type Float64.
**Example**
Query:
``` sql
select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1])
select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]);
```
Result:
@ -1541,4 +1544,3 @@ SELECT arrayCumSumNonNegative([1, 1, -4, 1]) AS res
```
Note that the `arraySumNonNegative` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You can pass a lambda function to it as the first argument.
[Original article](https://clickhouse.tech/docs/en/query_language/functions/array_functions/) <!--hide-->

View File

@ -32,4 +32,3 @@ SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src
└─────┴───────────┴─────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/array_join/) <!--hide-->

View File

@ -37,8 +37,8 @@ SELECT bitTest(number, index)
**Arguments**
- `number` integer number.
- `index` position of bit.
- `number` Integer number.
- `index` Position of bit.
**Returned values**
@ -53,7 +53,7 @@ For example, the number 43 in base-2 (binary) numeral system is 101011.
Query:
``` sql
SELECT bitTest(43, 1)
SELECT bitTest(43, 1);
```
Result:
@ -69,7 +69,7 @@ Another example:
Query:
``` sql
SELECT bitTest(43, 2)
SELECT bitTest(43, 2);
```
Result:
@ -102,8 +102,8 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...)
**Arguments**
- `number` integer number.
- `index1`, `index2`, `index3`, `index4` positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2`, ⋀ `index3``index4`).
- `number` Integer number.
- `index1`, `index2`, `index3`, `index4` Positions of bit. For example, for set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2`, ⋀ `index3``index4`).
**Returned values**
@ -118,7 +118,7 @@ For example, the number 43 in base-2 (binary) numeral system is 101011.
Query:
``` sql
SELECT bitTestAll(43, 0, 1, 3, 5)
SELECT bitTestAll(43, 0, 1, 3, 5);
```
Result:
@ -134,7 +134,7 @@ Another example:
Query:
``` sql
SELECT bitTestAll(43, 0, 1, 3, 5, 2)
SELECT bitTestAll(43, 0, 1, 3, 5, 2);
```
Result:
@ -167,8 +167,8 @@ SELECT bitTestAny(number, index1, index2, index3, index4, ...)
**Arguments**
- `number` integer number.
- `index1`, `index2`, `index3`, `index4` positions of bit.
- `number` Integer number.
- `index1`, `index2`, `index3`, `index4` Positions of bit.
**Returned values**
@ -183,7 +183,7 @@ For example, the number 43 in base-2 (binary) numeral system is 101011.
Query:
``` sql
SELECT bitTestAny(43, 0, 2)
SELECT bitTestAny(43, 0, 2);
```
Result:
@ -199,7 +199,7 @@ Another example:
Query:
``` sql
SELECT bitTestAny(43, 4, 2)
SELECT bitTestAny(43, 4, 2);
```
Result:
@ -239,7 +239,7 @@ Take for example the number 333. Its binary representation: 0000000101001101.
Query:
``` sql
SELECT bitCount(333)
SELECT bitCount(333);
```
Result:
@ -250,4 +250,3 @@ Result:
└───────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/bit_functions/) <!--hide-->

View File

@ -23,12 +23,12 @@ bitmapBuild(array)
**Arguments**
- `array` unsigned integer array.
- `array` Unsigned integer array.
**Example**
``` sql
SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res)
SELECT bitmapBuild([1, 2, 3, 4, 5]) AS res, toTypeName(res);
```
``` text
@ -47,12 +47,12 @@ bitmapToArray(bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res
SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res;
```
``` text
@ -72,13 +72,13 @@ bitmapSubsetInRange(bitmap, range_start, range_end)
**Arguments**
- `bitmap` [Bitmap object](#bitmap_functions-bitmapbuild).
- `range_start` range start point. Type: [UInt32](../../sql-reference/data-types/int-uint.md).
- `range_end` range end point(excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md).
- `range_start` Range start point. Type: [UInt32](../../sql-reference/data-types/int-uint.md).
- `range_end` Range end point (excluded). Type: [UInt32](../../sql-reference/data-types/int-uint.md).
**Example**
``` sql
SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res
SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res;
```
``` text
@ -114,7 +114,7 @@ Type: `Bitmap object`.
Query:
``` sql
SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res
SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res;
```
Result:
@ -148,7 +148,7 @@ Type: `UInt8`.
**Example**
``` sql
SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res
SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res;
```
``` text
@ -169,7 +169,7 @@ If you are sure that `bitmap2` contains strictly one element, consider using the
**Arguments**
- `bitmap*` bitmap object.
- `bitmap*` Bitmap object.
**Return values**
@ -179,7 +179,7 @@ If you are sure that `bitmap2` contains strictly one element, consider using the
**Example**
``` sql
SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res
SELECT bitmapHasAny(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
```
``` text
@ -199,12 +199,12 @@ bitmapHasAll(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res
SELECT bitmapHasAll(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res;
```
``` text
@ -223,12 +223,12 @@ bitmapCardinality(bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res
SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])) AS res;
```
``` text
@ -245,17 +245,19 @@ Retrun the smallest value of type UInt64 in the set, UINT32_MAX if the set is em
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res
SELECT bitmapMin(bitmapBuild([1, 2, 3, 4, 5])) AS res;
```
┌─res─┐
│ 1 │
└─────┘
``` text
┌─res─┐
│ 1 │
└─────┘
```
## bitmapMax {#bitmapmax}
@ -265,17 +267,19 @@ Retrun the greatest value of type UInt64 in the set, 0 if the set is empty.
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res
SELECT bitmapMax(bitmapBuild([1, 2, 3, 4, 5])) AS res;
```
┌─res─┐
│ 5 │
└─────┘
``` text
┌─res─┐
│ 5 │
└─────┘
```
## bitmapTransform {#bitmaptransform}
@ -285,19 +289,21 @@ Transform an array of values in a bitmap to another array of values, the result
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
- `from_array` UInt32 array. For idx in range \[0, from_array.size()), if bitmap contains from_array\[idx\], then replace it with to_array\[idx\]. Note that the result depends on array ordering if there are common elements between from_array and to_array.
- `to_array` UInt32 array, its size shall be the same to from_array.
**Example**
``` sql
SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res
SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res;
```
┌─res───────────────────┐
│ [1,3,4,6,7,8,9,10,20] │
└───────────────────────┘
``` text
┌─res───────────────────┐
│ [1,3,4,6,7,8,9,10,20] │
└───────────────────────┘
```
## bitmapAnd {#bitmapand}
@ -309,12 +315,12 @@ bitmapAnd(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res;
```
``` text
@ -333,12 +339,12 @@ bitmapOr(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res;
```
``` text
@ -357,12 +363,12 @@ bitmapXor(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res;
```
``` text
@ -381,12 +387,12 @@ bitmapAndnot(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
``` sql
SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res
SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))) AS res;
```
``` text
@ -405,7 +411,7 @@ bitmapAndCardinality(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
@ -429,7 +435,7 @@ bitmapOrCardinality(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
@ -453,7 +459,7 @@ bitmapXorCardinality(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
@ -477,7 +483,7 @@ bitmapAndnotCardinality(bitmap,bitmap)
**Arguments**
- `bitmap` bitmap object.
- `bitmap` Bitmap object.
**Example**
@ -491,4 +497,3 @@ SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])) AS res
└─────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/bitmap_functions/) <!--hide-->

View File

@ -32,4 +32,3 @@ Strings are compared by bytes. A shorter string is smaller than all strings that
## greaterOrEquals, \>= operator {#function-greaterorequals}
[Original article](https://clickhouse.tech/docs/en/query_language/functions/comparison_functions/) <!--hide-->

View File

@ -20,8 +20,8 @@ If the condition `cond` evaluates to a non-zero value, returns the result of the
**Arguments**
- `cond` The condition for evaluation that can be zero or not. The type is UInt8, Nullable(UInt8) or NULL.
- `then` - The expression to return if condition is met.
- `else` - The expression to return if condition is not met.
- `then` The expression to return if condition is met.
- `else` The expression to return if condition is not met.
**Returned values**
@ -32,7 +32,7 @@ The function executes `then` and `else` expressions and returns its result, depe
Query:
``` sql
SELECT if(1, plus(2, 2), plus(2, 6))
SELECT if(1, plus(2, 2), plus(2, 6));
```
Result:
@ -46,7 +46,7 @@ Result:
Query:
``` sql
SELECT if(0, plus(2, 2), plus(2, 6))
SELECT if(0, plus(2, 2), plus(2, 6));
```
Result:
@ -202,4 +202,3 @@ FROM LEFT_RIGHT
└──────┴───────┴──────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/conditional_functions/) <!--hide-->

View File

@ -1070,4 +1070,3 @@ Result:
└────────────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->

View File

@ -30,7 +30,7 @@ Type: `String`.
Query:
``` sql
SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello
SELECT char(104.1, 101, 108.9, 108.9, 111) AS hello;
```
Result:
@ -172,4 +172,3 @@ Accepts an integer. Returns a string containing the list of powers of two that t
Accepts an integer. Returns an array of UInt64 numbers containing the list of powers of two that total the source number when summed. Numbers in the array are in ascending order.
[Original article](https://clickhouse.tech/docs/en/query_language/functions/encoding_functions/) <!--hide-->

View File

@ -203,4 +203,3 @@ dictGet[Type]OrDefault('dict_name', 'attr_name', id_expr, default_value_expr)
ClickHouse throws an exception if it cannot parse the value of the attribute or the value doesnt match the attribute data type.
[Original article](https://clickhouse.tech/docs/en/query_language/functions/ext_dict_functions/) <!--hide-->

View File

@ -38,7 +38,7 @@ Input table
Query
``` sql
SELECT x FROM t_null WHERE isNull(y)
SELECT x FROM t_null WHERE isNull(y);
```
``` text
@ -78,7 +78,7 @@ Input table
Query
``` sql
SELECT x FROM t_null WHERE isNotNull(y)
SELECT x FROM t_null WHERE isNotNull(y);
```
``` text
@ -120,7 +120,7 @@ The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32
Get the first available contact method for the customer from the contact list:
``` sql
SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook
SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook;
```
``` text
@ -151,7 +151,7 @@ ifNull(x,alt)
**Example**
``` sql
SELECT ifNull('a', 'b')
SELECT ifNull('a', 'b');
```
``` text
@ -161,7 +161,7 @@ SELECT ifNull('a', 'b')
```
``` sql
SELECT ifNull(NULL, 'b')
SELECT ifNull(NULL, 'b');
```
``` text
@ -190,7 +190,7 @@ nullIf(x, y)
**Example**
``` sql
SELECT nullIf(1, 1)
SELECT nullIf(1, 1);
```
``` text
@ -200,7 +200,7 @@ SELECT nullIf(1, 1)
```
``` sql
SELECT nullIf(1, 2)
SELECT nullIf(1, 2);
```
``` text
@ -231,7 +231,7 @@ assumeNotNull(x)
Consider the `t_null` table.
``` sql
SHOW CREATE TABLE t_null
SHOW CREATE TABLE t_null;
```
``` text
@ -250,7 +250,7 @@ SHOW CREATE TABLE t_null
Apply the `assumeNotNull` function to the `y` column.
``` sql
SELECT assumeNotNull(y) FROM t_null
SELECT assumeNotNull(y) FROM t_null;
```
``` text
@ -261,7 +261,7 @@ SELECT assumeNotNull(y) FROM t_null
```
``` sql
SELECT toTypeName(assumeNotNull(y)) FROM t_null
SELECT toTypeName(assumeNotNull(y)) FROM t_null;
```
``` text
@ -290,7 +290,7 @@ toNullable(x)
**Example**
``` sql
SELECT toTypeName(10)
SELECT toTypeName(10);
```
``` text
@ -300,7 +300,7 @@ SELECT toTypeName(10)
```
``` sql
SELECT toTypeName(toNullable(10))
SELECT toTypeName(toNullable(10));
```
``` text
@ -309,4 +309,3 @@ SELECT toTypeName(toNullable(10))
└────────────────────────────┘
```
[Original article](https://clickhouse.tech/docs/en/query_language/functions/functions_for_nulls/) <!--hide-->

Some files were not shown because too many files have changed in this diff Show More