Merge pull request #49126 from jinjunzh/iaadeflate_upgrade_qpl_v1.1.0

Upgrade QPL to v1.1.0 + get maximum jobs from libaccel_config
Robert Schulze 2023-04-26 23:31:21 +02:00 committed by GitHub
commit af7730d285
4 changed files with 74 additions and 49 deletions
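
The core change on the ClickHouse side: DeflateQplJobHWPool no longer sizes its job pool with the hard-coded MAX_HW_JOB_NUMBER = 1024, but asks libaccel_config at startup how many job slots the configured IAA work queues actually provide, and falls back to the software codec if none are found. A minimal standalone sketch of that enumeration, using only the libaccel_config calls that appear in the diff below (getMaxHardwareJobs is a hypothetical helper name; error handling and context cleanup are omitted):

// Sketch only, not the ClickHouse code. Sums the sizes of all configured
// IAA work queues; this total is what the pool below stores in max_hw_jobs.
#include <cstdint>
#include <cstdio>
#include "libaccel_config.h"   // assumes ${LIBACCEL_SOURCE_DIR}/accfg is on the include path, as set up in the CMake change

static uint32_t getMaxHardwareJobs()
{
    accfg_ctx * ctx = nullptr;
    if (accfg_new(&ctx) != 0)
        return 0;   // no accel-config context -> caller falls back to software DeflateQpl

    uint32_t total_wq_size = 0;
    for (auto * dev = accfg_device_get_first(ctx); dev != nullptr; dev = accfg_device_get_next(dev))
        for (auto * wq = accfg_wq_get_first(dev); wq != nullptr; wq = accfg_wq_get_next(wq))
            total_wq_size += accfg_wq_get_size(wq);   // work-queue size bounds the number of descriptors (jobs) it can hold

    return total_wq_size;
}

int main()
{
    std::printf("max_hw_jobs = %u\n", getMaxHardwareJobs());
    return 0;
}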

contrib/qpl vendored

@@ -1 +1 @@
-Subproject commit d75a29d95d8a548297fce3549d21020005364dc8
+Subproject commit 0bce2b03423f6fbeb8bce66cc8be0bf558058848

contrib/qpl-cmake/CMakeLists.txt

@@ -40,9 +40,10 @@ set (LOG_HW_INIT OFF)
 set (SANITIZE_MEMORY OFF)
 set (SANITIZE_THREADS OFF)
 set (LIB_FUZZING_ENGINE OFF)
+set (DYNAMIC_LOADING_LIBACCEL_CONFIG OFF)

 function(GetLibraryVersion _content _outputVar)
-    string(REGEX MATCHALL "Qpl VERSION (.+) LANGUAGES" VERSION_REGEX "${_content}")
+    string(REGEX MATCHALL "QPL VERSION (.+) LANGUAGES" VERSION_REGEX "${_content}")
     SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
 endfunction()

@@ -240,7 +241,9 @@ add_library(core_iaa OBJECT ${HW_PATH_SRC})
 target_include_directories(core_iaa
     PRIVATE ${UUID_DIR}
     PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/include>
-    PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
+    PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-iaa/sources/include>
+    PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/include> # status.h in own_checkers.h
+    PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
     PRIVATE $<TARGET_PROPERTY:qplcore_avx512,INTERFACE_INCLUDE_DIRECTORIES>)

 target_compile_options(core_iaa
@@ -339,4 +342,7 @@ target_link_libraries(_qpl
     PRIVATE ${CMAKE_DL_LIBS})

 add_library (ch_contrib::qpl ALIAS _qpl)
-target_include_directories(_qpl SYSTEM BEFORE PUBLIC "${QPL_PROJECT_DIR}/include")
+target_include_directories(_qpl SYSTEM BEFORE
+    PUBLIC "${QPL_PROJECT_DIR}/include"
+    PUBLIC "${LIBACCEL_SOURCE_DIR}/accfg"
+    PUBLIC ${UUID_DIR})

src/Compression/CompressionCodecDeflateQpl.cpp

@@ -7,6 +7,7 @@
 #include <Parsers/ASTIdentifier.h>
 #include <Poco/Logger.h>
 #include <Common/logger_useful.h>
+#include "libaccel_config.h"

 namespace DB
 {
@@ -16,11 +17,6 @@ namespace ErrorCodes
     extern const int CANNOT_DECOMPRESS;
 }

-std::array<qpl_job *, DeflateQplJobHWPool::MAX_HW_JOB_NUMBER> DeflateQplJobHWPool::hw_job_ptr_pool;
-std::array<std::atomic_bool, DeflateQplJobHWPool::MAX_HW_JOB_NUMBER> DeflateQplJobHWPool::hw_job_ptr_locks;
-bool DeflateQplJobHWPool::job_pool_ready = false;
-std::unique_ptr<uint8_t[]> DeflateQplJobHWPool::hw_jobs_buffer;
-
 DeflateQplJobHWPool & DeflateQplJobHWPool::instance()
 {
     static DeflateQplJobHWPool pool;
@@ -28,47 +24,69 @@ DeflateQplJobHWPool & DeflateQplJobHWPool::instance()
 }

 DeflateQplJobHWPool::DeflateQplJobHWPool()
-    : random_engine(std::random_device()())
-    , distribution(0, MAX_HW_JOB_NUMBER - 1)
+    : max_hw_jobs(0)
+    , random_engine(std::random_device()())
 {
     Poco::Logger * log = &Poco::Logger::get("DeflateQplJobHWPool");
-    UInt32 job_size = 0;
     const char * qpl_version = qpl_get_library_version();

-    /// Get size required for saving a single qpl job object
-    qpl_get_job_size(qpl_path_hardware, &job_size);
-    /// Allocate entire buffer for storing all job objects
-    hw_jobs_buffer = std::make_unique<uint8_t[]>(job_size * MAX_HW_JOB_NUMBER);
-    /// Initialize pool for storing all job object pointers
-    /// Reallocate buffer by shifting address offset for each job object.
-    for (UInt32 index = 0; index < MAX_HW_JOB_NUMBER; ++index)
+    // loop all configured workqueue size to get maximum job number.
+    accfg_ctx * ctx_ptr = nullptr;
+    auto ctx_status = accfg_new(&ctx_ptr);
+    if (ctx_status == 0)
     {
-        qpl_job * qpl_job_ptr = reinterpret_cast<qpl_job *>(hw_jobs_buffer.get() + index * job_size);
-        if (auto status = qpl_init_job(qpl_path_hardware, qpl_job_ptr); status != QPL_STS_OK)
+        auto * dev_ptr = accfg_device_get_first(ctx_ptr);
+        while (dev_ptr != nullptr)
         {
+            for (auto * wq_ptr = accfg_wq_get_first(dev_ptr); wq_ptr != nullptr; wq_ptr = accfg_wq_get_next(wq_ptr))
+                max_hw_jobs += accfg_wq_get_size(wq_ptr);
+            dev_ptr = accfg_device_get_next(dev_ptr);
+        }
+    }
+    else
+    {
+        job_pool_ready = false;
+        LOG_WARNING(log, "Initialization of hardware-assisted DeflateQpl codec failed, falling back to software DeflateQpl codec. Failed to create new libaccel_config context -> status: {}, QPL Version: {}.", ctx_status, qpl_version);
+        return;
+    }
+
+    if (max_hw_jobs == 0)
+    {
+        job_pool_ready = false;
+        LOG_WARNING(log, "Initialization of hardware-assisted DeflateQpl codec failed, falling back to software DeflateQpl codec. Failed to get available workqueue size -> total_wq_size: {}, QPL Version: {}.", max_hw_jobs, qpl_version);
+        return;
+    }
+    distribution = std::uniform_int_distribution<int>(0, max_hw_jobs - 1);
+    /// Get size required for saving a single qpl job object
+    qpl_get_job_size(qpl_path_hardware, &per_job_size);
+    /// Allocate job buffer pool for storing all job objects
+    hw_jobs_buffer = std::make_unique<uint8_t[]>(per_job_size * max_hw_jobs);
+    hw_job_ptr_locks = std::make_unique<std::atomic_bool[]>(max_hw_jobs);
+    /// Initialize all job objects in job buffer pool
+    for (UInt32 index = 0; index < max_hw_jobs; ++index)
+    {
+        qpl_job * job_ptr = reinterpret_cast<qpl_job *>(hw_jobs_buffer.get() + index * per_job_size);
+        if (auto status = qpl_init_job(qpl_path_hardware, job_ptr); status != QPL_STS_OK)
+        {
             job_pool_ready = false;
-            LOG_WARNING(log, "Initialization of hardware-assisted DeflateQpl codec failed: {} , falling back to software DeflateQpl codec. Please check if Intel In-Memory Analytics Accelerator (IAA) is properly set up. QPL Version: {}.", static_cast<UInt32>(status), qpl_version);
+            LOG_WARNING(log, "Initialization of hardware-assisted DeflateQpl codec failed, falling back to software DeflateQpl codec. Failed to Initialize qpl job -> status: {}, QPL Version: {}.", static_cast<UInt32>(status), qpl_version);
             return;
         }
-        hw_job_ptr_pool[index] = qpl_job_ptr;
         unLockJob(index);
     }

     job_pool_ready = true;
-    LOG_DEBUG(log, "Hardware-assisted DeflateQpl codec is ready! QPL Version: {}",qpl_version);
+    LOG_DEBUG(log, "Hardware-assisted DeflateQpl codec is ready! QPL Version: {}, max_hw_jobs: {}",qpl_version, max_hw_jobs);
 }

 DeflateQplJobHWPool::~DeflateQplJobHWPool()
 {
-    for (UInt32 i = 0; i < MAX_HW_JOB_NUMBER; ++i)
+    for (UInt32 i = 0; i < max_hw_jobs; ++i)
     {
-        if (hw_job_ptr_pool[i])
-        {
-            while (!tryLockJob(i));
-            qpl_fini_job(hw_job_ptr_pool[i]);
-            unLockJob(i);
-            hw_job_ptr_pool[i] = nullptr;
-        }
+        qpl_job * job_ptr = reinterpret_cast<qpl_job *>(hw_jobs_buffer.get() + i * per_job_size);
+        while (!tryLockJob(i));
+        qpl_fini_job(job_ptr);
+        unLockJob(i);
     }
     job_pool_ready = false;
 }
@@ -83,14 +101,14 @@ qpl_job * DeflateQplJobHWPool::acquireJob(UInt32 & job_id)
         {
             index = distribution(random_engine);
             retry++;
-            if (retry > MAX_HW_JOB_NUMBER)
+            if (retry > max_hw_jobs)
             {
                 return nullptr;
             }
         }
-        job_id = MAX_HW_JOB_NUMBER - index;
-        assert(index < MAX_HW_JOB_NUMBER);
-        return hw_job_ptr_pool[index];
+        job_id = max_hw_jobs - index;
+        assert(index < max_hw_jobs);
+        return reinterpret_cast<qpl_job *>(hw_jobs_buffer.get() + index * per_job_size);
     }
     else
         return nullptr;
@@ -99,19 +117,19 @@ qpl_job * DeflateQplJobHWPool::acquireJob(UInt32 & job_id)

 void DeflateQplJobHWPool::releaseJob(UInt32 job_id)
 {
     if (isJobPoolReady())
-        unLockJob(MAX_HW_JOB_NUMBER - job_id);
+        unLockJob(max_hw_jobs - job_id);
 }

 bool DeflateQplJobHWPool::tryLockJob(UInt32 index)
 {
     bool expected = false;
-    assert(index < MAX_HW_JOB_NUMBER);
+    assert(index < max_hw_jobs);
     return hw_job_ptr_locks[index].compare_exchange_strong(expected, true);
 }

 void DeflateQplJobHWPool::unLockJob(UInt32 index)
 {
-    assert(index < MAX_HW_JOB_NUMBER);
+    assert(index < max_hw_jobs);
     hw_job_ptr_locks[index].store(false);
 }

src/Compression/CompressionCodecDeflateQpl.h

@@ -24,22 +24,23 @@ public:
     static DeflateQplJobHWPool & instance();

     qpl_job * acquireJob(UInt32 & job_id);
-    static void releaseJob(UInt32 job_id);
-    static const bool & isJobPoolReady() { return job_pool_ready; }
+    void releaseJob(UInt32 job_id);
+    const bool & isJobPoolReady() { return job_pool_ready; }

 private:
-    static bool tryLockJob(UInt32 index);
-    static void unLockJob(UInt32 index);
+    bool tryLockJob(UInt32 index);
+    void unLockJob(UInt32 index);
+    /// size of each job objects
+    UInt32 per_job_size;
     /// Maximum jobs running in parallel supported by IAA hardware
-    static constexpr auto MAX_HW_JOB_NUMBER = 1024;
+    UInt32 max_hw_jobs;
     /// Entire buffer for storing all job objects
-    static std::unique_ptr<uint8_t[]> hw_jobs_buffer;
-    /// Job pool for storing all job object pointers
-    static std::array<qpl_job *, MAX_HW_JOB_NUMBER> hw_job_ptr_pool;
+    std::unique_ptr<uint8_t[]> hw_jobs_buffer;
     /// Locks for accessing each job object pointers
-    static std::array<std::atomic_bool, MAX_HW_JOB_NUMBER> hw_job_ptr_locks;
-    static bool job_pool_ready;
+    std::unique_ptr<std::atomic_bool[]> hw_job_ptr_locks;
+    bool job_pool_ready;

     std::mt19937 random_engine;
     std::uniform_int_distribution<int> distribution;
 };
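
A note on the new header layout: everything that used to be static and sized by the compile-time MAX_HW_JOB_NUMBER is now a regular member sized by the run-time max_hw_jobs, job pointers are computed from hw_jobs_buffer by offset instead of being stored in hw_job_ptr_pool, and job ids are encoded as max_hw_jobs - index, so valid ids fall in 1..max_hw_jobs. A toy model of that slot bookkeeping, for illustration only (not the ClickHouse class):

#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>

// Toy model of the pool's slot bookkeeping after this change; names mirror
// the header above, but this is a standalone illustration, not the real class.
struct JobSlots
{
    uint32_t max_hw_jobs;
    std::unique_ptr<std::atomic_bool[]> locks;

    explicit JobSlots(uint32_t n) : max_hw_jobs(n), locks(std::make_unique<std::atomic_bool[]>(n))
    {
        for (uint32_t i = 0; i < max_hw_jobs; ++i)
            locks[i].store(false);                    // start with every slot unlocked, as the real pool does via unLockJob()
    }

    bool tryLock(uint32_t index)
    {
        bool expected = false;                        // CAS succeeds only if the slot was free
        return locks[index].compare_exchange_strong(expected, true);
    }

    void unlock(uint32_t index) { locks[index].store(false); }
};

int main()
{
    JobSlots slots(64);                               // e.g. the total work-queue size reported by libaccel_config
    uint32_t index = 5;
    assert(slots.tryLock(index));                     // acquire: lock one free slot
    uint32_t job_id = slots.max_hw_jobs - index;      // index 0..max_hw_jobs-1 maps to id max_hw_jobs..1
    slots.unlock(slots.max_hw_jobs - job_id);         // release: recover the index from the id
    return 0;
}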