|
NVIDIA DeepStream SDK API Reference
|
7.1 Release
|
Go to the documentation of this file.
20 #ifndef __NVDSINFER_SERVER_INFER_UTILS_H__
21 #define __NVDSINFER_SERVER_INFER_UTILS_H__
/**
 * @brief Returns a printable C string for a possibly-null pointer.
 *
 * Maps a null pointer to the empty literal "" so the result can always be
 * handed to printf-style logging without a null-deref; a non-null input is
 * returned unchanged.
 *
 * @param str Possibly-null C string.
 * @return Never-null C string.
 */
inline const char*
safeStr(const char* str) {
    return (str == nullptr) ? "" : str;
}
68 inline const char*
safeStr(
const std::string& str) {
78 return !str || strlen(str) == 0;
88 return (access(path, F_OK) != -1);
111 std::string
dims2Str(
const InferDims& d);
125 bool fEqual(
float a,
float b);
137 DlLibHandle(
const std::string& path,
int mode = RTLD_LAZY);
152 const std::string&
getPath()
const {
return m_LibPath; }
159 template <
typename FuncPtr>
165 return (FuncPtr)dlsym(m_LibHandle, func);
/**
 * @brief Resolves a symbol from the loaded library by std::string name.
 *
 * Convenience overload: extracts the C string and forwards to the
 * const char* overload, which performs the actual lookup.
 *
 * @tparam FuncPtr Function-pointer type the symbol is cast to.
 * @param func Symbol name.
 * @return The resolved symbol cast to FuncPtr (null if not found,
 *         per the forwarded overload).
 */
template <typename FuncPtr>
FuncPtr symbol(const std::string& func) {
    const char* symName = func.c_str();
    return symbol<FuncPtr>(symName);
}
178 void* m_LibHandle{
nullptr};
182 const std::string m_LibPath;
193 const char*
what() const noexcept
override {
return m_Msg.c_str(); }
202 template <
typename Container>
205 typedef typename Container::value_type
T;
210 std::unique_lock<std::mutex> lock(m_Mutex);
211 m_Queue.emplace_back(std::move(data));
221 std::unique_lock<std::mutex> lock(m_Mutex);
223 lock, [
this]() {
return m_WakeupOnce || !m_Queue.empty(); });
225 m_WakeupOnce =
false;
226 InferDebug(
"GuardQueue pop end on wakeup signal");
229 assert(!m_Queue.empty());
230 T ret = std::move(*m_Queue.begin());
231 m_Queue.erase(m_Queue.begin());
239 std::unique_lock<std::mutex> lock(m_Mutex);
248 std::unique_lock<std::mutex> lock(m_Mutex);
250 m_WakeupOnce =
false;
256 std::unique_lock<std::mutex> lock(m_Mutex);
257 return m_Queue.size();
268 std::condition_variable m_Cond;
276 bool m_WakeupOnce =
false;
286 template <
typename Container>
289 using Item =
typename Container::value_type;
300 std::promise<void> p;
301 std::future<void> f = p.get_future();
302 InferDebug(
"QueueThread starting new thread");
303 m_Thread = std::thread([&p,
this]() {
315 assert(!name.empty());
317 if (m_Thread.joinable()) {
318 const int kMakLen = 16;
320 strncpy(cName, name.c_str(), kMakLen);
321 cName[kMakLen - 1] = 0;
322 if (pthread_setname_np(m_Thread.native_handle(), cName) != 0) {
326 InferDebug(
"QueueThread set new thread name:%s", cName);
336 if (m_Thread.joinable()) {
337 m_Queue.wakeupOnce();
346 m_Queue.push(std::move(item));
361 Item item = m_Queue.pop();
362 if (!m_Run(std::move(item))) {
367 catch (
const WakeupException& e) {
373 "QueueThread:%s internal unexpected error, may cause stop",
385 std::thread m_Thread;
397 GuardQueue<Container> m_Queue;
404 template <
class UniPtr>
405 class BufferPool :
public std::enable_shared_from_this<BufferPool<UniPtr> > {
420 "BufferPool: %s deleted with free buffer size:%d",
safeStr(m_Name),
421 m_FreeBuffers.size());
431 m_FreeBuffers.push(std::move(buf));
432 InferDebug(
"BufferPool: %s set buf to free, available size:%d",
433 safeStr(m_Name), m_FreeBuffers.size());
439 int size() {
return m_FreeBuffers.size(); }
452 UniPtr p = m_FreeBuffers.pop();
453 auto deleter = p.get_deleter();
454 std::weak_ptr<BufferPool<UniPtr>> poolPtr =
455 this->shared_from_this();
457 p.release(), [poolPtr, d = deleter](
ItemType* buf) {
460 auto pool = poolPtr.lock();
462 InferDebug(
"BufferPool: %s release a buffer", safeStr(pool->m_Name));
463 pool->setBuffer(std::move(data));
465 InferError(
"BufferPool is deleted, check internal error.");
469 InferDebug(
"BufferPool: %s acquired buffer, available free buffer left:%d",
470 safeStr(m_Name), m_FreeBuffers.size());
474 "BufferPool: %s acquired buffer failed, queue maybe waked up.",
485 GuardQueue<std::deque<UniPtr>> m_FreeBuffers;
489 const std::string m_Name;
492 template <
class UniPtr>
500 template<
typename Key,
typename UniqBuffer>
516 "MapBufferPool: %s deleted with buffer pool size:%d",
517 safeStr(m_Name), (
int)m_MapPool.size());
537 std::unique_lock<std::shared_timed_mutex> uniqLock(m_MapPoolMutex);
541 uint32_t
id = m_MapPool.size() - 1;
542 std::string poolName = m_Name + std::to_string(
id);
543 pool = std::make_shared<BufferPool<UniqBuffer>>(poolName);
545 InferDebug(
"MapBufferPool: %s create new pool id:%d",
551 return pool->setBuffer(std::move(buf));
576 "MapBufferPool: %s acquire buffer failed, no key found",
581 return pool->acquireBuffer();
588 std::unique_lock<std::shared_timed_mutex> uniqLock(m_MapPoolMutex);
596 SharedPool findPool(
const Key& key) {
597 std::shared_lock<std::shared_timed_mutex> sharedLock(m_MapPoolMutex);
598 auto iter = m_MapPool.find(key);
599 if (iter != m_MapPool.end()) {
600 assert(iter->second);
610 std::map<Key, SharedPool> m_MapPool;
614 std::shared_timed_mutex m_MapPoolMutex;
618 const std::string m_Name;
626 case InferDataType::kInt32:
627 case InferDataType::kUint32:
628 case InferDataType::kFp32:
630 case InferDataType::kFp16:
631 case InferDataType::kInt16:
632 case InferDataType::kUint16:
634 case InferDataType::kInt8:
635 case InferDataType::kUint8:
636 case InferDataType::kBool:
638 case InferDataType::kString:
640 case InferDataType::kFp64:
641 case InferDataType::kInt64:
642 case InferDataType::kUint64:
645 InferError(
"Failed to get element size on Unknown datatype:%d",
646 static_cast<int>(t));
659 dims.d, dims.d + dims.numDims,
660 [](
int d) { return d <= INFER_WILDCARD_DIM_VALUE; });
675 return std::accumulate(
676 dims.d, dims.d + dims.numDims, 1,
677 [](
int s,
int i) { return s * i; });
695 bool operator<=(
const InferDims& a,
const InferDims& b);
696 bool operator>(
const InferDims& a,
const InferDims& b);
697 bool operator==(
const InferDims& a,
const InferDims& b);
698 bool operator!=(
const InferDims& a,
const InferDims& b);
701 struct LayerDescription;
720 const InferBufferDescription& desc,
void* buf =
nullptr);
743 const InferDims& a,
const InferDims& b, InferDims& c);
756 std::string
joinPath(
const std::string& a,
const std::string& b);
757 std::string
dirName(
const std::string& path);
759 bool realPath(
const std::string &inPath, std::string &absPath);
778 InferDims
fullDims(
int batchSize,
const InferDims& in);
789 const InferDims& full, InferDims& debatched, uint32_t& batch);
799 bool squeezeMatch(
const InferDims& a,
const InferDims& b);
815 bool reCalcBytes =
false);
827 bool reCalcBytes =
false);
870 const std::string& configStr,
const std::string& path, std::string& updated);
This is a header file for pre-processing cuda kernels with normalization and mean subtraction required by nvdsinfer.
InferDataType
Datatype of the tensor buffer.
bool isNonBatch(T b)
Checks if the input batch size is zero.
T pop()
Pop an item from the queue.
bool intersectDims(const InferDims &a, const InferDims &b, InferDims &c)
Get the intersection of the two input dimensions.
bool operator<=(const InferDims &a, const InferDims &b)
Comparison operators for the InferDims type.
InferDataType grpcStr2DataType(const std::string &type)
bool debatchFullDims(const InferDims &full, InferDims &debatched, uint32_t &batch)
Separates batch size from given dimensions.
bool operator>(const InferDims &a, const InferDims &b)
std::shared_ptr< BaseBatchBuffer > SharedBatchBuf
Common buffer interfaces (internal).
Template class for creating a thread safe queue for the given container class.
INFER_EXPORT_API bool validateInferConfigStr(const std::string &configStr, const std::string &path, std::string &updated)
Validates the provided nvinferserver configuration string.
std::string dims2Str(const InferDims &d)
Helper functions to convert the various data types to string values for debug, log information.
bool realPath(const std::string &inPath, std::string &absPath)
NvDsInferNetworkInfo dims2ImageInfo(const InferDims &d, InferTensorOrder order)
const char * safeStr(const std::string &str)
MapBufferPool(const std::string &name)
Construct the buffer pool map with a name.
NvDsInferDims toCapi(const InferDims &dims)
Convert the InferDims to NvDsInferDims of the library interface.
Header file for the data types used in the inference processing.
typename Container::value_type Item
uint32_t getElementSize(InferDataType t)
Get the size of the element from the data type.
std::string dataType2GrpcStr(const InferDataType type)
InferTensorOrder
The type of tensor order.
bool setBuffer(UniPtr buf)
Add a buffer to the pool.
Holds the dimensions of a layer.
std::string joinPath(const std::string &a, const std::string &b)
Helper functions for parsing the configuration file.
NvDsInferLayerInfo toCapiLayerInfo(const InferBufferDescription &desc, void *buf=nullptr)
Generate NvDsInferLayerInfo of the interface from the buffer description and buffer pointer.
FuncPtr symbol(const char *func)
const char * what() const noexcept override
Header file of batch buffer related class declarations.
virtual ~BufferPool()
Destructor.
std::string dirName(const std::string &path)
Helper class for dynamic loading of custom library.
std::shared_ptr< BufferPool< UniPtr > > SharedBufPool
void wakeupOnce()
Send the wakeup trigger to the queue thread.
size_t dimsSize(const InferDims &dims)
Calculate the total number of elements for the given dimensions.
Template class for a map of buffer pools.
std::unique_ptr< ItemType, std::function< void(ItemType *)> > RecylePtr
void push(T data)
Push an item to the queue.
FuncPtr symbol(const std::string &func)
NvDsInferLogLevel
Enum for the log levels of NvDsInferContext.
bool squeezeMatch(const InferDims &a, const InferDims &b)
Check that the two dimensions are equal ignoring single element values.
NvDsInferDataType
Specifies the data type of a layer.
void dsInferLogVPrint__(NvDsInferLogLevel level, const char *fmt, va_list args)
Helper function to print the nvinferserver logs.
void clear()
Remove all pools from the map.
uint32_t getPoolSize(const Key &key)
Get the size of a pool from the map.
Header file of the common declarations for the nvinferserver library.
Template class for buffer pool of the specified buffer type.
QueueThread(RunFunc runFunc, const std::string &name)
Create a new thread that runs the specified function over the queued items in a loop.
void dsInferLogPrint__(NvDsInferLogLevel level, const char *fmt,...)
Print the nvinferserver log messages as per the configured log level.
std::string batchDims2Str(const InferBatchDims &d)
bool queueItem(Item item)
Add an item to the queue for processing.
Template class for running the specified function on the queue items in a separate thread.
Holds information about the model network.
#define InferDebug(fmt,...)
bool file_accessible(const std::string &path)
Holds information about one layer in the model.
int size()
Current size of the queue.
std::string memType2Str(InferMemType type)
Returns a string object corresponding to the InferMemType name.
~QueueThread()
Destructor.
NvDsInferDataType toCapiDataType(InferDataType dt)
Convert the InferDataType to NvDsInferDataType of the library interface.
BufferPool(const std::string &name)
Constructor.
bool isAbsolutePath(const std::string &path)
Wrapper class for handling exception.
typename BufferPool< UniqBuffer >::RecylePtr RecylePtr
#define InferWarning(fmt,...)
bool isPrivateTensor(const std::string &tensorName)
Check if the given tensor is marked as private (contains INFER_SERVER_PRIVATE_BUF in the name).
std::string dataType2Str(const InferDataType type)
std::string tensorOrder2Str(InferTensorOrder order)
const std::string & getPath() const
std::function< bool(Item)> RunFunc
std::shared_ptr< CudaStream > SharedCuStream
Cuda based pointers.
void normalizeDims(InferDims &dims)
Recalculates the total number of elements for the dimensions.
RecylePtr acquireBuffer(const Key &key)
Acquire a buffer from the selected pool.
bool isCpuMem(InferMemType type)
Check if the memory type uses CPU memory (kCpu or kCpuCuda).
#define InferError(fmt,...)
virtual ~MapBufferPool()
Destructor.
bool setBuffer(const Key &key, UniqBuffer buf)
Add a buffer to the pool map.
SharedBufPool< UniqBuffer > SharedPool
InferMemType
The memory types of inference buffers.
bool hasWildcard(const InferDims &dims)
Check if any of the InferDims dimensions are of dynamic size (-1 or negative values).
INFER_EXPORT_API const char * NvDsInferStatus2Str(NvDsInferStatus status)
Returns the NvDsInferStatus enum name as a string.
bool operator==(const InferDims &a, const InferDims &b)
int size()
Get the number of free buffers.
RecylePtr acquireBuffer()
Acquire a buffer from the pool.
bool fEqual(float a, float b)
Check if the two floating point values are equal; the difference must be less than or equal to the epsilon value.
WakeupException(const std::string &s)
void setThreadName(const std::string &name)
Set the internal (m_Name) name of the thread and system name using pthread_setname_np().
bool operator!=(const InferDims &a, const InferDims &b)
SharedBatchBuf reshapeToFullDimsBuf(const SharedBatchBuf &buf, bool reCalcBytes=false)
Reshape the buffer dimensions with batch size added as new dimension.
void clear()
Clear the queue.
SharedBatchBuf ReshapeBuf(const SharedBatchBuf &in, uint32_t batch, const InferDims &dims, bool reCalcBytes=false)
Update the buffer dimensions as per provided new dimensions.
InferDims fullDims(int batchSize, const InferDims &in)
Extend the dimensions to include batch size.
bool string_empty(const char *str)
NvDsInferStatus tensorBufferCopy(const SharedBatchBuf &in, const SharedBatchBuf &out, const SharedCuStream &stream)
Copy one tensor buffer to another.
typename UniPtr::element_type ItemType
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.