NVIDIA DeepStream SDK API Reference — 7.1 Release.
Go to the documentation of this file.
13 #ifndef __NVDSINFER_FUNC_UTILS_H__
14 #define __NVDSINFER_FUNC_UTILS_H__
21 #include <condition_variable>
25 #include <unordered_set>
28 #include <NvInferRuntime.h>
// Deletes the copy constructor and copy assignment operator for NoCopyClass.
// Intended to be placed inside the class body to make the type non-copyable;
// note the macro intentionally omits a trailing semicolon so call sites
// terminate it themselves.
35 #define DISABLE_CLASS_COPY(NoCopyClass) \
36     NoCopyClass(const NoCopyClass&) = delete; \
37     void operator=(const NoCopyClass&) = delete
39 #define SIMPLE_MOVE_COPY(Cls) \
40 Cls& operator=(Cls&& o) { \
41 move_copy(std::move(o)); \
44 Cls(Cls&& o) { move_copy(std::move(o)); }
46 #define CHECK_NVINFER_ERROR(err, action, fmt, ...) \
48 NvDsInferStatus ifStatus = (err); \
49 if (ifStatus != NVDSINFER_SUCCESS) { \
50 auto errStr = NvDsInferStatus2Str(ifStatus); \
51 dsInferError(fmt ", nvinfer error:%s", ##__VA_ARGS__, errStr); \
56 #define RETURN_NVINFER_ERROR(err, fmt, ...) \
57 CHECK_NVINFER_ERROR(err, return ifStatus, fmt, ##__VA_ARGS__)
59 #define CHECK_CUDA_ERR_W_ACTION(err, action, fmt, ...) \
61 cudaError_t errnum = (err); \
62 if (errnum != cudaSuccess) { \
63 dsInferError(fmt ", cuda err_no:%d, err_str:%s", ##__VA_ARGS__, \
64 (int)errnum, cudaGetErrorName(errnum)); \
69 #define CHECK_CUDA_ERR_NO_ACTION(err, fmt, ...) \
70 CHECK_CUDA_ERR_W_ACTION(err, , fmt, ##__VA_ARGS__)
72 #define RETURN_CUDA_ERR(err, fmt, ...) \
73 CHECK_CUDA_ERR_W_ACTION( \
74 err, return NVDSINFER_CUDA_ERROR, fmt, ##__VA_ARGS__)
// Looks up func_name in the dynamic library wrapped by `lib` (a DlLibHandle,
// see its symbol<FuncPtr>() member below), stringizing the name for the
// lookup and casting the result to func_name's own function-pointer type via
// decltype — so the caller gets a correctly-typed pointer without repeating
// the signature.
76 #define READ_SYMBOL(lib, func_name) \
77     lib->symbol<decltype(&func_name)>(#func_name)
81 inline const char*
safeStr(
const char* str)
83 return !str ?
"" : str;
86 inline const char*
safeStr(
const std::string& str)
93 return !str || strlen(str) == 0;
99 return (access(path, F_OK) != -1);
107 std::string
dims2Str(
const nvinfer1::Dims& d);
118 DlLibHandle(
const std::string& path,
int mode = RTLD_LAZY);
122 const std::string&
getPath()
const {
return m_LibPath; }
124 template <
typename FuncPtr>
130 return (FuncPtr)dlsym(m_LibHandle, func);
133 template <
typename FuncPtr>
136 return symbol<FuncPtr>(func.c_str());
140 void* m_LibHandle{
nullptr};
141 const std::string m_LibPath;
145 template <
typename Container>
149 typedef typename Container::value_type
T;
152 std::unique_lock<std::mutex> lock(m_Mutex);
153 m_Queue.push_back(data);
158 std::unique_lock<std::mutex> lock(m_Mutex);
159 m_Cond.wait(lock, [
this]() {
return !m_Queue.empty(); });
160 assert(!m_Queue.empty());
161 T ret = std::move(*m_Queue.begin());
162 m_Queue.erase(m_Queue.begin());
167 std::unique_lock<std::mutex> lock(m_Mutex);
168 return m_Queue.empty();
172 std::unique_lock<std::mutex> lock(m_Mutex);
178 std::condition_variable m_Cond;
201 "Failed to get element size on Unknown datatype:%d", (
int)t);
233 bool operator<=(
const nvinfer1::Dims& a,
const nvinfer1::Dims& b);
234 bool operator>(
const nvinfer1::Dims& a,
const nvinfer1::Dims& b);
235 bool operator==(
const nvinfer1::Dims& a,
const nvinfer1::Dims& b);
236 bool operator!=(
const nvinfer1::Dims& a,
const nvinfer1::Dims& b);
252 const nvinfer1::INetworkDefinition& network);
256 nvinfer1::DeviceType
str2DeviceType(
const std::string& deviceType);
const std::string & getPath() const
bool isValidPrecisionType(const std::string &dataType)
nvinfer1::Dims ds2TrtDims(const NvDsInferDimsCHW &dims)
bool isValidOutputFormat(const std::string &fmt)
std::string dims2Str(const nvinfer1::Dims &d)
void normalizeDims(NvDsInferDims &dims)
DlLibHandle(const std::string &path, int mode=RTLD_LAZY)
nvinfer1::Dims CombineDimsBatch(const NvDsInferDims &dims, int batch)
bool operator>(const nvinfer1::Dims &a, const nvinfer1::Dims &b)
nvinfer1::DataType str2PrecisionType(const std::string &dataType)
Holds the dimensions of a layer.
std::string networkMode2Str(const NvDsInferNetworkMode type)
#define dsInferError(fmt,...)
nvinfer1::DataType str2DataType(const std::string &dataType)
NvDsInferDataType
Specifies the data type of a layer.
NvDsInferNetworkMode
Defines internal data formats used by the inference engine.
bool operator!=(const nvinfer1::Dims &a, const nvinfer1::Dims &b)
uint32_t str2TensorFormat(const std::string &fmt)
@ FLOAT
Specifies FP32 format.
bool hasWildcard(const nvinfer1::Dims &dims)
@ HALF
Specifies FP16 format.
@ INT32
Specifies INT32 format.
@ INT8
Specifies INT8 format.
void convertFullDims(const nvinfer1::Dims &fullDims, NvDsInferBatchDims &batchDims)
Holds the dimensions of a three-dimensional layer.
bool operator==(const nvinfer1::Dims &a, const nvinfer1::Dims &b)
bool operator<=(const nvinfer1::Dims &a, const nvinfer1::Dims &b)
std::string dataType2Str(const nvinfer1::DataType type)
FuncPtr symbol(const std::string &func)
bool file_accessible(const char *path)
FuncPtr symbol(const char *func)
@ INT64
Specifies INT64 format.
nvinfer1::DeviceType str2DeviceType(const std::string &deviceType)
bool string_empty(const char *str)
const char * safeStr(const char *str)
NvDsInferDims trt2DsDims(const nvinfer1::Dims &dims)
void SplitFullDims(const nvinfer1::Dims &fullDims, NvDsInferDims &dims, int &batch)
uint32_t getElementSize(NvDsInferDataType t)
Get the size of the element from the data type.
std::string batchDims2Str(const NvDsInferBatchDims &d)
bool validateIOTensorNames(const BuildParams &params, const nvinfer1::INetworkDefinition &network)
InferDims fullDims(int batchSize, const InferDims &in)
Extend the dimensions to include batch size.
bool isValidDeviceType(const std::string &fmt)
bool isValidOutputDataType(const std::string &dataType)