NVIDIA DeepStream SDK API Reference

7.1 Release
nvdsinfer_func_utils.h
/*
 * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
 *
 * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 * property and proprietary rights in and to this material, related
 * documentation and any modifications thereto. Any use, reproduction,
 * disclosure or distribution of this material and related documentation
 * without an express license agreement from NVIDIA CORPORATION or
 * its affiliates is strictly prohibited.
 */

#ifndef __NVDSINFER_FUNC_UTILS_H__
#define __NVDSINFER_FUNC_UTILS_H__

#include <dlfcn.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_set>

#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <nvdsinfer.h>
#include <nvdsinfer_context.h>
#include <nvdsinfer_logger.h>

/* This file provides APIs/macros for some frequently used functionality. */

#define DISABLE_CLASS_COPY(NoCopyClass)       \
    NoCopyClass(const NoCopyClass&) = delete; \
    void operator=(const NoCopyClass&) = delete

#define SIMPLE_MOVE_COPY(Cls)        \
    Cls& operator=(Cls&& o) {        \
        move_copy(std::move(o));     \
        return *this;                \
    }                                \
    Cls(Cls&& o) { move_copy(std::move(o)); }

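/*
 * Usage sketch (illustrative, not part of the original header): a movable,
 * non-copyable RAII type built from the two macros above. SIMPLE_MOVE_COPY
 * expects the class to provide a move_copy(Cls&&) member that transfers the
 * state; the class name and members here are hypothetical.
 *
 *   class ScopedBuffer
 *   {
 *   public:
 *       ScopedBuffer() = default;
 *       explicit ScopedBuffer(size_t size) : m_Data(new char[size]) {}
 *       ~ScopedBuffer() { delete[] m_Data; }
 *       SIMPLE_MOVE_COPY(ScopedBuffer)
 *
 *   private:
 *       void move_copy(ScopedBuffer&& o)
 *       {
 *           delete[] m_Data;
 *           m_Data = o.m_Data;
 *           o.m_Data = nullptr;
 *       }
 *       DISABLE_CLASS_COPY(ScopedBuffer);
 *
 *       char* m_Data{nullptr};
 *   };
 */
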
#define CHECK_NVINFER_ERROR(err, action, fmt, ...)                          \
    do {                                                                    \
        NvDsInferStatus ifStatus = (err);                                   \
        if (ifStatus != NVDSINFER_SUCCESS) {                                \
            auto errStr = NvDsInferStatus2Str(ifStatus);                    \
            dsInferError(fmt ", nvinfer error:%s", ##__VA_ARGS__, errStr);  \
            action;                                                         \
        }                                                                   \
    } while (0)

#define RETURN_NVINFER_ERROR(err, fmt, ...) \
    CHECK_NVINFER_ERROR(err, return ifStatus, fmt, ##__VA_ARGS__)

#define CHECK_CUDA_ERR_W_ACTION(err, action, fmt, ...)                          \
    do {                                                                        \
        cudaError_t errnum = (err);                                             \
        if (errnum != cudaSuccess) {                                            \
            dsInferError(fmt ", cuda err_no:%d, err_str:%s", ##__VA_ARGS__,     \
                (int)errnum, cudaGetErrorName(errnum));                         \
            action;                                                             \
        }                                                                       \
    } while (0)

#define CHECK_CUDA_ERR_NO_ACTION(err, fmt, ...) \
    CHECK_CUDA_ERR_W_ACTION(err, , fmt, ##__VA_ARGS__)

#define RETURN_CUDA_ERR(err, fmt, ...) \
    CHECK_CUDA_ERR_W_ACTION(           \
        err, return NVDSINFER_CUDA_ERROR, fmt, ##__VA_ARGS__)

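/*
 * Usage sketch (illustrative, not part of the original header): RETURN_CUDA_ERR
 * logs through dsInferError() and returns NVDSINFER_CUDA_ERROR from the
 * enclosing function when the wrapped CUDA call fails. Assumes the CUDA
 * runtime API (<cuda_runtime_api.h>) is available; the function below is
 * hypothetical.
 *
 *   static NvDsInferStatus
 *   copyToDevice(void* dst, const void* src, size_t bytes, cudaStream_t stream)
 *   {
 *       RETURN_CUDA_ERR(
 *           cudaMemcpyAsync(dst, src, bytes, cudaMemcpyHostToDevice, stream),
 *           "failed to copy %zu input bytes to device", bytes);
 *       return NVDSINFER_SUCCESS;
 *   }
 *
 * RETURN_NVINFER_ERROR works the same way for expressions that evaluate to
 * NvDsInferStatus, returning the failing status (ifStatus) to the caller.
 */
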
#define READ_SYMBOL(lib, func_name) \
    lib->symbol<decltype(&func_name)>(#func_name)

namespace nvdsinfer {

inline const char* safeStr(const char* str)
{
    return !str ? "" : str;
}

inline const char* safeStr(const std::string& str)
{
    return str.c_str();
}

inline bool string_empty(const char* str)
{
    return !str || strlen(str) == 0;
}

inline bool file_accessible(const char* path)
{
    assert(path);
    return (access(path, F_OK) != -1);
}

inline bool file_accessible(const std::string& path)
{
    return (!path.empty()) && file_accessible(path.c_str());
}

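/*
 * Usage sketch (illustrative, not part of the original header): the helpers
 * above make logging and path validation null-safe. The function name is
 * hypothetical.
 *
 *   NvDsInferStatus checkModelPath(const char* path)
 *   {
 *       if (string_empty(path) || !file_accessible(path)) {
 *           dsInferError("model file '%s' is missing or unreadable", safeStr(path));
 *           return NVDSINFER_CONFIG_FAILED;
 *       }
 *       return NVDSINFER_SUCCESS;
 *   }
 */
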
std::string dims2Str(const nvinfer1::Dims& d);
std::string dims2Str(const NvDsInferDims& d);
std::string batchDims2Str(const NvDsInferBatchDims& d);

std::string dataType2Str(const nvinfer1::DataType type);
std::string dataType2Str(const NvDsInferDataType type);
std::string networkMode2Str(const NvDsInferNetworkMode type);

class DlLibHandle
{
public:
    DlLibHandle(const std::string& path, int mode = RTLD_LAZY);
    ~DlLibHandle();

    bool isValid() const { return m_LibHandle; }
    const std::string& getPath() const { return m_LibPath; }

    template <typename FuncPtr>
    FuncPtr symbol(const char* func)
    {
        assert(!string_empty(func));
        if (!m_LibHandle)
            return nullptr;
        return (FuncPtr)dlsym(m_LibHandle, func);
    }

    template <typename FuncPtr>
    FuncPtr symbol(const std::string& func)
    {
        return symbol<FuncPtr>(func.c_str());
    }

private:
    void* m_LibHandle{nullptr};
    const std::string m_LibPath;
};

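/*
 * Usage sketch (illustrative, not part of the original header): DlLibHandle
 * wraps dlopen()/dlsym(), and READ_SYMBOL (defined above) resolves a symbol
 * with the matching function-pointer type via decltype. Requires <memory>;
 * the library path and the CustomParseFunc symbol are hypothetical.
 *
 *   extern "C" bool CustomParseFunc(const float* data, size_t count);
 *
 *   bool runCustomParser(const float* data, size_t count)
 *   {
 *       std::unique_ptr<DlLibHandle> lib(
 *           new DlLibHandle("/path/to/libcustom_parser.so"));
 *       if (!lib->isValid())
 *           return false;
 *       auto parseFn = READ_SYMBOL(lib, CustomParseFunc);
 *       return parseFn ? parseFn(data, count) : false;
 *   }
 */
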
template <typename Container>
class GuardQueue
{
public:
    typedef typename Container::value_type T;
    void push(const T& data)
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Queue.push_back(data);
        m_Cond.notify_one();
    }
    T pop()
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Cond.wait(lock, [this]() { return !m_Queue.empty(); });
        assert(!m_Queue.empty());
        T ret = std::move(*m_Queue.begin());
        m_Queue.erase(m_Queue.begin());
        return ret;
    }
    bool isEmpty()
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        return m_Queue.empty();
    }
    void clear()
    {
        std::unique_lock<std::mutex> lock(m_Mutex);
        m_Queue.clear();
    }

private:
    std::mutex m_Mutex;
    std::condition_variable m_Cond;
    Container m_Queue;
};

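/*
 * Usage sketch (illustrative, not part of the original header): GuardQueue is
 * a minimal thread-safe queue over any container with a std::deque/std::list
 * style interface; pop() blocks until an element is available. Requires
 * <deque> and <thread> for this example.
 *
 *   GuardQueue<std::deque<int>> jobs;
 *
 *   std::thread producer([&jobs]() {
 *       for (int i = 0; i < 8; ++i)
 *           jobs.push(i);
 *   });
 *   std::thread consumer([&jobs]() {
 *       for (int i = 0; i < 8; ++i) {
 *           int job = jobs.pop();  // blocks until the producer pushes
 *           (void)job;
 *       }
 *   });
 *   producer.join();
 *   consumer.join();
 */
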
/**
 * Get the size of the element from the data type.
 */
inline uint32_t
getElementSize(NvDsInferDataType t)
{
    switch (t)
    {
        case INT64:
            return 8;
        case INT32:
        case FLOAT:
            return 4;
        case HALF:
            return 2;
        case INT8:
            return 1;
        default:
            dsInferError(
                "Failed to get element size on Unknown datatype:%d", (int)t);
            return 0;
    }
}

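/*
 * Usage sketch (illustrative, not part of the original header): sizing a
 * layer buffer from its dims and data type, assuming dims.numElements has
 * already been populated (e.g. via normalizeDims() declared below). The
 * helper function is hypothetical.
 *
 *   size_t layerBytes(const NvDsInferDims& dims, NvDsInferDataType type, int batch)
 *   {
 *       return (size_t)batch * dims.numElements * getElementSize(type);
 *   }
 *
 * For example, a FLOAT 3x368x640 layer with batch size 4 needs
 * 4 * 706560 * 4 = 11304960 bytes.
 */
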
/* Convert between TRT's nvinfer1::Dims representation and DeepStream's
 * NvDsInferDimsCHW/NvDsInferDims representation. */
nvinfer1::Dims ds2TrtDims(const NvDsInferDimsCHW& dims);
nvinfer1::Dims ds2TrtDims(const NvDsInferDims& dims);
NvDsInferDims trt2DsDims(const nvinfer1::Dims& dims);

/* Add batch size to provided dims to get full dims as nvinfer1::Dims. */
nvinfer1::Dims CombineDimsBatch(const NvDsInferDims& dims, int batch);
/* Split full dims provided in the form of nvinfer1::Dims into batch size and
 * layer dims. */
void SplitFullDims(
    const nvinfer1::Dims& fullDims, NvDsInferDims& dims, int& batch);

/* Convert from TRT's nvinfer1::Dims representation to DeepStream's
 * NvDsInferBatchDims representation. */
inline void
convertFullDims(const nvinfer1::Dims& fullDims, NvDsInferBatchDims& batchDims)
{
    SplitFullDims(fullDims, batchDims.dims, batchDims.batchSize);
}

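/*
 * Usage sketch (illustrative, not part of the original header): TensorRT full
 * dims carry the batch size as the leading dimension, while DeepStream keeps
 * it separate in NvDsInferBatchDims. The dimension values are hypothetical.
 *
 *   nvinfer1::Dims fullDims;
 *   fullDims.nbDims = 4;
 *   fullDims.d[0] = 4;      // batch
 *   fullDims.d[1] = 3;      // channels
 *   fullDims.d[2] = 368;    // height
 *   fullDims.d[3] = 640;    // width
 *
 *   NvDsInferBatchDims batchDims;
 *   convertFullDims(fullDims, batchDims);
 *   // batchDims.batchSize == 4, batchDims.dims holds {3, 368, 640}
 *
 *   nvinfer1::Dims rebuilt = CombineDimsBatch(batchDims.dims, 4);
 *   // rebuilt is the 4-dimensional {4, 3, 368, 640} again
 */
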
void normalizeDims(NvDsInferDims& dims);

bool hasWildcard(const nvinfer1::Dims& dims);
bool hasWildcard(const NvDsInferDims& dims);

/* Equality / inequality operators implementation for nvinfer1::Dims */
bool operator<=(const nvinfer1::Dims& a, const nvinfer1::Dims& b);
bool operator>(const nvinfer1::Dims& a, const nvinfer1::Dims& b);
bool operator==(const nvinfer1::Dims& a, const nvinfer1::Dims& b);
bool operator!=(const nvinfer1::Dims& a, const nvinfer1::Dims& b);

/* Equality / inequality operators implementation for NvDsInferDims */
bool operator<=(const NvDsInferDims& a, const NvDsInferDims& b);
bool operator>(const NvDsInferDims& a, const NvDsInferDims& b);
bool operator==(const NvDsInferDims& a, const NvDsInferDims& b);
bool operator!=(const NvDsInferDims& a, const NvDsInferDims& b);

bool isValidOutputFormat(const std::string& fmt);
bool isValidOutputDataType(const std::string& dataType);
nvinfer1::DataType str2DataType(const std::string& dataType);
uint32_t str2TensorFormat(const std::string& fmt);

struct BuildParams;
bool validateIOTensorNames(const BuildParams& params,
    const nvinfer1::INetworkDefinition& network);
bool isValidDeviceType(const std::string& fmt);
bool isValidPrecisionType(const std::string& dataType);
nvinfer1::DataType str2PrecisionType(const std::string& dataType);
nvinfer1::DeviceType str2DeviceType(const std::string& deviceType);

} // namespace nvdsinfer

#endif