NVIDIA DeepStream SDK API Reference

7.1 Release
nvdsinfer_context_impl.h
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
4  *
5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
6  * property and proprietary rights in and to this material, related
7  * documentation and any modifications thereto. Any use, reproduction,
8  * disclosure or distribution of this material and related documentation
9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12 
13 #ifndef __NVDSINFER_CONTEXT_IMPL_H__
14 #define __NVDSINFER_CONTEXT_IMPL_H__
15 
16 #include <stdarg.h>
17 #include <condition_variable>
18 #include <functional>
19 #include <list>
20 #include <memory>
21 #include <mutex>
22 #include <queue>
23 #include <iostream>
24 #include <fstream>
25 
26 #include <NvInfer.h>
27 #include <cuda_runtime_api.h>
28 
29 #pragma GCC diagnostic push
30 #if __GNUC__ >= 8
31 #pragma GCC diagnostic ignored "-Wclass-memaccess"
32 #endif
33 #ifdef WITH_OPENCV
34 #include <opencv2/objdetect/objdetect.hpp>
35 #endif
36 #pragma GCC diagnostic pop
37 
38 #include <nvdsinfer_context.h>
39 #include <nvdsinfer_custom_impl.h>
40 #include <nvdsinfer_utils.h>
41 #include <nvdsinfer_logger.h>
42 
43 #include "nvdsinfer_backend.h"
44 
45 namespace nvdsinfer {
46 
48  std::function<void(NvDsInferLogLevel, const char* msg)>;
49 
53 typedef struct
54 {
55  std::vector<void*> m_DeviceBuffers;
56  std::vector<std::unique_ptr<CudaHostBuffer>> m_HostBuffers;
57 
58  std::vector<std::unique_ptr<CudaDeviceBuffer>> m_OutputDeviceBuffers;
59 
60  unsigned int m_BatchSize = 0;
61  std::unique_ptr<CudaEvent> m_OutputCopyDoneEvent = nullptr;
62  bool m_BuffersWithContext = true;
63 
65 
70 {
71 public:
73  const NvDsInferBatchDimsLayerInfo& layerInfo, int id = 0);
74  virtual ~InferPreprocessor() = default;
75 
77  {
78  m_LoggingFunc = func;
79  }
80  bool setScaleOffsets(float scale, const std::vector<float>& offsets = {});
81  bool setMeanFile(const std::string& file);
82  bool setInputOrder(const NvDsInferTensorOrder order);
83 
86 
88  void* devBuf, CudaStream& mainStream, CudaEvent* waitingEvent);
89 
90 private:
91  NvDsInferStatus readMeanImageFile();
92  DISABLE_CLASS_COPY(InferPreprocessor);
93 
94 private:
95  int m_UniqueID = 0;
96  NvDsInferLoggingFunc m_LoggingFunc;
97 
98  NvDsInferNetworkInfo m_NetworkInfo = {0};
100  NvDsInferFormat m_NetworkInputFormat = NvDsInferFormat_RGB;
102  NvDsInferBatchDimsLayerInfo m_NetworkInputLayer;
103  float m_Scale = 1.0f;
104  std::vector<float> m_ChannelMeans; // same as channels
105  std::string m_MeanFile;
106 
107  std::unique_ptr<CudaStream> m_PreProcessStream;
108  /* Cuda Event for synchronizing completion of pre-processing. */
109  std::shared_ptr<CudaEvent> m_PreProcessCompleteEvent;
110  std::unique_ptr<CudaDeviceBuffer> m_MeanDataBuffer;
111 };
112 
117 {
118 protected:
119  InferPostprocessor(NvDsInferNetworkType type, int id, int gpuId)
120  : m_NetworkType(type), m_UniqueID(id), m_GpuID(gpuId) {}
121 
122 public:
123  virtual ~InferPostprocessor();
124  void setDlHandle(const std::shared_ptr<DlLibHandle>& dlHandle)
125  {
126  m_CustomLibHandle = dlHandle;
127  }
129  {
130  m_NetworkInfo = info;
131  }
132  void setAllLayerInfo(std::vector<NvDsInferBatchDimsLayerInfo>& info)
133  {
134  m_AllLayerInfo.resize(info.size());
135  std::copy(info.begin(), info.end(), m_AllLayerInfo.begin());
136  }
137  void setOutputLayerInfo(std::vector<NvDsInferBatchDimsLayerInfo>& info)
138  {
139  m_OutputLayerInfo.resize(info.size());
140  std::copy(info.begin(), info.end(), m_OutputLayerInfo.begin());
141  }
143  {
144  m_LoggingFunc = func;
145  }
146  const std::vector<std::vector<std::string>>& getLabels() const
147  {
148  return m_Labels;
149  }
150  bool needInputCopy() const { return m_CopyInputToHostBuffers; }
151 
153 
155  const NvDsInferContextInitParams& initParams);
156 
157  /* Copy inference output from device to host memory. */
159  NvDsInferBatch& buffer, CudaStream& mainStream);
160 
163 
164  void freeBatchOutput(NvDsInferContextBatchOutput& batchOutput);
165 
166 private:
167  /* Parse the output of each frame in batch. */
168  virtual NvDsInferStatus parseEachBatch(
169  const std::vector<NvDsInferLayerInfo>& outputLayers,
170  NvDsInferFrameOutput& result) = 0;
171 
172 protected:
173  NvDsInferStatus parseLabelsFile(const std::string& path);
175  void releaseFrameOutput(NvDsInferFrameOutput& frameOutput);
176 
177 private:
178  DISABLE_CLASS_COPY(InferPostprocessor);
179 
180 protected:
181  /* Processor type */
183 
184  int m_UniqueID = 0;
185  uint32_t m_GpuID = 0;
187 
188  /* Custom library implementation. */
189  std::shared_ptr<DlLibHandle> m_CustomLibHandle;
192  bool m_DumpOpTensor = false;
193  std::vector<std::pair<std::string, std::string>> m_DumpOpTensorFiles;
194  bool m_OverwriteOpTensor = false;
195  std::vector<std::pair<std::string, int>> m_OverwriteOpTensorFilePairs;
196  std::vector<std::ifstream *> m_OverwriteOpTensorFiles;
197  /* Network input information. */
199  std::vector<NvDsInferLayerInfo> m_AllLayerInfo;
200  std::vector<NvDsInferLayerInfo> m_OutputLayerInfo;
201 
202  /* Holds the string labels for classes. */
203  std::vector<std::vector<std::string>> m_Labels;
204 };
205 
208 {
209 public:
210  DetectPostprocessor(int id, int gpuId = 0)
212  ~DetectPostprocessor() override = default;
213 
215  const NvDsInferContextInitParams& initParams) override;
216 
217 private:
218  NvDsInferStatus parseEachBatch(
219  const std::vector<NvDsInferLayerInfo>& outputLayers,
220  NvDsInferFrameOutput& result) override;
221 
222  bool parseBoundingBox(
223  std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
224  NvDsInferNetworkInfo const& networkInfo,
225  NvDsInferParseDetectionParams const& detectionParams,
226  std::vector<NvDsInferObjectDetectionInfo>& objectList);
227 
228  std::vector<int> nonMaximumSuppression
229  (std::vector<std::pair<float, int>>& scoreIndex,
230  std::vector<NvDsInferParseObjectInfo>& bbox,
231  const float nmsThreshold);
232  void clusterAndFillDetectionOutputNMS(NvDsInferDetectionOutput &output);
233  void clusterAndFillDetectionOutputCV(NvDsInferDetectionOutput& output);
234  void clusterAndFillDetectionOutputDBSCAN(NvDsInferDetectionOutput& output);
235  void clusterAndFillDetectionOutputHybrid(NvDsInferDetectionOutput& output);
236  void fillUnclusteredOutput(NvDsInferDetectionOutput& output);
237  NvDsInferStatus fillDetectionOutput(
238  const std::vector<NvDsInferLayerInfo>& outputLayers,
239  NvDsInferDetectionOutput& output);
240  void preClusteringThreshold(NvDsInferParseDetectionParams const &detectionParams,
241  std::vector<NvDsInferObjectDetectionInfo> &objectList);
242  void filterTopKOutputs(const int topK,
243  std::vector<NvDsInferObjectDetectionInfo> &objectList);
244 
245 private:
246  _DS_DEPRECATED_("Use m_ClusterMode instead")
247  bool m_UseDBScan = false;
248  std::shared_ptr<NvDsInferDBScan> m_DBScanHandle;
249  NvDsInferClusterMode m_ClusterMode;
250 
251  /* Number of classes detected by the model. */
252  uint32_t m_NumDetectedClasses = 0;
253 
254  /* Detection / grouping parameters. */
255  std::vector<NvDsInferDetectionParams> m_PerClassDetectionParams;
256  NvDsInferParseDetectionParams m_DetectionParams = {0, {}, {}};
257 
258  /* Vector for all parsed objects. */
259  std::vector<NvDsInferObjectDetectionInfo> m_ObjectList;
260 #ifdef WITH_OPENCV
261  /* Vector of cv::Rect vectors for each class. */
262  std::vector<std::vector<cv::Rect>> m_PerClassCvRectList;
263 #endif
264  /* Vector of NvDsInferObjectDetectionInfo vectors for each class. */
265  std::vector<std::vector<NvDsInferObjectDetectionInfo>> m_PerClassObjectList;
266 
267  NvDsInferParseCustomFunc m_CustomBBoxParseFunc = nullptr;
268 };
269 
272 {
273 public:
274  InstanceSegmentPostprocessor(int id, int gpuId = 0)
276  ~InstanceSegmentPostprocessor() override = default;
277 
279  const NvDsInferContextInitParams& initParams) override;
280 
281 private:
282  NvDsInferStatus parseEachBatch(
283  const std::vector<NvDsInferLayerInfo>& outputLayers,
284  NvDsInferFrameOutput& result) override;
285 
286  void fillUnclusteredOutput(NvDsInferDetectionOutput& output);
287  NvDsInferStatus fillDetectionOutput(
288  const std::vector<NvDsInferLayerInfo>& outputLayers,
289  NvDsInferDetectionOutput& output);
290  void preClusteringThreshold(NvDsInferParseDetectionParams const &detectionParams,
291  std::vector<NvDsInferInstanceMaskInfo> &objectList);
292  void filterTopKOutputs(const int topK,
293  std::vector<NvDsInferInstanceMaskInfo> &objectList);
294 
295 private:
296  NvDsInferClusterMode m_ClusterMode;
297 
298  /* Number of classes detected by the model. */
299  uint32_t m_NumDetectedClasses = 0;
300 
301  /* Detection / grouping parameters. */
302  std::vector<NvDsInferDetectionParams> m_PerClassDetectionParams;
303  NvDsInferParseDetectionParams m_DetectionParams = {0, {}, {}};
304 
305  /* Vector for all parsed instance masks. */
306  std::vector<NvDsInferInstanceMaskInfo> m_InstanceMaskList;
307  /* Vector of NvDsInferInstanceMaskInfo vectors for each class. */
308  std::vector<std::vector<NvDsInferInstanceMaskInfo>> m_PerClassInstanceMaskList;
309 
310  NvDsInferInstanceMaskParseCustomFunc m_CustomParseFunc = nullptr;
311 };
312 
315 {
316 public:
317  ClassifyPostprocessor(int id, int gpuId = 0)
319 
321  const NvDsInferContextInitParams& initParams) override;
322 
323 private:
324  NvDsInferStatus parseEachBatch(
325  const std::vector<NvDsInferLayerInfo>& outputLayers,
326  NvDsInferFrameOutput& result) override;
327 
328  NvDsInferStatus fillClassificationOutput(
329  const std::vector<NvDsInferLayerInfo>& outputLayers,
331 
332  bool parseAttributesFromSoftmaxLayers(
333  std::vector<NvDsInferLayerInfo> const& outputLayersInfo,
334  NvDsInferNetworkInfo const& networkInfo, float classifierThreshold,
335  std::vector<NvDsInferAttribute>& attrList, std::string& attrString);
336 
337 private:
338  float m_ClassifierThreshold = 0.0f;
339  NvDsInferClassiferParseCustomFunc m_CustomClassifierParseFunc = nullptr;
340 };
341 
344 {
345 public:
346  SegmentPostprocessor(int id, int gpuId = 0)
348 
350  const NvDsInferContextInitParams& initParams) override;
351 
352 private:
353  NvDsInferStatus parseEachBatch(
354  const std::vector<NvDsInferLayerInfo>& outputLayers,
355  NvDsInferFrameOutput& result) override;
356 
357  NvDsInferStatus fillSegmentationOutput(
358  const std::vector<NvDsInferLayerInfo>& outputLayers,
360 
361 private:
362  float m_SegmentationThreshold = 0.0f;
363  NvDsInferTensorOrder m_SegmentationOutputOrder = NvDsInferTensorOrder_kNCHW;
364 };
365 
367 {
368 public:
369  OtherPostprocessor(int id, int gpuId = 0)
371 
373  const NvDsInferContextInitParams& initParams) override;
374 
375 private:
376  NvDsInferStatus parseEachBatch(
377  const std::vector<NvDsInferLayerInfo>& outputLayers,
378  NvDsInferFrameOutput& result) override {
379  return NVDSINFER_SUCCESS;
380  }
381 };
382 
383 class BackendContext;
384 
388 class NvDsInferContextImpl : public INvDsInferContext
389 {
390 public:
395 
401  void *userCtx, NvDsInferContextLoggingFunc logFunc);
402 
403 private:
407  ~NvDsInferContextImpl() override;
408 
409  /* Implementation of the public methods of INvDsInferContext interface. */
410  NvDsInferStatus queueInputBatch(NvDsInferContextBatchInput &batchInput) override;
411  NvDsInferStatus queueInputBatchPreprocessed(NvDsInferContextBatchPreprocessedInput &batchInput) override;
412  NvDsInferStatus dequeueOutputBatch(NvDsInferContextBatchOutput &batchOutput) override;
413  void releaseBatchOutput(NvDsInferContextBatchOutput &batchOutput) override;
414  void fillLayersInfo(std::vector<NvDsInferLayerInfo> &layersInfo) override;
415  void getNetworkInfo(NvDsInferNetworkInfo &networkInfo) override;
416  const std::vector<std::vector<std::string>>& getLabels() override;
417  void destroy() override;
418 
419  /* Other private methods. */
420  NvDsInferStatus initInferenceInfo(
421  const NvDsInferContextInitParams& initParams, BackendContext& ctx);
422  NvDsInferStatus preparePreprocess(
423  const NvDsInferContextInitParams& initParams);
424  NvDsInferStatus preparePostprocess(
425  const NvDsInferContextInitParams& initParams);
426 
427  std::unique_ptr<BackendContext> generateBackendContext(
428  NvDsInferContextInitParams& initParams);
429  std::unique_ptr<BackendContext> buildModel(
430  NvDsInferContextInitParams& initParams);
431  bool deserializeEngineAndBackend(const std::string enginePath, int dla,
432  std::shared_ptr<TrtEngine>& engine,
433  std::unique_ptr<BackendContext>& backend);
434  NvDsInferStatus checkBackendParams(
435  BackendContext& ctx, const NvDsInferContextInitParams& initParams);
436 
437  NvDsInferStatus getBoundLayersInfo();
438  NvDsInferStatus resizeOutputBufferpool(uint32_t numBuffers);
439  NvDsInferStatus allocateBuffers();
440  NvDsInferStatus initNonImageInputLayers();
441 
442  /* Input layer has a binding index of 0 */
443  static const int INPUT_LAYER_INDEX = 0;
444 
447  uint32_t m_UniqueID = 0;
448  uint32_t m_GpuID = 0;
449 
450  /* Custom unique_ptrs. These TensorRT objects will get deleted automatically
451  * when the NvDsInferContext object is deleted. */
452  std::unique_ptr<BackendContext> m_BackendContext;
453  std::shared_ptr<DlLibHandle> m_CustomLibHandle;
454 
455  std::unique_ptr<InferPreprocessor> m_Preprocessor;
456  std::unique_ptr<InferPostprocessor> m_Postprocessor;
457 
458  uint32_t m_MaxBatchSize = 0;
459  /* Network input information. */
460  NvDsInferNetworkInfo m_NetworkInfo;
461 
462  /* Vectors for holding information about bound layers. */
463  std::vector<NvDsInferBatchDimsLayerInfo> m_AllLayerInfo;
464  std::vector<NvDsInferBatchDimsLayerInfo> m_OutputLayerInfo;
465  NvDsInferBatchDimsLayerInfo m_InputImageLayerInfo;
466 
467  std::vector<void *> m_BindingBuffers;
468  std::vector<std::unique_ptr<CudaDeviceBuffer>> m_InputDeviceBuffers;
469 
470  uint32_t m_OutputBufferPoolSize = NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE;
471  std::vector<std::shared_ptr<NvDsInferBatch>> m_Batches;
472  std::mutex m_BatchesMutex;
473 
474  /* Queues and synchronization members for processing multiple batches
475  * in parallel.
476  */
477  GuardQueue<std::list<NvDsInferBatch*>> m_FreeBatchQueue;
478  GuardQueue<std::list<NvDsInferBatch*>> m_ProcessBatchQueue;
479 
480  std::unique_ptr<CudaStream> m_InferStream;
481  std::unique_ptr<CudaStream> m_PostprocessStream;
482 
483  /* Cuda Event for synchronizing input consumption by TensorRT CUDA engine. */
484  std::shared_ptr<CudaEvent> m_InputConsumedEvent;
485 
486  /* Cuda Event for synchronizing infer completion by TensorRT CUDA engine. */
487  std::shared_ptr<CudaEvent> m_InferCompleteEvent;
488 
489  NvDsInferLoggingFunc m_LoggingFunc;
490 
491  bool m_Initialized = false;
492  uint32_t m_AutoIncMem = 1;
493  double m_MaxGPUMem = 99;
494  bool m_DumpIpTensor = false;
495  std::string m_DumpIpTensorFilePath = " ";
496  bool m_OverwriteIpTensor = false;
497  std::string m_OverwriteIpTensorFilePath = " ";
498  std::ifstream m_OverwriteIpTensorFile;
499 };
500 
501 }
502 
/* Core logging macro: formats a message as
 * "<tag_str> NvDsInferContextImpl::<function>() <file:line> [UID = <id>]: <fmt>"
 * and routes it to the member logging callback when one is installed,
 * otherwise prints it to stderr.
 * NOTE(review): expands `m_LoggingFunc` and `m_UniqueID` unqualified, so it
 * can only be used inside a member function of a class providing both.
 * `_MAX_STR_LENGTH` is presumably defined in an included header — not
 * visible in this view; confirm the 5x sizing against the real definition. */
503 #define printMsg(level, tag_str, fmt, ...) \
504  do { \
505  char* baseName = strrchr((char*)__FILE__, '/'); \
506  baseName = (baseName) ? (baseName + 1) : (char*)__FILE__; \
507  char logMsgBuffer[5 * _MAX_STR_LENGTH + 1]; \
508  snprintf(logMsgBuffer, 5 * _MAX_STR_LENGTH, \
509  tag_str " NvDsInferContextImpl::%s() <%s:%d> [UID = %d]: " fmt, \
510  __func__, baseName, __LINE__, m_UniqueID, ##__VA_ARGS__); \
511  if (m_LoggingFunc) { \
512  m_LoggingFunc(level, logMsgBuffer); \
513  } else { \
514  fprintf(stderr, "%s\n", logMsgBuffer); \
515  } \
516  } while (0)
517 
/* Logs at NVDSINFER_LOG_ERROR with the "Error in" tag via printMsg. */
518 #define printError(fmt, ...) \
519  do { \
520  printMsg (NVDSINFER_LOG_ERROR, "Error in", fmt, ##__VA_ARGS__); \
521  } while (0)
522 
/* Logs at NVDSINFER_LOG_WARNING with the "Warning from" tag via printMsg. */
523 #define printWarning(fmt, ...) \
524  do { \
525  printMsg (NVDSINFER_LOG_WARNING, "Warning from", fmt, ##__VA_ARGS__); \
526  } while (0)
527 
/* Logs at NVDSINFER_LOG_INFO with the "Info from" tag via printMsg. */
528 #define printInfo(fmt, ...) \
529  do { \
530  printMsg (NVDSINFER_LOG_INFO, "Info from", fmt, ##__VA_ARGS__); \
531  } while (0)
532 
/* Logs at NVDSINFER_LOG_DEBUG with the "DEBUG" tag via printMsg. */
533 #define printDebug(fmt, ...) \
534  do { \
535  printMsg (NVDSINFER_LOG_DEBUG, "DEBUG", fmt, ##__VA_ARGS__); \
536  } while (0)
537 
538 #endif
nvdsinfer::InferPostprocessor::setAllLayerInfo
void setAllLayerInfo(std::vector< NvDsInferBatchDimsLayerInfo > &info)
Definition: nvdsinfer_context_impl.h:132
nvdsinfer::DetectPostprocessor::~DetectPostprocessor
~DetectPostprocessor() override=default
nvdsinfer::InferPreprocessor::setLoggingFunc
void setLoggingFunc(const NvDsInferLoggingFunc &func)
Definition: nvdsinfer_context_impl.h:76
nvdsinfer::NvDsInferLoggingFunc
std::function< void(NvDsInferLogLevel, const char *msg)> NvDsInferLoggingFunc
Definition: nvdsinfer_context_impl.h:48
nvdsinfer::InferPreprocessor::setMeanFile
bool setMeanFile(const std::string &file)
nvdsinfer_utils.h
NvDsInferTensorOrder
NvDsInferTensorOrder
Defines UFF input layer orders.
Definition: nvdsinfer_context.h:176
nvdsinfer::GuardQueue
Definition: nvdsinfer_func_utils.h:146
NvDsInferNetworkType_Classifier
@ NvDsInferNetworkType_Classifier
Specifies a classifier.
Definition: nvdsinfer_context.h:138
nvdsinfer::DetectPostprocessor::DetectPostprocessor
DetectPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:210
nvdsinfer::InferPostprocessor::initResource
virtual NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams)
NvDsInferFormat
NvDsInferFormat
Defines color formats.
Definition: nvdsinfer_context.h:156
nvdsinfer::InferPostprocessor::setLoggingFunc
void setLoggingFunc(const NvDsInferLoggingFunc &func)
Definition: nvdsinfer_context_impl.h:142
nvdsinfer::BackendContext
Abstract interface for managing the actual inferencing implementation.
Definition: nvdsinfer_backend.h:167
nvdsinfer::InferPostprocessor::needOutputCopyB4Processing
bool needOutputCopyB4Processing() const
Definition: nvdsinfer_context_impl.h:152
nvdsinfer::InferPostprocessor::~InferPostprocessor
virtual ~InferPostprocessor()
nvdsinfer::CudaStream
Helper class for managing Cuda Streams.
Definition: nvdsinfer_backend.h:39
nvdsinfer::InferPostprocessor::m_GpuID
uint32_t m_GpuID
Definition: nvdsinfer_context_impl.h:185
nvdsinfer_backend.h
nvdsinfer::InferPostprocessor::releaseFrameOutput
void releaseFrameOutput(NvDsInferFrameOutput &frameOutput)
nvdsinfer::InferPostprocessor::m_CustomLibHandle
std::shared_ptr< DlLibHandle > m_CustomLibHandle
Definition: nvdsinfer_context_impl.h:189
nvdsinfer::InferPostprocessor::getLabels
const std::vector< std::vector< std::string > > & getLabels() const
Definition: nvdsinfer_context_impl.h:146
NvDsInferSegmentationOutput
Holds the information parsed from segmentation network output for one frame.
Definition: infer_post_datatypes.h:75
nvdsinfer::InferPostprocessor::allocDeviceResource
NvDsInferStatus allocDeviceResource()
nvdsinfer::InferPostprocessor::setDlHandle
void setDlHandle(const std::shared_ptr< DlLibHandle > &dlHandle)
Definition: nvdsinfer_context_impl.h:124
nvdsinfer::InferPostprocessor::needInputCopy
bool needInputCopy() const
Definition: nvdsinfer_context_impl.h:150
NVDSINFER_SUCCESS
@ NVDSINFER_SUCCESS
NvDsInferContext operation succeeded.
Definition: nvdsinfer.h:222
nvdsinfer::InferPostprocessor::postProcessHost
virtual NvDsInferStatus postProcessHost(NvDsInferBatch &buffer, NvDsInferContextBatchOutput &output)
nvdsinfer::InstanceSegmentPostprocessor
Implementation of post-processing class for instance segmentation networks.
Definition: nvdsinfer_context_impl.h:271
nvdsinfer::InferPostprocessor::setOutputLayerInfo
void setOutputLayerInfo(std::vector< NvDsInferBatchDimsLayerInfo > &info)
Definition: nvdsinfer_context_impl.h:137
nvdsinfer::InferPostprocessor::freeBatchOutput
void freeBatchOutput(NvDsInferContextBatchOutput &batchOutput)
nvdsinfer::InferPostprocessor::m_disableOutputHostCopy
bool m_disableOutputHostCopy
Definition: nvdsinfer_context_impl.h:191
nvdsinfer::InferPostprocessor::m_OverwriteOpTensor
bool m_OverwriteOpTensor
Definition: nvdsinfer_context_impl.h:194
nvdsinfer::InferPostprocessor
Base class for post-processing on inference output.
Definition: nvdsinfer_context_impl.h:116
nvdsinfer::InferPostprocessor::m_OverwriteOpTensorFilePairs
std::vector< std::pair< std::string, int > > m_OverwriteOpTensorFilePairs
Definition: nvdsinfer_context_impl.h:195
NvDsInferLogLevel
NvDsInferLogLevel
Enum for the log levels of NvDsInferContext.
Definition: nvdsinfer.h:251
nvdsinfer::InferPostprocessor::copyBuffersToHostMemory
virtual NvDsInferStatus copyBuffersToHostMemory(NvDsInferBatch &buffer, CudaStream &mainStream)
nvdsinfer::ClassifyPostprocessor::ClassifyPostprocessor
ClassifyPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:317
NvDsInferClassificationOutput
Holds information on all attributes classified by a classifier network for one frame.
Definition: nvdsinfer_context.h:543
NvDsInferNetworkType_Detector
@ NvDsInferNetworkType_Detector
Specifies a detector.
Definition: nvdsinfer_context.h:135
nvdsinfer
Definition: nvdsinfer_model_builder.h:41
NvDsInferParseDetectionParams
Holds the detection parameters required for parsing objects.
Definition: nvdsinfer_custom_impl.h:179
NvDsInferContextBatchInput
Holds information about one batch to be inferred.
Definition: nvdsinfer_context.h:468
nvdsinfer::DetectPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferFormat_RGB
@ NvDsInferFormat_RGB
Specifies 24-bit interleaved R-G-B format.
Definition: nvdsinfer_context.h:159
NvDsInferInstanceMaskParseCustomFunc
bool(* NvDsInferInstanceMaskParseCustomFunc)(std::vector< NvDsInferLayerInfo > const &outputLayersInfo, NvDsInferNetworkInfo const &networkInfo, NvDsInferParseDetectionParams const &detectionParams, std::vector< NvDsInferInstanceMaskInfo > &objectList)
Type definition for the custom bounding box and instance mask parsing function.
Definition: nvdsinfer_custom_impl.h:239
CudaStream
Helper class for managing Cuda Streams.
Definition: nvdspreprocess_impl.h:97
NvDsInferClusterMode
NvDsInferClusterMode
Enum for clustering mode for detectors.
Definition: nvdsinfer_context.h:228
nvdsinfer::InferPostprocessor::m_OverwriteOpTensorFiles
std::vector< std::ifstream * > m_OverwriteOpTensorFiles
Definition: nvdsinfer_context_impl.h:196
nvdsinfer::InferPostprocessor::setNetworkInfo
void setNetworkInfo(const NvDsInferNetworkInfo &info)
Definition: nvdsinfer_context_impl.h:128
nvdsinfer::NvDsInferContextImpl::NvDsInferContextImpl
NvDsInferContextImpl()
Default constructor.
NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE
#define NVDSINFER_MIN_OUTPUT_BUFFERPOOL_SIZE
Defines the minimum number of sets of output buffers that must be allocated.
Definition: nvdsinfer_context.h:111
nvdsinfer::NvDsInferBatch::m_OutputDeviceBuffers
std::vector< std::unique_ptr< CudaDeviceBuffer > > m_OutputDeviceBuffers
Definition: nvdsinfer_context_impl.h:58
nvdsinfer::SegmentPostprocessor::SegmentPostprocessor
SegmentPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:346
nvdsinfer::InferPreprocessor::transform
NvDsInferStatus transform(NvDsInferContextBatchInput &batchInput, void *devBuf, CudaStream &mainStream, CudaEvent *waitingEvent)
nvdsinfer::SegmentPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferDetectionOutput
Holds the information on all objects detected by a detector network in one frame.
Definition: infer_post_datatypes.h:43
nvdsinfer::InferPostprocessor::m_OutputLayerInfo
std::vector< NvDsInferLayerInfo > m_OutputLayerInfo
Definition: nvdsinfer_context_impl.h:200
nvdsinfer::NvDsInferBatch::m_DeviceBuffers
std::vector< void * > m_DeviceBuffers
Definition: nvdsinfer_context_impl.h:55
NvDsInferNetworkInfo
Holds information about the model network.
Definition: nvdsinfer.h:112
nvdsinfer::InferPreprocessor
Provides pre-processing functionality like mean subtraction and normalization.
Definition: nvdsinfer_context_impl.h:69
nvdsinfer::NvDsInferContextImpl
Implementation of the INvDsInferContext interface.
Definition: nvdsinfer_context_impl.h:388
nvdsinfer::NvDsInferBatch::m_HostBuffers
std::vector< std::unique_ptr< CudaHostBuffer > > m_HostBuffers
Definition: nvdsinfer_context_impl.h:56
nvdsinfer_context.h
nvdsinfer_custom_impl.h
nvdsinfer::InferPostprocessor::m_NetworkInfo
NvDsInferNetworkInfo m_NetworkInfo
Definition: nvdsinfer_context_impl.h:198
nvdsinfer::OtherPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferTensorOrder_kNCHW
@ NvDsInferTensorOrder_kNCHW
Definition: nvdsinfer_context.h:177
NvDsInferClassiferParseCustomFunc
bool(* NvDsInferClassiferParseCustomFunc)(std::vector< NvDsInferLayerInfo > const &outputLayersInfo, NvDsInferNetworkInfo const &networkInfo, float classifierThreshold, std::vector< NvDsInferAttribute > &attrList, std::string &descString)
Type definition for the custom classifier output parsing function.
Definition: nvdsinfer_custom_impl.h:270
nvdsinfer::InferPostprocessor::parseLabelsFile
NvDsInferStatus parseLabelsFile(const std::string &path)
nvdsinfer_logger.h
nvdsinfer::InferPostprocessor::m_Labels
std::vector< std::vector< std::string > > m_Labels
Definition: nvdsinfer_context_impl.h:203
nvdsinfer::InferPreprocessor::setScaleOffsets
bool setScaleOffsets(float scale, const std::vector< float > &offsets={})
NvDsInferParseCustomFunc
bool(* NvDsInferParseCustomFunc)(std::vector< NvDsInferLayerInfo > const &outputLayersInfo, NvDsInferNetworkInfo const &networkInfo, NvDsInferParseDetectionParams const &detectionParams, std::vector< NvDsInferObjectDetectionInfo > &objectList)
Type definition for the custom bounding box parsing function.
Definition: nvdsinfer_custom_impl.h:210
nvdsinfer::InstanceSegmentPostprocessor::InstanceSegmentPostprocessor
InstanceSegmentPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:274
NvDsInferNetworkType_InstanceSegmentation
@ NvDsInferNetworkType_InstanceSegmentation
Specifies an instance segmentation network.
Definition: nvdsinfer_context.h:145
_NvDsInferContextInitParams
Holds the initialization parameters required for the NvDsInferContext interface.
Definition: nvdsinfer_context.h:239
nvdsinfer::InferPostprocessor::InferPostprocessor
InferPostprocessor(NvDsInferNetworkType type, int id, int gpuId)
Definition: nvdsinfer_context_impl.h:119
nvdsinfer::InferPostprocessor::m_CopyInputToHostBuffers
bool m_CopyInputToHostBuffers
Definition: nvdsinfer_context_impl.h:190
nvdsinfer::InstanceSegmentPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
nvdsinfer::InferPostprocessor::m_NetworkType
NvDsInferNetworkType m_NetworkType
Definition: nvdsinfer_context_impl.h:182
nvdsinfer::InferPostprocessor::m_DumpOpTensorFiles
std::vector< std::pair< std::string, std::string > > m_DumpOpTensorFiles
Definition: nvdsinfer_context_impl.h:193
nvdsinfer::ClassifyPostprocessor
Implementation of post-processing class for classification networks.
Definition: nvdsinfer_context_impl.h:314
NvDsInferContextLoggingFunc
void(* NvDsInferContextLoggingFunc)(NvDsInferContextHandle handle, unsigned int uniqueID, NvDsInferLogLevel logLevel, const char *logMessage, void *userCtx)
Type declaration for a logging callback.
Definition: nvdsinfer_context.h:647
_DS_DEPRECATED_
#define _DS_DEPRECATED_(STR)
Definition: nvdsinfer.h:41
NvDsInferNetworkType_Segmentation
@ NvDsInferNetworkType_Segmentation
Specifies a segmentation network.
Definition: nvdsinfer_context.h:141
NvDsInferDetectionParams
Holds detection and bounding box grouping parameters.
Definition: nvdsinfer_context.h:192
nvdsinfer::InferPostprocessor::m_DumpOpTensor
bool m_DumpOpTensor
Definition: nvdsinfer_context_impl.h:192
NvDsInferFrameOutput
Holds the information inferred by the network on one frame.
Definition: nvdsinfer_context.h:579
nvdsinfer::InferPreprocessor::~InferPreprocessor
virtual ~InferPreprocessor()=default
nvdsinfer::OtherPostprocessor
Definition: nvdsinfer_context_impl.h:366
NvDsInferNetworkType
NvDsInferNetworkType
Defines network types.
Definition: nvdsinfer_context.h:131
NvDsInferContextBatchPreprocessedInput
Definition: nvdsinfer_context.h:486
NvDsInferContextBatchOutput
Holds the output for all of the frames in a batch (an array of frame), and related buffer information...
Definition: nvdsinfer_context.h:605
nvdsinfer::InferPostprocessor::m_UniqueID
int m_UniqueID
Definition: nvdsinfer_context_impl.h:184
nvdsinfer::ClassifyPostprocessor::initResource
NvDsInferStatus initResource(const NvDsInferContextInitParams &initParams) override
NvDsInferNetworkType_Other
@ NvDsInferNetworkType_Other
Specifies other.
Definition: nvdsinfer_context.h:150
nvdsinfer::NvDsInferContextImpl::initialize
NvDsInferStatus initialize(NvDsInferContextInitParams &initParams, void *userCtx, NvDsInferContextLoggingFunc logFunc)
Initializes the Infer engine, allocates layer buffers and other required initialization steps.
nvdsinfer::InferPreprocessor::InferPreprocessor
InferPreprocessor(const NvDsInferNetworkInfo &info, NvDsInferFormat format, const NvDsInferBatchDimsLayerInfo &layerInfo, int id=0)
nvdsinfer::DetectPostprocessor
Implementation of post-processing class for object detection networks.
Definition: nvdsinfer_context_impl.h:207
nvdsinfer::NvDsInferBatch
Holds information for one batch for processing.
Definition: nvdsinfer_context_impl.h:53
nvdsinfer::InstanceSegmentPostprocessor::~InstanceSegmentPostprocessor
~InstanceSegmentPostprocessor() override=default
nvdsinfer::OtherPostprocessor::OtherPostprocessor
OtherPostprocessor(int id, int gpuId=0)
Definition: nvdsinfer_context_impl.h:369
nvdsinfer::InferPreprocessor::setInputOrder
bool setInputOrder(const NvDsInferTensorOrder order)
nvdsinfer::InferPostprocessor::m_LoggingFunc
NvDsInferLoggingFunc m_LoggingFunc
Definition: nvdsinfer_context_impl.h:186
nvdsinfer::SegmentPostprocessor
Implementation of post-processing class for segmentation networks.
Definition: nvdsinfer_context_impl.h:343
nvdsinfer::InferPreprocessor::allocateResource
NvDsInferStatus allocateResource()
nvdsinfer::InferPostprocessor::m_AllLayerInfo
std::vector< NvDsInferLayerInfo > m_AllLayerInfo
Definition: nvdsinfer_context_impl.h:199
NvDsInferStatus
NvDsInferStatus
Enum for the status codes returned by NvDsInferContext.
Definition: nvdsinfer.h:220
nvdsinfer::InferPreprocessor::syncStream
NvDsInferStatus syncStream()