NVIDIA DeepStream SDK API Reference
7.1 Release
infer_simple_runtime.h
#ifndef __NVDSINFERSERVER_SIMPLE_RUNTIME_H__
#define __NVDSINFERSERVER_SIMPLE_RUNTIME_H__

#include "infer_common.h"
#include "infer_trtis_backend.h"

namespace nvdsinferserver {

class TritonSimpleRuntime : public TrtISBackend {
public:
    TritonSimpleRuntime(std::string model, int64_t version);
    ~TritonSimpleRuntime() override;

    void setOutputs(const std::set<std::string>& names)
    {
        m_RequestOutputs = names;
    }

    NvDsInferStatus initialize() override;
    NvDsInferStatus specifyInputDims(const InputShapes& shapes) override;
    NvDsInferStatus enqueue(
        SharedBatchArray inputs, SharedCuStream stream,
        InputsConsumed bufConsumed, InferenceDone inferenceDone) override;

protected:
    void requestTritonOutputNames(std::set<std::string>& names) override;

    static SharedSysMem allocateSimpleRes(
        const std::string& tensor, size_t bytes,
        InferMemType memType, int64_t devId);
    static void releaseSimpleRes(
        const std::string& tensor, SharedSysMem mem);

private:
    std::set<std::string> m_RequestOutputs;
};

}  // namespace nvdsinferserver

#endif  // __NVDSINFERSERVER_SIMPLE_RUNTIME_H__
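The simple runtime is driven entirely through this public surface: name a model and version at construction, choose the output tensors to request with setOutputs(), then initialize() and submit work through the asynchronous enqueue(). The following is a minimal sketch of that flow, not SDK sample code: the model name "my_model", the output tensor name "output", and the buildInputs() helper are placeholders, and it assumes the model already exists in the Triton model repository.

#include "infer_simple_runtime.h"

using namespace nvdsinferserver;

// Hypothetical helper that packs one request's input tensors into a
// SharedBatchArray; not part of the SDK.
SharedBatchArray buildInputs();

NvDsInferStatus runOnce()
{
    // Placeholder model name and version from the Triton model repository.
    TritonSimpleRuntime runtime("my_model", 1);

    // Request only the output tensors of interest.
    runtime.setOutputs({"output"});

    NvDsInferStatus status = runtime.initialize();
    if (status != NVDSINFER_SUCCESS) {
        return status;
    }

    // Asynchronous submit: the first callback fires once the input
    // buffers may be recycled, the second when results are ready.
    return runtime.enqueue(
        buildInputs(), nullptr /* SharedCuStream not used in this sketch */,
        [](SharedBatchArray inputs) { /* inputs consumed */ },
        [](NvDsInferStatus st, SharedBatchArray outputs) {
            /* post-process outputs when st == NVDSINFER_SUCCESS */
        });
}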
Referenced declarations

TrtISBackend
    Triton backend processing class; base class of TritonSimpleRuntime, declared in infer_trtis_backend.h (header file of the Triton Inference Server inference backend).

const std::string& model() const
    Accessor for the configured model name, inherited from TrtISBackend.

InputShapes
    std::vector<InputShapeTuple>; the input-shape list accepted by specifyInputDims().

SharedBatchArray
    std::shared_ptr<BaseBatchArray>

SharedSysMem
    std::shared_ptr<SysMem>

SharedCuStream
    std::shared_ptr<CudaStream>; CUDA-based pointer type.

InputsConsumed
    std::function<void(SharedBatchArray)>; function wrapper called after the input buffer is consumed.

InferenceDone
    std::function<void(NvDsInferStatus, SharedBatchArray)>; function wrapper for post-inference processing.

InferMemType
    The memory types of inference buffers.

NvDsInferStatus
    Enum for the status codes returned by NvDsInferContext.

infer_common.h
    Header file of the common declarations for the nvinferserver library.
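Because enqueue() is asynchronous, the two function wrappers above split a request's lifecycle: InputsConsumed signals that the input batch is no longer needed by the backend, while InferenceDone delivers the status code together with the requested output tensors. A minimal sketch of callables matching the std::function signatures listed above (the names are illustrative, not SDK code):

#include <functional>
#include <iostream>

#include "infer_simple_runtime.h"

using namespace nvdsinferserver;

// Illustrative callables matching the two typedefs above.
std::function<void(SharedBatchArray)> onInputsConsumed =
    [](SharedBatchArray inputs) {
        // The backend has finished reading the inputs; the buffers can
        // now be returned to their pool for the next request.
    };

std::function<void(NvDsInferStatus, SharedBatchArray)> onInferenceDone =
    [](NvDsInferStatus status, SharedBatchArray outputs) {
        if (status != NVDSINFER_SUCCESS) {
            std::cerr << "inference failed: "
                      << NvDsInferStatus2Str(status) << "\n";
            return;
        }
        // `outputs` carries the tensors requested via setOutputs().
    };

Keeping the two notifications separate lets a pipeline recycle input buffers as soon as the backend has consumed them, rather than holding them until inference completes.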