NVIDIA DeepStream SDK API Reference
7.1 Release
infer_lstm.h
 1 /*
 2  * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 3  * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
 4  *
 5  * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
 6  * property and proprietary rights in and to this material, related
 7  * documentation and any modifications thereto. Any use, reproduction,
 8  * disclosure or distribution of this material and related documentation
 9  * without an express license agreement from NVIDIA CORPORATION or
10  * its affiliates is strictly prohibited.
11  */
12
13 #ifndef __INFER_LSTM_CONTROL_H__
14 #define __INFER_LSTM_CONTROL_H__
15
16 #include "infer_base_context.h"
17 #include "infer_common.h"
18 #include "infer_datatypes.h"
19 #include "infer_proto_utils.h"
20 #include "infer_utils.h"
21
22 namespace nvdsinferserver {
23
24 class LstmController {
25 public:
26     LstmController(const ic::LstmParams& params, int devId, int maxBatchSize)
27     {
28         m_Params.CopyFrom(params);
29         m_DevId = devId;
30         m_MaxBatchSize = maxBatchSize;
31     }
32     ~LstmController() = default;
33
34     NvDsInferStatus initInputState(BaseBackend& backend);
35     NvDsInferStatus feedbackInputs(SharedBatchArray& outTensors);
36     NvDsInferStatus waitAndGetInputs(SharedBatchArray& inputs);
37     void notifyError(NvDsInferStatus status);
38     void destroy()
39     {
40         UniqLock locker(m_Mutex);
41         m_InProgress = 0;
42         m_Cond.notify_all();
43         locker.unlock();
44         m_LoopStateMap.clear();
45         m_LstmInputs.clear();
46     }
47
48 private:
49     // check input/output tensor names/dims/datatype must be same
50     NvDsInferStatus checkTensorInfo(BaseBackend& backend);
51     struct LoopState {
52         std::string inputName;
53         SharedCudaTensorBuf inputTensor;
54         SharedBatchBuf outputTensor;
55         bool keepOutputParsing = false;
56     };
57
58 private:
59     ic::LstmParams m_Params;
60     int m_DevId = 0;
61     int m_MaxBatchSize = 1;
62     // map<outputName, loopState>
63     std::unordered_map<std::string, LoopState> m_LoopStateMap;
64     std::vector<SharedCudaTensorBuf> m_LstmInputs;
65     std::atomic<int32_t> m_InProgress{0};
66     std::mutex m_Mutex;
67     std::condition_variable m_Cond;
68     SharedCuEvent m_InputReadyEvent;
69     SharedCuStream m_LstmStream;
70 };
71
72 }  // namespace nvdsinferserver
73
74 #endif
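The listing above only declares the interface. As orientation, here is a minimal usage sketch, assuming a caller inside an nvinferserver inference context; the helper runSingleLstmIteration(), the placeholder inference step, and the exact call order are assumptions inferred from the declarations in this header, not the SDK's actual control flow. Only the LstmController member signatures come from the header itself.

    // Hypothetical sketch only: runSingleLstmIteration() and its call order
    // are inferred from the declarations in infer_lstm.h, not SDK internals.
    #include "infer_lstm.h"

    namespace nvdsinferserver {

    NvDsInferStatus
    runSingleLstmIteration(BaseBackend& backend, const ic::LstmParams& params,
        int devId, int maxBatchSize, SharedBatchArray& inputs,
        SharedBatchArray& outputs)
    {
        // Construction, one inference iteration, and teardown are compressed
        // into a single function here for brevity.
        LstmController lstm(params, devId, maxBatchSize);

        // Allocate and initialize the LSTM input-state tensors for this backend.
        NvDsInferStatus status = lstm.initInputState(backend);
        if (status != NVDSINFER_SUCCESS) {
            return status;
        }

        // Collect the current LSTM state inputs; this waits until any pending
        // feedback copy from the previous iteration has completed.
        status = lstm.waitAndGetInputs(inputs);
        if (status != NVDSINFER_SUCCESS) {
            lstm.notifyError(status);
            return status;
        }

        // ... run backend inference here, producing `outputs` (omitted) ...

        // Feed the matching output tensors back as the next iteration's input state.
        status = lstm.feedbackInputs(outputs);
        if (status != NVDSINFER_SUCCESS) {
            lstm.notifyError(status);
            return status;
        }

        // Teardown: wake any waiter and release the loop state and input buffers.
        lstm.destroy();
        return NVDSINFER_SUCCESS;
    }

    } // namespace nvdsinferserver

In this reading, waitAndGetInputs() gates each iteration on completion of the previous feedback copy, while feedbackInputs() routes the recurrent output tensors back as the next iteration's input state.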
Cross-references

nvdsinferserver
    Namespace for the nvinferserver low-level library. Definition: infer_custom_process.h:24
nvdsinferserver::LstmController
    Definition: infer_lstm.h:24
nvdsinferserver::LstmController::LstmController
    LstmController(const ic::LstmParams &params, int devId, int maxBatchSize). Definition: infer_lstm.h:26
nvdsinferserver::LstmController::~LstmController
    ~LstmController()=default
nvdsinferserver::LstmController::initInputState
    NvDsInferStatus initInputState(BaseBackend &backend)
nvdsinferserver::LstmController::feedbackInputs
    NvDsInferStatus feedbackInputs(SharedBatchArray &outTensors)
nvdsinferserver::LstmController::waitAndGetInputs
    NvDsInferStatus waitAndGetInputs(SharedBatchArray &inputs)
nvdsinferserver::LstmController::notifyError
    void notifyError(NvDsInferStatus status)
nvdsinferserver::LstmController::destroy
    void destroy(). Definition: infer_lstm.h:38
nvdsinferserver::BaseBackend
    Base class of inference backend processing. Definition: infer_base_backend.h:40
NvDsInferStatus
    Enum for the status codes returned by NvDsInferContext. Definition: nvdsinfer.h:220
nvdsinferserver::SharedBatchBuf
    std::shared_ptr< BaseBatchBuffer > SharedBatchBuf. Common buffer interfaces (internal). Definition: infer_common.h:71
nvdsinferserver::SharedBatchArray
    std::shared_ptr< BaseBatchArray > SharedBatchArray. Definition: infer_common.h:75
nvdsinferserver::SharedCudaTensorBuf
    std::shared_ptr< CudaTensorBuf > SharedCudaTensorBuf. Definition: infer_common.h:91
nvdsinferserver::SharedCuStream
    std::shared_ptr< CudaStream > SharedCuStream. CUDA-based pointers. Definition: infer_common.h:84
nvdsinferserver::SharedCuEvent
    std::shared_ptr< CudaEvent > SharedCuEvent. Definition: infer_common.h:86
nvdsinferserver::UniqLock
    std::unique_lock< std::mutex > UniqLock. Miscellaneous declarations. Definition: infer_common.h:108
infer_base_context.h
    Header file of the base class for inference context.
infer_common.h
    Header file of the common declarations for the nvinferserver library.
infer_datatypes.h
    Header file for the data types used in the inference processing.
infer_proto_utils.h
infer_utils.h
    Header file containing utility functions and classes used by the nvinferserver low-level library.
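The map<outputName, LoopState> member together with waitAndGetInputs()/feedbackInputs() suggests a feedback pattern: each recurrent output tensor name is paired with the input buffer that should receive its contents before the next inference call. Below is a simplified, self-contained sketch of that pattern only; the generic Buffer type, the SharedBuffer alias, and the plain host copy are stand-ins for the SDK's SharedCudaTensorBuf/SharedBatchBuf and its CUDA stream/event synchronization, and nothing here reflects the actual implementation.

    // Simplified, self-contained illustration of an output-to-input feedback
    // map. Buffer and the host copy are stand-ins for the SDK's CUDA buffer
    // types and asynchronous stream/event handling.
    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Buffer {
        std::vector<float> data;
    };
    using SharedBuffer = std::shared_ptr<Buffer>;

    struct LoopState {
        std::string inputName;     // name of the LSTM state input tensor
        SharedBuffer inputTensor;  // buffer fed to the next inference call
        SharedBuffer outputTensor; // most recent matching output buffer
    };

    // Copy each tracked output back into its paired input buffer so that the
    // next iteration starts from the updated recurrent state.
    void feedbackInputs(
        std::unordered_map<std::string, LoopState>& loopStateMap,
        const std::unordered_map<std::string, SharedBuffer>& outTensors)
    {
        for (auto& [outName, state] : loopStateMap) {
            auto it = outTensors.find(outName);
            if (it == outTensors.end() || !it->second || !state.inputTensor) {
                continue; // no matching output produced this iteration
            }
            state.outputTensor = it->second;
            state.inputTensor->data = it->second->data; // host copy for clarity
        }
    }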