pointpillar部署-TensorRT实现(二)
模型推理部分
模型推理主要使用TensorRT进行推理计算
1.整体过程
// Wrapper around a deserialized TensorRT engine.
// Owns the engine/execution-context pointers plus a pair of CUDA events,
// and exposes a single inference entry point (doinfer).
class TRT {
private:
// Params params_;
// CUDA events bracketing an inference call (presumably for GPU timing —
// start_ is created in the ctor; TODO confirm stop_/timing usage in the
// out-of-view implementation).
cudaEvent_t start_, stop_;
// Logger instance handed to the TensorRT runtime for diagnostic output.
Logger_pp gLogger_;
// Execution context used to launch inference; raw owning pointers
// (NOTE(review): consider smart pointers / explicit destroy in ~TRT —
// verify the destructor releases these).
nvinfer1::IExecutionContext *context_ = nullptr;
// Deserialized engine the context_ is created from.
nvinfer1::ICudaEngine *engine_ = nullptr;
// CUDA stream inference is enqueued on; defaults to the legacy default
// stream (0) unless a stream is supplied to the ctor.
cudaStream_t stream_ = 0;
public:
// Loads/deserializes the engine from `modelFile` (a ".cache" sidecar path
// is derived from it in the ctor) and records `stream` for later launches.
TRT(std::string modelFile, cudaStream_t stream = 0);
// Releases the CUDA/TensorRT resources acquired in the ctor.
~TRT(void);
// Runs one inference over the caller-provided device buffer array
// (`buffers` layout must match the engine's bindings — TODO confirm
// ordering against the implementation). Returns a status code.
int doinfer(void **buffers);
};
2.模型加载与推理
TRT::TRT(std::string modelFile, cudaStream_t stream) : stream_(stream) {
std::string modelCache = modelFile + ".cache";
std::fstream trtCache(modelCache, std::ifstream::in);
checkCudaErrors(cudaEventCreate(&start_));
checkCudaErrors(cuda