grabframethread.cpp

#pragma execution_character_set("utf-8")
#include "grabframethread.h"
#include <QDebug>
#include <QImage>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <vector>
#include <chrono>
#include <cmath>
#include <cassert>
#include <algorithm>
#include <calibrator.h>
int frame_count = 0;
#define CHECK(status) \
    do\
    {\
        auto ret = (status);\
        if (ret != 0)\
        {\
            std::cerr << "Cuda failure: " << ret << std::endl;\
            abort();\
        }\
    } while (0)
//static Logger gLogger;
// Build the TensorRT logger
class Logger : public ILogger
{
    void log(Severity severity, const char* msg) noexcept override
    {
        // suppress info-level messages
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
} gLogger;
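
// gLogger is handed to the TensorRT builder and runtime below. With this filter,
// only messages at kWARNING severity or more serious (warnings and errors) are
// printed; kINFO and kVERBOSE output is suppressed.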
GrabFrameThread::GrabFrameThread(QObject *parent) : QObject(parent)
{
}
void GrabFrameThread::setFrameResolution(int w, int h)
{
    qDebug() << tr("设置分辨率:%1*%2").arg(w).arg(h);
    if(!g_cap.set(cv::CAP_PROP_FRAME_WIDTH, w)){
        qDebug() << tr("设置帧宽失败");
        emit signal_ErrGrabFrameThread(2); // 2 means setting the frame resolution failed
    }
    if(!g_cap.set(cv::CAP_PROP_FRAME_HEIGHT, h)){
        qDebug() << tr("设置帧高失败");
        emit signal_ErrGrabFrameThread(2); // 2 means setting the frame resolution failed
    }
    std::cout << "size: " << g_cap.get(cv::CAP_PROP_FRAME_HEIGHT) << std::endl;
}
void GrabFrameThread::setParameter(float conf, float nms)
{
    qDebug() << tr("设置检测参数:%1*%2").arg(conf).arg(nms);
    conf_thr = conf;
    nms_thr = nms;
}
void GrabFrameThread::startDetect()
{
    qDebug() << tr("打开检测");
    detect_flag = true;
}
void GrabFrameThread::closeDetect()
{
    qDebug() << tr("关闭检测");
    detect_flag = false;
}
void GrabFrameThread::destroyEngine()
{
    qDebug() << tr("销毁engine");
    context->destroy();
    engine->destroy();
    runtime->destroy();
}
void GrabFrameThread::setfp16(bool flag)
{
    usefp16 = flag;
}
void GrabFrameThread::setint8(bool flag)
{
    useint8 = flag;
}
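
// openCamera() opens the capture device and lazily prepares the TensorRT engine:
// the engine file name is chosen from the precision flags (fp16 / int8 / fp32),
// and if that file cannot be loaded the engine is built from the ONNX model first
// and then reloaded from disk.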
void GrabFrameThread::openCamera(int camID)
{
    qDebug() << tr("打开摄像头%1").arg(camID);
    if(!g_cap.isOpened()) {
        if(!g_cap.open(camID))
        {
            qDebug() << tr("打开摄像头失败");
            emit signal_ErrGrabFrameThread(1); // 1 means opening the camera failed
        }
    } else {
        qDebug() << tr("摄像头处于打开状态");
    }
    if(usefp16)
        engine_path = "./weight_fp16.engine";
    else if(useint8)
        engine_path = "./weight_int8.engine";
    else
        engine_path = "./weight_fp32.engine";
    if(!LoadEngine(engine_path))
    {
        cout << "Build engine to " << engine_path << endl;
        get_trtengine();
        cout << "Build engine done!" << endl;
        cout << "Reload engine from " << engine_path << endl;
        LoadEngine(engine_path);
    }
}
void GrabFrameThread::closeCamera()
{
    qDebug() << tr("关闭摄像头");
    if(g_cap.isOpened())
        g_cap.release();
}
void GrabFrameThread::init()
{
    qDebug() << tr("抓帧线程初始化");
}
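
// Per-frame pipeline: grab a frame, run the detector, convert the detections to the
// tracker's bbox_t format, update the tracker (od::TrackObstacle), smooth each
// track's light color by majority vote over its recent class_history, draw the boxes
// and emit the result to the UI as a QImage. Note that the imread() calls below
// overwrite the camera frame with fixed test images, apparently for debugging.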
void GrabFrameThread::refreshFrame()
{
    // Triggered by the timeout signal of the main thread's timer: show a new frame
    cv::Mat frame;
    if(g_cap.read(frame)){
        cv::Mat readimage = cv::imread("/home/nvidia/红绿灯测试/002740.png");
        cv::resize(readimage, frame, cv::Size(frame.cols, frame.rows));
        if(detect_flag)
        {
            vector<Detection> results;
            vector<Detection> results_track;
            //=============== infer ===========
            cv::Mat testimage = cv::imread("/home/nvidia/红绿灯测试/003018.png");
            infer(testimage, results);
            od::bbox_t bbox_t_90; // convert to the tracker's box format
            vector<od::bbox_t> outs_90;
            for (int i = 0; i < results.size(); i++)
            {
                // Decide whether the traffic light is horizontal: width=(x1-x2), height=(y1-y2)
                bbox_t_90.x = results.at(i).bbox[0];
                bbox_t_90.y = results.at(i).bbox[1];
                bbox_t_90.w = results.at(i).bbox[2];
                bbox_t_90.h = results.at(i).bbox[3];
                bbox_t_90.prob = results.at(i).conf;
                bbox_t_90.obj_id = results.at(i).class_id;
                outs_90.push_back(bbox_t_90);
            }
            vector<od::TrackingBox> track_result_90;
            bool track_flag_90 = od::TrackObstacle(frame_count, trackers_90, outs_90, track_result_90);
            for(unsigned int i = 0; i < track_result_90.size(); i++)
            {
                Detection obstacle;
                obstacle.bbox[0] = track_result_90[i].box.x;
                obstacle.bbox[1] = track_result_90[i].box.y;
                obstacle.bbox[2] = track_result_90[i].box.width;
                obstacle.bbox[3] = track_result_90[i].box.height;
                // Output the color by majority vote over the last few frames of class history
                vector<int> class_history;
                class_history = track_result_90[i].class_history;
                if(class_history.size() > 0)
                {
                    vector<int> color_num(3);
                    for(int j = 0; j < class_history.size(); j++)
                    {
                        int class_id = class_history[j];
                        color_num[class_id] += 1;
                    }
                    std::vector<int>::iterator biggest = std::max_element(std::begin(color_num), std::end(color_num));
                    int maxindex = std::distance(std::begin(color_num), biggest);
                    obstacle.class_id = maxindex;
                }
                else { obstacle.class_id = track_result_90[i].class_id; }
                obstacle.conf = track_result_90[i].prob;
                results_track.push_back(obstacle);
            }
            // Draw once per frame, after all tracked boxes have been collected
            cv::resize(testimage, frame, cv::Size(frame.cols, frame.rows));
            draw_rect(frame, results_track);
            frame_count++;
        }
        QImage image = cvmat_to_qimage(frame);
        emit signal_refreshFrame(image);
    }
}
QImage GrabFrameThread::cvmat_to_qimage(const cv::Mat &img)
{
    QImage image(img.data, img.cols, img.rows, img.step, QImage::Format_RGB888);
    return image.rgbSwapped();
}
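
// Engine construction: the ONNX model at onnx_path is parsed into an explicit-batch
// network, the builder config selects FP32/FP16/INT8 (INT8 additionally needs the
// entropy calibrator and the calibration image folder), and buildEngineWithConfig()
// produces the ICudaEngine that is later serialized to disk.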
// Create the engine from the ONNX model using the parser.
ICudaEngine* GrabFrameThread::createEngine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config)
{
    INetworkDefinition* network = builder->createNetworkV2(1U); // Important: 1U (explicit batch) is required here; 0U causes problems
    IParser* parser = createParser(*network, gLogger);
    parser->parseFromFile(onnx_path.c_str(), static_cast<int32_t>(ILogger::Severity::kWARNING));
    // Report any parsing errors
    for (int32_t i = 0; i < parser->getNbErrors(); ++i) { std::cout << parser->getError(i)->desc() << std::endl; }
    std::cout << "successfully parse the onnx model" << std::endl;
    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(1 << 20);
    if(usefp16)
        config->setFlag(nvinfer1::BuilderFlag::kFP16); // enable reduced-precision computation
    else if(useint8)
    {
        std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
        assert(builder->platformHasFastInt8());
        config->setFlag(BuilderFlag::kINT8);
        Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./imagedata", "int8calib.table", INPUT_BLOB_NAME);
        config->setInt8Calibrator(calibrator);
    }
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "successfully convert onnx to engine!!! " << std::endl;
    // Clean up
    network->destroy();
    //parser->destroy();
    return engine;
}
void GrabFrameThread::APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream)
{
    // Create builder
    IBuilder* builder = createInferBuilder(gLogger);
    IBuilderConfig* config = builder->createBuilderConfig();
    // Create model to populate the network, then set the outputs and create an engine
    ICudaEngine* engine = createEngine(maxBatchSize, builder, config);
    assert(engine != nullptr);
    // Serialize the engine
    (*modelStream) = engine->serialize();
    // Close everything down
    engine->destroy();
    builder->destroy();
    config->destroy();
}
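
// get_trtengine() runs the build above, serializes the resulting engine into an
// IHostMemory blob, and writes the plan file to engine_path so later runs can simply
// deserialize it instead of rebuilding from ONNX.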
int GrabFrameThread::get_trtengine() {
    IHostMemory* modelStream{ nullptr };
    APIToModel(1, &modelStream);
    assert(modelStream != nullptr);
    std::ofstream p(engine_path, std::ios::binary);
    if (!p)
    {
        std::cerr << "could not open plan output file" << std::endl;
        return -1;
    }
    p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
    modelStream->destroy();
    return 0;
}
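
// doInference() looks up the input/output binding indices by tensor name, allocates
// the two device buffers, copies the preprocessed input to the GPU, enqueues the
// batch on a CUDA stream, copies the raw output back to the host, and releases the
// stream and buffers. The output is batchSize * OUTPUT_SIZE floats.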
void GrabFrameThread::doInference(IExecutionContext& context, float* input, float* output, int batchSize)
{
    const ICudaEngine& engine = context.getEngine();
    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];
    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    //std::cout << inputIndex << " " << outputIndex << std::endl;
    //const int inputIndex = 0;
    //const int outputIndex = 1;
    // Create GPU buffers on device
    CHECK(cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_H * INPUT_W * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
    // Create stream
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));
    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    //std::cout << buffers[outputIndex + 1] << std::endl;
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);
    // Release stream and buffers
    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}
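
// Layout produced by ProcessImage(): the BGR, interleaved (HWC) cv::Mat is resized
// to INPUT_W x INPUT_H and written out as planar RGB (CHW), normalized to [0, 1]:
//   input_data[c * INPUT_H * INPUT_W + row * INPUT_W + col]
// For example, if INPUT_W = INPUT_H = 640, the green value of pixel (row = 2, col = 5)
// lands at index 1 * 640 * 640 + 2 * 640 + 5 = 410885.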
// Pack the image into a batched input with the flat (1-D) layout that TensorRT expects
void GrabFrameThread::ProcessImage(cv::Mat image, float input_data[]) {
    // Only one image is processed here; the result is a flat array of [batch * 3 * INPUT_H * INPUT_W]
    // The code below takes a shortcut
    cv::Mat resize_img;
    cv::resize(image, resize_img, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
    std::vector<cv::Mat> InputImage;
    InputImage.push_back(resize_img);
    int ImgCount = InputImage.size();
    //float input_data[BatchSize * 3 * INPUT_H * INPUT_W];
    for (int b = 0; b < ImgCount; b++) {
        cv::Mat img = InputImage.at(b);
        int w = img.cols;
        int h = img.rows;
        int i = 0;
        for (int row = 0; row < h; ++row) {
            uchar* uc_pixel = img.data + row * img.step;
            for (int col = 0; col < INPUT_W; ++col) {
                // BGR (HWC) -> RGB (CHW), normalized to [0, 1]
                input_data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
                input_data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
                input_data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
                uc_pixel += 3;
                ++i;
            }
        }
    }
}
//********************************************** NMS code **********************************//
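// iou() computes the standard intersection-over-union of two axis-aligned boxes
// given as (x, y, w, h):
//   IoU = intersection / (area1 + area2 - intersection)
// e.g. two 10x10 boxes offset by 5 pixels in x overlap in a 5x10 region, so
// IoU = 50 / (100 + 100 - 50) = 1/3.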
float GrabFrameThread::iou(Bbox box1, Bbox box2) {
    int x1 = max(box1.x, box2.x);
    int y1 = max(box1.y, box2.y);
    int x2 = min(box1.x + box1.w, box2.x + box2.w);
    int y2 = min(box1.y + box1.h, box2.y + box2.h);
    int w = max(0, x2 - x1);
    int h = max(0, y2 - y1);
    float over_area = w * h;
    return over_area / (box1.w * box1.h + box2.w * box2.h - over_area);
}
int GrabFrameThread::get_max_index(vector<Detection> pre_detection) {
    // Return the index of the detection with the highest confidence, or -1 if empty
    int index;
    float conf;
    if (pre_detection.size() > 0) {
        index = 0;
        conf = pre_detection.at(0).conf;
        for (int i = 0; i < pre_detection.size(); i++) {
            if (conf < pre_detection.at(i).conf) {
                index = i;
                conf = pre_detection.at(i).conf;
            }
        }
        return index;
    }
    else {
        return -1;
    }
}
bool GrabFrameThread::judge_in_lst(int index, vector<int> index_lst) {
    // Return true if index is in index_lst, false otherwise
    if (index_lst.size() > 0) {
        for (int i = 0; i < index_lst.size(); i++) {
            if (index == index_lst.at(i)) {
                return true;
            }
        }
    }
    return false;
}
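
// Greedy NMS: repeatedly take the remaining detection with the highest confidence,
// keep its original index, and mark every not-yet-kept detection whose IoU with it
// exceeds iou_thr for deletion; repeat until no detections remain. The returned
// indices refer to positions in the input pre_detection vector.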
vector<int> GrabFrameThread::nms(vector<Detection> pre_detection, float iou_thr)
{
    /*
    Return the indices (into pre_detection) of the boxes that should be kept
    */
    int index;
    vector<Detection> pre_detection_new;
    //Detection det_best;
    Bbox box_best, box;
    float iou_value;
    vector<int> keep_index;
    vector<int> del_index;
    bool keep_bool;
    bool del_bool;
    if (pre_detection.size() > 0) {
        pre_detection_new.clear();
        // Index every prediction
        for (int i = 0; i < pre_detection.size(); i++) {
            pre_detection.at(i).index = i;
            pre_detection_new.push_back(pre_detection.at(i));
        }
        // Iterate to collect the kept box indices (relative to the input pre_detection)
        while (pre_detection_new.size() > 0) {
            index = get_max_index(pre_detection_new);
            if (index >= 0) {
                keep_index.push_back(pre_detection_new.at(index).index); // keep this index
                // Update the current best box
                box_best.x = pre_detection_new.at(index).bbox[0];
                box_best.y = pre_detection_new.at(index).bbox[1];
                box_best.w = pre_detection_new.at(index).bbox[2];
                box_best.h = pre_detection_new.at(index).bbox[3];
                for (int j = 0; j < pre_detection.size(); j++) {
                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
                    if ((!keep_bool) && (!del_bool)) { // only compute IoU for boxes that are neither kept nor deleted
                        box.x = pre_detection.at(j).bbox[0];
                        box.y = pre_detection.at(j).bbox[1];
                        box.w = pre_detection.at(j).bbox[2];
                        box.h = pre_detection.at(j).bbox[3];
                        iou_value = iou(box_best, box);
                        if (iou_value > iou_thr) {
                            del_index.push_back(j); // mark boxes above the IoU threshold for deletion
                        }
                    }
                }
                // Rebuild pre_detection_new from the remaining boxes
                pre_detection_new.clear();
                for (int j = 0; j < pre_detection.size(); j++) {
                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
                    if ((!keep_bool) && (!del_bool)) {
                        pre_detection_new.push_back(pre_detection.at(j));
                    }
                }
            }
        }
    }
    del_index.clear();
    del_index.shrink_to_fit();
    pre_detection_new.clear();
    pre_detection_new.shrink_to_fit();
    return keep_index;
}
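
// postprocess() decodes the flat network output. Each of the anchor_output_num rows
// holds (cls_num + 5) floats: center x, center y, w, h, an objectness score, and one
// score per class. The per-class confidence used here is objectness * class score;
// the best class is kept, rows whose objectness passes conf_thr become candidate
// detections, and NMS with nms_thr selects the final results (still in network input
// coordinates, INPUT_W x INPUT_H).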
void GrabFrameThread::postprocess(float* prob, vector<Detection> &results, float conf_thr = 0.2, float nms_thr = 0.4)
{
    /*
    This function post-processes the predictions for a single image.
    prob holds [x y w h score per-class scores]; e.g. for 80 classes the shape is (1, anchor_num, 85)
    */
    vector<Detection> pre_results;
    vector<int> nms_keep_index;
    bool keep_bool;
    Detection pre_res;
    float conf;
    int tmp_idx;
    float tmp_cls_score;
    for (int i = 0; i < anchor_output_num; i++) {
        tmp_idx = i * (cls_num + 5);
        pre_res.bbox[0] = prob[tmp_idx + 0];
        pre_res.bbox[1] = prob[tmp_idx + 1];
        pre_res.bbox[2] = prob[tmp_idx + 2];
        pre_res.bbox[3] = prob[tmp_idx + 3];
        conf = prob[tmp_idx + 4]; // objectness confidence
        tmp_cls_score = prob[tmp_idx + 5] * conf;
        pre_res.class_id = 0;
        pre_res.conf = tmp_cls_score;
        for (int j = 1; j < cls_num; j++) {
            tmp_idx = i * (cls_num + 5) + 5 + j; // index of the corresponding class score
            if (tmp_cls_score < prob[tmp_idx] * conf)
            {
                tmp_cls_score = prob[tmp_idx] * conf;
                pre_res.class_id = j;
                pre_res.conf = tmp_cls_score;
            }
        }
        if (conf >= conf_thr) {
            pre_results.push_back(pre_res);
        }
    }
    // Apply NMS
    nms_keep_index = nms(pre_results, nms_thr);
    for (int i = 0; i < pre_results.size(); i++) {
        keep_bool = judge_in_lst(i, nms_keep_index);
        if (keep_bool) {
            results.push_back(pre_results.at(i));
        }
    }
    pre_results.clear();
    pre_results.shrink_to_fit();
    nms_keep_index.clear();
    nms_keep_index.shrink_to_fit();
}
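
// draw_rect() maps boxes from network input coordinates (INPUT_W x INPUT_H) back to
// the displayed frame using ratio_w / ratio_h, converts the center-based boxes to a
// top-left cv::Rect, and draws a colored rectangle plus a confidence label per class
// (red / green / yellow light, white otherwise).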
void GrabFrameThread::draw_rect(cv::Mat &image, vector<Detection> &results) {
    /*
    image: the frame to draw on
    struct Detection {
        float bbox[4]; // center_x center_y w h
        float conf;    // confidence
        int class_id;  // class id
        int index;     // can be ignored
    };
    */
    int w1 = image.cols;
    int h1 = image.rows;
    int w2 = INPUT_W;
    int h2 = INPUT_H;
    float ratio_w = float(w1) / float(w2);
    float ratio_h = float(h1) / float(h2);
    float x;
    float y;
    float w;
    float h;
    cv::Rect rect;
    for (int i = 0; i < results.size(); i++) {
        x = results.at(i).bbox[0] * ratio_w;
        y = results.at(i).bbox[1] * ratio_h;
        w = results.at(i).bbox[2] * ratio_w;
        h = results.at(i).bbox[3] * ratio_h;
        // convert from center coordinates to the top-left corner
        x = (int)(x - w / 2);
        y = (int)(y - h / 2);
        w = (int)w;
        h = (int)h;
        string info;
        //info = "id:";
        //info.append(to_string(results.at(i).class_id));
        //info.append(classnames[results.at(i).class_id]);
        //info.append(":");
        info.append(to_string((int)(results.at(i).conf * 100)));
        info.append("%");
        rect = cv::Rect(x, y, w, h);
        if(results.at(i).class_id == 0){ // red light
            cv::rectangle(image, rect, cv::Scalar(0, 0, 255), 1, 1, 0); // both rectangle corners are included inside the rectangle
            cv::putText(image, info, cv::Point(x, y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 0, 255), 0.6, 1, false);
        }else if(results.at(i).class_id == 1){ // green light
            cv::rectangle(image, rect, cv::Scalar(0, 255, 0), 1, 1, 0);
            cv::putText(image, info, cv::Point(x, y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 0), 0.6, 1, false);
        }else if(results.at(i).class_id == 2){ // yellow light
            cv::rectangle(image, rect, cv::Scalar(0, 255, 255), 1, 1, 0);
            cv::putText(image, info, cv::Point(x, y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 255), 0.6, 1, false);
        }else{
            cv::rectangle(image, rect, cv::Scalar(255, 255, 255), 1, 1, 0);
            cv::putText(image, info, cv::Point(x, y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(255, 255, 255), 0.6, 1, false);
        }
        std::cout << classnames[results.at(i).class_id] << " " << info << std::endl;
    }
}
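
// LoadEngine() reads the serialized plan file into memory, deserializes it with the
// TensorRT runtime, creates the execution context, and allocates pinned host memory
// for the output buffer `prob`. It returns false when the file is missing or the
// engine cannot be deserialized, which triggers a rebuild in openCamera().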
bool GrabFrameThread::LoadEngine(const std::string engine_path){
    // Load the serialized engine from disk
    char* trtModelStream{ nullptr };
    size_t size{ 0 };
    std::ifstream file(engine_path, std::ios::binary);
    if(!file)
    {
        cout << engine_path << " not found!" << endl;
        return false;
    }
    if (file.good()) {
        file.seekg(0, file.end);
        size = file.tellg();
        file.seekg(0, file.beg);
        trtModelStream = new char[size];
        assert(trtModelStream);
        file.read(trtModelStream, size);
        file.close();
    }
    // Deserialize into an engine and create the execution context
    runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    engine = runtime->deserializeCudaEngine(trtModelStream, size, nullptr);
    //assert(engine != nullptr);
    if(engine == nullptr)
    {
        delete[] trtModelStream;
        return false;
    }
    context = engine->createExecutionContext();
    assert(context != nullptr);
    delete[] trtModelStream;
    // Allocate page-locked (pinned) host memory for the output
    CHECK(cudaHostAlloc((void **)&prob, OUTPUT_SIZE * sizeof(float), cudaHostAllocDefault));
    return true;
}
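
// infer() ties the pieces together for a single frame. A minimal usage sketch
// (assuming the engine has already been loaded via LoadEngine()):
//   cv::Mat frame = ...;                 // BGR image from the camera
//   std::vector<Detection> results;
//   infer(frame, results);               // preprocess -> doInference -> postprocess
//   draw_rect(frame, results);           // rescale to frame size and draw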
void GrabFrameThread::infer(cv::Mat img, vector<Detection> &results) {
    // Preprocess the image into the fixed-size network input
    auto start = std::chrono::system_clock::now(); // timing
    static float data[3 * INPUT_H * INPUT_W];
    ProcessImage(img, data);
    auto end = std::chrono::system_clock::now();
    //time_read_img = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_read_img;
    //cout << "read img time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << endl;
    // Run inference
    start = std::chrono::system_clock::now(); // timing
    //cout << "doinference" << endl;
    doInference(*context, data, prob, 1);
    end = std::chrono::system_clock::now();
    //time_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_infer;
    std::cout << "doinference: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
    postprocess(prob, results, conf_thr, nms_thr);
    //cout << "ok" << endl;
    //time_num++;
}