// main.cpp — TensorRT YOLOv3 camera-detection node
// C++ standard library
#include <cassert>
#include <chrono>
#include <cstring>
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
// Qt
#include <QFile>
#include <QTextStream>
// OpenCV
#include <opencv2/opencv.hpp>
//txs
#include "opencv2/video/tracking.hpp"
#include "opencv2/highgui/highgui.hpp"
// project
#include "cameraobjectarray.pb.h"
//txs
#include "TrtNet.h"
#include "argsParser.h"
#include "configs.h"
#include "YoloLayer.h"
#include "dataReader.h"
#include "eval.h"
#include "imageBuffer.h"
#include "modulecomm.h"
#include "xmlparam.h"
#include "rawpic.pb.h"
#include "localization.h"
using namespace std;
using namespace argsParser;
using namespace Tn;
using namespace Yolo;
//txs
using namespace cv;
// Last detection result holder (not referenced in this file — presumably
// legacy; TODO confirm no external references before removing).
iv::vision::cameraobjectarray gobj;
int gntemp = 0;      // scratch counter (unused in this file)
void * gpa;          // modulecomm handle (unused in this file)
void * gpcamout;     // modulecomm send handle for the "cameraoutput" channel
void * gdetec;       // modulecomm send handle for the "camera_detect" channel
QTime gTime;         // timing helper (unused in this file)
//txs
// Single-slot producer/consumer queue connecting the Listenpic() receive
// callback (producer thread) to the inference loop in main() (consumer).
// NOTE(review): second ctor arg `true` presumably enables drop-on-full —
// confirm against imageBuffer.h.
ConsumerProducerQueue<cv::Mat> *imageBuffer = new ConsumerProducerQueue<cv::Mat>(1, true);
  36. static vector<string> GetNamesFromFile(string filePathAndName)
  37. {
  38. vector<string> retVec;
  39. QFile qf(QString::fromStdString(filePathAndName));
  40. if(qf.exists())
  41. {
  42. if(qf.open(QIODevice::ReadOnly))
  43. {
  44. QTextStream in(&qf);
  45. while(!in.atEnd())
  46. {
  47. retVec.push_back(in.readLine(1024).toStdString());
  48. }
  49. qf.close();
  50. }
  51. }
  52. return retVec;
  53. }
  54. vector<float> prepareImage(cv::Mat& img)
  55. {
  56. using namespace cv;
  57. int c = parser::getIntValue("C");
  58. int h = parser::getIntValue("H"); //net h
  59. int w = parser::getIntValue("W"); //net w
  60. float scale = min(float(w)/img.cols,float(h)/img.rows);
  61. auto scaleSize = cv::Size(img.cols * scale,img.rows * scale);
  62. cv::Mat rgb ;
  63. cv::cvtColor(img, rgb, CV_BGR2RGB);
  64. cv::Mat resized;
  65. cv::resize(rgb, resized,scaleSize,0,0,INTER_CUBIC);
  66. cv::Mat cropped(h, w,CV_8UC3, 127);
  67. Rect rect((w- scaleSize.width)/2, (h-scaleSize.height)/2, scaleSize.width,scaleSize.height);
  68. resized.copyTo(cropped(rect));
  69. cv::Mat img_float;
  70. if (c == 3)
  71. cropped.convertTo(img_float, CV_32FC3, 1/255.0);
  72. else
  73. cropped.convertTo(img_float, CV_32FC1 ,1/255.0);
  74. //HWC TO CHW
  75. vector<Mat> input_channels(c);
  76. cv::split(img_float, input_channels);
  77. vector<float> result(h*w*c);
  78. auto data = result.data();
  79. int channelLength = h * w;
  80. for (int i = 0; i < c; ++i) {
  81. memcpy(data,input_channels[i].data,channelLength*sizeof(float));
  82. data += channelLength;
  83. }
  84. return result;
  85. }
  86. void DoNms(vector<Detection>& detections,int classes ,float nmsThresh)
  87. {
  88. auto t_start = chrono::high_resolution_clock::now();
  89. vector<vector<Detection>> resClass;
  90. resClass.resize(classes);
  91. for (const auto& item : detections)
  92. resClass[item.classId].push_back(item);
  93. auto iouCompute = [](float * lbox, float* rbox)
  94. {
  95. float interBox[] = {
  96. max(lbox[0] - lbox[2]/2.f , rbox[0] - rbox[2]/2.f), //left
  97. min(lbox[0] + lbox[2]/2.f , rbox[0] + rbox[2]/2.f), //right
  98. max(lbox[1] - lbox[3]/2.f , rbox[1] - rbox[3]/2.f), //top
  99. min(lbox[1] + lbox[3]/2.f , rbox[1] + rbox[3]/2.f), //bottom
  100. };
  101. if(interBox[2] > interBox[3] || interBox[0] > interBox[1])
  102. return 0.0f;
  103. float interBoxS =(interBox[1]-interBox[0])*(interBox[3]-interBox[2]);
  104. return interBoxS/(lbox[2]*lbox[3] + rbox[2]*rbox[3] -interBoxS);
  105. };
  106. vector<Detection> result;
  107. for (int i = 0;i<classes;++i)
  108. {
  109. auto& dets =resClass[i];
  110. if(dets.size() == 0)
  111. continue;
  112. sort(dets.begin(),dets.end(),[=](const Detection& left,const Detection& right){
  113. return left.prob > right.prob;
  114. });
  115. for (unsigned int m = 0;m < dets.size() ; ++m)
  116. {
  117. auto& item = dets[m];
  118. result.push_back(item);
  119. for(unsigned int n = m + 1;n < dets.size() ; ++n)
  120. {
  121. if (iouCompute(item.bbox,dets[n].bbox) > nmsThresh)
  122. {
  123. dets.erase(dets.begin()+n);
  124. --n;
  125. }
  126. }
  127. }
  128. }
  129. //swap(detections,result);
  130. detections = move(result);
  131. auto t_end = chrono::high_resolution_clock::now();
  132. float total = chrono::duration<float, milli>(t_end - t_start).count();
  133. mTensorIvlog->info("Time taken for nms is %f ms.", total);
  134. // cout << "Time taken for nms is " << total << " ms." << endl;
  135. }
  136. vector<Bbox> postProcessImg(cv::Mat& img,vector<Detection>& detections,int classes)
  137. {
  138. using namespace cv;
  139. int h = parser::getIntValue("H"); //net h
  140. int w = parser::getIntValue("W"); //net w
  141. //scale bbox to img
  142. int width = img.cols;
  143. int height = img.rows;
  144. float scale = min(float(w)/width,float(h)/height);
  145. float scaleSize[] = {width * scale,height * scale};
  146. //correct box
  147. for (auto& item : detections)
  148. {
  149. auto& bbox = item.bbox;
  150. bbox[0] = (bbox[0] * w - (w - scaleSize[0])/2.f) / scaleSize[0];
  151. bbox[1] = (bbox[1] * h - (h - scaleSize[1])/2.f) / scaleSize[1];
  152. bbox[2] /= scaleSize[0];
  153. bbox[3] /= scaleSize[1];
  154. }
  155. //nms
  156. float nmsThresh = parser::getFloatValue("nms");
  157. if(nmsThresh > 0)
  158. DoNms(detections,classes,nmsThresh);
  159. vector<Bbox> boxes;
  160. for(const auto& item : detections)
  161. {
  162. auto& b = item.bbox;
  163. Bbox bbox =
  164. {
  165. item.classId, //classId
  166. max(int((b[0]-b[2]/2.)*width),0), //left
  167. min(int((b[0]+b[2]/2.)*width),width), //right
  168. max(int((b[1]-b[3]/2.)*height),0), //top
  169. min(int((b[1]+b[3]/2.)*height),height), //bot
  170. item.prob //score
  171. };
  172. boxes.push_back(bbox);
  173. }
  174. return boxes;
  175. }
  176. vector<string> split(const string& str, char delim)
  177. {
  178. stringstream ss(str);
  179. string token;
  180. vector<string> container;
  181. while (getline(ss, token, delim)) {
  182. container.push_back(token);
  183. }
  184. return container;
  185. }
// Timestamp and frame index of the most recently received picture; written
// by Listenpic() and read by main() when republishing the annotated frame.
// NOTE(review): shared across the producer callback and the consumer loop
// without synchronization — confirm the threading model in modulecomm.
qint64 gpictime;
qint64 gpicindex;
//cv::Mat gmatimage;
  189. void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
  190. {
  191. if(nSize<1000)return;
  192. iv::vision::rawpic pic;
  193. if(false == pic.ParseFromArray(strdata,nSize))
  194. {
  195. // std::cout<<"picview Listenpic fail."<<std::endl;
  196. mTensorIvlog->info("picview Listenpic fail.");
  197. return;
  198. }
  199. cv::Mat mat(pic.height(),pic.width(),pic.mattype());
  200. if(pic.type() == 1)
  201. memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
  202. else
  203. {
  204. mat.release();
  205. std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
  206. mat = cv::imdecode(buff,CV_LOAD_IMAGE_COLOR);
  207. }
  208. gpictime = pic.time();
  209. gpicindex = pic.index();
  210. imageBuffer->add(mat);
  211. mat.release();
  212. }
  213. //std::vector<Obstacle> objvec,iv::lidar::objectarray & lidarobjvec
  214. //txs
  215. void GetCameraObj(std::vector<Bbox> objvec,iv::vision::cameraobjectarray &cameraobjvec)
  216. {
  217. mTensorIvlog->info("Bbox count: %d", objvec.size());
  218. // std::cout<<"Bbox count: "<<objvec.size()<<std::endl;
  219. int i;
  220. for(i=0;i<objvec.size();i++){
  221. Bbox x;
  222. //iv::vision::cameraobject cameraobj;
  223. x = objvec.at(i);
  224. mTensorIvlog->info("left: %d\tright: %d", x.left, x.right);
  225. // std::cout<<"left: " <<x.left<<"\tright: "<<x.right<<std::endl;
  226. iv::vision::cameraobject obj;
  227. const char id = x.classId;
  228. obj.set_type(&id);
  229. obj.set_outpointx((x.left+x.right)/2);
  230. //2019-10-10 09-01AM
  231. //obj.set_outpointy((x.bot+x.top)/2);
  232. obj.set_outpointy(x.bot);
  233. obj.set_con(x.score);
  234. mTensorIvlog->info("raw output x: %f\traw output y: %f", obj.outpointx(), obj.outpointy());
  235. // std::cout<<"raw output x: "<<obj.outpointx()<<"\traw output y:" <<obj.outpointy()<<std::endl;
  236. iv::vision::cameraobject * po = cameraobjvec.add_obj();
  237. po->CopyFrom(obj);
  238. }
  239. }
  240. //txs
  241. int main( int argc, char* argv[] )
  242. {
  243. parser::ADD_ARG_STRING("prototxt",Desc("input yolov3 deploy"),DefaultValue(INPUT_PROTOTXT),ValueDesc("file"));
  244. parser::ADD_ARG_STRING("caffemodel",Desc("input yolov3 caffemodel"),DefaultValue(INPUT_CAFFEMODEL),ValueDesc("file"));
  245. parser::ADD_ARG_INT("C",Desc("channel"),DefaultValue(to_string(INPUT_CHANNEL)));
  246. parser::ADD_ARG_INT("H",Desc("height"),DefaultValue(to_string(INPUT_HEIGHT)));
  247. parser::ADD_ARG_INT("W",Desc("width"),DefaultValue(to_string(INPUT_WIDTH)));
  248. parser::ADD_ARG_STRING("calib",Desc("calibration image List"),DefaultValue(CALIBRATION_LIST),ValueDesc("file"));
  249. parser::ADD_ARG_STRING("mode",Desc("runtime mode"),DefaultValue(MODE), ValueDesc("fp32/fp16/int8"));
  250. parser::ADD_ARG_STRING("outputs",Desc("output nodes name"),DefaultValue(OUTPUTS));
  251. parser::ADD_ARG_INT("class",Desc("num of classes"),DefaultValue(to_string(DETECT_CLASSES)));
  252. parser::ADD_ARG_FLOAT("nms",Desc("non-maximum suppression value"),DefaultValue(to_string(NMS_THRESH)));
  253. parser::ADD_ARG_INT("batchsize",Desc("batch size for input"),DefaultValue("1"));
  254. parser::ADD_ARG_STRING("enginefile",Desc("load from engine"),DefaultValue(""));
  255. //input
  256. parser::ADD_ARG_STRING("input",Desc("input image file"),DefaultValue(INPUT_IMAGE),ValueDesc("file"));
  257. parser::ADD_ARG_STRING("evallist",Desc("eval gt list"),DefaultValue(EVAL_LIST),ValueDesc("file"));
  258. gTensorFault = new iv::Ivfault("TensorRT_yolov3");
  259. mTensorIvlog = new iv::Ivlog("TensorRT_youlov3");
  260. // //txs
  261. // KalmanFilter KF(2,1,0);
  262. // Mat state();
  263. // //txs
  264. // if(argc < 2){
  265. // parser::printDesc();
  266. // exit(-1);
  267. // }
  268. // parser::parseArgs(argc,argv);
  269. QString strpath = "./vision_TensorRT_yolov3.xml";
  270. mTensorIvlog->info("%s", strpath.data());
  271. // std::cout<<strpath.toStdString()<<std::endl;
  272. iv::xmlparam::Xmlparam xp(strpath.toStdString());
  273. std::string xmlmsgname = xp.GetParam("msgname","image00");
  274. //string engineName = xp.GetParam("engineName","/home/nvidia/TensorRT-Yolov3/yolov3_fp16_608.engine");
  275. string engineName = "/home/nvidia/code/sd/src/detection/vision_TensorRT_yolov3/model/yolov3.engine";
  276. // string strwidth = xp.GetParam("width",608);
  277. // string strheight = xp.GetParam("height",608);
  278. mTensorIvlog->info("msgname is %s",xmlmsgname.data());
  279. // std::cout<<"msgname is "<<xmlmsgname<<std::endl;
  280. // void * mpa = iv::modulecomm::RegisterRecv(xmlmsgname.data(),Listenpic);
  281. void *pa;
  282. pa = iv::modulecomm::RegisterRecv("camera",Listenpic);
  283. gpcamout = iv::modulecomm::RegisterSend("cameraoutput",1000000,1);
  284. //txs
  285. gdetec = iv::modulecomm::RegisterSend("camera_detect",10000000,1);
  286. //txs
  287. std::unique_ptr<trtNet> net;
  288. // int batchSize = parser::getIntValue("batchsize");
  289. // string engineName = parser::getStringValue("enginefile");
  290. // if(engineName.length() > 0)
  291. // {
  292. // net.reset(new trtNet(engineName));
  293. // assert(net->getBatchSize() == batchSize);
  294. // }
  295. // else
  296. // {
  297. // string deployFile = parser::getStringValue("prototxt");
  298. // string caffemodelFile = parser::getStringValue("caffemodel");
  299. // vector<vector<float>> calibData;
  300. // string calibFileList = parser::getStringValue("calib");
  301. // string mode = parser::getStringValue("mode");
  302. // if(calibFileList.length() > 0 && mode == "int8")
  303. // {
  304. // cout << "find calibration file,loading ..." << endl;
  305. // ifstream file(calibFileList);
  306. // if(!file.is_open())
  307. // {
  308. // cout << "read file list error,please check file :" << calibFileList << endl;
  309. // exit(-1);
  310. // }
  311. // string strLine;
  312. // while( getline(file,strLine) )
  313. // {
  314. // cv::Mat img = cv::imread(strLine);
  315. // auto data = prepareImage(img);
  316. // calibData.emplace_back(data);
  317. // }
  318. // file.close();
  319. // }
  320. int batchSize = 1;
  321. net.reset(new trtNet(engineName));
  322. assert(net->getBatchSize() == batchSize);
  323. // RUN_MODE run_mode = RUN_MODE::FLOAT32;
  324. // if(mode == "int8")
  325. // {
  326. // if(calibFileList.length() == 0)
  327. // cout << "run int8 please input calibration file, will run in fp32" << endl;
  328. // else
  329. // run_mode = RUN_MODE::INT8;
  330. // }
  331. // else if(mode == "fp16")
  332. // {
  333. // run_mode = RUN_MODE::FLOAT16;
  334. // }
  335. // string outputNodes = parser::getStringValue("outputs");
  336. // auto outputNames = split(outputNodes,',');
  337. // //save Engine name
  338. // string saveName = "yolov3_" + mode + ".engine";
  339. // net.reset(new trtNet(deployFile,caffemodelFile,outputNames,calibData,run_mode,batchSize));
  340. // cout << "save Engine..." << saveName <<endl;
  341. // net->saveEngine(saveName);
  342. // }
  343. RUN_MODE run_mode = RUN_MODE::FLOAT16;
  344. int outputCount = net->getOutputSize()/sizeof(float);
  345. unique_ptr<float[]> outputData(new float[outputCount]);
  346. // string listFile = parser::getStringValue("evallist");
  347. string listFile;
  348. list<string> fileNames;
  349. list<vector<Bbox>> groundTruth;
  350. // if(listFile.length() > 0)
  351. // {
  352. // std::cout << "loading from eval list " << listFile << std::endl;
  353. // tie(fileNames,groundTruth) = readObjectLabelFileList(listFile);
  354. // }
  355. // else
  356. // {
  357. // string inputFileName = parser::getStringValue("input");
  358. // fileNames.push_back(inputFileName);
  359. // }
  360. string inputFileName = "/home/nvidia/TensorRT-Yolov3/test.jpg";
  361. fileNames.push_back(inputFileName);
  362. list<vector<Bbox>> outputs;
  363. // int classNum = parser::getIntValue("class");
  364. // int c = parser::getIntValue("C");
  365. // int h = parser::getIntValue("H");
  366. // int w = parser::getIntValue("W");
  367. int classNum = 80;
  368. int c = 608;
  369. int h = 608;
  370. int w = 3;
  371. int batchCount = 0;
  372. vector<float> inputData;
  373. inputData.reserve(h*w*c*batchSize);
  374. vector<cv::Mat> inputImgs;
  375. auto iter = fileNames.begin();
  376. //txs
  377. cv::namedWindow("result",WINDOW_NORMAL);
  378. cv::VideoWriter ou;
  379. ou.open("test.avi",CV_FOURCC('X', 'V', 'I', 'D'),20.0,Size(1280, 720), true);
  380. //txs
  381. cv::Mat img2;
  382. int count = 0;
  383. while(1)
  384. {
  385. imageBuffer->consume(img2);
  386. if(!img2.rows)
  387. {
  388. gTensorFault->SetFaultState(1, 0, "img2.rows state Error");
  389. break;
  390. }
  391. vector<float> curInput = prepareImage(img2);
  392. net->doInference(curInput.data(), outputData.get(),1);
  393. mTensorIvlog->info("run");
  394. // std::cout<<"run"<<std::endl;
  395. auto output = outputData.get();
  396. int detCount = output[0];
  397. //later detect result
  398. vector<Detection> result;
  399. result.resize(detCount);
  400. memcpy(result.data(), &output[1], detCount*sizeof(Detection));
  401. outputs.clear();
  402. auto boxes = postProcessImg(img2,result,classNum);
  403. outputs.emplace_back(boxes);
  404. vector<string> cocoNames = GetNamesFromFile("/home/nvidia/code/sd/src/detection/vision_TensorRT_yolov3/model/coco.names");
  405. // cout<<"cocoNames: "<<endl<<endl;
  406. mTensorIvlog->info("cocoName:");
  407. for(int i=0; i<cocoNames.size(); i++)
  408. {
  409. // cout<<cocoNames[i]<<endl;
  410. mTensorIvlog->info("%s",cocoNames[i].data());
  411. }
  412. // cout<<"cocoNames end"<<endl<<endl;
  413. mTensorIvlog->info("cocoNames End");
  414. //txs
  415. iv::vision::cameraobjectarray cameraobjvec;
  416. GetCameraObj(boxes,cameraobjvec);
  417. int ntlen;
  418. std::string out = cameraobjvec.SerializeAsString();
  419. iv::modulecomm::ModuleSendMsg(gpcamout,out.data(),out.length());
  420. mTensorIvlog->info("--------------");
  421. // std::cout<<"------------------"<<std::endl;
  422. mTensorIvlog->info("lenth is %d",out.length());
  423. //txs
  424. auto bbox = *outputs.begin();
  425. for(const auto& item : bbox)
  426. {
  427. cv::rectangle(img2,cv::Point(item.left,item.top),cv::Point(item.right,item.bot),cv::Scalar(0,0,255),3,8,0);
  428. std::cout << "class=" << item.classId << " prob=" << item.score*100 << std::endl;
  429. cout << "left=" << item.left << " right=" << item.right << " top=" << item.top << " bot=" << item.bot << endl;
  430. cout<<cocoNames[item.classId]<<endl;
  431. //txs
  432. MatrixXd local(2,1);
  433. local<<(item.left +item.right)/2,(item.bot +item.bot)/2;
  434. // local<<(item.bot +item.bot)/2,(item.left +item.right)/2;
  435. MatrixXd location=TranslateToStandardLocation::translateCameraToStandardLocation(local);
  436. // string str = cocoNames[item.classId] +":" + std::to_string(item.score*100)+":"+std::to_string(location(0,0))+std::to_string(location(1,0));
  437. string str = std::to_string((int)(location(0,0)/100.0))+" " + std::to_string((int)(location(1,0)/100.0));
  438. string str1=cocoNames[item.classId];
  439. cv::putText(img2,str, cv::Point(item.left, item.top), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255, 0, 0));
  440. cv::putText(img2,str1, cv::Point(item.left, item.top-15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 255, 0));
  441. //txs
  442. }
  443. // ou.write(img2);
  444. //txs
  445. // cv::imwrite("img/"+ std::to_string(count)+".jpg", img2);
  446. //txs
  447. iv::vision::rawpic pic;
  448. pic.set_time(gpictime);
  449. pic.set_index(gpicindex);
  450. pic.set_elemsize(img2.elemSize());
  451. pic.set_width(img2.cols);
  452. pic.set_height(img2.rows);
  453. pic.set_mattype(img2.type());
  454. std::vector<int> param = std::vector<int>(2);
  455. param[0] = CV_IMWRITE_JPEG_QUALITY;
  456. param[1] = 95; // default(95) 0-100
  457. std::vector<unsigned char> buff;
  458. cv::imencode(".jpg",img2,buff,param);
  459. pic.set_picdata(buff.data(),buff.size());
  460. buff.clear();
  461. pic.set_type(2);
  462. unsigned int nsize = pic.ByteSize();
  463. char * strser = new char[nsize];
  464. bool bser = pic.SerializeToArray(strser,nsize);
  465. if(bser)iv::modulecomm::ModuleSendMsg(gdetec,strser,nsize);
  466. delete[] strser;
  467. count += 1;
  468. cv::imshow("result",img2);
  469. cv::waitKey(1);
  470. curInput.clear();
  471. // free(output_cpu);
  472. // free(output2_cpu);
  473. // img2.release();
  474. // ou.release();
  475. }
  476. img2.release();
  477. ou.release();
  478. for (unsigned int i = 0;i < fileNames.size(); ++i ,++iter)
  479. {
  480. const string& filename = *iter;
  481. std::cout << "process: " << filename << std::endl;
  482. cv::Mat img = cv::imread(filename);
  483. vector<float> curInput = prepareImage(img);
  484. if (!curInput.data())
  485. continue;
  486. inputImgs.emplace_back(img);
  487. inputData.insert(inputData.end(), curInput.begin(), curInput.end());
  488. batchCount++;
  489. if(batchCount < batchSize && i + 1 < fileNames.size())
  490. continue;
  491. net->doInference(inputData.data(), outputData.get(),batchCount);
  492. //Get Output
  493. auto output = outputData.get();
  494. auto outputSize = net->getOutputSize()/ sizeof(float) / batchCount;
  495. for(int i = 0;i< batchCount ; ++i)
  496. {
  497. //first detect count
  498. int detCount = output[0];
  499. //later detect result
  500. vector<Detection> result;
  501. result.resize(detCount);
  502. memcpy(result.data(), &output[1], detCount*sizeof(Detection));
  503. auto boxes = postProcessImg(inputImgs[i],result,classNum);
  504. outputs.emplace_back(boxes);
  505. output += outputSize;
  506. }
  507. inputImgs.clear();
  508. inputData.clear();
  509. batchCount = 0;
  510. }
  511. net->printTime();
  512. if(groundTruth.size() > 0)
  513. {
  514. //eval map
  515. evalMAPResult(outputs,groundTruth,classNum,0.5f);
  516. evalMAPResult(outputs,groundTruth,classNum,0.75f);
  517. }
  518. if(fileNames.size() == 1)
  519. {
  520. //draw on image
  521. cv::Mat img = cv::imread(*fileNames.begin());
  522. auto bbox = *outputs.begin();
  523. for(const auto& item : bbox)
  524. {
  525. cv::rectangle(img,cv::Point(item.left,item.top),cv::Point(item.right,item.bot),cv::Scalar(0,0,255),3,8,0);
  526. cout << "class=" << item.classId << " prob=" << item.score*100 << endl;
  527. cout << "left=" << item.left << " right=" << item.right << " top=" << item.top << " bot=" << item.bot << endl;
  528. }
  529. cv::imwrite("result.jpg",img);
  530. cv::imshow("result",img);
  531. cv::waitKey(0);
  532. }
  533. gTensorFault->SetFaultState(0, 0, "ok");
  534. return 0;
  535. }