// main_multibatch.cpp
// Dual-camera (30°/90°) obstacle detection node: batched YOLOv4 inference,
// Kalman tracking, ground-plane back-projection, shared-memory publishing.
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

#include <chrono>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include "imageBuffer.h"
#include "modulecomm.h"
#include "rawpic.pb.h"
#include "obstacles.pb.h"
#include "yolodetect.h"
#include "detect_obstacle.h"

using namespace std;
using namespace cv;
  16. uint max_batch_size = 2;
  17. string yaml_path="/home/nvidia/models/camera/external.yaml";
  18. bool test_video = true;
  19. string video_path = "20201231144029.avi";
  20. void * g_obstacle;
  21. string obstacledata="obstacledata";
  22. void * mpa_camera;
  23. string cameradata="picleft";
  24. typedef struct frame_info
  25. {
  26. cv::Mat frame;
  27. long long timestamp;
  28. }frame_info;
  29. typedef struct obstacle_info
  30. {
  31. string category;
  32. uint64 class_id;
  33. Point3f bbox_3d;
  34. Rect_<float> bbox;
  35. }obstacle_info;
  36. typedef struct obstacles_info
  37. {
  38. vector<obstacle_info> bboxes;
  39. uint64 time;
  40. }obstacles_info;
  41. int capture_width=1280,capture_height=720;
  42. cv::Mat cameraMatrix = (cv::Mat_<double>(3,3)<< 1.0538648949999999e+03, 0., 9.7134830299999999e+02, 0.,
  43. 9.6887771299999997e+02, 5.5096488099999999e+02, 0., 0., 1.);
  44. cv::Mat distCoeffs = (cv::Mat_<double>(1,5)<<-2.9078999999999999e-01, 6.0994000000000000e-02,
  45. 3.6489999999999999e-03, -2.0590000000000001e-03, 0.);
  46. cv::Mat cameraMatrix_inv = (cv::Mat_<double>(3,3)<<9.4888823486240150e-04, 0., -9.2170097667025908e-01, 0.,
  47. 1.0321219970099572e-03, -5.6866297326007331e-01, 0., 0., 1.);
  48. cv::Mat rvecM_inv = (cv::Mat_<double>(3,3)<<9.9361999532145029e-01, 1.5481286202195165e-02,
  49. 1.1171228524619221e-01, -1.1071480863577153e-01,
  50. -5.4789655210038264e-02, 9.9234083098031878e-01,
  51. 2.1483390005894807e-02, -9.9837789611924532e-01,
  52. -5.2726089314150459e-02 );
  53. cv::Mat tvec = (cv::Mat_<double>(3,1)<<1.5367854556890524e+02, 1.5365463964586140e+03,
  54. -1.2444937322334890e+03);
  55. ConsumerProducerQueue<frame_info> * imageBuffer = new ConsumerProducerQueue<frame_info>(3,true);
  56. ConsumerProducerQueue<frame_info> * imageBuffer_30 = new ConsumerProducerQueue<frame_info>(3,true);
  57. ConsumerProducerQueue<frame_info> * imageBuffer_90 = new ConsumerProducerQueue<frame_info>(3,true);
  58. // 获取系统当前时间,从格林威治标准时间(1970-01-01 00:00:00.000)开始到指定时间的毫秒数。
  59. long long getCurrentSystemTime()
  60. {
  61. auto now = std::chrono::system_clock::now();
  62. long long millseconds = std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count();
  63. /*
  64. uint64_t dis_millseconds = std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count()
  65. - std::chrono::duration_cast<std::chrono::seconds>(now.time_since_epoch()).count() * 1000;
  66. auto tt = std::chrono::system_clock::to_time_t(now);
  67. struct tm* ptm = localtime(&tt);
  68. char date[60] = { 0 };
  69. sprintf(date, "%d/%02d/%02d/%02d/%02d/%02d/%03d",
  70. (int)ptm->tm_year + 1900, (int)ptm->tm_mon + 1, (int)ptm->tm_mday,
  71. (int)ptm->tm_hour, (int)ptm->tm_min, (int)ptm->tm_sec, (int)dis_millseconds);
  72. return std::string(date);
  73. */
  74. return millseconds;
  75. }
  76. //读取视频数据
  77. void ReadFunc(int n)
  78. {
  79. cv::VideoCapture cap_30(0);
  80. cv::VideoCapture cap_90(1);
  81. if(!cap_30.isOpened() | !cap_90.isOpened())
  82. {
  83. cout<<"camera failed to open"<<endl;
  84. }
  85. while(1)
  86. {
  87. frame_info frameInfo_30,frameInfo_90;
  88. cv::Mat frame_30,frame_90;
  89. //读视频的时候加上,读摄像头去掉
  90. if(imageBuffer_30->isFull() | imageBuffer_90->isFull())
  91. {
  92. continue;
  93. }
  94. if(cap_30.read(frame_30) && cap_90.read(frame_90))
  95. {
  96. long long timestamp = getCurrentSystemTime();
  97. frameInfo_30.frame = frame_30;
  98. frameInfo_30.timestamp = timestamp;
  99. imageBuffer_30->add(frameInfo_30);
  100. frameInfo_90.frame = frame_90;
  101. frameInfo_90.timestamp = timestamp;
  102. imageBuffer_90->add(frameInfo_90);
  103. }
  104. else
  105. {
  106. std::this_thread::sleep_for(std::chrono::milliseconds(1));
  107. }
  108. }
  109. }
  110. //从共享内存中接收摄像头数据
  111. void Listencamera(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
  112. {
  113. if(nSize<1000)return;
  114. iv::vision::rawpic pic;
  115. if(false == pic.ParseFromArray(strdata,nSize))
  116. {
  117. std::cout<<"picview Listenpic fail."<<std::endl;
  118. return;
  119. }
  120. cv::Mat mat(pic.height(),pic.width(),pic.mattype());
  121. if(pic.type() == 1)
  122. memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
  123. else
  124. {
  125. // mat.release();
  126. std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
  127. mat = cv::imdecode(buff,IMREAD_COLOR);
  128. }
  129. frame_info img_info;
  130. img_info.frame = mat;
  131. img_info.timestamp = pic.time();
  132. imageBuffer->add(img_info);
  133. mat.release();
  134. }
  135. //向共享内存中存入障碍物检测结果
  136. void SendDetectResult(obstacles_info &obstacles,void* g_name)
  137. {
  138. iv::vision::ObstacleInfo detectResult;
  139. detectResult.set_time(obstacles.time);
  140. for(unsigned int i=0;i <obstacles.bboxes.size();i++)
  141. {
  142. iv::vision::Bbox3D obstacle;
  143. obstacle.set_category(obstacles.bboxes[i].category);
  144. obstacle.set_class_id(obstacles.bboxes[i].class_id);
  145. obstacle.set_x_3d(obstacles.bboxes[i].bbox_3d.x);
  146. obstacle.set_y_3d(obstacles.bboxes[i].bbox_3d.y);
  147. obstacle.set_z_3d(obstacles.bboxes[i].bbox_3d.z);
  148. obstacle.set_x(obstacles.bboxes[i].bbox.x);
  149. obstacle.set_y(obstacles.bboxes[i].bbox.y);
  150. obstacle.set_w(obstacles.bboxes[i].bbox.width);
  151. obstacle.set_h(obstacles.bboxes[i].bbox.height);
  152. iv::vision::Bbox3D * bb = detectResult.add_bbox_3d();
  153. bb->CopyFrom(obstacle);
  154. }
  155. std::string out_result = detectResult.SerializeAsString();
  156. iv::modulecomm::ModuleSendMsg(g_name,out_result.data(),out_result.length());
  157. }
  158. //像素坐标系转换到车体坐标系
  159. cv::Point3f CameraToWorld(cv::Point2f CameraInfo)
  160. {
  161. cv::Mat imagePoint = cv::Mat::ones(3, 1, cv::DataType<double>::type);//u,v,1
  162. cv::Mat tempMat, tempMat2;
  163. //输入一个2D坐标点,便可以求出相应的s
  164. imagePoint.at<double>(0,0)=double(CameraInfo.x);
  165. imagePoint.at<double>(1,0)=double(CameraInfo.y);
  166. double zConst = 0;//实际坐标系的距离
  167. //计算参数s
  168. double s;
  169. tempMat = rvecM_inv * cameraMatrix_inv * imagePoint;
  170. tempMat2 = rvecM_inv * tvec;
  171. s = zConst + tempMat2.at<double>(2, 0);
  172. s /= tempMat.at<double>(2, 0);
  173. //cout<<"s : "<<s<<endl;
  174. //////////////////////camera_coordinates////////////////
  175. cv::Mat camera_cordinates=-rvecM_inv*tvec;
  176. /////////////////////2D to 3D///////////////////////
  177. cv::Mat wcPoint = rvecM_inv * (cameraMatrix_inv *s*imagePoint - tvec);
  178. cv::Point3f worldPoint(wcPoint.at<double>(0, 0), wcPoint.at<double>(1, 0),
  179. wcPoint.at<double>(2, 0));
  180. return worldPoint;
  181. }
  182. int main(int argc, char** argv )
  183. {
  184. if(argc==3)
  185. {
  186. test_video = (strcmp(argv[1], "true") == 0)?true:false;
  187. if(test_video)
  188. video_path = argv[2];
  189. else
  190. cameradata = argv[2];
  191. }
  192. if(argc==2)
  193. test_video = (strcmp(argv[1], "true") == 0)?true:false;
  194. if(test_video)
  195. std::thread * readthread = new std::thread(ReadFunc,1);
  196. else
  197. mpa_camera= iv::modulecomm::RegisterRecv(&cameradata[0],Listencamera);
  198. g_obstacle = iv::modulecomm::RegisterSend(&obstacledata[0],1000000,1);
  199. //导入相机内参和畸变系数矩阵
  200. FileStorage file_storage(yaml_path, FileStorage::READ);
  201. if( file_storage.isOpened())
  202. {
  203. file_storage["camera_matrix"] >> cameraMatrix;
  204. file_storage["camera_matrix_inv"] >> cameraMatrix_inv;
  205. file_storage["distortion_coefficients"] >> distCoeffs;
  206. file_storage["RotationR_inv"] >> rvecM_inv;
  207. file_storage["tvec"] >> tvec;
  208. file_storage.release();
  209. }
  210. else{
  211. cout << "Error: can not open the external parameters file"<<endl;
  212. return 0;
  213. }
  214. NetworkInfo networkInfo;
  215. networkInfo.networkType = "yolov4";
  216. networkInfo.configFilePath = "/home/nvidia/models/camera/yolov4-shigong.cfg";
  217. networkInfo.wtsFilePath = "/home/nvidia/models/camera/yolov4-shigong_final.weights";
  218. networkInfo.deviceType = "kGPU";
  219. networkInfo.inputBlobName = "data";
  220. networkInfo.maxbatchSize = max_batch_size;
  221. std::string modelname = "/home/nvidia/models/camera/yolov4_shigong.engine";
  222. IExecutionContext* yolo_context{nullptr};
  223. YoloDetect detector(networkInfo,modelname);
  224. /*
  225. string classesFile = "model/shigong.names";
  226. //加载类别名
  227. vector<string> classes;
  228. ifstream ifs(classesFile.c_str());
  229. string line;
  230. while (getline(ifs, line)) classes.push_back(line);
  231. */
  232. vector<string> classes = {"shigong"};
  233. //加载网络模型,0是指定第一块GPU
  234. cudaSetDevice(0);
  235. if(!detector.loadModel(yolo_context))
  236. {
  237. cout<<"load yolo model failed"<<endl;
  238. return -1;
  239. }
  240. //Size size(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get(CV_CAP_PROP_FRAME_HEIGHT));
  241. //VideoWriter writer("./data/result.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), 25, size);
  242. vector<KalmanTracker> trackers_30,trackers_90;
  243. KalmanTracker::kf_count = 0; // tracking id relies on this, so we have to reset it in each seq.
  244. int frame_count = 0;
  245. frame_info frameInfo_30,frameInfo_90;
  246. Mat frame_30,frame_90;
  247. long long millseconds; //时间戳
  248. double waittime = (double)getTickCount();
  249. while (1)
  250. {
  251. if(imageBuffer_30->isEmpty() | imageBuffer_90->isEmpty())
  252. {
  253. double waittotal = (double)getTickCount() - waittime;
  254. if(waittotal/cv::getTickFrequency()>30.0)
  255. {
  256. cout<<"Can't get frame and quit"<<endl;
  257. break;
  258. }
  259. continue;
  260. }
  261. imageBuffer_30->consume(frameInfo_30);
  262. frame_30 = frameInfo_30.frame;
  263. imageBuffer_90->consume(frameInfo_90);
  264. frame_90 = frameInfo_90.frame;
  265. vector<Mat> frame_vec;
  266. frame_vec.push_back(frame_30);
  267. frame_vec.push_back(frame_90);
  268. obstacles_info detect_result_info;
  269. detect_result_info.time = frameInfo_30.timestamp;
  270. frame_count++;
  271. double start = (double)getTickCount();
  272. //前向预测
  273. float ignore_thresh=0.4;
  274. float nms_thresh = 0.4;
  275. //两个摄像头30,90度
  276. std::vector<std::vector<Detection>> detect_results_yolo;
  277. std::vector<Detection> result_30,result_90;
  278. detect_results_yolo.push_back(result_30);
  279. detect_results_yolo.push_back(result_90);
  280. bool flag_30 = false;
  281. bool flag_90 = false;
  282. od::bbox_t bbox_t_30;
  283. od::bbox_t bbox_t_90;
  284. vector<od::bbox_t> outs_30;
  285. vector<od::bbox_t> outs_90;
  286. if(detector.process(*yolo_context,max_batch_size,frame_vec,
  287. detect_results_yolo,ignore_thresh,nms_thresh))
  288. {
  289. for (size_t i = 0; i < detect_results_yolo[0].size(); i++) {
  290. cv::Rect r = detector.get_rect(frame_30, detect_results_yolo[0][i].bbox,detector.m_input_w,detector.m_input_h);
  291. bbox_t_30.x = r.x;
  292. bbox_t_30.y = r.y;
  293. bbox_t_30.w = r.width;
  294. bbox_t_30.h = r.height;
  295. bbox_t_30.prob = detect_results_yolo[0][i].det_confidence;
  296. bbox_t_30.obj_id = detect_results_yolo[0][i].class_id;
  297. outs_30.push_back(bbox_t_30);
  298. }
  299. for (size_t i = 0; i < detect_results_yolo[1].size(); i++) {
  300. cv::Rect r = detector.get_rect(frame_90, detect_results_yolo[1][i].bbox,
  301. detector.m_input_w,detector.m_input_h);
  302. bbox_t_90.x = r.x;
  303. bbox_t_90.y = r.y;
  304. bbox_t_90.w = r.width;
  305. bbox_t_90.h = r.height;
  306. bbox_t_90.prob = detect_results_yolo[1][i].det_confidence;
  307. bbox_t_90.obj_id = detect_results_yolo[1][i].class_id;
  308. outs_90.push_back(bbox_t_90);
  309. od::Drawer(frame_90, outs_90, classes);
  310. }
  311. }
  312. // double infertime = (double)getTickCount() - start;
  313. // std::cout<< "Total Cost of infertime: " <<infertime*1000.0/cv::getTickFrequency()<<" ms"<<std::endl;
  314. vector<od::TrackingBox>track_result_30,track_result_90;
  315. bool track_flag_30 = od::TrackObstacle(frame_count,trackers_30,outs_30,track_result_30);
  316. bool track_flag_90 = od::TrackObstacle(frame_count,trackers_90,outs_90,track_result_90);
  317. if(track_flag_30)
  318. {
  319. vector<obstacle_info>obstacle_vector;
  320. for(unsigned int i=0;i < track_result_30.size(); i++)
  321. {
  322. obstacle_info obstacle;
  323. obstacle.category = classes[track_result_30[i].class_id];
  324. obstacle.class_id = track_result_30[i].class_id;
  325. //求bbox的接地点
  326. Point2f bottom_center = Point2f(track_result_30[i].box.x+0.5*track_result_30[i].box.width,
  327. track_result_30[i].box.y+track_result_30[i].box.height);
  328. obstacle.bbox_3d = CameraToWorld(bottom_center);
  329. obstacle.bbox = track_result_30[i].box;
  330. obstacle_vector.push_back(obstacle);
  331. }
  332. detect_result_info.bboxes = obstacle_vector;
  333. SendDetectResult(detect_result_info,g_obstacle);
  334. od::Drawer(frame_30, track_result_30, classes);
  335. }
  336. else if(track_flag_90)
  337. {
  338. vector<obstacle_info>obstacle_vector;
  339. for(unsigned int i=0;i < track_result_90.size(); i++)
  340. {
  341. obstacle_info obstacle;
  342. obstacle.category = classes[track_result_90[i].class_id];
  343. obstacle.class_id = track_result_90[i].class_id;
  344. //求bbox的接地点
  345. Point2f bottom_center = Point2f(track_result_90[i].box.x+0.5*track_result_90[i].box.width,
  346. track_result_90[i].box.y+track_result_90[i].box.height);
  347. obstacle.bbox_3d = CameraToWorld(bottom_center);
  348. obstacle.bbox = track_result_90[i].box;
  349. obstacle_vector.push_back(obstacle);
  350. }
  351. detect_result_info.bboxes = obstacle_vector;
  352. SendDetectResult(detect_result_info,g_obstacle);
  353. od::Drawer(frame_90, track_result_90, classes);
  354. }
  355. double total = (double)getTickCount() - start;
  356. std::cout<< "Total Cost of Detection: " <<total*1000.0/cv::getTickFrequency()<<" ms"<<endl;
  357. namedWindow("Result_30",WINDOW_NORMAL);
  358. imshow("Result_30",frame_30);
  359. namedWindow("Result_90",WINDOW_NORMAL);
  360. imshow("Result_90",frame_90);
  361. if(waitKey(1) == 'q')
  362. break;
  363. if(waitKey(1) == 's')
  364. waitKey(0);
  365. waittime = (double)getTickCount();
  366. }
  367. destroyAllWindows();
  368. trackers_30.clear();
  369. trackers_90.clear();
  370. yolo_context->destroy();
  371. return 0;
  372. }