front_vision_node.cpp

/**
 * \brief Front-vision node: stereo depth estimation fused with 2D detection,
 *        publishing results over modulecomm.
 * \author pengcheng (pengcheng@yslrpch@126.com)
 * \date 2020-06-18
 * \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
 * \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
 */
#include <opencv2/opencv.hpp>
#include <cmw_app/front_vision.h>
#include <memory>
#include <yaml-cpp/yaml.h>
#include <vector>
#include <opencv2/viz.hpp>
#include <common/impl/cxxopts.hpp>
#include <mutex>
#include <thread>
#include <chrono>
#include <ctime>
#include <iostream>
#include <unistd.h>
#include <queue>
#include "common/project_root.h"
#include <glog/logging.h>
#include "modulecomm.h"
#include "obstacles.pb.h"
#include "rawpic.pb.h"
#define DEBUG
std::shared_ptr<adc::StereoSgmApp> stereo_app_ptr;
typedef std::vector<cv::Vec3b> PointColorType;
typedef std::vector<cv::Point3f> PointCloudType;
typedef std::chrono::system_clock::time_point TimeType;
typedef cv::Mat PointCloudMat;
typedef std::shared_ptr<std::list<std::vector<Tn::Bbox>>> OutputDataType;
typedef std::list<std::vector<Tn::Bbox>> OutputDataInstanceType;
typedef cv::Mat ImageType;
typedef std::vector<std::string> ClassNameType;
typedef detection::ImageBBox2D<float> ImageBBox2DType;
typedef std::vector<ImageBBox2DType> ImageBBoxes2DType;

cv::viz::Viz3d plot3d("3d Frame");
cv::Mat left_img, right_img;
std::mutex mu_left, mu_right, mutex_sync_bbox, mutex_sync_depth;
cv::VideoCapture left_cap;
cv::VideoCapture right_cap;
cv::Mat point_cloud;
int crop_top, crop_bottom;  // vertical crop applied to every frame before processing
void* g_obstacles_handle;
void* g_mark_ground_handle;
void* g_CCTSDB_handle;
void* g_origin_image_handle;
const unsigned int kCountBuff = 10;  // max entries kept in each sync queue
bool fetch_left = true;
bool fetch_right = true;

// A dense point matrix (3-channel float: x, y, z per pixel) paired with its capture time.
struct DepthTime
{
    PointCloudMat point_mat;
    TimeType time_stamp;
};

// A set of 2D detections paired with class names, capture time, and the source image.
struct BboxTime
{
    std::vector<Tn::Bbox> bboxs;
    ClassNameType names;
    TimeType time_stamp;
    cv::Mat img;
};

std::queue<DepthTime> depth_time_cache;
std::queue<BboxTime> bbox_time_cache;
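
// A small helper sketch (not used below; a suggested refactor only): the
// epoch-millisecond conversion that several callbacks repeat inline.
static inline uint64_t ToEpochMs(const TimeType& t)
{
    return static_cast<uint64_t>(
        std::chrono::duration_cast<std::chrono::milliseconds>(t.time_since_epoch()).count());
}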
// Build a GStreamer pipeline string for an NVIDIA Argus (CSI) camera,
// converting NV12 on the GPU to BGR for OpenCV's appsink.
std::string gstreamer_pipeline(int capture_width,
                               int capture_height,
                               int display_width,
                               int display_height,
                               int frame_rate,
                               int flip_method,
                               int sensor_id = 0,
                               int sensor_mode = 3)
{
    return "nvarguscamerasrc sensor-id=" + std::to_string(sensor_id) +
           " sensor-mode=" + std::to_string(sensor_mode) +
           " ! video/x-raw(memory:NVMM), " +
           "width=(int)" + std::to_string(capture_width) +
           ", height=(int)" + std::to_string(capture_height) +
           ", format=(string)NV12, " +
           "framerate=(fraction)" + std::to_string(frame_rate) +
           "/1 ! nvvidconv flip-method=" + std::to_string(flip_method) +
           " ! video/x-raw, width=(int)" + std::to_string(display_width) +
           ", height=(int)" + std::to_string(display_height) +
           ", format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink";
}
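
// For reference, the left-camera call used in InitCamera(),
// gstreamer_pipeline(1152, 768, 1152, 768, 25, 0, 1), expands to:
//   nvarguscamerasrc sensor-id=1 sensor-mode=3 ! video/x-raw(memory:NVMM),
//   width=(int)1152, height=(int)768, format=(string)NV12, framerate=(fraction)25/1
//   ! nvvidconv flip-method=0 ! video/x-raw, width=(int)1152, height=(int)768,
//   format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink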
// Continuously grab frames from the left camera (used when used_loop_fetch is false).
void ThreadFetchImageLeft()
{
    while (fetch_left)
    {
        mu_left.lock();
        left_cap.read(left_img);
        mu_left.unlock();
        usleep(20000);  // ~50 Hz polling
    }
}

// Continuously grab frames from the right camera.
void ThreadFetchImageRight()
{
    while (fetch_right)
    {
        mu_right.lock();
        right_cap.read(right_img);
        mu_right.unlock();
        usleep(20000);
    }
}
// Stereo callback: clone the dense point matrix and queue it for time-sync fusion.
void callback_deal_points_mat(PointCloudMat& point_cloud_float, TimeType& tm)
{
    DepthTime dt;
    dt.point_mat = point_cloud_float.clone();
    dt.time_stamp = tm;
    {
        std::lock_guard<std::mutex> lock(mutex_sync_depth);
        depth_time_cache.push(dt);
        if (depth_time_cache.size() > kCountBuff) depth_time_cache.pop();
    }
}
// Stereo callback: render the colored point cloud in the viz window.
void callback_deal_point(PointCloudType& pt_cloud, PointColorType& pt_color, TimeType& time)
{
    (void) time;
    if (pt_cloud.empty() || pt_color.empty()) return;
    cv::viz::WCloud cloud_widget = cv::viz::WCloud(pt_cloud, pt_color);
    cloud_widget.setRenderingProperty(cv::viz::POINT_SIZE, 2);
    plot3d.showWidget("ref cloud", cloud_widget);
    if (plot3d.wasStopped()) return;  // the original discarded this return value
    plot3d.spinOnce(1, true);
}
// Detection callback (visualization variant): draw and log the boxes, then
// queue them for time-sync fusion.
void callback_vis_bbox(OutputDataType& outputs, TimeType& time, ImageType& img, ClassNameType& class_name)
{
    auto output = (*outputs).begin();
    for (std::vector<Tn::Bbox>::iterator bbox = output->begin(); bbox != output->end(); ++bbox)
    {
        auto box = *bbox;
        cv::rectangle(img, cv::Point(box.left, box.top), cv::Point(box.right, box.bot),
                      cv::Scalar(0, 0, 255), 3, 8, 0);
        std::cout << "class=" << box.classId << " prob=" << box.score * 100 << std::endl;
        std::cout << "left=" << box.left << " right=" << box.right
                  << " top=" << box.top << " bot=" << box.bot << std::endl;
        std::string str1 = class_name[box.classId];
        cv::putText(img, str1, cv::Point(box.left, box.top - 15),
                    cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 255, 0));
    }
    BboxTime bt;
    bt.bboxs = (*output);
    bt.time_stamp = time;
    bt.names = class_name;
    bt.img = img;
    {
        std::lock_guard<std::mutex> lock(mutex_sync_bbox);
        bbox_time_cache.push(bt);
        if (bbox_time_cache.size() > kCountBuff) bbox_time_cache.pop();
    }
}
// Detection callback (no drawing): queue detections for time-sync fusion.
void callback_bbox(OutputDataType& outputs, TimeType& time, ImageType& img, ClassNameType& class_name)
{
    auto output = (*outputs).begin();
    BboxTime bt;
    bt.bboxs = (*output);
    bt.time_stamp = time;
    bt.names = class_name;
    bt.img = img;
    {
        std::lock_guard<std::mutex> lock(mutex_sync_bbox);
        bbox_time_cache.push(bt);
        if (bbox_time_cache.size() > kCountBuff) bbox_time_cache.pop();
    }
}
// Ground-mark detection callback: publish the 2D boxes as an ObstacleInfo message.
void callback_mark_ground(ImageBBoxes2DType& bboxes, TimeType& time)
{
    iv::vision::ObstacleInfo obstacles_info;
    std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds> tp =
        std::chrono::time_point_cast<std::chrono::milliseconds>(time);
    auto tmp = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch());
    uint64_t timestamp = tmp.count();
    obstacles_info.set_time(timestamp);
    for (auto bbox : bboxes)
    {
        iv::vision::Bbox3D* bbox_3d = obstacles_info.add_bbox_3d();
        bbox_3d->set_class_id(bbox.class_id);
        bbox_3d->set_top_left_x(bbox.box2d.x1);
        bbox_3d->set_top_left_y(bbox.box2d.y1);
        bbox_3d->set_bottom_right_x(bbox.box2d.x1 + bbox.box2d.width);
        // The original called set_top_left_y here (a copy-paste slip), overwriting
        // the top edge; set_bottom_right_y is assumed to exist alongside set_bottom_right_x.
        bbox_3d->set_bottom_right_y(bbox.box2d.y1 + bbox.box2d.height);
    }
    std::string publish_str = obstacles_info.SerializeAsString();
    iv::modulecomm::ModuleSendMsg(g_mark_ground_handle, publish_str.data(), publish_str.length());
}
// CCTSDB (traffic sign) detection callback: publish the boxes and draw them on the image.
void callback_CCTSDB(ImageBBoxes2DType& bboxes, cv::Mat& img, TimeType& time)
{
    iv::vision::ObstacleInfo obstacles_info;
    std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds> tp =
        std::chrono::time_point_cast<std::chrono::milliseconds>(time);
    auto tmp = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch());
    uint64_t timestamp = tmp.count();
    obstacles_info.set_time(timestamp);
    for (auto bbox : bboxes)
    {
        iv::vision::Bbox3D* bbox_3d = obstacles_info.add_bbox_3d();
        bbox_3d->set_class_id(bbox.class_id);
        bbox_3d->set_top_left_x(bbox.box2d.x1);
        bbox_3d->set_top_left_y(bbox.box2d.y1);
        bbox_3d->set_bottom_right_x(bbox.box2d.x1 + bbox.box2d.width);
        bbox_3d->set_bottom_right_y(bbox.box2d.y1 + bbox.box2d.height);  // was set_top_left_y; see note above
        cv::Rect rect(bbox.box2d.x1, bbox.box2d.y1, bbox.box2d.width, bbox.box2d.height);
        cv::rectangle(img, rect, cv::Scalar(10, 255, 10), 2);
    }
    std::string publish_str = obstacles_info.SerializeAsString();
    iv::modulecomm::ModuleSendMsg(g_CCTSDB_handle, publish_str.data(), publish_str.length());
}
// Ground-mark visualization callback: draw the detected boxes only.
void callback_mark_ground_vis(ImageBBoxes2DType& bboxes, cv::Mat& img, TimeType& time)
{
    (void) time;
    for (auto bbox : bboxes)
    {
        cv::Rect rect(bbox.box2d.x1, bbox.box2d.y1, bbox.box2d.width, bbox.box2d.height);
        cv::rectangle(img, rect, cv::Scalar(10, 255, 10), 2);
    }
}
// Fuse a time-matched depth frame and detection frame: estimate each box's 3D
// position from the closest valid depth point inside its central region, then
// publish the obstacle list and the JPEG-compressed source image.
void MergeData(DepthTime& depth_data, BboxTime& bbox_data)
{
    PointCloudMat mat_point = depth_data.point_mat;
    std::vector<Tn::Bbox> bboxs = bbox_data.bboxs;
    ClassNameType names = bbox_data.names;
    cv::Mat img = bbox_data.img;
    cv::Mat origin_img = img.clone();
    iv::vision::ObstacleInfo obstacles_info;
    iv::vision::rawpic cameraPic;
    TimeType time = depth_data.time_stamp;
    std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds> tp =
        std::chrono::time_point_cast<std::chrono::milliseconds>(time);
    auto tmp = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch());
    uint64_t timestamp = tmp.count();
    cameraPic.set_time(timestamp);
    cameraPic.set_elemsize(origin_img.elemSize());
    cameraPic.set_width(origin_img.cols);
    cameraPic.set_height(origin_img.rows);
    cameraPic.set_mattype(origin_img.type());
    std::vector<unsigned char> buff;
    std::vector<int> param = std::vector<int>(2);
    param[0] = cv::IMWRITE_JPEG_QUALITY;
    param[1] = 95;  // default(95), range 0-100
    cv::imencode(".jpg", origin_img, buff, param);
    cameraPic.set_picdata(buff.data(), buff.size());
    buff.clear();
    cameraPic.set_type(2);
    obstacles_info.set_time(timestamp);
    std::vector<cv::Mat> channels(3);
    cv::split(mat_point.clone(), channels);
    cv::Mat color, depth_8u;
    cv::Mat depth = channels[2];
    // 255/80 was integer division (== 3); use floating point so the 0-80 depth
    // range maps onto the full 8-bit scale.
    depth.convertTo(depth_8u, CV_8U, 255.0 / 80.0);
    cv::applyColorMap(depth_8u, color, cv::COLORMAP_RAINBOW);
    color.setTo(cv::Scalar(0, 0, 0), depth < 0);
    for (auto bbox : bboxs)
    {
        // Sample only the central half of the box to avoid background pixels.
        int t = bbox.top + (bbox.bot - bbox.top) / 4;
        int b = bbox.bot - (bbox.bot - bbox.top) / 4;
        int l = bbox.left + (bbox.right - bbox.left) / 4;
        int r = bbox.right - (bbox.right - bbox.left) / 4;
        cv::Mat local_point = mat_point(cv::Range(t, b), cv::Range(l, r)).clone();
        cv::rectangle(img, cv::Point(bbox.left, bbox.top), cv::Point(bbox.right, bbox.bot),
                      cv::Scalar(0, 0, 255), 3, 8, 0);
        cv::rectangle(color, cv::Point(bbox.left, bbox.top), cv::Point(bbox.right, bbox.bot),
                      cv::Scalar(0, 0, 255), 3, 8, 0);
        cv::rectangle(color, cv::Point(l, t), cv::Point(r, b), cv::Scalar(255, 0, 0), 1, 8, 0);
        // Find the closest point (smallest positive z) in the sampled region.
        float* data_ptr = local_point.ptr<float>();
        float location[] = {10000, 10000, 10000};
        for (int index = 0; index < local_point.rows * local_point.cols; index++)
        {
            if (data_ptr[2] > 0 && location[2] > data_ptr[2])
            {
                location[0] = data_ptr[0];
                location[1] = data_ptr[1];
                location[2] = data_ptr[2];
            }
            data_ptr += 3;  // originally advanced only inside the if, stalling the scan
        }
        iv::vision::Bbox3D* bbox_3d = obstacles_info.add_bbox_3d();
        bbox_3d->set_category(names[bbox.classId]);
        bbox_3d->set_x(location[0]);
        bbox_3d->set_y(location[1]);
        bbox_3d->set_z(location[2]);
        bbox_3d->set_top_left_x(bbox.left);
        bbox_3d->set_top_left_y(bbox.top);
        bbox_3d->set_bottom_right_x(bbox.right);
        bbox_3d->set_bottom_right_y(bbox.bot);  // was set_top_left_y; see note above
        std::string label = "z:" + std::to_string(location[2]);
        cv::putText(img, label, cv::Point((bbox.left + bbox.right) / 2, (bbox.top + bbox.bot) / 2),
                    cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 255, 0));
    }
    std::string publish_str = obstacles_info.SerializeAsString();
    std::string out_img = cameraPic.SerializeAsString();
    iv::modulecomm::ModuleSendMsg(g_obstacles_handle, publish_str.data(), publish_str.length());
    iv::modulecomm::ModuleSendMsg(g_origin_image_handle, out_img.data(), out_img.length());
    cv::imshow("depth", color);
    cv::imshow("detection", img);
    cv::waitKey(10);
}
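
// A minimal subscriber-side sketch (illustrative; DecodeObstaclesExample is not
// part of this node and is not wired in): how the ObstacleInfo message published
// above can be decoded with the standard protobuf-generated accessors.
static void DecodeObstaclesExample(const char* data, int len)
{
    iv::vision::ObstacleInfo info;
    if (!info.ParseFromArray(data, len)) return;
    for (int i = 0; i < info.bbox_3d_size(); ++i)
    {
        const iv::vision::Bbox3D& b = info.bbox_3d(i);
        LOG(INFO) << "obstacle " << b.category() << " at z=" << b.z();
    }
}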
// Pair depth frames with detection frames whose timestamps differ by at most
// 0.5 s, discarding whichever side lags; matched pairs go to MergeData().
void ThreadSyncData()
{
    BboxTime bt;
    DepthTime dt;
    while (true)
    {
        bool synced = false;
        {
            std::lock_guard<std::mutex> lock_bbox(mutex_sync_bbox);
            std::lock_guard<std::mutex> lock_depth(mutex_sync_depth);
            while (!bbox_time_cache.empty() && !depth_time_cache.empty())
            {
                bt = bbox_time_cache.front();
                dt = depth_time_cache.front();
                TimeType bbox_time = bt.time_stamp;
                TimeType depth_time = dt.time_stamp;
                std::chrono::duration<double> diff = (bbox_time - depth_time);
                auto duration = diff.count();  // seconds
                if (duration <= 5e-1 && duration >= -5e-1)
                {
                    synced = true;
                    bbox_time_cache.pop();
                    depth_time_cache.pop();
                    break;
                }
                else if (duration > 5e-1)
                {
                    // depth frame is older than the detection; discard it
                    depth_time_cache.pop();
                }
                else  // duration < -5e-1: detection is older; discard it
                {
                    bbox_time_cache.pop();
                }
            }
        }
        if (synced)
        {
            MergeData(dt, bt);
        }
        usleep(2000);
    }
}
// Open both CSI cameras at 1152x768 @ 25 fps through GStreamer.
void InitCamera()
{
    // gstreamer_pipeline(capture_width, capture_height, display_width,
    //                    display_height, frame_rate, flip_method, sensor_id)
    std::string pipeline_left = gstreamer_pipeline(1152, 768, 1152, 768, 25, 0, 1);
    std::string pipeline_right = gstreamer_pipeline(1152, 768, 1152, 768, 25, 0, 2);
    LOG(INFO) << "opening left camera...: " << pipeline_left;
    left_cap.open(pipeline_left, cv::CAP_GSTREAMER);
    LOG(INFO) << "opening right camera...";
    right_cap.open(pipeline_right, cv::CAP_GSTREAMER);
    if (!left_cap.isOpened())
    {
        LOG(ERROR) << "Can not open left camera";
        exit(1);
    }
    if (!right_cap.isOpened())
    {
        LOG(ERROR) << "Can not open right camera";
        exit(1);
    }
}
int main(int argc, char** argv)
{
    cxxopts::Options options(argv[0], "double_camera_node");
    options.add_options()
        ("h,help", "this app is used for double_camera_node")
        ("c,config", "configuration file path",
         cxxopts::value<std::string>()->default_value("front_vision.yaml"))
        ("r,root_path", "root path",
         cxxopts::value<std::string>()->default_value("/opt/adc/"));
    cxxopts::ParseResult opts = options.parse(argc, argv);
    // std::string root_path = opts["root_path"].as<std::string>();
    // adc::RootPath::Ptr root_path_ptr = adc::RootPath::GetInstance();
    // root_path_ptr->SetRoot(root_path);
    std::string config_file = opts["config"].as<std::string>();
    config_file = adc::RootPath::GetAbsolutePath(config_file);
    LOG(INFO) << "Front vision config: " << config_file;
    std::string obstacle_detector_config;
    std::string stereo_vison_config;
    std::string mark_ground_detector_config;
    std::string cctsdb_detector_config;
    std::string topic_obstcle;
    std::string topic_mark_ground;
    std::string topic_cctsdb;
    std::string topic_origin_image;
    std::string left_camera_params;
    bool used_loop_fetch = false;
    bool mono = false;
    bool vis_mark_ground = false;
    bool vis_pointcloud = false;
    bool vis_obstacle = false;
    bool fusion_depth_and_obstacle = false;
    float hz = 10;
    try
    {
        YAML::Node node = YAML::LoadFile(config_file);
        obstacle_detector_config = node["obstacle_detector_config"].as<std::string>();
        stereo_vison_config = node["stereo_vison_config"].as<std::string>();
        cctsdb_detector_config = node["cctsdb_detector_config"].as<std::string>();
        mark_ground_detector_config = node["mark_ground_detector_config"].as<std::string>();
        used_loop_fetch = node["used_loop_fetch"].as<bool>();
        mono = node["mono"].as<bool>();
        crop_top = node["crop_top"].as<int>(200);
        crop_bottom = node["crop_bottom"].as<int>(570);
        hz = node["hz"].as<float>(10.0);
        vis_mark_ground = node["vis_mark_ground"].as<bool>();
        vis_pointcloud = node["vis_pointcloud"].as<bool>();
        vis_obstacle = node["vis_obstacle"].as<bool>();
        fusion_depth_and_obstacle = node["fusion_depth_and_obstacle"].as<bool>();
        topic_obstcle = node["topic_obstcle"].as<std::string>();
        topic_mark_ground = node["topic_mark_ground"].as<std::string>();
        topic_cctsdb = node["topic_cctsdb"].as<std::string>();
        left_camera_params = node["left_camera_params"].as<std::string>();
        topic_origin_image = node["topic_origin_image"].as<std::string>();
    }
    catch (const std::exception& e)
    {
        LOG(ERROR) << "Failed to load YAML config!";
        std::cerr << e.what() << '\n';
        return 1;  // the original fell through with empty settings; abort instead
    }
    g_obstacles_handle = iv::modulecomm::RegisterSend(topic_obstcle.c_str(), 1000000, 1);
    g_mark_ground_handle = iv::modulecomm::RegisterSend(topic_mark_ground.c_str(), 1000000, 1);
    g_CCTSDB_handle = iv::modulecomm::RegisterSend(topic_cctsdb.c_str(), 1000000, 1);
    g_origin_image_handle = iv::modulecomm::RegisterSend(topic_origin_image.c_str(), 1000000, 1);
    left_camera_params = adc::RootPath::GetAbsolutePath(left_camera_params);
    adc::FrontVisionApp::Ptr front_vision_app_ptr = std::make_shared<adc::FrontVisionApp>(left_camera_params);
    front_vision_app_ptr->SetTask(obstacle_detector_config, adc::FrontVisionApp::TASK::OBSTACLE_DETECTION_YOLO, hz);
    front_vision_app_ptr->SetTask(stereo_vison_config, adc::FrontVisionApp::TASK::STEREO, hz);
    front_vision_app_ptr->SetTask(mark_ground_detector_config, adc::FrontVisionApp::TASK::MARK_GROUND_DETECTION_CENTERNET, hz);
    front_vision_app_ptr->SetTask(cctsdb_detector_config, adc::FrontVisionApp::TASK::CCTSDB_DETECTION_CENTERNET, hz);
    LOG(INFO) << "completed front vision task setup";
    if (vis_pointcloud)
    {
        front_vision_app_ptr->SetStereoPointAndColorCallback(callback_deal_point);
        plot3d.showWidget("Coordinate Widget", cv::viz::WCoordinateSystem());
    }
    front_vision_app_ptr->SetStereoPointCloudMatCallback(callback_deal_points_mat);
    if (vis_obstacle)
    {
        front_vision_app_ptr->SetDetectionObjectCallback(callback_vis_bbox);
    }
    else
    {
        front_vision_app_ptr->SetDetectionObjectCallback(callback_bbox);
    }
    if (vis_mark_ground)
    {
        front_vision_app_ptr->SetMarkGroundDetectionWithImgCallback(callback_mark_ground_vis);
    }
    else
    {
        front_vision_app_ptr->SetMarkGroundDetectionCallback(callback_mark_ground);
    }
    front_vision_app_ptr->SetCCTSDBDetectionWithImgCallback(callback_CCTSDB);
    InitCamera();
    std::thread thread_sync_res;
    if (fusion_depth_and_obstacle)
    {
        thread_sync_res = std::thread(ThreadSyncData);
        thread_sync_res.detach();
    }
    if (!used_loop_fetch)
    {
        std::thread thread_left(ThreadFetchImageLeft);
        std::thread thread_right(ThreadFetchImageRight);
        thread_left.detach();
        thread_right.detach();
    }
    front_vision_app_ptr->Ready();
    // Prime the capture pipeline: read and discard the first 50 frames.
    for (int i = 0; i < 50; i++)
    {
        left_cap.read(left_img);
        right_cap.read(right_img);
    }
    while (true)
    {
        if (used_loop_fetch)
        {
            if (mono)
            {
                left_cap.read(left_img);
                if (!left_img.empty())
                {
                    left_img = left_img(cv::Range(crop_top, crop_bottom), cv::Range(0, left_img.cols));
                    std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
                    front_vision_app_ptr->SetImage(left_img, tm);
                    left_img = cv::Mat();
                }
            }
            else
            {
                left_cap.read(left_img);
                right_cap.read(right_img);
                if (!left_img.empty() && !right_img.empty())
                {
                    left_img = left_img(cv::Range(crop_top, crop_bottom), cv::Range(0, left_img.cols));
                    right_img = right_img(cv::Range(crop_top, crop_bottom), cv::Range(0, right_img.cols));
                    std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
                    front_vision_app_ptr->SetImages(left_img, right_img, tm);
                    left_img = cv::Mat();
                    right_img = cv::Mat();
                }
            }
        }
        else
        {
            if (mono)
            {
                mu_left.lock();
                if (!left_img.empty())
                {
                    left_img = left_img(cv::Range(crop_top, crop_bottom), cv::Range(0, left_img.cols));
                    std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
                    front_vision_app_ptr->SetImage(left_img, tm);
                    left_img = cv::Mat();
                }
                mu_left.unlock();
            }
            else
            {
                mu_left.lock();
                mu_right.lock();
                if (!left_img.empty() && !right_img.empty())
                {
                    left_img = left_img(cv::Range(crop_top, crop_bottom), cv::Range(0, left_img.cols));
                    right_img = right_img(cv::Range(crop_top, crop_bottom), cv::Range(0, right_img.cols));
                    std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
                    front_vision_app_ptr->SetImages(left_img, right_img, tm);
                    left_img = cv::Mat();
                    right_img = cv::Mat();
                }
                mu_left.unlock();
                mu_right.unlock();
            }
        }
    }
    fetch_left = false;
    fetch_right = false;
    // thread_sync_res.join();
    return 0;
}
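
/*
A minimal front_vision.yaml sketch matching the keys read in main() above.
The values are illustrative assumptions only, not the deployed configuration:

obstacle_detector_config: config/obstacle_detector.yaml
stereo_vison_config: config/stereo_vision.yaml
cctsdb_detector_config: config/cctsdb_detector.yaml
mark_ground_detector_config: config/mark_ground_detector.yaml
used_loop_fetch: true
mono: false
crop_top: 200
crop_bottom: 570
hz: 10.0
vis_mark_ground: false
vis_pointcloud: false
vis_obstacle: false
fusion_depth_and_obstacle: true
topic_obstcle: obstacles
topic_mark_ground: mark_ground
topic_cctsdb: cctsdb
left_camera_params: config/left_camera.yaml
topic_origin_image: origin_image
*/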