- /**
- * \brief Front vision node: captures stereo camera images, runs detection and stereo depth, and publishes fused obstacle information.
- * \author pengcheng (pengcheng@yslrpch@126.com)
- * \date 2020-06-18
- * \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
- * \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
- */
- #include <opencv2/opencv.hpp>
- #include <cmw_app/front_vision.h>
- #include <memory>
- #include <yaml-cpp/yaml.h>
- #include <vector>
- #include <opencv2/viz.hpp>
- #include <common/impl/cxxopts.hpp>
- #include <mutex>
- #include <thread>
- #include <chrono>
- #include <ctime>
- #include <iostream>
- #include <unistd.h>
- #include <queue>
- #include "common/project_root.h"
- #include <glog/logging.h>
- #include "modulecomm.h"
- #include "obstacles.pb.h"
- #include "rawpic.pb.h"
- #define DEBUG
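- // Shared state used by the capture threads, the front-vision callbacks, and the sync thread below.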
- std::shared_ptr<adc::StereoSgmApp> stereo_app_ptr;
- typedef std::vector<cv::Vec3b> PointColorType;
- typedef std::vector<cv::Point3f> PointCloudType;
- typedef std::chrono::system_clock::time_point TimeType;
- typedef cv::Mat PointCloudMat;
- typedef std::shared_ptr<std::list<std::vector<Tn::Bbox>>> OutputDataType;
- typedef std::list<std::vector<Tn::Bbox>> OutputDataInstanceType;
- typedef cv::Mat ImageType;
- typedef std::vector<std::string> ClassNameType;
- typedef detection::ImageBBox2D<float> ImageBBox2DType;
- typedef std::vector<ImageBBox2DType> ImageBBoxes2DType;
- cv::viz::Viz3d plot3d("3d Frame");
- cv::Mat left_img, right_img;
- std::mutex mu_left, mu_right, mutex_sync_bbox, mutex_sync_depth;
- cv::VideoCapture left_cap;
- cv::VideoCapture right_cap;
- cv::Mat point_cloud;
- int crop_top, crop_bottom;
- void* g_obstacles_handle;
- void* g_mark_ground_handle;
- void* g_CCTSDB_handle;
- void* g_origin_image_handle;
- const unsigned int kCountBuff = 10;
- bool fetch_left = true;
- bool fetch_right = true;
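- // Timestamped buffers: depth maps and detection results are queued here and paired in ThreadSyncData().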
- struct DepthTime
- {
- PointCloudMat point_mat;
- TimeType time_stamp;
- };
- struct BboxTime
- {
- std::vector<Tn::Bbox> bboxs;
- ClassNameType names;
- TimeType time_stamp;
- cv::Mat img;
- };
- std::queue<DepthTime> depth_time_cahce;
- std::queue<BboxTime> bbox_time_cahce;
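- // Builds the GStreamer launch string for an nvarguscamerasrc CSI camera, converting NV12 to BGR for an appsink.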
- std::string gstreamer_pipeline( int capture_width,
- int capture_height,
- int display_width,
- int display_height,
- int frame_rate,
- int flip_method,
- int sensor_id = 0,
- int sensor_mode = 3
- )
- {
- return "nvarguscamerasrc sensor-id=" + std::to_string(sensor_id) +
- " sensor-mode=" + std::to_string(sensor_mode) +
- " ! video/x-raw(memory:NVMM), " +
- "width=(int)" + std::to_string(capture_width) +
- ", height=(int)" + std::to_string(capture_height) +
- ", format=(string)NV12, " +
- "framerate=(fraction)" + std::to_string(frame_rate) +
- "/1 ! nvvidconv flip-method=" + std::to_string(filp_method) +
- " ! video/x-raw, width=(int)" + std::to_string(display_width) +
- ", height=(int)" + std::to_string(display_height) +
- ", format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink";
- }
-
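- // Background capture loops: grab a frame from each camera under its mutex roughly every 20 ms until the fetch flags are cleared.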
- void ThreadFetchImageLeft()
- {
- while(fetch_left)
- {
- mu_left.lock();
- left_cap.read(left_img);
- mu_left.unlock();
- usleep(20000);
- }
- }
- void ThreadFetchImageRight()
- {
- while(fetch_right)
- {
- mu_right.lock();
- right_cap.read(right_img);
- mu_right.unlock();
- usleep(20000);
- }
- }
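- // Stereo callback: stores a copy of the dense point-cloud Mat with its timestamp (queue bounded to kCountBuff entries).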
- void callback_deal_points_mat(PointCloudMat& point_cloud_float, TimeType& tm)
- {
- DepthTime dt;
- dt.point_mat = point_cloud_float.clone();
- dt.time_stamp = tm;
- {
- std::lock_guard<std::mutex> mutex_sync_depthes(mutex_sync_depth);
- depth_time_cahce.push(dt);
- if(depth_time_cahce.size() > kCountBuff) depth_time_cahce.pop();
- }
- }
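- // Point-cloud visualization callback: renders the colored cloud in the "3d Frame" viz window.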
- void callback_deal_point(PointCloudType& pt_cloud, PointColorType& pt_color, TimeType &time)
- {
- if(pt_cloud.empty()||pt_color.empty()) return;
- // static bool o = true;
- cv::viz::WCloud cloud_widget = cv::viz::WCloud(pt_cloud, pt_color);
- cloud_widget.setRenderingProperty(cv::viz::POINT_SIZE, 2);
- plot3d.showWidget("ref cloud", cloud_widget);
- if(plot3d.wasStopped()) return;
- plot3d.spinOnce(1, true);
- }
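- // Detection callback (visualization variant): draws boxes and class labels on the image, then buffers boxes, names, image, and timestamp.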
- void callback_vis_bbox(OutputDataType& outputs, TimeType& time, ImageType& img, ClassNameType& class_name )
- {
- auto output = (*outputs).begin();
- for(const auto& box : *output)
- {
- cv::rectangle(img, cv::Point(box.left, box.top), cv::Point(box.right, box.bot), cv::Scalar(0, 0, 255), 3, 8, 0);
- std::cout << "class=" << box.classId << " prob=" << box.score * 100 << std::endl;
- std::cout << "left=" << box.left << " right=" << box.right << " top=" << box.top << " bot=" << box.bot << std::endl;
- std::string label = class_name[box.classId];
- cv::putText(img, label, cv::Point(box.left, box.top - 15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 255, 0));
- }
- // cv::imshow("detection",img);
- // cv::waitKey(10);
- BboxTime bt;
- bt.bboxs = (*output);
- bt.time_stamp = time;
- bt.names = class_name;
- bt.img = img;
- {
- std::lock_guard<std::mutex> mutex_sync_bboxes(mutex_sync_bbox);
- bbox_time_cahce.push(bt);
- if(bbox_time_cahce.size() > kCountBuff) bbox_time_cahce.pop();
- }
- }
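- // Detection callback (non-visualization variant): buffers boxes, names, image, and timestamp without drawing.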
- void callback_bbox(OutputDataType& outputs, TimeType& time, ImageType& img, ClassNameType& class_name )
- {
- //(void) bboxes;
- // (void) img;
-
- //cv::imwrite("/home/nvidia/data/"+std::to_string(i++)+".png",img);
- // cv::imshow("img",img);
- // cv::waitKey(10);
- // std::cout<<"++++++++++++++++"<<std::endl;
-
- auto output = (*outputs).begin();
-
- BboxTime bt;
- bt.bboxs = (*output);
- bt.time_stamp = time;
- bt.names = class_name;
- bt.img = img;
- {
- std::lock_guard<std::mutex> mutex_sync_bboxes(mutex_sync_bbox);
- bbox_time_cahce.push(bt);
- if(bbox_time_cahce.size() > kCountBuff) bbox_time_cahce.pop();
- }
- }
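- // Packs mark-ground detections into an iv::vision::ObstacleInfo message and publishes it on g_mark_ground_handle.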
- void callback_mark_ground(ImageBBoxes2DType& bboxes, TimeType& time)
- {
-
- iv::vision::ObstacleInfo obstacles_info;
- std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds> tp =
- std::chrono::time_point_cast<std::chrono::milliseconds>(time);
- auto tmp = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch());
- uint64_t timestamp = tmp.count();
- obstacles_info.set_time(timestamp);
- for(auto bbox : bboxes)
- {
- iv::vision::Bbox3D* bbox_3d = obstacles_info.add_bbox_3d();
- bbox_3d->set_class_id(bbox.class_id);
- bbox_3d->set_top_left_x(bbox.box2d.x1);
- bbox_3d->set_top_left_y(bbox.box2d.y1);
- bbox_3d->set_bottom_right_x(bbox.box2d.x1 + bbox.box2d.width);
- bbox_3d->set_bottom_right_y(bbox.box2d.y1 + bbox.box2d.height);
- }
- std::string publish_str = obstacles_info.SerializeAsString();
- iv::modulecomm::ModuleSendMsg(g_mark_ground_handle, publish_str.data(), publish_str.length());
- }
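- // Packs traffic-sign (CCTSDB) detections into an ObstacleInfo message, draws them on the image, and publishes on g_CCTSDB_handle.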
- void callback_CCTSDB(ImageBBoxes2DType& bboxes, cv::Mat &img, TimeType& time)
- {
-
- iv::vision::ObstacleInfo obstacles_info;
- std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds> tp =
- std::chrono::time_point_cast<std::chrono::milliseconds>(time);
- auto tmp = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch());
- uint64_t timestamp = tmp.count();
- obstacles_info.set_time(timestamp);
- for(auto bbox : bboxes)
- {
- iv::vision::Bbox3D* bbox_3d = obstacles_info.add_bbox_3d();
- bbox_3d->set_class_id(bbox.class_id);
- bbox_3d->set_top_left_x(bbox.box2d.x1);
- bbox_3d->set_top_left_y(bbox.box2d.y1);
- bbox_3d->set_bottom_right_x(bbox.box2d.x1 + bbox.box2d.width);
- bbox_3d->set_bottom_right_y(bbox.box2d.y1 + bbox.box2d.height);
- // for(auto bbox : bboxes)
-
- cv::Rect rect(bbox.box2d.x1, bbox.box2d.y1, bbox.box2d.width, bbox.box2d.height);
- cv::rectangle(img, rect, cv::Scalar(10, 255, 10), 2);
-
- }
- //cv::imshow("sgin", img);
- //cv::waitKey(10);
- std::string publish_str = obstacles_info.SerializeAsString();
- iv::modulecomm::ModuleSendMsg(g_CCTSDB_handle, publish_str.data(), publish_str.length());
- }
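- // Visualization-only variant: draws mark-ground boxes on the image; nothing is published here.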
- void callback_mark_ground_vis(ImageBBoxes2DType& bboxes, cv::Mat &img, TimeType& time)
- {
- //(void) bboxes;
- // (void) img;
- //int i=0;
- (void) time;
- // cv::imwrite("/home/nvidia/data"+std::to_string(i++)+".png",img);
-
- for(auto bbox : bboxes)
- {
- cv::Rect rect(bbox.box2d.x1, bbox.box2d.y1, bbox.box2d.width, bbox.box2d.height);
- cv::rectangle(img, rect, cv::Scalar(10, 255, 10), 2);
- }
- //cv::imshow("mark_ground", img);
- //cv::waitKey(10);
- }
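- // Fuses a synchronized depth map and detection frame: JPEG-encodes the frame into a rawpic message, picks the closest 3D point
- // inside the central region of each box, publishes ObstacleInfo and the image, and shows debug views.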
- void MergeData(DepthTime& depth_data, BboxTime& bbox_data)
- {
- PointCloudMat mat_point = depth_data.point_mat;
- std::vector<Tn::Bbox> bboxs = bbox_data.bboxs;
- ClassNameType names = bbox_data.names;
- cv::Mat img = bbox_data.img;
- cv::Mat origin_img = img.clone();
- iv::vision::ObstacleInfo obstacles_info;
- iv::vision::rawpic cameraPic;
-
- TimeType time = depth_data.time_stamp;
- std::chrono::time_point<std::chrono::system_clock, std::chrono::milliseconds> tp =
- std::chrono::time_point_cast<std::chrono::milliseconds>(time);
- auto tmp = std::chrono::duration_cast<std::chrono::milliseconds>(tp.time_since_epoch());
- uint64_t timestamp = tmp.count();
- cameraPic.set_time(timestamp);
- cameraPic.set_elemsize(origin_img.elemSize());
- cameraPic.set_width(origin_img.cols);
- cameraPic.set_height(origin_img.rows);
- cameraPic.set_mattype(origin_img.type());
- std::vector<unsigned char> buff;
- std::vector<int> param = std::vector<int>(2);
- param[0] = cv::IMWRITE_JPEG_QUALITY;
- param[1] = 95; // default(95) 0-100
- cv::imencode(".jpg",origin_img,buff,param);
- cameraPic.set_picdata(buff.data(),buff.size());
- buff.clear();
- cameraPic.set_type(2);
-
- obstacles_info.set_time(timestamp);
- std::vector<cv::Mat> channels(3);
- cv::split(mat_point.clone(), channels);
- cv::Mat color, depth_8u;
- cv::Mat depth = channels[2];
- depth.convertTo(depth_8u, CV_8U, 255.0 / 80.0);
- cv::applyColorMap(depth_8u, color, cv::COLORMAP_RAINBOW);
- color.setTo(cv::Scalar(0, 0, 0), depth < 0);
- for(auto bbox : bboxs)
- {
- int t = bbox.top + (bbox.bot - bbox.top)/4;
- int b = bbox.bot - (bbox.bot - bbox.top)/4;
- int l = bbox.left + (bbox.right - bbox.left)/4;
- int r = bbox.right - (bbox.right - bbox.left)/4;
- cv::Mat local_point = mat_point(cv::Range(t, b), cv::Range(l, r)).clone();
- cv::rectangle(img, cv::Point(bbox.left, bbox.top), cv::Point(bbox.right, bbox.bot), cv::Scalar(0, 0, 255), 3, 8, 0);
- cv::rectangle(color, cv::Point(bbox.left, bbox.top), cv::Point(bbox.right, bbox.bot), cv::Scalar(0, 0, 255), 3, 8, 0);
- cv::rectangle(color, cv::Point(l, t), cv::Point(r, b), cv::Scalar(255, 0, 0), 1, 8, 0);
- float* data_ptr = local_point.ptr<float>();
- float location[] = {10000, 10000, 10000};
- // Keep the closest valid (z > 0) point inside the shrunken box; advance over every 3-channel pixel.
- for(int index = 0; index < local_point.rows * local_point.cols; index++, data_ptr += 3)
- {
- if(data_ptr[2] > 0 && location[2] > data_ptr[2])
- {
- location[0] = data_ptr[0];
- location[1] = data_ptr[1];
- location[2] = data_ptr[2];
- }
- }
- iv::vision::Bbox3D* bbox_3d = obstacles_info.add_bbox_3d();
- bbox_3d->set_category(names[bbox.classId]);
- bbox_3d->set_x( location[0]);
- bbox_3d->set_y( location[1]);
- bbox_3d->set_z( location[2]);
- bbox_3d->set_top_left_x(bbox.left);
- bbox_3d->set_top_left_y(bbox.top);
- bbox_3d->set_bottom_right_x(bbox.right);
- bbox_3d->set_bottom_right_y(bbox.bot);
- std::string label ="z:" + std::to_string(location[2]);
- cv::putText(img,label, cv::Point((bbox.left + bbox.right)/2, (bbox.top + bbox.bot)/2), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 255, 0));
- }
- std::string publish_str = obstacles_info.SerializeAsString();
- std::string out_img = cameraPic.SerializeAsString();
- iv::modulecomm::ModuleSendMsg(g_obstacles_handle, publish_str.data(), publish_str.length());
- iv::modulecomm::ModuleSendMsg(g_origin_image_handle, out_img.data(), out_img.length());
-
- cv::imshow("depth", color);
- cv::imshow("detectiosn", img);
- cv::waitKey(10);
- }
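- // Sync thread: pairs buffered detections and depth maps whose timestamps differ by at most 0.5 s, dropping the older entry
- // otherwise, and hands each matched pair to MergeData().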
- void ThreadSyncData()
- {
- BboxTime bt;
- DepthTime dt;
- while(true)
- {
-
- bool synced = false;
- {
- std::lock_guard<std::mutex> mutex_sync_bboxes(mutex_sync_bbox);
- std::lock_guard<std::mutex> mutex_sync_depthes(mutex_sync_depth);
- while (!bbox_time_cahce.empty() && !depth_time_cahce.empty())
- {
- bt = bbox_time_cahce.front();
- dt = depth_time_cahce.front();
- TimeType bbox_time = bt.time_stamp;
- TimeType depth_time = dt.time_stamp;
- std::chrono::duration<double> diff = (bbox_time - depth_time);
- auto duration = diff.count();//s
- //LOG(INFO)<<"SYNC: "<<duration ;
- if(duration <= 0.5 && duration >= -0.5)
- {
- synced = true;
- bbox_time_cahce.pop();
- depth_time_cahce.pop();
- break;
- }
- else if(duration > 0.5)
- {
- depth_time_cahce.pop();
- }
- else if(duration < -0.5)
- {
- bbox_time_cahce.pop();
- }
- }
- }
- if(synced)
- {
- MergeData(dt, bt);
- }
- usleep(2000);
- }
-
- }
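- // Opens both CSI cameras through GStreamer pipelines; exits if either camera cannot be opened.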
- void InitCamera()
- {
- //std::string pipline = gstreamer_pipeline(capture_width, capture_height, display_width,
- // display_height, frame_rate, flip_method,
- // camera_id, sensor_mode)
- std::string pipeline_left = gstreamer_pipeline(1152, 768, 1152, 768, 25, 0, 1);
- std::string pipeline_right = gstreamer_pipeline(1152, 768, 1152, 768, 25, 0, 2);
- LOG(INFO)<<"opening left camera: "<<pipeline_left;
- left_cap.open(pipeline_left, cv::CAP_GSTREAMER);
- LOG(INFO)<<"opening right camera...";
- right_cap.open(pipeline_right, cv::CAP_GSTREAMER);
- if(!left_cap.isOpened())
- {
- LOG(ERROR)<<"Can not open left camera";;
- exit(1);
- }
- if(!right_cap.isOpened())
- {
- LOG(ERROR)<<"Can not open right camera";
- exit(1);
- }
- }
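- // Entry point: parses CLI options, loads the YAML config, registers the module publishers, configures the vision tasks,
- // starts the helper threads, and runs the capture/processing loop.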
- int main(int argc, char **argv)
- {
- cxxopts::Options options(argv[0], "double_camera_node");
- options.add_options()
- ("h,help", "this app is used for double_camera_node")
- ("c,config", "configuration file path",
- cxxopts::value<std::string>()->default_value("front_vision.yaml"))
- ("r,root_path", "root path",
- cxxopts::value<std::string>()->default_value("/opt/adc/"))
- ;
- cxxopts::ParseResult opts = options.parse(argc, argv);
- // std::string root_path = opts["root_path"].as<std::string>();
- // adc::RootPath::Ptr root_path_ptr = adc::RootPath::GetInstance();
- // root_path_ptr->SetRoot(root_path);
- std::string config_file = opts["config"].as<std::string>();
- config_file = adc::RootPath::GetAbsolutePath(config_file);
- LOG(INFO)<<"Front vision config: "<<config_file;
- std::string obstacle_detector_config;
- std::string stereo_vison_config;
- std::string mark_ground_detector_config;
- std::string cctsdb_detector_config;
- std::string topic_obstcle;
- std::string topic_mark_ground;
- std::string topic_cctsdb;
- std::string topic_origin_image;
- std::string left_camera_params;
- bool used_loop_fetch =false;
- bool mono = false;
- bool vis_mark_ground = false;
- bool vis_pointcloud = false;
- bool vis_obstacle = false;
- bool fusion_depth_and_obstacle = false;
- float hz = 10;
- try
- {
- YAML::Node node = YAML::LoadFile(config_file);
- obstacle_detector_config = node["obstacle_detector_config"].as<std::string>();
- stereo_vison_config = node["stereo_vison_config"].as<std::string>();
- cctsdb_detector_config = node["cctsdb_detector_config"].as<std::string>();
- mark_ground_detector_config = node["mark_ground_detector_config"].as<std::string>();
- used_loop_fetch = node["used_loop_fetch"].as<bool>();
- mono = node["mono"].as<bool>();
- crop_top = node["crop_top"].as<int>(200);
- crop_bottom = node["crop_bottom"].as<int>(570);
- hz = node["hz"].as<float>(10.0);
- vis_mark_ground = node["vis_mark_ground"].as<bool>();
- vis_pointcloud = node["vis_pointcloud"].as<bool>();
- vis_obstacle = node["vis_obstacle"].as<bool>();
- fusion_depth_and_obstacle = node["fusion_depth_and_obstacle"].as<bool>();
- topic_obstcle = node["topic_obstcle"].as<std::string>();
- topic_mark_ground = node["topic_mark_ground"].as<std::string>();
- topic_cctsdb = node["topic_cctsdb"].as<std::string>();
- left_camera_params = node["left_camera_params"].as<std::string>();
- topic_origin_image = node["topic_origin_image"].as<std::string>();
- }
- catch (const std::exception& e)
- {
- LOG(ERROR)<<"Init Yaml Faile!";
- std::cerr << e.what() << '\n';
- }
- g_obstacles_handle = iv::modulecomm::RegisterSend(topic_obstcle.c_str(), 1000000, 1);
- g_mark_ground_handle = iv::modulecomm::RegisterSend(topic_mark_ground.c_str(), 1000000, 1);
- g_CCTSDB_handle = iv::modulecomm::RegisterSend(topic_cctsdb.c_str(), 1000000, 1);
- g_origin_image_handle = iv::modulecomm::RegisterSend(topic_origin_image.c_str(), 1000000, 1);
- left_camera_params = adc::RootPath::GetAbsolutePath(left_camera_params);
- adc::FrontVisionApp::Ptr front_vision_app_ptr = std::make_shared<adc::FrontVisionApp>(left_camera_params);
- front_vision_app_ptr->SetTask(obstacle_detector_config, adc::FrontVisionApp::TASK::OBSTACLE_DETECTION_YOLO, hz);
- front_vision_app_ptr->SetTask(stereo_vison_config, adc::FrontVisionApp::TASK::STEREO, hz);
- front_vision_app_ptr->SetTask(mark_ground_detector_config, adc::FrontVisionApp::TASK::MARK_GROUND_DETECTION_CENTERNET, hz);
- front_vision_app_ptr->SetTask(cctsdb_detector_config, adc::FrontVisionApp::TASK::CCTSDB_DETECTION_CENTERNET, hz);
- LOG(INFO)<<"complated front vision set task";
- if(vis_pointcloud)
- {
- front_vision_app_ptr->SetStereoPointAndColorCallback(callback_deal_point);
- plot3d.showWidget("Coordinate Widget", cv::viz::WCoordinateSystem());
- }
- front_vision_app_ptr->SetStereoPointCloudMatCallback(callback_deal_points_mat);
-
- if(vis_obstacle)
- {
- front_vision_app_ptr->SetDetectionObjectCallback(callback_vis_bbox);
- }
- else
- {
- front_vision_app_ptr->SetDetectionObjectCallback(callback_bbox);
- }
- if(vis_mark_ground)
- {
- front_vision_app_ptr->SetMarkGroundDetectionWithImgCallback(callback_mark_ground_vis);
- }
- else
- {
- front_vision_app_ptr->SetMarkGroundDetectionCallback(callback_mark_ground);
- }
- front_vision_app_ptr->SetCCTSDBDetectionWithImgCallback(callback_CCTSDB);
-
- InitCamera();
- std::thread thread_sync_res;
- if(fusion_depth_and_obstacle)
- {
- thread_sync_res = std::thread(ThreadSyncData);
- thread_sync_res.detach();
- }
- if(!used_loop_fetch)
- {
- std::thread thread_left(ThreadFetchImageLeft);
- std::thread thread_right(ThreadFetchImageRight);
- thread_left.detach();
- thread_right.detach();
- }
- front_vision_app_ptr->Ready();
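- // Read and discard the first frames so both captures are flushed before the processing loop starts.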
- for(int i = 0; i < 50; i++)
- {
- left_cap.read(left_img);
- right_cap.read(right_img);
- }
- while(true)
- {
- if(used_loop_fetch)
- {
- if(mono)
- {
- left_cap.read(left_img);
- if(!left_img.empty())
- {
- left_img = left_img(cv::Range(crop_top, crop_bottom),cv::Range(0, left_img.cols));
- std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
- front_vision_app_ptr->SetImage(left_img, tm);
- left_img = cv::Mat();
- }
- }
- else
- {
- left_cap.read(left_img);
- right_cap.read(right_img);
- if(!left_img.empty() && !right_img.empty() )
- {
- left_img = left_img(cv::Range(crop_top, crop_bottom),cv::Range(0, left_img.cols));
- right_img = right_img(cv::Range(crop_top, crop_bottom),cv::Range(0, right_img.cols));
- // cv::imshow("left",left_img);
- // cv::imshow("right",right_img);
- // cv::waitKey(10);
- std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
- front_vision_app_ptr->SetImages(left_img, right_img, tm);
- left_img = cv::Mat();
- right_img = cv::Mat();
- }
- }
- }
- else
- {
- if(mono)
- {
- mu_left.lock();
- if(!left_img.empty())
- {
- left_img = left_img(cv::Range(crop_top, crop_bottom),cv::Range(0, left_img.cols));
- std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
- front_vision_app_ptr->SetImage(left_img, tm);
- left_img = cv::Mat();
- }
- mu_left.unlock();
- }
- else
- {
- mu_left.lock();
- mu_right.lock();
- if(!left_img.empty() && !right_img.empty() )
- {
- left_img = left_img(cv::Range(crop_top, crop_bottom),cv::Range(0, left_img.cols));
- right_img = right_img(cv::Range(crop_top, crop_bottom),cv::Range(0, right_img.cols));
- std::chrono::system_clock::time_point tm = std::chrono::system_clock::now();
- front_vision_app_ptr->SetImages(left_img, right_img, tm);
- left_img = cv::Mat();
- right_img = cv::Mat();
- }
- mu_left.unlock();
- mu_right.unlock();
- }
- }
- }
- fetch_left = false;
- fetch_right = false;
- //thread_sync_res.join();
- return 0;
- }