#include <QCoreApplication> //txs
#include "opencv2/video/tracking.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "cameraobjectarray.pb.h" //txs
#include "TrtNet.h"
#include "argsParser.h"
#include "configs.h"
#include <chrono>
#include <algorithm>
#include "YoloLayer.h"
#include "dataReader.h"
#include "eval.h"
#include "imageBuffer.h"
#include "modulecomm.h"
#include "xmlparam.h"
#include "rawpic.pb.h"
#include <QFile>
#include <QTextStream>
#include <QDateTime>
#include "localization.h"
#include <memory>
#include <list>
#include <sstream>
#include <cstring>
#include <cassert>
#include "ivfault.h"   // assumed header for iv::Ivfault
#include "ivlog.h"     // assumed header for iv::Ivlog
#include "Tracking.h"

using namespace std;
using namespace argsParser;
using namespace Tn;
using namespace Yolo;
//txs
using namespace cv;

//iv::vision::cameraobjectarray gobj;
int gntemp = 0;
void * gpa;
void * gpcamout;
void * gdetec;
qint64 pictime;

iv::Ivfault * gTensorFault = nullptr; // fault reporter (declaration assumed; may live in a shared header instead)
iv::Ivlog * mTensorIvlog = nullptr;   // logger (declaration assumed; may live in a shared header instead)

//txs
vector<string> cocoNames;
ConsumerProducerQueue<cv::Mat> *imageBuffer = new ConsumerProducerQueue<cv::Mat>(1, true);

std::string gstreamer_pipeline (int capture_width, int capture_height, int display_width, int display_height, int framerate, int flip_method) {
    return "nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)" + std::to_string(capture_width) +
           ", height=(int)" + std::to_string(capture_height) +
           ", format=(string)NV12, framerate=(fraction)" + std::to_string(framerate) +
           "/1 ! nvvidconv flip-method=" + std::to_string(flip_method) +
           " ! video/x-raw, width=(int)" + std::to_string(display_width) +
           ", height=(int)" + std::to_string(display_height) +
           ", format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink";
}

//read one class name per line, e.g. from coco.names
static vector<string> GetNamesFromFile(string filePathAndName)
{
    vector<string> retVec;
    QFile qf(QString::fromStdString(filePathAndName));
    if(qf.exists())
    {
        if(qf.open(QIODevice::ReadOnly))
        {
            QTextStream in(&qf);
            while(!in.atEnd())
            {
                retVec.push_back(in.readLine(1024).toStdString());
            }
            qf.close();
        }
    }
    return retVec;
}

//letterbox the frame into the network input and convert BGR/HWC to RGB/CHW floats
vector<float> prepareImage(cv::Mat& img)
{
    using namespace cv;

    int c = parser::getIntValue("C");
    int h = parser::getIntValue("H");   //net h
    int w = parser::getIntValue("W");   //net w

    float scale = min(float(w)/img.cols, float(h)/img.rows);
    auto scaleSize = cv::Size(img.cols * scale, img.rows * scale);

    cv::Mat rgb;
    cv::cvtColor(img, rgb, CV_BGR2RGB);
    cv::Mat resized;
    cv::resize(rgb, resized, scaleSize, 0, 0, INTER_CUBIC);

    //paste the resized image, centred, onto a grey canvas
    cv::Mat cropped(h, w, CV_8UC3, cv::Scalar(127, 127, 127));
    Rect rect((w - scaleSize.width)/2, (h - scaleSize.height)/2, scaleSize.width, scaleSize.height);
    resized.copyTo(cropped(rect));

    cv::Mat img_float;
    if (c == 3)
        cropped.convertTo(img_float, CV_32FC3, 1/255.0);
    else
        cropped.convertTo(img_float, CV_32FC1, 1/255.0);

    //HWC TO CHW
    vector<Mat> input_channels(c);
    cv::split(img_float, input_channels);

    vector<float> result(h*w*c);
    auto data = result.data();
    int channelLength = h * w;
    for (int i = 0; i < c; ++i)
    {
        memcpy(data, input_channels[i].data, channelLength*sizeof(float));
        data += channelLength;
    }
    return result;
}

//class-wise greedy non-maximum suppression
void DoNms(vector<Detection>& detections, int classes, float nmsThresh)
{
    auto t_start = chrono::high_resolution_clock::now();

    vector<vector<Detection>> resClass;
    resClass.resize(classes);
    for (const auto& item : detections)
        resClass[item.classId].push_back(item);

    //IoU of two (cx,cy,w,h) boxes
    auto iouCompute = [](float * lbox, float* rbox)
    {
        float interBox[] = {
            max(lbox[0] - lbox[2]/2.f , rbox[0] - rbox[2]/2.f), //left
            min(lbox[0] + lbox[2]/2.f , rbox[0] + rbox[2]/2.f), //right
            max(lbox[1] - lbox[3]/2.f , rbox[1] - rbox[3]/2.f), //top
            min(lbox[1] + lbox[3]/2.f , rbox[1] + rbox[3]/2.f), //bottom
        };

        if(interBox[2] > interBox[3] || interBox[0] > interBox[1])
            return 0.0f;

        float interBoxS = (interBox[1]-interBox[0])*(interBox[3]-interBox[2]);
        return interBoxS/(lbox[2]*lbox[3] + rbox[2]*rbox[3] - interBoxS);
    };

    vector<Detection> result;
    for (int i = 0; i < classes; ++i)
    {
        auto& dets = resClass[i];
        if(dets.size() == 0)
            continue;

        //sort each class by descending confidence
        sort(dets.begin(), dets.end(), [](const Detection& left, const Detection& right){
            return left.prob > right.prob;
        });

        for (unsigned int m = 0; m < dets.size(); ++m)
        {
            auto& item = dets[m];
            result.push_back(item);
            //suppress every lower-scoring box that overlaps the kept one too much
            for(unsigned int n = m + 1; n < dets.size(); ++n)
            {
                if (iouCompute(item.bbox, dets[n].bbox) > nmsThresh)
                {
                    dets.erase(dets.begin() + n);
                    --n;
                }
            }
        }
    }

    //swap(detections,result);
    detections = move(result);

    auto t_end = chrono::high_resolution_clock::now();
    float total = chrono::duration<float, milli>(t_end - t_start).count();
    mTensorIvlog->info("Time taken for nms is %f ms.", total);
    // cout << "Time taken for nms is " << total << " ms." << endl;
}
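// Worked example of the letterbox mapping that postProcessImg (below) has to
// undo (numbers assume this module's 1920x1080 capture and a 608x608 net
// input): scale = min(608/1920, 608/1080) = 0.3167, so prepareImage resizes
// the frame to 608x342 and pads (608-342)/2 = 133 grey rows above and below.
// A network-space centre y of 0.5 therefore maps back to
// (0.5*608 - 133)/342 = 0.5 of the source height, i.e. row 540 of 1080.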
vector<Bbox> postProcessImg(cv::Mat& img, vector<Detection>& detections, int classes)
{
    using namespace cv;

    int h = parser::getIntValue("H");   //net h
    int w = parser::getIntValue("W");   //net w

    //scale bbox to img
    int width = img.cols;
    int height = img.rows;
    float scale = min(float(w)/width, float(h)/height);
    float scaleSize[] = {width * scale, height * scale};

    //correct box
    for (auto& item : detections)
    {
        auto& bbox = item.bbox;
        bbox[0] = (bbox[0] * w - (w - scaleSize[0])/2.f) / scaleSize[0];
        bbox[1] = (bbox[1] * h - (h - scaleSize[1])/2.f) / scaleSize[1];
        bbox[2] /= scaleSize[0];
        bbox[3] /= scaleSize[1];
    }

    //nms
    float nmsThresh = parser::getFloatValue("nms");
    if(nmsThresh > 0)
        DoNms(detections, classes, nmsThresh);

    vector<Bbox> boxes;
    for(const auto& item : detections)
    {
        auto& b = item.bbox;
        Bbox bbox =
        {
            item.classId,                            //classId
            max(int((b[0]-b[2]/2.)*width), 0),       //left
            min(int((b[0]+b[2]/2.)*width), width),   //right
            max(int((b[1]-b[3]/2.)*height), 0),      //top
            min(int((b[1]+b[3]/2.)*height), height), //bot
            item.prob                                //score
        };
        boxes.push_back(bbox);
    }
    return boxes;
}

vector<string> split(const string& str, char delim)
{
    stringstream ss(str);
    string token;
    vector<string> container;
    while (getline(ss, token, delim)) {
        container.push_back(token);
    }
    return container;
}

//std::vector<Bbox> objvec, iv::lidar::objectarray & lidarobjvec
//txs
void GetCameraObj(std::vector<Bbox> objvec, iv::vision::cameraobjectarray &cameraobjvec, double pictime)
{
    mTensorIvlog->info("Bbox count: %d", objvec.size());
    // std::cout<<"Bbox count: "<<objvec.size()<<std::endl;
    for(auto x : objvec)
    {
        iv::vision::cameraobject obj;
        mTensorIvlog->info("left: %d\tright: %d", x.left, x.right);
        // std::cout<<"left: "<<x.left<<"\tright: "<<x.right<<std::endl;
        // ... per-object fields of obj (class, confidence, ground-plane
        // position via localization.h, ...) are filled in here ...
        mTensorIvlog->info("raw output x: %f\traw output y: %f", obj.outpointx(), obj.outpointy());
        // std::cout<<"raw output x: "<<obj.outpointx()<<"\traw output y: "<<obj.outpointy()<<std::endl;
        iv::vision::cameraobject * pobj = cameraobjvec.add_obj();
        pobj->CopyFrom(obj);
        cameraobjvec.set_mstime(pictime);
    }
}
//txs

int main( int argc, char* argv[] )
{
    parser::ADD_ARG_STRING("prototxt",Desc("input yolov3 deploy"),DefaultValue(INPUT_PROTOTXT),ValueDesc("file"));
    parser::ADD_ARG_STRING("caffemodel",Desc("input yolov3 caffemodel"),DefaultValue(INPUT_CAFFEMODEL),ValueDesc("file"));
    parser::ADD_ARG_INT("C",Desc("channel"),DefaultValue(to_string(INPUT_CHANNEL)));
    parser::ADD_ARG_INT("H",Desc("height"),DefaultValue(to_string(INPUT_HEIGHT)));
    parser::ADD_ARG_INT("W",Desc("width"),DefaultValue(to_string(INPUT_WIDTH)));
    parser::ADD_ARG_STRING("calib",Desc("calibration image List"),DefaultValue(CALIBRATION_LIST),ValueDesc("file"));
    parser::ADD_ARG_STRING("mode",Desc("runtime mode"),DefaultValue(MODE),ValueDesc("fp32/fp16/int8"));
    parser::ADD_ARG_STRING("outputs",Desc("output nodes name"),DefaultValue(OUTPUTS));
    parser::ADD_ARG_INT("class",Desc("num of classes"),DefaultValue(to_string(DETECT_CLASSES)));
    parser::ADD_ARG_FLOAT("nms",Desc("non-maximum suppression value"),DefaultValue(to_string(NMS_THRESH)));
    parser::ADD_ARG_INT("batchsize",Desc("batch size for input"),DefaultValue("1"));
    parser::ADD_ARG_STRING("enginefile",Desc("load from engine"),DefaultValue(""));

    //input
    parser::ADD_ARG_STRING("input",Desc("input image file"),DefaultValue(INPUT_IMAGE),ValueDesc("file"));
    parser::ADD_ARG_STRING("evallist",Desc("eval gt list"),DefaultValue(EVAL_LIST),ValueDesc("file"));
    parser::parseArgs(argc, argv); // apply command-line overrides (argc/argv were otherwise unused)
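    // Hypothetical invocation showing how the flags registered above could be
    // overridden on the command line (flag syntax assumed from argsParser's
    // usual "--key=value" form; the binary name depends on the build):
    //   ./vision_TensorRT_yolov3 --mode=fp16 --nms=0.45 --class=80 --H=608 --W=608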
list"),DefaultValue(EVAL_LIST),ValueDesc("file")); gTensorFault = new iv::Ivfault("TensorRT_yolov3"); mTensorIvlog = new iv::Ivlog("TensorRT_youlov3"); int capture_width = 1920 ; int capture_height = 1080 ; int display_width = 1920 ; int display_height = 1080 ; int framerate = 60 ; int flip_method = 0 ; QString strpath = "./vision_TensorRT_yolov3.xml"; mTensorIvlog->info("%s", strpath.data()); iv::xmlparam::Xmlparam xp(strpath.toStdString()); std::string xmlmsgname = xp.GetParam("msgname","image00"); // string engineName = "./../vision_TensorRT_yolov3/model/yolov3_fp16.engine"; string engineName = "/home/nvidia/beiqi/models/vision/yolov3_fp16.engine"; mTensorIvlog->info("msgname is %s",xmlmsgname.data()); gpcamout = iv::modulecomm::RegisterSend("camera_output",1000000,1);//data //txs gdetec = iv::modulecomm::RegisterSend("camera_detect",10000000,1);//img std::unique_ptr net; int batchSize = 1; net.reset(new trtNet(engineName)); assert(net->getBatchSize() == batchSize); RUN_MODE run_mode = RUN_MODE::FLOAT16; int outputCount = net->getOutputSize()/sizeof(float); unique_ptr outputData(new float[outputCount]); // string inputFileName = "/home/nvidia/TensorRT-Yolov3/test.jpg"; // fileNames.push_back(inputFileName); list> outputs; int classNum = 80; int c = 608; int h = 608; int w = 3; int batchCount = 0; vector inputData; // inputData.reserve(h*w*c*batchSize); // vector inputImgs; // auto iter = fileNames.begin(); //txs cv::namedWindow("result",WINDOW_NORMAL); std::string pipeline = gstreamer_pipeline(capture_width, capture_height, display_width, display_height, framerate, flip_method); std::cout << "Using pipeline: \n\t" << pipeline << "\n"; cv::VideoCapture cap(pipeline, cv::CAP_GSTREAMER); if(!cap.isOpened()) { std::cout<<"Failed to open camera."<add(img2); imageBuffer->consume(img2); if(!img2.rows) { gTensorFault->SetFaultState(1, 0, "img2.rows state Error"); break; } vector curInput = prepareImage(img2); net->doInference(curInput.data(), outputData.get(),1); mTensorIvlog->info("run"); // std::cout<<"run"< result; result.resize(detCount); memcpy(result.data(), &output[1], detCount*sizeof(Detection)); outputs.clear(); auto boxes = postProcessImg(img2,result,classNum); //--------------------------------------------- init tracker ------------------------------------------------- cv::UMat ufirst = img2.getUMat(cv::ACCESS_READ); if (!m_isTrackerInitialized) { m_isTrackerInitialized = InitTracker(ufirst,tracker); if (!m_isTrackerInitialized) { std::cerr << "Tracker initialize error!!!" 
<< std::endl; break; } } Tracking(img2,boxes,tracker); //-------------------------------------------- end tracking --------------------------------------------------- outputs.emplace_back(boxes); // vector cocoNames = GetNamesFromFile("./../vision_TensorRT_yolov3/model/coco.names"); cocoNames = GetNamesFromFile("/home/nvidia/beiqi/models/vision/coco.names"); // cout<<"cocoNames: "<info("cocoName:"); // for(int i=0; iinfo("%s",cocoNames[i].data()); // } // cout<<"cocoNames end"<info("cocoNames End"); //txs iv::vision::cameraobjectarray cameraobjvec; GetCameraObj(boxes,cameraobjvec,pictime); std::cout<<"time :"<info("--------------"); // std::cout<<"------------------"<info("lenth is %d",out.length()); //txs auto bbox = *outputs.begin(); for(const auto& item : bbox) { cv::rectangle(img2,cv::Point(item.left,item.top),cv::Point(item.right,item.bot),cv::Scalar(0,0,255),3,8,0); std::cout << "id=" << item.id << " class=" << item.classId << " prob=" << item.score*100 << std::endl; cout << "left=" << item.left << " right=" << item.right << " top=" << item.top << " bot=" << item.bot << endl; cout << "speedx=" << item.speedx << " speedy=" << item.speedy << endl; // cout< param = std::vector(2); param[0] = CV_IMWRITE_JPEG_QUALITY; param[1] = 95; // default(95) 0-100 std::vector buff; cv::imencode(".jpg",img2,buff,param); pic.set_picdata(buff.data(),buff.size()); buff.clear(); pic.set_type(2); unsigned int nsize = pic.ByteSize(); char * strser = new char[nsize]; bool bser = pic.SerializeToArray(strser,nsize); if(bser)iv::modulecomm::ModuleSendMsg(gdetec,strser,nsize); delete[] strser; count += 1; cv::imshow("result",img2); cv::waitKey(1); curInput.clear(); // free(output_cpu); // free(output2_cpu); // img2.release(); // ou.release(); } img2.release(); // ou.release(); // gTensorFault->SetFaultState(0, 0, "ok"); return 0; }