提交 4ca963b4 编写于 作者: G ghdawn31 提交者: gchen-apollo

perception: speed up lane process (#3838)

* perception: speed up lane process

* perception: no need to skip frame

* perception: style
上级 412a5ecd
......@@ -187,7 +187,7 @@
#--async_fusion=true
--publish_fusion_event=false
--use_navigation_mode=true
--skip_camera_frame=true
--skip_camera_frame=false
--camera_hz=20
--radar_extrinsic_file=modules/perception/data/params/radar_imu_extrinsics.yaml
--fusion_publish_sensor_id=camera
\ No newline at end of file
--fusion_publish_sensor_id=camera
......@@ -52,3 +52,7 @@ max_size_to_fit_straight_line_image: 300.0
online_pitch_angle: -2.5
ground_height: 1.6
max_distance_to_see_for_transformer: 500.0
lane_map_scale: 2.0
start_y_pos: 312
lane_map_width: 960
lane_map_height: 384
......@@ -271,20 +271,11 @@ bool YoloCameraDetector::Multitask(const cv::Mat &frame,
return false;
}
cv::Mat local_mask(lane_output_height_, lane_output_width_, CV_32FC1);
memcpy(local_mask.data,
*mask = cv::Mat(lane_output_height_, lane_output_width_, CV_32FC1);
memcpy(mask->data,
seg_blob->cpu_data() + lane_output_width_ * lane_output_height_,
lane_output_width_ * lane_output_height_ * sizeof(float));
int roi_w = image_width_;
int roi_h = image_height_ - offset_y_;
cv::Rect roi(0, offset_y_, roi_w, roi_h);
if (roi_w == lane_output_width_ && roi_h == lane_output_height_) {
local_mask.copyTo((*mask)(roi));
} else {
cv::resize(local_mask, (*mask)(roi), cv::Size(roi_w, roi_h));
}
return true;
}
......
......@@ -209,14 +209,19 @@ bool CCLanePostProcessor::Init() {
time_stamp_ = 0.0;
frame_id_ = 0;
int lane_map_width = config_.lane_map_width();
int lane_map_height = config_.lane_map_height();
#if CUDA_CC
cc_generator_.reset(
new ConnectedComponentGeneratorGPU(image_width_, image_height_, roi_));
#else
cc_generator_.reset(
new ConnectedComponentGenerator(image_width_, image_height_, roi_));
new ConnectedComponentGenerator(lane_map_width, lane_map_height,
cv::Rect(0, 0, lane_map_width, lane_map_height)));
#endif
scale_ = config_.lane_map_scale();
start_y_pos_ = config_.start_y_pos();
cur_frame_.reset(new LaneFrame);
is_init_ = true;
......@@ -477,9 +482,10 @@ bool CCLanePostProcessor::GenerateLaneInstances(const cv::Mat &lane_map) {
cur_frame_.reset(new LaneFrame);
if (options_.frame.space_type == SpaceType::IMAGE) {
cur_frame_->Init(cc_list, non_mask_, options_.frame);
cur_frame_->Init(cc_list, non_mask_, options_.frame, scale_, start_y_pos_);
} else if (options_.frame.space_type == SpaceType::VEHICLE) {
cur_frame_->Init(cc_list, non_mask_, projector_, options_.frame);
cur_frame_->Init(cc_list, non_mask_, projector_,
options_.frame, scale_, start_y_pos_);
} else {
AERROR << "unknown space type: " << options_.frame.space_type;
return false;
......@@ -504,16 +510,6 @@ bool CCLanePostProcessor::Process(const cv::Mat &lane_map,
AERROR << "input lane map is empty";
return false;
}
if (lane_map.cols != image_width_) {
AERROR << "input lane map width does not match: "
<< "(" << lane_map.cols << " vs. " << image_width_ << ")";
return false;
}
if (lane_map.rows != image_height_) {
AERROR << "input lane map height does not match: "
<< "(" << lane_map.rows << " vs. " << image_height_ << ")";
return false;
}
time_stamp_ = options.timestamp;
// AINFO << "use history: " << options.use_lane_history;
......
......@@ -130,6 +130,8 @@ class CCLanePostProcessor : public BaseCameraLanePostProcessor {
int image_height_ = 1920;
cv::Rect roi_;
double scale_;
int start_y_pos_;
bool is_x_longitude_ = true;
std::shared_ptr<Projector<ScalarType>> projector_;
......
......@@ -151,7 +151,9 @@ ScalarType LaneFrame::ComputeMarkerPairDistance(const Marker& ref,
bool LaneFrame::Init(const vector<ConnectedComponentPtr>& input_cc,
const shared_ptr<NonMask>& non_mask,
const LaneFrameOptions& options) {
const LaneFrameOptions& options,
const double scale,
const int start_y_pos) {
if (options.space_type != SpaceType::IMAGE) {
AERROR << "the space type is not IMAGE.";
return false;
......@@ -174,7 +176,8 @@ bool LaneFrame::Init(const vector<ConnectedComponentPtr>& input_cc,
marker.shape_type = MarkerShapeType::LINE_SEGMENT;
marker.space_type = opts_.space_type;
marker.pos = cc_ptr->GetVertex(edge_ptr->end_vertex_id);
marker.pos = cc_ptr->GetVertex(edge_ptr->end_vertex_id,
scale, start_y_pos);
marker.image_pos = marker.pos;
if (opts_.use_non_mask &&
non_mask->IsInsideMask(marker.image_pos)) {
......@@ -186,7 +189,8 @@ bool LaneFrame::Init(const vector<ConnectedComponentPtr>& input_cc,
marker.vis_pos = cv::Point(static_cast<int>(marker.pos.x()),
static_cast<int>(marker.pos.y()));
marker.start_pos = cc_ptr->GetVertex(edge_ptr->start_vertex_id);
marker.start_pos = cc_ptr->GetVertex(edge_ptr->start_vertex_id,
scale, start_y_pos);
marker.image_start_pos = marker.start_pos;
if (opts_.use_non_mask &&
non_mask->IsInsideMask(marker.image_start_pos)) {
......@@ -296,7 +300,9 @@ bool LaneFrame::Init(const vector<ConnectedComponentPtr>& input_cc,
bool LaneFrame::Init(const vector<ConnectedComponentPtr>& input_cc,
const shared_ptr<NonMask>& non_mask,
const shared_ptr<Projector<ScalarType>>& projector,
const LaneFrameOptions& options) {
const LaneFrameOptions& options,
const double scale,
const int start_y_pos) {
if (options.space_type != SpaceType::VEHICLE) {
AERROR << "the space type is not VEHICLE.";
return false;
......@@ -318,8 +324,10 @@ bool LaneFrame::Init(const vector<ConnectedComponentPtr>& input_cc,
int n = 0;
for (int j = 0; j < static_cast<int>(inner_edges->size()); ++j) {
const ConnectedComponent::Edge* edge_ptr = &(inner_edges->at(j));
Vector2D pos = cc_ptr->GetVertex(edge_ptr->end_vertex_id);
Vector2D start_pos = cc_ptr->GetVertex(edge_ptr->start_vertex_id);
Vector2D pos = cc_ptr->GetVertex(edge_ptr->end_vertex_id,
scale, start_y_pos);
Vector2D start_pos = cc_ptr->GetVertex(edge_ptr->start_vertex_id,
scale, start_y_pos);
Marker marker;
marker.shape_type = MarkerShapeType::LINE_SEGMENT;
......
......@@ -81,12 +81,16 @@ class LaneFrame {
public:
bool Init(const std::vector<ConnectedComponentPtr>& input_cc,
const std::shared_ptr<NonMask>& non_mask,
const LaneFrameOptions& options);
const LaneFrameOptions& options,
const double scale,
const int start_y_pos);
bool Init(const std::vector<ConnectedComponentPtr>& input_cc,
const std::shared_ptr<NonMask>& non_mask,
const std::shared_ptr<Projector<ScalarType>>& projector,
const LaneFrameOptions& options);
const LaneFrameOptions& options,
const double scale,
const int start_y_pos);
void SetTransformer(const std::shared_ptr<Projector<ScalarType>>& projector) {
projector_ = projector;
......@@ -147,6 +151,8 @@ class LaneFrame {
std::vector<Graph> graphs_;
// tight bounding boxes of lane clusters
std::vector<Bbox> boxes_;
double scale_;
double start_y_pos_;
};
} // namespace perception
......
......@@ -815,6 +815,7 @@ bool ConnectedComponentGenerator::FindConnectedComponents(
<< curt_label << " vs. " << root_map_.size() << std::endl;
return false;
}
if (root_map_[curt_label] != -1) {
cc->at(root_map_[curt_label])->AddPixel(x, y);
} else {
......
......@@ -257,11 +257,14 @@ class ConnectedComponent {
return vertices_;
}
// Returns the vertex at |vertex_id|, mapped from lane-map coordinates back to
// the original image frame: x is multiplied by |scale|, and y is multiplied
// by |scale| then shifted down by |start_y_pos| (the lane map covers a
// downscaled crop of the full image starting at row |start_y_pos|).
// NOTE(review): coordinates are truncated via static_cast<int> before being
// stored back into the (presumably floating-point) Vertex — confirm this
// truncation is intended rather than rounding.
Vertex GetVertex(int vertex_id, double scale, double start_y_pos) const {
  // assert(vertex_id >= 0 && vertex_id < this->GetVertexCount());
  Vertex ver_pnt = vertices_->at(vertex_id);
  ver_pnt[0] = static_cast<int>(ver_pnt[0] * scale);
  ver_pnt[1] = static_cast<int>(ver_pnt[1] * scale + start_y_pos);
  return ver_pnt;
}
int GetVertexCount() const { return static_cast<int>(vertices_->size()); }
// edges
......
......@@ -16,6 +16,7 @@
#include "modules/perception/obstacle/onboard/camera_process_subnode.h"
#include "modules/perception/cuda_util/util.h"
#include "modules/common/time/time_util.h"
namespace apollo {
namespace perception {
......@@ -95,6 +96,8 @@ void CameraProcessSubnode::ImgCallback(const sensor_msgs::Image &message) {
double timestamp = message.header.stamp.toSec();
ADEBUG << "CameraProcessSubnode ImgCallback: timestamp: ";
ADEBUG << std::fixed << std::setprecision(64) << timestamp;
AINFO << "camera received image : "<< GLOG_TIMESTAMP(timestamp)
<< " at time: " <<GLOG_TIMESTAMP(TimeUtil::GetCurrentTime());
double curr_timestamp = timestamp * 1e9;
if (FLAGS_skip_camera_frame && timestamp_ns_ > 0.0) {
......@@ -118,23 +121,23 @@ void CameraProcessSubnode::ImgCallback(const sensor_msgs::Image &message) {
img = cv::imread(FLAGS_image_file_path, CV_LOAD_IMAGE_COLOR);
}
std::vector<VisualObjectPtr> objects;
cv::Mat mask = cv::Mat::zeros(img.rows, img.cols, CV_32FC1);
PERF_BLOCK_END("CameraProcessSubnode Image Preprocess");
cv::Mat mask;
PERF_BLOCK_END("CameraProcessSubnode_Image_Preprocess");
detector_->Multitask(img, CameraDetectorOptions(), &objects, &mask);
PERF_BLOCK_END("CameraProcessSubnode detector_");
PERF_BLOCK_END("CameraProcessSubnode_detector_");
converter_->Convert(&objects);
PERF_BLOCK_END("CameraProcessSubnode converter_");
PERF_BLOCK_END("CameraProcessSubnode_converter_");
transformer_->Transform(&objects);
PERF_BLOCK_END("CameraProcessSubnode transformer_");
PERF_BLOCK_END("CameraProcessSubnode_transformer_");
tracker_->Associate(img, timestamp, &objects);
PERF_BLOCK_END("CameraProcessSubnode tracker_");
PERF_BLOCK_END("CameraProcessSubnode_tracker_");
filter_->Filter(timestamp, &objects);
PERF_BLOCK_END("CameraProcessSubnode filter_");
PERF_BLOCK_END("CameraProcessSubnode_filter_");
std::shared_ptr<SensorObjects> out_objs(new SensorObjects);
out_objs->timestamp = timestamp;
......
......@@ -205,6 +205,13 @@ Status FusionSubnode::Process(const EventMeta &event_meta,
} else if (event_meta.event_id == radar_event_id_) {
PERF_BLOCK_END("fusion_radar");
} else if (event_meta.event_id == camera_event_id_) {
for (auto &obj : sensor_objs) {
if (obj.sensor_type == SensorType::CAMERA) {
AINFO << "fusion received image : "<< GLOG_TIMESTAMP(obj.timestamp)
<< " at time: " << GLOG_TIMESTAMP(TimeUtil::GetCurrentTime());
break;
}
}
PERF_BLOCK_END("fusion_camera");
}
......
......@@ -49,4 +49,8 @@ message ModelConfigs {
optional float online_pitch_angle = 44 [ default = -2.5 ];
optional float ground_height = 45 [ default = 1.6 ];
optional float max_distance_to_see_for_transformer = 46 [ default = 500.0 ];
optional float lane_map_scale = 47 [ default = 2.0];
optional int32 start_y_pos = 48 [ default = 312];
optional int32 lane_map_width = 49 [ default = 960];
optional int32 lane_map_height = 50 [ default = 384];
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册