优化项目结构

This commit is contained in:
2025-11-19 21:18:09 +08:00
parent f1748b10b2
commit 14bd5c4bee
22 changed files with 21 additions and 26 deletions

272
src/ArmorDetector.cpp Normal file
View File

@@ -0,0 +1,272 @@
#include "ArmorDetector.h"
#include <cmath>
#include <algorithm>
// Construct the detector with the configured horizontal-angle cutoff
// (radians) used to reject near-horizontal light bars.
ArmorDetector::ArmorDetector()
{
    angle_threshold_rad = HORIZONTAL_ANGLE_THRESHOLD_RAD;
}
// Run the full detection pipeline on a binary color mask.
//
// Pipeline: extract raw light-bar candidates -> merge nearby/overlapping
// candidates -> geometric filtering -> pair bars into armor plates.
//
// @param mask              8-bit binary mask of the target color.
// @param target_color      "red" or "blue"; recorded on each ArmorPlate.
// @param valid_light_bars  [out] filtered light bars (empty on failure).
// @param armor_plates      [out] paired plates, sorted by confidence
//                          (empty on failure).
//
// Fix vs. original: outputs are cleared once up front instead of on every
// early-exit branch, and the trailing `if (armor_plates.empty()) return;`
// no-op has been removed.
void ArmorDetector::detect(const cv::Mat& mask, const std::string& target_color,
                           std::vector<LightBar>& valid_light_bars,
                           std::vector<ArmorPlate>& armor_plates) {
    valid_light_bars.clear();
    armor_plates.clear();

    std::vector<LightBar> initial_light_bars = extract_initial_light_bars(mask);
    if (initial_light_bars.empty()) {
        return;
    }

    std::vector<std::vector<cv::Point2f>> processed_boxes = merge_nearby_light_bars(initial_light_bars);
    if (processed_boxes.empty()) {
        return;
    }

    valid_light_bars = filter_valid_light_bars(processed_boxes);
    if (valid_light_bars.size() < 2) {
        return;  // Need at least two bars to form one armor plate.
    }

    armor_plates = pair_light_bars_to_armor(valid_light_bars, target_color);
}
// Extract raw light-bar candidates from a binary mask.
//
// Each external contour becomes a minimum-area rotated rectangle; a
// candidate is kept only if the rectangle has sufficient area and the
// contour fills most of it (rejects thin noise and ragged blobs).
//
// @param mask  8-bit binary image (target color pre-segmented).
// @return Candidates with center/size/angle/area/box populated.
std::vector<LightBar> ArmorDetector::extract_initial_light_bars(const cv::Mat& mask) {
    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    std::vector<LightBar> initial_light_bars;
    for (size_t i = 0; i < contours.size(); i++) {
        // Drop tiny contours as noise. (Note: minAreaRect, used below, has
        // no 5-point minimum -- that requirement belongs to fitEllipse,
        // which this code does not call.)
        if (contours[i].size() < 5) continue;
        cv::RotatedRect rect = cv::minAreaRect(contours[i]);
        cv::Point2f vertices[4];
        rect.points(vertices);
        std::vector<cv::Point2f> box;
        for (int j = 0; j < 4; j++) {
            box.push_back(vertices[j]);
        }
        float contour_area = cv::contourArea(contours[i]);
        float rect_area = rect.size.width * rect.size.height;
        // Filter: sufficient area (>30 px^2) + high fill ratio (>0.4).
        if (rect_area > 30 && rect_area > 0 && (contour_area / rect_area) > 0.4) {
            LightBar light_bar;
            light_bar.center = rect.center;
            light_bar.size = rect.size;
            light_bar.angle = rect.angle;
            light_bar.area = rect_area;
            light_bar.box = box;
            initial_light_bars.push_back(light_bar);
        }
    }
    return initial_light_bars;
}
// Greedily merge light-bar candidates that are close together AND whose
// bounding boxes overlap (IOU above threshold) -- typically fragments of
// one physical bar split by the thresholding step.
//
// Each unvisited bar i absorbs every later bar j within
// NEARBY_LIGHT_BAR_THRESHOLD pixels and above LIGHT_BAR_IOU_THRESHOLD
// overlap; the union of their corner points is re-fit with minAreaRect.
// NOTE(review): this single pass is not transitive -- a chain a~b~c where
// only b is near both is merged as seeded from the lowest index; order
// of the input therefore matters slightly.
//
// @param initial_light_bars  Raw candidates from extract_initial_light_bars().
// @return One 4-corner box per merged group.
std::vector<std::vector<cv::Point2f>> ArmorDetector::merge_nearby_light_bars(const std::vector<LightBar>& initial_light_bars) {
    std::vector<std::vector<cv::Point2f>> processed_boxes;
    std::vector<bool> visited(initial_light_bars.size(), false);
    for (size_t i = 0; i < initial_light_bars.size(); i++) {
        if (visited[i]) continue;
        std::vector<int> nearby_indices = {static_cast<int>(i)};
        cv::Point2f center1 = initial_light_bars[i].center;
        for (size_t j = i + 1; j < initial_light_bars.size(); j++) {
            if (visited[j]) continue;
            cv::Point2f center2 = initial_light_bars[j].center;
            float distance = std::sqrt(std::pow(center2.x - center1.x, 2) + std::pow(center2.y - center1.y, 2));
            // Distance + IOU filtering: both must pass to merge.
            if (distance < NEARBY_LIGHT_BAR_THRESHOLD) {
                cv::Rect b1 = cv::boundingRect(initial_light_bars[i].box);
                cv::Rect b2 = cv::boundingRect(initial_light_bars[j].box);
                double iou = calculate_iou(b1, b2);
                if (iou > LIGHT_BAR_IOU_THRESHOLD) {
                    nearby_indices.push_back(static_cast<int>(j));
                    visited[j] = true;
                }
            }
        }
        // Merge the group's corner points into one rotated rectangle.
        std::vector<cv::Point2f> all_points;
        for (int k : nearby_indices) {
            for (const auto& point : initial_light_bars[k].box) {
                all_points.push_back(point);
            }
        }
        if (!all_points.empty()) {
            cv::RotatedRect merged_rect = cv::minAreaRect(all_points);
            cv::Point2f vertices[4];
            merged_rect.points(vertices);
            std::vector<cv::Point2f> merged_box;
            for (int k = 0; k < 4; k++) {
                merged_box.push_back(vertices[k]);
            }
            processed_boxes.push_back(merged_box);
            visited[i] = true;
        }
    }
    return processed_boxes;
}
// Geometric filtering of merged boxes into final LightBars.
//
// For each box: re-fit a rotated rect, normalize its long-axis angle to
// [-90, 90] degrees, compute the endpoints of the long-axis center line,
// then keep boxes with enough area, a high fill ratio, and a non-horizontal
// orientation (|angle| above angle_threshold_rad -- light bars on robots
// are roughly vertical).
//
// @param processed_boxes  4-corner boxes from merge_nearby_light_bars().
// @return Fully populated LightBars (angle/angle_rad/center_line set).
std::vector<LightBar> ArmorDetector::filter_valid_light_bars(const std::vector<std::vector<cv::Point2f>>& processed_boxes) {
    std::vector<LightBar> valid_light_bars;
    for (const auto& box : processed_boxes) {
        if (box.size() < 4) continue;
        cv::RotatedRect rect = cv::minAreaRect(box);
        cv::Point2f center = rect.center;
        float width = rect.size.width;
        float height = rect.size.height;
        float length = width > height ? width : height;      // long side
        float bar_width = width > height ? height : width;   // short side
        float main_angle = rect.angle;
        // OpenCV's RotatedRect angle refers to the "width" edge; rotate by
        // 90 degrees when the long axis is the "height" edge so main_angle
        // always describes the bar's long axis.
        if (width <= height) {
            main_angle += 90.0;
        }
        // Normalize angle to [-90, 90]
        main_angle = std::fmod(main_angle, 180);
        if (main_angle > 90) main_angle -= 180;
        if (main_angle < -90) main_angle += 180;
        float main_angle_rad = main_angle * CV_PI / 180.0f;
        // Endpoints of the long-axis center line.
        cv::Point2f end1(
            center.x + (length / 2) * std::cos(main_angle_rad),
            center.y + (length / 2) * std::sin(main_angle_rad)
        );
        cv::Point2f end2(
            center.x - (length / 2) * std::cos(main_angle_rad),
            center.y - (length / 2) * std::sin(main_angle_rad)
        );
        float rect_area = length * bar_width;
        float contour_area = cv::contourArea(box);
        float area_ratio = rect_area > 0 ? contour_area / rect_area : 0;
        // Filter: sufficient area + high fill ratio + non-horizontal
        if (rect_area > 40 && area_ratio > 0.4 && std::abs(main_angle_rad) > angle_threshold_rad) {
            LightBar light_bar;
            light_bar.center = center;
            light_bar.size = cv::Size2f(length, bar_width);
            light_bar.angle = main_angle;
            light_bar.angle_rad = main_angle_rad;
            light_bar.area = rect_area;
            light_bar.center_line = {end1, end2};
            light_bar.center_line_length = length;
            light_bar.box = box;
            valid_light_bars.push_back(light_bar);
        }
    }
    return valid_light_bars;
}
// Pair light bars into armor plates.
//
// Two bars form a plate when their orientations are similar, their
// center distance is a plausible multiple of the average bar length, and
// their lengths are close. Greedy: each bar joins at most one plate.
//
// @param light_bars    Filtered bars (angle_rad in [-pi/2, pi/2]).
// @param target_color  Copied onto each resulting ArmorPlate.
// @return Plates sorted by confidence, descending.
//
// Fix vs. original: the angle-difference wrap-around used 2*pi, but line
// orientations are pi-periodic (angle_rad spans only [-pi/2, pi/2]), so
// 2*pi - diff could never win the min() and near-vertical pairs such as
// +89 deg / -89 deg (physically ~2 deg apart) were wrongly rejected.
// The complement is now pi - diff.
std::vector<ArmorPlate> ArmorDetector::pair_light_bars_to_armor(const std::vector<LightBar>& light_bars, const std::string& target_color) {
    std::vector<ArmorPlate> armor_plates;
    std::vector<bool> used(light_bars.size(), false);
    for (size_t i = 0; i < light_bars.size(); i++) {
        if (used[i]) continue;
        const LightBar& lb1 = light_bars[i];
        for (size_t j = i + 1; j < light_bars.size(); j++) {
            if (used[j]) continue;
            const LightBar& lb2 = light_bars[j];
            // Angle difference filtering (pi-periodic orientation).
            float angle_diff = std::abs(lb1.angle_rad - lb2.angle_rad);
            float angle_diff_2 = static_cast<float>(CV_PI) - angle_diff;
            angle_diff = std::min(angle_diff, angle_diff_2);
            if (angle_diff > ARMOR_ANGLE_DIFF_THRESHOLD) {
                continue;
            }
            // Distance ratio filtering: plate width relative to bar length.
            float dx = lb2.center.x - lb1.center.x;
            float dy = lb2.center.y - lb1.center.y;
            float distance = std::sqrt(dx * dx + dy * dy);
            float avg_length = (lb1.center_line_length + lb2.center_line_length) / 2;
            float distance_ratio = avg_length > 0 ? distance / avg_length : 0;
            if (distance_ratio <= ARMOR_DISTANCE_RATIO_MIN || distance_ratio >= ARMOR_DISTANCE_RATIO_MAX) {
                continue;
            }
            // Length difference filtering: the two bars should match.
            float length_diff_ratio = avg_length > 0 ?
                std::abs(lb1.center_line_length - lb2.center_line_length) / avg_length : 0;
            if (length_diff_ratio > ARMOR_LENGTH_DIFF_RATIO) {
                continue;
            }
            cv::Point2f armor_center(
                (lb1.center.x + lb2.center.x) / 2,
                (lb1.center.y + lb2.center.y) / 2
            );
            // Heuristic score: bigger bars closer together score higher.
            double confidence = (lb1.area + lb2.area) / (distance + 1);
            ArmorPlate armor_plate;
            armor_plate.color = target_color;
            armor_plate.center = armor_center;
            armor_plate.confidence = confidence;
            armor_plate.pair = std::make_pair(lb1, lb2);
            armor_plate.corners_2d = std::vector<cv::Point2f>(); // Will be computed later if needed
            armor_plates.push_back(armor_plate);
            used[i] = true;
            used[j] = true;
            break;
        }
    }
    // Sort by confidence in descending order
    std::sort(armor_plates.begin(), armor_plates.end(),
              [](const ArmorPlate& a, const ArmorPlate& b) {
                  return a.confidence > b.confidence;
              });
    return armor_plates;
}
// Intersection-over-union of two axis-aligned rectangles.
// Returns 0.0 for disjoint or merely touching rectangles.
double ArmorDetector::calculate_iou(const cv::Rect& b1, const cv::Rect& b2) {
    // OpenCV's operator& yields the intersection (empty rect if disjoint).
    const cv::Rect inter = b1 & b2;
    if (inter.width <= 0 || inter.height <= 0) {
        return 0.0;
    }
    const float inter_area = static_cast<float>(inter.width) * static_cast<float>(inter.height);
    const float b1_area = b1.width * b1.height;
    const float b2_area = b2.width * b2.height;
    const float union_area = b1_area + b2_area - inter_area;
    return union_area > 0 ? inter_area / union_area : 0.0;
}

View File

@@ -0,0 +1,61 @@
#include "BallisticPredictor.h"
#include "config.h"
#include <cmath>
// Construct with the projectile speed in m/s. ARMOR_WIDTH/ARMOR_HEIGHT
// from config.h are millimeter dimensions; dividing by 2000 yields the
// half-extent in meters.
BallisticPredictor::BallisticPredictor(float bullet_speed)
    : bullet_speed(bullet_speed),
      last_ballistic_point(nullptr) {
    constexpr float kHalfAndMmToM = 2000.0f;  // /2 for half, /1000 for mm->m
    armor_half_width  = ARMOR_WIDTH / kHalfAndMmToM;
    armor_half_height = ARMOR_HEIGHT / kHalfAndMmToM;
}
// Compute the aim point compensated for bullet flight time.
//
// @param predicted_center  Kalman-predicted target center (pixels), or
//                          nullptr when tracking is lost.
// @param img_center        Image center, used for distance estimation.
// @param target_speed      Target velocity in pixels/second.
// @return Pointer to the compensated aim point, or nullptr when there is
//         no target. OWNERSHIP: the returned pointer aliases the
//         heap-allocated member last_ballistic_point and stays valid only
//         until the next call (or destruction) -- callers must not delete
//         it. NOTE(review): an std::optional<cv::Point2f> member would
//         remove the manual new/delete entirely; left as-is to preserve
//         the header's interface.
cv::Point2f* BallisticPredictor::predict_ballistic_point(const cv::Point2f* predicted_center,
                                                         const cv::Point2f& img_center,
                                                         const cv::Point2f& target_speed) {
    if (predicted_center == nullptr) {
        // Target lost: drop any cached aim point.
        if (last_ballistic_point != nullptr) {
            delete last_ballistic_point;
            last_ballistic_point = nullptr;
        }
        return nullptr;
    }
    // 1. Estimate target distance (meters)
    float distance = calculate_distance(*predicted_center, img_center);
    // 2. Calculate bullet flight time (seconds): distance / bullet speed
    float flight_time = bullet_speed > 0 ? distance / bullet_speed : 0.0f;
    // 3. Calculate target movement in flight time (pixels)
    // target speed (pixels/s) * flight time (s) = predicted movement in pixels
    float delta_x = target_speed.x * flight_time;
    float delta_y = target_speed.y * flight_time;
    // 4. Ballistic prediction point = Kalman prediction center + target movement compensation
    float ballistic_x = predicted_center->x + delta_x;
    float ballistic_y = predicted_center->y + delta_y;
    cv::Point2f ballistic_point(ballistic_x, ballistic_y);
    // Replace the cached point with the fresh result.
    if (last_ballistic_point != nullptr) {
        delete last_ballistic_point;
    }
    last_ballistic_point = new cv::Point2f(ballistic_point);
    return last_ballistic_point;
}
// Rough monocular distance estimate for the target, in meters.
//
// NOTE(review): pixel_distance here is the target's offset from the IMAGE
// CENTER, not the armor's apparent pixel size, so this is a heuristic
// rather than the classical pinhole similar-triangles formula -- confirm
// this is the intended behavior and calibrate against real measurements.
//
// @param armor_center  Target center in pixels.
// @param img_center    Image center in pixels.
// @param focal_length  Focal length in pixels (default presumably set in
//                      the header -- not visible here).
// @return Estimated distance, clamped to >= 0.5 m; 5 m fallback when the
//         target sits near the image center.
float BallisticPredictor::calculate_distance(const cv::Point2f& armor_center, const cv::Point2f& img_center, float focal_length) {
    // Calculate the pixel distance from armor center to image center
    float dx = std::abs(armor_center.x - img_center.x);
    float dy = std::abs(armor_center.y - img_center.y);
    float pixel_distance = std::sqrt(dx * dx + dy * dy);
    // If target is near the center, avoid division by zero
    if (pixel_distance < 10) {
        return 5.0f; // Default 5 meters (adjust based on actual scenario)
    }
    // Distance estimation formula: (actual size * focal length) / pixel size
    float distance = (armor_half_width * focal_length) / (pixel_distance * 0.5f);
    return std::max(0.5f, distance); // Limit minimum distance to 0.5 meters
}

90
src/Camera.cpp Normal file
View File

@@ -0,0 +1,90 @@
#include "Camera.h"

#include <cctype>
#include <cstring>
#include <iostream>
// Open a V4L2 camera and configure it for MJPEG streaming.
//
// @param cam_id        V4L2 device index.
// @param target_color  "red" or "blue"; anything else is rejected.
// @throws std::invalid_argument for an unsupported color.
// @throws std::runtime_error when the device cannot be opened.
//
// Fix vs. original: the color is validated BEFORE the device is opened
// (fail fast -- the original opened the camera, set properties, and only
// then threw for a bad argument).
Camera::Camera(int cam_id, const std::string& target_color) {
    this->target_color = target_color;
    if (this->target_color != "red" && this->target_color != "blue") {
        throw std::invalid_argument("Only 'red' or 'blue' colors are supported");
    }
    // Open with the V4L2 backend; some drivers require MJPEG to be
    // selected before resolution/FPS, hence the set order below.
    cap.open(cam_id, cv::CAP_V4L2);
    if (!cap.isOpened()) {
        throw std::runtime_error("Cannot open camera!");
    }
    // FOURCC first, then resolution and frame rate -- order matters.
    cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('M', 'J', 'P', 'G'));
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);
    cap.set(cv::CAP_PROP_FPS, 100);
    is_opened = true;
    set_cam_params(); // Apply the full parameter set (exposure etc.).
}
// Release the capture device on destruction (RAII).
Camera::~Camera() {
    release();
}
// Apply the full camera parameter set (format, resolution, exposure,
// white balance) and print what the driver actually accepted.
//
// Fix vs. original: CAP_PROP_FOURCC is returned as a double holding an
// integer code; the original memcpy'd the double's raw IEEE-754 bytes
// into the string, printing garbage. The code is now cast to a 32-bit
// int before extracting its four characters.
void Camera::set_cam_params() {
    // FOURCC must be set before resolution/FPS -- order matters on V4L2.
    cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('M', 'J', 'P', 'G'));
    const int width = 640;
    const int height = 480;
    const int fps = 60; // Lowered from 80 to 60
    cap.set(cv::CAP_PROP_FRAME_WIDTH, width);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, height);
    cap.set(cv::CAP_PROP_FPS, fps);
    cap.set(cv::CAP_PROP_AUTOFOCUS, 1);
    cap.set(cv::CAP_PROP_AUTO_WB, 1);
    cap.set(cv::CAP_PROP_AUTO_EXPOSURE, 0); // Manual exposure
    if (target_color == "red") {
        cap.set(cv::CAP_PROP_EXPOSURE, -50); // Tunable
        //cap.set(cv::CAP_PROP_CONTRAST,50);
    } else if (target_color == "blue") {
        cap.set(cv::CAP_PROP_EXPOSURE, -8);
    }
    cap.set(cv::CAP_PROP_WB_TEMPERATURE, 3200);
    // Read back what the driver actually applied.
    double w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
    double h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);
    double f = cap.get(cv::CAP_PROP_FPS);
    double fourcc = cap.get(cv::CAP_PROP_FOURCC);
    // Convert the double-encoded code to an integer before decoding.
    int fourcc_code = static_cast<int>(fourcc);
    char str[5] = {0};
    std::memcpy(str, &fourcc_code, 4);
    std::cout << "Actual: " << w << "x" << h << " @ " << f << "fps, FOURCC='" << str << "'" << std::endl;
}
// Grab one frame into `frame`; false when the device is not open or
// the read fails.
bool Camera::read_frame(cv::Mat& frame) {
    return is_opened && cap.read(frame);
}
// Close the capture device; safe to call more than once.
void Camera::release() {
    if (!is_opened) {
        return;
    }
    cap.release();
    is_opened = false;
}
// Switch the tracked color and re-apply camera parameters.
//
// @param new_color  Case-insensitive "red" or "blue".
// @return true when the color actually changed; false for unsupported
//         colors or a no-op (same color as current).
//
// Fix vs. original: std::tolower was called on a plain char, which is
// undefined behavior for negative values on platforms where char is
// signed; the argument is now cast through unsigned char.
bool Camera::switch_color(const std::string& new_color) {
    std::string lower_color = new_color;
    for (auto& c : lower_color) {
        c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
    }
    if ((lower_color == "red" || lower_color == "blue") && lower_color != target_color) {
        target_color = lower_color;
        set_cam_params();
        return true;
    }
    return false;
}

55
src/ImagePreprocessor.cpp Normal file
View File

@@ -0,0 +1,55 @@
#include "ImagePreprocessor.h"
#include <cctype>
// Stateless preprocessor; nothing to initialize.
ImagePreprocessor::ImagePreprocessor() {
    // Constructor implementation
}
// No owned resources; nothing to release.
ImagePreprocessor::~ImagePreprocessor() {
    // Destructor implementation
}
// Build a binary mask of the target color from a BGR frame.
//
// @param frame         BGR input image.
// @param target_color  "red" or "blue"; anything else yields an all-zero
//                      mask.
// @return 8-bit single-channel mask, denoised with open+close morphology.
cv::Mat ImagePreprocessor::get_mask(const cv::Mat& frame, const std::string& target_color) {
    cv::Mat hsv;
    cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);

    cv::Mat mask;
    if (target_color == "red") {
        // Red wraps around the hue axis, so two hue bands are combined.
        cv::Mat low_band, high_band;
        cv::inRange(hsv, cv::Scalar(0, 43, 49), cv::Scalar(25, 255, 255), low_band);
        cv::inRange(hsv, cv::Scalar(156, 46, 49), cv::Scalar(180, 255, 255), high_band);
        cv::bitwise_or(low_band, high_band, mask);
    } else if (target_color == "blue") {
        cv::inRange(hsv, cv::Scalar(83, 200, 44), cv::Scalar(130, 255, 255), mask);
    } else {
        // Unknown color: empty mask.
        mask = cv::Mat::zeros(frame.size(), CV_8UC1);
    }

    // Open removes speckle noise; close fills small holes.
    const cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::morphologyEx(mask, mask, cv::MORPH_OPEN, kernel);
    cv::morphologyEx(mask, mask, cv::MORPH_CLOSE, kernel);
    return mask;
}
// Return a copy of the frame with only target-color pixels kept;
// everything outside the mask stays black.
cv::Mat ImagePreprocessor::get_color_only_frame(const cv::Mat& frame, const std::string& target_color) {
    cv::Mat result;
    // copyTo with a mask copies only masked pixels; the freshly allocated
    // destination is zero-filled, so non-target pixels come out black.
    frame.copyTo(result, get_mask(frame, target_color));
    return result;
}
// Produce both the binary mask and the masked color frame in one call.
//
// @param frame             BGR input image.
// @param target_color      "red" or "blue".
// @param mask              [out] binary mask of the target color.
// @param color_only_frame  [out] frame with non-target pixels blacked out.
//
// Fix vs. original: the original called get_color_only_frame(), which
// recomputed the mask (a second HSV conversion plus morphology) on every
// frame; the already-computed mask is now reused directly.
void ImagePreprocessor::process_frame(const cv::Mat& frame, const std::string& target_color,
                                      cv::Mat& mask, cv::Mat& color_only_frame) {
    mask = get_mask(frame, target_color);
    // Force reallocation so copyTo zero-fills before the masked copy,
    // matching get_color_only_frame()'s fresh-Mat behavior.
    color_only_frame = cv::Mat();
    frame.copyTo(color_only_frame, mask);
}

65
src/KalmanFilter.cpp Normal file
View File

@@ -0,0 +1,65 @@
#include "KalmanFilter.h"
#include "config.h"
// Constant-velocity 2D tracker: 4 state variables (x, y, dx, dy),
// 2 measured variables (x, y), no control input.
KalmanFilter::KalmanFilter() {
    // 4 state variables (x, y, dx, dy), 2 measurements (x, y)
    kf.init(4, 2, 0);
    init_params();
    // State is seeded lazily by the first update() call.
    initialized = false;
    last_measurement = cv::Point2f(0, 0);
    last_prediction = cv::Point2f(0, 0);
}
// Configure the filter matrices for a constant-velocity model:
//   x' = x + dx,  y' = y + dy,  dx' = dx,  dy' = dy
void KalmanFilter::init_params() {
    // Transition: identity plus the two position<-velocity couplings.
    kf.transitionMatrix = cv::Mat::eye(4, 4, CV_32F);
    kf.transitionMatrix.at<float>(0, 2) = 1.0f; // x += dx
    kf.transitionMatrix.at<float>(1, 3) = 1.0f; // y += dy
    // Measurement: only x and y are observed (top-left 2x2 identity).
    kf.measurementMatrix = cv::Mat::eye(2, 4, CV_32F);
    // Process noise covariance
    kf.processNoiseCov = cv::Mat::eye(4, 4, CV_32F) * KF_PROCESS_NOISE;
    // Measurement noise covariance
    kf.measurementNoiseCov = cv::Mat::eye(2, 2, CV_32F) * KF_MEASUREMENT_NOISE;
    // Error covariance post
    kf.errorCovPost = cv::Mat::eye(4, 4, CV_32F);
}
// Feed one observed position into the filter. The first call seeds the
// state (zero velocity); subsequent calls run the correction step.
void KalmanFilter::update(const cv::Point2f& measurement) {
    if (initialized) {
        cv::Mat z = (cv::Mat_<float>(2, 1) << measurement.x, measurement.y);
        kf.correct(z);
    } else {
        // First observation: position from measurement, velocity zero.
        kf.statePost = (cv::Mat_<float>(4, 1) <<
            measurement.x,
            measurement.y,
            0.0f,
            0.0f);
        initialized = true;
    }
    last_measurement = measurement;
}
// Advance the filter one step and return the predicted position.
// Returns (0, 0) before the first update() seeds the state.
cv::Point2f KalmanFilter::predict() {
    if (!initialized) {
        return cv::Point2f(0, 0);
    }
    const cv::Mat state = kf.predict();
    last_prediction = cv::Point2f(state.at<float>(0), state.at<float>(1));
    return last_prediction;
}

208
src/MindVisionCamera.cpp Normal file
View File

@@ -0,0 +1,208 @@
#include "MindVisionCamera.h"
#include <iostream>
// Open and configure a MindVision industrial camera.
//
// @param cam_id        Index into the SDK's enumerated device list.
// @param target_color  "red" or "blue"; anything else is rejected.
// @throws std::invalid_argument for an unsupported color.
// @throws std::runtime_error when SDK initialization fails.
//
// Fix vs. original: the color is validated BEFORE the SDK is initialized
// (fail fast -- the original brought the camera fully up and then threw
// for a bad argument, leaving the SDK initialized).
MindVisionCamera::MindVisionCamera(int cam_id, const std::string& target_color)
    : camera_handle(-1), is_opened(false), width(640), height(480), fps(30) {
    this->target_color = target_color;
    if (this->target_color != "red" && this->target_color != "blue") {
        throw std::invalid_argument("Only 'red' or 'blue' colors are supported");
    }
    if (!initialize_camera(cam_id)) {
        throw std::runtime_error("Cannot initialize MindVision camera!");
    }
    is_opened = true;
    set_cam_params();
}
// Bring up the MindVision SDK, select the requested device, configure
// the ISP output format, and request a 640x480 custom resolution.
//
// @param cam_id  Index into the enumerated device list.
// @return true on success; false on any enumeration/init failure
//         (errors are logged to stderr).
bool MindVisionCamera::initialize_camera(int cam_id) {
    // Initialize the SDK.
    CameraSdkInit(1);
    int iCameraCounts = 10;
    tSdkCameraDevInfo tCameraEnumList[10]; // Declared as an array, not a pointer
    // Enumerate attached devices and build the device list.
    int iStatus = CameraEnumerateDevice(tCameraEnumList, &iCameraCounts);
    std::cout << "CameraEnumerateDevice returned: " << iStatus << std::endl;
    std::cout << "Found " << iCameraCounts << " cameras" << std::endl;
    if (iCameraCounts <= 0) {
        std::cerr << "No MindVision cameras found!" << std::endl;
        return false;
    }
    // Select the camera with the requested index.
    if (cam_id >= iCameraCounts) {
        std::cerr << "Camera ID " << cam_id << " not found! Only " << iCameraCounts << " cameras detected." << std::endl;
        return false;
    }
    std::cout << "Initializing camera " << cam_id << ": " << tCameraEnumList[cam_id].acFriendlyName << std::endl;
    // Initialize the selected camera (-1, -1: SDK default options).
    iStatus = CameraInit(&tCameraEnumList[cam_id], -1, -1, &camera_handle);
    std::cout << "CameraInit returned: " << iStatus << std::endl;
    if (iStatus != CAMERA_STATUS_SUCCESS) {
        std::cerr << "Failed to initialize camera! Error code: " << iStatus << std::endl;
        return false;
    }
    // Query the camera's capability descriptor.
    tSdkCameraCapbility capability;
    iStatus = CameraGetCapability(camera_handle, &capability);
    if (iStatus != CAMERA_STATUS_SUCCESS) {
        std::cerr << "Failed to get camera capability! Error code: " << iStatus << std::endl;
        return false;
    }
    // Choose the ISP output format based on the sensor type.
    if (capability.sIspCapacity.bMonoSensor) {
        CameraSetIspOutFormat(camera_handle, CAMERA_MEDIA_TYPE_MONO8);
    } else {
        CameraSetIspOutFormat(camera_handle, CAMERA_MEDIA_TYPE_BGR8);
    }
    // Put the SDK into acquisition mode.
    CameraPlay(camera_handle);
    // Query the current resolution, then request 640x480.
    tSdkImageResolution image_resolution;
    int status = CameraGetImageResolution(camera_handle, &image_resolution);
    std::cout << "Default resolution query returned: " << status << std::endl;
    // Request a custom 640x480 region.
    image_resolution.iIndex = 0xFF; // Use custom resolution (0xFF typically indicates custom resolution)
    image_resolution.iWidth = 640;
    image_resolution.iHeight = 480;
    image_resolution.iWidthFOV = image_resolution.iWidth;
    image_resolution.iHeightFOV = image_resolution.iHeight;
    image_resolution.iHOffsetFOV = 0;
    image_resolution.iVOffsetFOV = 0;
    status = CameraSetImageResolution(camera_handle, &image_resolution);
    if (status != CAMERA_STATUS_SUCCESS) {
        std::cout << "Failed to set resolution to 640x480, using default settings" << std::endl;
        // Try to get the current resolution after failed set
        CameraGetImageResolution(camera_handle, &image_resolution);
    } else {
        std::cout << "Successfully set camera resolution to: " << image_resolution.iWidth << "x" << image_resolution.iHeight << std::endl;
    }
    width = image_resolution.iWidth;
    height = image_resolution.iHeight;
    // Enable auto-exposure for bring-up; set_camera_parameters() switches
    // to manual exposure later.
    CameraSetAeState(camera_handle, true);
    // Frame-speed profile index (SDK-defined enum; 10 here -- confirm the
    // intended meaning against the MindVision SDK headers).
    CameraSetFrameSpeed(camera_handle, 10);
    std::cout << "Initialized camera with resolution: " << width << "x" << height << std::endl;
    return true;
}
// Uninitialize the SDK handle on destruction (RAII).
MindVisionCamera::~MindVisionCamera() {
    release();
}
// Apply exposure / white-balance / trigger settings for the current
// target color.
void MindVisionCamera::set_camera_parameters() {
    // Switch to manual exposure first so the values below stick.
    CameraSetAeState(camera_handle, false);
    // Exposure time per color; the SDK takes the value in microseconds
    // (presumably -- confirm against the MindVision SDK docs).
    if (target_color == "red") {
        CameraSetExposureTime(camera_handle, 10000); // 10000 us = 10 ms
    } else if (target_color == "blue") {
        CameraSetExposureTime(camera_handle, 4000); // 4000 us = 4 ms
    }
    // White balance: disable auto, then trigger a one-shot calibration.
    CameraSetWbMode(camera_handle, false);
    CameraSetOnceWB(camera_handle);
    // Continuous (free-running) acquisition, no hardware trigger.
    CameraSetTriggerMode(camera_handle, 0);
}
// Re-apply color-dependent parameters and re-assert the 640x480
// resolution (some SDK parameter changes can reset it).
//
// Fix vs. original: the tSdkImageResolution struct was passed to the SDK
// with only 7 of its fields set and the rest uninitialized (stack
// garbage). It is now value-initialized so unset fields are zero.
void MindVisionCamera::set_cam_params() {
    set_camera_parameters();
    // Ensure resolution remains 640x480 after setting other parameters.
    tSdkImageResolution image_resolution{}; // zero-init all fields
    image_resolution.iIndex = 0xFF; // Use custom resolution (0xFF typically indicates custom resolution)
    image_resolution.iWidth = 640;
    image_resolution.iHeight = 480;
    image_resolution.iWidthFOV = image_resolution.iWidth;
    image_resolution.iHeightFOV = image_resolution.iHeight;
    image_resolution.iHOffsetFOV = 0;
    image_resolution.iVOffsetFOV = 0;
    int status = CameraSetImageResolution(camera_handle, &image_resolution);
    if (status != CAMERA_STATUS_SUCCESS) {
        std::cout << "Failed to maintain resolution at 640x480 in set_cam_params" << std::endl;
    } else {
        std::cout << "Confirmed camera resolution: " << image_resolution.iWidth << "x" << image_resolution.iHeight << std::endl;
    }
}
// Fetch one frame from the SDK into `frame`.
//
// @param frame  [out] BGR (CV_8UC3) or grayscale (CV_8UC1) image,
//               depending on the ISP output format chosen at init.
// @return false when the camera is closed or no frame arrives within
//         the 1000 ms timeout.
//
// Fix vs. original: the ISP output went through a manually new[]'d
// buffer that was copied into a Mat and then delete[]'d every frame --
// a per-frame allocation with a leak risk on any early return. The Mat
// now owns the destination buffer directly (RAII), and the buffer is
// sized for the actual media type instead of always width*height*3.
bool MindVisionCamera::read_frame(cv::Mat& frame) {
    if (!is_opened) {
        return false;
    }
    tSdkFrameHead sFrameInfo;
    BYTE* pbyBuffer;
    // Block up to 1000 ms waiting for the next raw frame.
    if (CameraGetImageBuffer(camera_handle, &sFrameInfo, &pbyBuffer, 1000) != CAMERA_STATUS_SUCCESS) {
        return false;
    }
    const int cv_type =
        sFrameInfo.uiMediaType == CAMERA_MEDIA_TYPE_MONO8 ? CV_8UC1 : CV_8UC3;
    // Let cv::Mat own the ISP output buffer -- no manual new/delete.
    cv::Mat processed(cv::Size(sFrameInfo.iWidth, sFrameInfo.iHeight), cv_type);
    CameraImageProcess(camera_handle, pbyBuffer, processed.data, &sFrameInfo);
    // Return the raw SDK buffer as soon as the ISP pass is done.
    CameraReleaseImageBuffer(camera_handle, pbyBuffer);
    // Reference-counted assignment; 'processed' keeps the pixels alive.
    frame = processed;
    return true;
}
// Uninitialize the SDK handle; safe to call more than once.
void MindVisionCamera::release() {
    if (!is_opened || camera_handle < 0) {
        return;
    }
    // CameraUnInit stops acquisition and frees SDK-side resources.
    CameraUnInit(camera_handle);
    camera_handle = -1;
    is_opened = false;
}
// Switch the tracked color (case-insensitive) and re-apply camera
// parameters. Returns false for unsupported colors or when the color is
// unchanged.
bool MindVisionCamera::switch_color(const std::string& new_color) {
    // Normalize to lowercase for comparison.
    std::string color = new_color;
    for (char& ch : color) {
        ch = std::tolower(ch);
    }
    const bool supported = (color == "red" || color == "blue");
    if (!supported || color == target_color) {
        return false;
    }
    target_color = color;
    set_cam_params();
    return true;
}

236
src/MindVisionMain.cpp Normal file
View File

@@ -0,0 +1,236 @@
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <thread>
#include <opencv2/opencv.hpp>
#include "config.h"
#include "MindVisionCamera.h" // 使用MindVision相机
#include "ImagePreprocessor.h"
#include "ArmorDetector.h"
#include "KalmanFilter.h"
#include "Visualizer.h"
#include "BallisticPredictor.h"
#include "TTLCommunicator.h"
// Function to output control data to TTL device (with enable control)
// Send the aim offset to the TTL device, throttled to every 5th frame.
//
// Wire format: "s <r|b|color> <dx> <dy>" for a target, "s u 0 0" when
// there is nothing to aim at. Offsets are relative to the image center.
void output_control_data(const cv::Point2f* ballistic_point,
                         const std::string& target_color,
                         int frame_counter,
                         TTLCommunicator* ttl_communicator,
                         const cv::Point2f& img_center,
                         bool use_ttl) {
    // Throttle: only when enabled and on every 5th frame.
    if (!use_ttl || frame_counter % 5 != 0) {
        return;
    }
    std::string payload = "s u 0 0"; // default: nothing detected
    if (ballistic_point != nullptr) {
        // Offset from the actual image center, truncated to integers.
        const int off_x = static_cast<int>(ballistic_point->x - img_center.x);
        const int off_y = static_cast<int>(ballistic_point->y - img_center.y);
        // Abbreviate the color for the wire.
        std::string color_code = target_color;
        if (target_color == "red") {
            color_code = "r";
        } else if (target_color == "blue") {
            color_code = "b";
        }
        payload = "s " + color_code + " " + std::to_string(off_x) + " " + std::to_string(off_y);
    }
    if (ttl_communicator != nullptr) {
        ttl_communicator->send_data(payload);
    }
}
// Placeholder: MindVision resolution is fixed at camera-init time, so
// this only logs the requested size. TODO: add a real resolution-change
// method to MindVisionCamera and call it here.
void set_camera_resolution(MindVisionCamera& /*camera*/, int width, int height) {
    std::cout << "Setting camera resolution to: " << width << "x" << height << std::endl;
}
// Entry point: capture -> preprocess -> detect -> track -> predict ->
// visualize -> (optionally) transmit, looped at up to ~100 FPS.
int main(int /*argc*/, char* /*argv*/[]) {
    std::string target_color = "blue";
    int cam_id = 0;
    cv::Size default_resolution(640, 480);
    bool use_ttl = false; // Set to false to disable TTL communication
    // Define optional resolution list (adjust based on camera support)
    std::vector<cv::Size> resolutions = {
        cv::Size(320, 240),   // Low resolution, high frame rate
        cv::Size(640, 480),   // Standard resolution
        cv::Size(1280, 720),  // HD resolution
        cv::Size(1920, 1080)  // Full HD resolution
    };
    // Find the index of default resolution
    int res_index = 1; // Default to index 1 (640x480)
    for (size_t i = 0; i < resolutions.size(); i++) {
        if (resolutions[i] == default_resolution) {
            res_index = static_cast<int>(i);
            break;
        }
    }
    // Initialize TTL communication (only when enabled)
    TTLCommunicator* ttl = nullptr;
    if (use_ttl) {
        ttl = new TTLCommunicator(TTL_BAUDRATE);
        if (!ttl->connect()) {
            std::cout << "Warning: Cannot establish TTL connection, will continue running but not send data" << std::endl;
        }
    } else {
        std::cout << "TTL communication disabled" << std::endl;
    }
    // Initialize visual processing modules with MindVision camera
    MindVisionCamera camera(cam_id, target_color);
    if (!camera.is_opened) {
        std::cerr << "Cannot open MindVision camera!" << std::endl;
        return -1;
    }
    // Initialize ballistic predictor (adjustable bullet speed, e.g., 16m/s)
    BallisticPredictor ballistic_predictor(16.0f);
    // Record target speed (obtained from Kalman filter)
    cv::Point2f target_speed(0.0f, 0.0f);
    // Set initial resolution (currently a logging-only placeholder).
    set_camera_resolution(camera, resolutions[res_index].width, resolutions[res_index].height);
    std::cout << "Initial camera resolution: " << resolutions[res_index].width << "x" << resolutions[res_index].height << std::endl;
    ImagePreprocessor preprocessor;
    ArmorDetector detector;
    KalmanFilter kalman_tracker;
    Visualizer visualizer;
    int frame_counter = 0; // Counter to control output frequency
    int max_consecutive_predicts = 20; // Maximum consecutive prediction times
    int consecutive_predicts = 0; // Current consecutive prediction count
    cv::Mat frame;
    try {
        while (true) {
            if (!camera.read_frame(frame)) {
                std::cout << "Cannot read from MindVision camera, exiting!" << std::endl;
                break;
            }
            // Get actual image size and calculate center
            cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
            // Preprocessing
            cv::Mat mask, color_only_frame;
            preprocessor.process_frame(frame, target_color, mask, color_only_frame);
            // Armor plate detection
            std::vector<LightBar> valid_light_bars;
            std::vector<ArmorPlate> armor_plates;
            detector.detect(mask, target_color, valid_light_bars, armor_plates);
            // Kalman filter tracking: take the highest-confidence plate.
            cv::Point2f* current_center = nullptr;
            if (!armor_plates.empty()) {
                current_center = &(armor_plates[0].center);
            }
            cv::Point2f* predicted_center = nullptr;
            if (current_center != nullptr) {
                // Has detection result: update Kalman filter, reset consecutive prediction count
                kalman_tracker.update(*current_center);
                predicted_center = new cv::Point2f(kalman_tracker.predict());
                // NOTE(review): target_speed is always zeroed here, so the
                // ballistic predictor never gets a real velocity -- the
                // filter's state does hold dx/dy; consider exposing it.
                target_speed = cv::Point2f(0.0f, 0.0f); // Kalman filter in OpenCV doesn't directly expose velocity
                consecutive_predicts = 0;
            } else {
                // No detection result: only use Kalman prediction, limit consecutive predictions
                consecutive_predicts++;
                if (consecutive_predicts < max_consecutive_predicts) {
                    cv::Point2f temp_pred = kalman_tracker.predict();
                    if (temp_pred.x != 0 || temp_pred.y != 0) { // Check if prediction is valid
                        predicted_center = new cv::Point2f(temp_pred);
                    }
                } else {
                    predicted_center = nullptr;
                    target_speed = cv::Point2f(0.0f, 0.0f);
                }
            }
            // Determine display center: detection wins over prediction.
            cv::Point2f* display_center = current_center;
            if (display_center == nullptr && predicted_center != nullptr) {
                display_center = predicted_center;
            }
            bool is_predicted = (display_center != nullptr) && (current_center == nullptr);
            // Calculate ballistic prediction point (owned by the predictor;
            // do not delete).
            cv::Point2f* ballistic_point = ballistic_predictor.predict_ballistic_point(
                predicted_center, img_center, target_speed
            );
            auto current_time = std::chrono::high_resolution_clock::now();
            // Visualization
            visualizer.draw_light_bars(frame, valid_light_bars, target_color);
            if (!armor_plates.empty()) {
                visualizer.draw_armor_plate(frame, armor_plates[0]);
            }
            visualizer.draw_offset_text(frame, display_center, target_color, is_predicted);
            visualizer.draw_ballistic_point(frame, ballistic_point);
            // Output control data to TTL (passing use_ttl to control whether to send)
            // NOTE(review): display_center (raw/predicted center) is sent
            // here, not ballistic_point -- confirm which one the gimbal
            // controller expects.
            output_control_data(display_center, target_color, frame_counter, ttl, img_center, use_ttl);
            // Display windows
            cv::imshow("Armor Detection", frame);
            cv::imshow(target_color + " Mask", mask);
            cv::imshow(target_color + " Only", color_only_frame);
            // Exit on 'q' key press.
            // NOTE(review): breaking here skips the delete below, leaking
            // the current frame's predicted_center once at shutdown.
            if (cv::waitKey(1) == 'q') {
                break;
            }
            frame_counter++;
            // Control max frame rate (100 FPS)
            auto end_time = std::chrono::high_resolution_clock::now();
            auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - current_time).count();
            if (elapsed < 10) { // 100 FPS = 10ms per frame
                std::this_thread::sleep_for(std::chrono::milliseconds(10 - elapsed));
            }
            // Clean up dynamically allocated memory
            if (predicted_center != nullptr) {
                delete predicted_center;
                predicted_center = nullptr;
            }
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error: " << e.what() << std::endl;
    }
    // Cleanup
    camera.release();
    cv::destroyAllWindows();
    // Only close TTL connection when enabled and initialized
    if (use_ttl && ttl != nullptr) {
        ttl->close();
        delete ttl;
        ttl = nullptr;
    }
    return 0;
}

59
src/TTLCommunicator.cpp Normal file
View File

@@ -0,0 +1,59 @@
#include "TTLCommunicator.h"
#include <iostream>
// Construct with the desired baud rate. The port is not opened here;
// call connect() to establish the link.
TTLCommunicator::TTLCommunicator(int baudrate) : baudrate(baudrate), connected(false) {
    // Default to a common serial port name, though this will need to be configured for the specific system
    port_name = "/dev/ttyUSB0"; // Common for Linux/Ubuntu
}
// Close the port on destruction (RAII); close() is a no-op when not
// connected.
TTLCommunicator::~TTLCommunicator() {
    close();
}
// Attempt to open the serial link; returns true on success.
// Placeholder: open_serial_port() currently simulates success -- a real
// implementation would open and configure the device node.
bool TTLCommunicator::connect() {
    std::cout << "Attempting to connect to TTL device on port: " << port_name << " at baud rate: " << baudrate << std::endl;
    connected = open_serial_port();
    if (connected) {
        std::cout << "TTL connection established." << std::endl;
        return true;
    }
    std::cout << "Warning: Failed to establish TTL connection." << std::endl;
    return false;
}
// Close the serial link; safe to call when already closed.
void TTLCommunicator::close() {
    if (!connected) {
        return;
    }
    close_serial_port();
    connected = false;
    std::cout << "TTL connection closed." << std::endl;
}
// Transmit one payload string; returns false when not connected.
// Placeholder: logs the payload instead of writing to the serial fd.
bool TTLCommunicator::send_data(const std::string& data) {
    if (!connected) {
        return false;
    }
    std::cout << "Sending TTL data: " << data << std::endl;
    return true; // simulate a successful transmission
}
// Stub: a real implementation would open(port_name), then configure the
// line with tcsetattr() (baud, 8N1, raw mode). Always "succeeds" for now.
bool TTLCommunicator::open_serial_port() {
    // In a real implementation, this would use system calls like open(), tcsetattr(), etc.
    // For now, return true to simulate a successful connection
    return true;
}
// Stub: a real implementation would close() the serial file descriptor.
void TTLCommunicator::close_serial_port() {
    // In a real implementation, this would close the file descriptor
}

93
src/Visualizer.cpp Normal file
View File

@@ -0,0 +1,93 @@
#include "Visualizer.h"
#include <sstream>
// Stateless drawing helper; nothing to initialize.
Visualizer::Visualizer() {
    // Constructor implementation
}
// No owned resources; nothing to release.
Visualizer::~Visualizer() {
    // Destructor implementation
}
// Outline every light bar's rotated box on the frame.
// Red targets are drawn in red, anything else in blue (BGR order).
cv::Mat& Visualizer::draw_light_bars(cv::Mat& frame, const std::vector<LightBar>& light_bars, const std::string& target_color) {
    const cv::Scalar color = (target_color == "red") ? cv::Scalar(0, 0, 255) : cv::Scalar(255, 0, 0);
    for (const auto& bar : light_bars) {
        // Connect consecutive corners, wrapping back to the first.
        const size_t corner_count = bar.box.size();
        for (size_t i = 0; i < corner_count; i++) {
            cv::line(frame, bar.box[i], bar.box[(i + 1) % corner_count], color, 2);
        }
    }
    return frame;
}
// Mark an armor plate: green circle at its center plus a confidence
// label beside it.
//
// Fix vs. original: the label was built with std::fixed /
// std::setprecision, which require <iomanip> -- a header this file never
// includes (it compiled only via transitive includes). cv::format, which
// is already available through the OpenCV headers, produces the same
// "%.2f" text without that dependency.
cv::Mat& Visualizer::draw_armor_plate(cv::Mat& frame, const ArmorPlate& armor_plate) {
    cv::circle(frame, armor_plate.center, 10, cv::Scalar(0, 255, 0), 2); // Green circle around armor center
    // Draw confidence text with two decimal places.
    const std::string label = cv::format("Conf: %.2f", armor_plate.confidence);
    cv::putText(frame, label, cv::Point(armor_plate.center.x + 15, armor_plate.center.y - 15),
                cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1);
    return frame;
}
// Print the pixel offset of `display_center` from the image center in the
// top-left corner of `frame`. Yellow text marks a predicted (coasted)
// position, green a measured one. A null center draws nothing.
cv::Mat& Visualizer::draw_offset_text(cv::Mat& frame, const cv::Point2f* display_center, const std::string& /*target_color*/, bool is_predicted) {
    if (!display_center) {
        return frame;  // nothing to annotate without a target position
    }
    const cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
    const int dx = static_cast<int>(display_center->x - img_center.x);
    const int dy = static_cast<int>(display_center->y - img_center.y);
    std::stringstream text;
    text << "Offset: (" << dx << ", " << dy << ")";
    const cv::Scalar text_color = is_predicted ? cv::Scalar(0, 255, 255)   // yellow: predicted
                                               : cv::Scalar(0, 255, 0);    // green: measured
    cv::putText(frame, text.str(), cv::Point(20, 30), cv::FONT_HERSHEY_SIMPLEX, 0.7, text_color, 2);
    return frame;
}
// Draw a "+" crosshair centered on `center`; each arm extends `size` pixels
// from the center. Returns the annotated frame.
cv::Mat& Visualizer::draw_crosshair(cv::Mat& frame, const cv::Point2f& center, const cv::Scalar& color, int size) {
    const int cx = static_cast<int>(center.x);
    const int cy = static_cast<int>(center.y);
    // Horizontal arm.
    cv::line(frame, cv::Point(cx - size, cy), cv::Point(cx + size, cy), color, 2);
    // Vertical arm.
    cv::line(frame, cv::Point(cx, cy - size), cv::Point(cx, cy + size), color, 2);
    return frame;
}
// Mark the ballistic (lead) prediction point in red: a circle, a crosshair,
// and a "BP" label so it is distinguishable from the aim marker.
// A null pointer means "no ballistic solution this frame" — draw nothing.
// FIX (consistency): the original duplicated the two cv::line calls that
// draw_crosshair already provides; reusing the helper produces identical
// geometry (±10 px arms, thickness 2).
cv::Mat& Visualizer::draw_ballistic_point(cv::Mat& frame, const cv::Point2f* ballistic_point) {
    if (ballistic_point == nullptr) {
        return frame;
    }
    const cv::Scalar red(0, 0, 255);
    cv::circle(frame, *ballistic_point, 8, red, 2);
    draw_crosshair(frame, *ballistic_point, red, 10);
    cv::putText(frame, "BP",
                cv::Point(static_cast<int>(ballistic_point->x + 15), static_cast<int>(ballistic_point->y - 15)),
                cv::FONT_HERSHEY_SIMPLEX, 0.5, red, 1);
    return frame;
}

237
src/main.cpp Normal file
View File

@@ -0,0 +1,237 @@
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <thread>
#include <opencv2/opencv.hpp>
#include "config.h"
#include "MindVisionCamera.h" // 使用MindVision相机
#include "ImagePreprocessor.h"
#include "ArmorDetector.h"
#include "KalmanFilter.h"
#include "Visualizer.h"
#include "BallisticPredictor.h"
#include "TTLCommunicator.h"
// Function to output control data to TTL device (with enable control)
// Send one aim-offset packet over TTL, subject to gating:
//   - TTL output enabled (use_ttl)
//   - only every 5th frame (rate limit)
//   - an armor plate was actually detected this frame
//   - a valid ballistic point exists (nullptr -> intentionally send nothing)
// Wire format: "s <r|b|color> <offset_x> <offset_y>", offsets in pixels
// relative to the true image center.
void output_control_data(const cv::Point2f* ballistic_point,
                        const std::string& target_color,
                        int frame_counter,
                        TTLCommunicator* ttl_communicator,
                        const cv::Point2f& img_center,
                        bool use_ttl,
                        bool armor_detected) {
    if (!use_ttl || frame_counter % 5 != 0 || !armor_detected) {
        return;
    }
    if (ballistic_point == nullptr) {
        return;  // no valid aim point: deliberately silent
    }
    const int off_x = static_cast<int>(ballistic_point->x - img_center.x);
    const int off_y = static_cast<int>(ballistic_point->y - img_center.y);
    // Shorten well-known colors for the wire format; others pass through.
    std::string color_tag = target_color;
    if (target_color == "red") {
        color_tag = "r";
    } else if (target_color == "blue") {
        color_tag = "b";
    }
    if (ttl_communicator != nullptr) {
        ttl_communicator->send_data("s " + color_tag + " " + std::to_string(off_x) + " " + std::to_string(off_y));
    }
}
void set_camera_resolution(MindVisionCamera, int width, int height) {
// The resolution is set during camera initialization in MindVision
// We already set it in MindVisionCamera constructor, so this is for logging only
std::cout << "Camera resolution already set to: " << width << "x" << height << " during initialization" << std::endl;
}
int main(int /*argc*/, char* /*argv*/[]) {
std::string target_color = "red";
int cam_id = 0;
cv::Size default_resolution(640, 480); // This will be set by MindVisionCamera constructor
bool use_ttl = true; // Set to false to disable TTL communication
// Define optional resolution list (adjust based on camera support)
std::vector<cv::Size> resolutions = {
cv::Size(320, 240), // Low resolution, high frame rate
cv::Size(640, 480), // Standard resolution
cv::Size(1280, 720), // HD resolution
cv::Size(1920, 1080) // Full HD resolution
};
// Find the index of default resolution
int res_index = 1; // Default to index 1 (640x480)
for (size_t i = 0; i < resolutions.size(); i++) {
if (resolutions[i] == default_resolution) {
res_index = static_cast<int>(i);
break;
}
}
// Initialize TTL communication (only when enabled)
TTLCommunicator* ttl = nullptr;
if (use_ttl) {
ttl = new TTLCommunicator(TTL_BAUDRATE);
if (!ttl->connect()) {
std::cout << "Warning: Cannot establish TTL connection, will continue running but not send data" << std::endl;
}
} else {
std::cout << "TTL communication disabled" << std::endl;
}
// Initialize visual processing modules with MindVision camera
MindVisionCamera camera(cam_id, target_color);
if (!camera.is_opened) {
std::cerr << "Cannot open MindVision camera!" << std::endl;
return -1;
}
// Initialize ballistic predictor (adjustable bullet speed, e.g., 16m/s)
BallisticPredictor ballistic_predictor(16.0f);
// Record target speed (obtained from Kalman filter)
cv::Point2f target_speed(0.0f, 0.0f);
// Set initial resolution
set_camera_resolution(camera, resolutions[res_index].width, resolutions[res_index].height);
std::cout << "Initial camera resolution: " << resolutions[res_index].width << "x" << resolutions[res_index].height << std::endl;
ImagePreprocessor preprocessor;
ArmorDetector detector;
KalmanFilter kalman_tracker;
Visualizer visualizer;
int frame_counter = 0; // Counter to control output frequency
int max_consecutive_predicts = 20; // Maximum consecutive prediction times
int consecutive_predicts = 0; // Current consecutive prediction count
cv::Mat frame;
try {
while (true) {
//if (!camera.read_frame(frame)) {
// std::cout << "Cannot read from MindVision camera, exiting!" << std::endl;
// break;
//}
// Get actual image size and calculate center
cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
// Preprocessing
cv::Mat mask, color_only_frame;
preprocessor.process_frame(frame, target_color, mask, color_only_frame);
// Armor plate detection
std::vector<LightBar> valid_light_bars;
std::vector<ArmorPlate> armor_plates;
detector.detect(mask, target_color, valid_light_bars, armor_plates);
// Kalman filter tracking (modified part: get target speed)
cv::Point2f* current_center = nullptr;
if (!armor_plates.empty()) {
current_center = &(armor_plates[0].center);
}
cv::Point2f* predicted_center = nullptr;
if (current_center != nullptr) {
// Has detection result: update Kalman filter, reset consecutive prediction count
kalman_tracker.update(*current_center);
predicted_center = new cv::Point2f(kalman_tracker.predict());
// Get velocity from the Kalman filter
target_speed = cv::Point2f(0.0f, 0.0f); // Kalman filter in OpenCV doesn't directly expose velocity
consecutive_predicts = 0;
} else {
// No detection result: only use Kalman prediction, limit consecutive predictions
consecutive_predicts++;
if (consecutive_predicts < max_consecutive_predicts) {
cv::Point2f temp_pred = kalman_tracker.predict();
if (temp_pred.x != 0 || temp_pred.y != 0) { // Check if prediction is valid
predicted_center = new cv::Point2f(temp_pred);
}
} else {
predicted_center = nullptr;
target_speed = cv::Point2f(0.0f, 0.0f);
}
}
// Determine display center
cv::Point2f* display_center = current_center;
if (display_center == nullptr && predicted_center != nullptr) {
display_center = predicted_center;
}
bool is_predicted = (display_center != nullptr) && (current_center == nullptr);
// Calculate ballistic prediction point
cv::Point2f* ballistic_point = ballistic_predictor.predict_ballistic_point(
predicted_center, img_center, target_speed
);
auto current_time = std::chrono::high_resolution_clock::now();
// Visualization
visualizer.draw_light_bars(frame, valid_light_bars, target_color);
if (!armor_plates.empty()) {
visualizer.draw_armor_plate(frame, armor_plates[0]);
}
visualizer.draw_offset_text(frame, display_center, target_color, is_predicted);
visualizer.draw_ballistic_point(frame, ballistic_point);
// Output control data to TTL (passing use_ttl to control whether to send)
// Only send TTL data if actual armor plates were detected (not just predicted)
bool armor_detected = !armor_plates.empty();
// Only send TTL data when we have an actual detection, not just prediction
bool should_send_ttl = armor_detected && (display_center != nullptr);
output_control_data(display_center, target_color, frame_counter, ttl, img_center, use_ttl, should_send_ttl);
// Display windows
cv::imshow("Armor Detection", frame);
cv::imshow(target_color + " Mask", mask);
cv::imshow(target_color + " Only", color_only_frame);
// Exit on 'q' key press
if (cv::waitKey(1) == 'q') {
break;
}
frame_counter++;
// Control max frame rate (100 FPS)
auto end_time = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - current_time).count();
if (elapsed < 10) { // 100 FPS = 10ms per frame
std::this_thread::sleep_for(std::chrono::milliseconds(30 - elapsed));
}
// Clean up dynamically allocated memory
if (predicted_center != nullptr) {
delete predicted_center;
predicted_center = nullptr;
}
}
}
catch (const std::exception& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
// Cleanup
camera.release();
cv::destroyAllWindows();
// Only close TTL connection when enabled and initialized
if (use_ttl && ttl != nullptr) {
ttl->close();
delete ttl;
ttl = nullptr;
}
return 0;
}