Compare commits

..

11 Commits

6 changed files with 230 additions and 34 deletions

View File

@@ -76,3 +76,6 @@ make -j$(nproc)
2. 确认MindVision相机驱动是否正确安装
3. 验证相机ID是否正确
4. 检查权限可能需要将用户添加到video组
## MindVision-SDK
`Linux`: `wget https://www.mindvision.com.cn/wp-content/uploads/2023/08/linuxSDK_V2.1.0.37.tar.gz`

View File

@@ -23,6 +23,23 @@ struct ArmorPlate {
double confidence;
std::pair<LightBar, LightBar> pair;
std::vector<cv::Point2f> corners_2d; // Can be computed later for 3D pose
// Axis-aligned bounding box of the armor plate, derived from its 2D corners.
// Falls back to a fixed-size 60x30 box around `center` when fewer than four
// corners have been computed.
cv::Rect2d getBoundingRect() const {
    if (corners_2d.size() < 4) {
        // Fallback: use center and a fixed size
        return cv::Rect2d(center.x - 30, center.y - 15, 60, 30);
    }
    float left = corners_2d.front().x, right = left;
    float top = corners_2d.front().y, bottom = top;
    for (const cv::Point2f& corner : corners_2d) {
        if (corner.x < left)   left = corner.x;
        if (corner.x > right)  right = corner.x;
        if (corner.y < top)    top = corner.y;
        if (corner.y > bottom) bottom = corner.y;
    }
    return cv::Rect2d(left, top, right - left, bottom - top);
}
};
class ArmorDetector {

View File

@@ -12,13 +12,15 @@ extern "C" {
class MindVisionCamera {
public:
int camera_handle; // MindVision SDK中的相机句柄
CameraHandle camera_handle; // MindVision SDk中的相机句柄
bool is_opened;
std::string target_color;
int width;
int height;
int fps;
unsigned char* g_pRgbBuffer; // 处理后数据缓存区
tSdkCameraCapbility capability; // 相机能力信息
tSdkImageResolution image_resolution; // 分辨率信息
MindVisionCamera(int cam_id = 0, const std::string& target_color = "red");
~MindVisionCamera();
@@ -29,6 +31,10 @@ public:
void release();
bool switch_color(const std::string& target_color);
int get_width() const { return width; }
int get_height() const { return height; }
bool set_resolution(int width, int height);
private:
void set_camera_parameters();
bool initialize_camera(int cam_id);

View File

@@ -36,6 +36,9 @@ const float ARMOR_ANGLE_DIFF_THRESHOLD = 15.0f * CV_PI / 180.0f; // Armor light
const float KF_PROCESS_NOISE = 0.02f; // Process noise covariance (Kalman filter Q)
const float KF_MEASUREMENT_NOISE = 0.5f; // Measurement noise covariance (Kalman filter R)
// Focal length in pixels (from camera calibration).
// NOTE(review): "PIXAL" is a misspelling of "PIXEL"; the name is kept as-is
// because other translation units may reference it.
const double FOCAL_PIXAL = 600.0; // This should match your actual camera calibration
// TTL communication settings
const int TTL_BAUDRATE = 115200; // Serial baud rate for the TTL link

View File

@@ -59,21 +59,19 @@ bool MindVisionCamera::initialize_camera(int cam_id) {
}
// 获取相机能力
tSdkCameraCapbility capability;
iStatus = CameraGetCapability(camera_handle, &capability);
if (iStatus != CAMERA_STATUS_SUCCESS) {
std::cerr << "Failed to get camera capability! Error code: " << iStatus << std::endl;
return false;
}
// 让SDK进入工作模式 - 根据原始OpenCv项目应该在设置格式前调用
CameraPlay(camera_handle);
// 设置输出格式 - 保持与原始工作项目一致直接设置为BGR8格式
CameraSetIspOutFormat(camera_handle, CAMERA_MEDIA_TYPE_BGR8);
// 让SDK进入工作模式 - 根据原始OpenCv项目应该在设置格式前调用
CameraPlay(camera_handle);
// 获取并设置分辨率为 640x480
tSdkImageResolution image_resolution;
int status = CameraGetImageResolution(camera_handle, &image_resolution);
std::cout << "Default resolution query returned: " << status << std::endl;
@@ -166,6 +164,18 @@ bool MindVisionCamera::read_frame(cv::Mat& frame) {
// 获取一帧数据
if (CameraGetImageBuffer(camera_handle, &sFrameInfo, &pbyBuffer, 1000) == CAMERA_STATUS_SUCCESS) {
// 直接使用CameraGetImageBufferEx获取处理后的RGB数据提高效率
INT width, height;
unsigned char* pData = CameraGetImageBufferEx(camera_handle, &width, &height, 2000);
if(pData != NULL) {
// 创建OpenCV Mat对象直接使用获取的数据
cv::Mat temp_mat(height, width, CV_8UC3, pData);
frame = temp_mat.clone(); // clone()确保数据被复制,而不是共享指针
// CameraGetImageBufferEx方式不需要手动释放缓冲区
return true;
}
// 使用全局缓冲区处理图像数据 - 与原始项目一致
int status = CameraImageProcess(camera_handle, pbyBuffer, g_pRgbBuffer, &sFrameInfo);
if (status != CAMERA_STATUS_SUCCESS) {
@@ -192,6 +202,38 @@ bool MindVisionCamera::read_frame_with_color_filter(cv::Mat& frame, cv::Mat& raw
return false;
}
// 尝试使用CameraGetImageBufferEx方式获取图像
INT width, height;
unsigned char* pData = CameraGetImageBufferEx(camera_handle, &width, &height, 2000);
if(pData != NULL) {
// 创建OpenCV Mat对象直接使用获取的数据
cv::Mat original_img(height, width, CV_8UC3, pData);
frame = original_img.clone(); // clone()确保数据被复制,而不是共享指针
// 创建HSV图像用于颜色过滤
cv::Mat hsv_img;
cv::cvtColor(frame, hsv_img, cv::COLOR_BGR2HSV);
// 根据颜色创建原始掩码
if (target_color == "red") {
// Red color range in HSV
cv::Mat mask1, mask2;
cv::inRange(hsv_img, cv::Scalar(0, 43, 49), cv::Scalar(25, 255, 255), mask1);
cv::inRange(hsv_img, cv::Scalar(156, 46, 49), cv::Scalar(180, 255, 255), mask2);
raw_mask = mask1 | mask2;
} else if (target_color == "blue") {
// Blue color range in HSV
cv::inRange(hsv_img, cv::Scalar(83, 200, 44), cv::Scalar(130, 255, 255), raw_mask);
} else {
raw_mask = cv::Mat::zeros(hsv_img.size(), CV_8UC1);
}
// CameraGetImageBufferEx方式不需要手动释放缓冲区
return true;
}
// 如果CameraGetImageBufferEx失败回退到原来的方式
tSdkFrameHead sFrameInfo;
BYTE* pbyBuffer;
@@ -239,11 +281,16 @@ bool MindVisionCamera::read_frame_with_color_filter(cv::Mat& frame, cv::Mat& raw
}
// Release all camera/SDK resources. Safe to call more than once, and called
// regardless of is_opened so a partially failed initialization still cleans up.
void MindVisionCamera::release() {
    if (camera_handle != 0) {
        // Stop acquisition, then uninitialize the camera handle.
        CameraPause(camera_handle);
        CameraUnInit(camera_handle);
        // Reset to the same sentinel the guard above tests, so a second
        // release() is a no-op instead of calling CameraUnInit() on a stale
        // handle (the previous code reset to -1, which the != 0 guard would
        // pass again). NOTE(review): assumes the constructor initializes
        // camera_handle to 0 and the SDK never returns handle 0 — confirm
        // against the constructor and CameraInit().
        camera_handle = 0;
    }
    if (g_pRgbBuffer) {
        free(g_pRgbBuffer);     // buffer allocated with malloc() during setup
        g_pRgbBuffer = nullptr; // avoid double free on repeated release()
    }
    is_opened = false;
}
@@ -260,3 +307,29 @@ bool MindVisionCamera::switch_color(const std::string& new_color) {
}
return false;
}
// Change the capture resolution at runtime via the MindVision SDK.
// Returns true on success; on failure the cached width/height members are
// left untouched. Requires the camera to be open.
bool MindVisionCamera::set_resolution(int width, int height){
    if (!is_opened) {
        return false;
    }
    // Start from the current resolution descriptor so SDK-internal fields
    // keep valid values; bail out if the query fails instead of pushing a
    // partially uninitialized struct to the SDK (previous code ignored this
    // status and proceeded regardless).
    tSdkImageResolution res;
    int status = CameraGetImageResolution(camera_handle, &res);
    if (status != CAMERA_STATUS_SUCCESS) {
        std::cerr << "Failed to query current resolution! Error code: " << status << std::endl;
        return false;
    }
    res.iIndex = 0xFF; // 0xFF selects a custom ROI rather than a preset index
    res.iWidth = width;
    res.iHeight = height;
    // Keep the field of view at the sensor maximum with no offset.
    res.iWidthFOV = capability.sResolutionRange.iWidthMax;
    res.iHeightFOV = capability.sResolutionRange.iHeightMax;
    res.iHOffsetFOV = 0;
    res.iVOffsetFOV = 0;
    status = CameraSetImageResolution(camera_handle, &res);
    if (status == CAMERA_STATUS_SUCCESS) {
        this->width = width;
        this->height = height;
        image_resolution = res; // cache the descriptor that was applied
        return true;
    }
    std::cerr << "CameraSetImageResolution failed! Error code: " << status << std::endl;
    return false;
}

View File

@@ -1,3 +1,4 @@
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
@@ -5,8 +6,9 @@
#include <thread>
#include <memory>
#include <unistd.h>
#include <math.h>
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include "config.h"
#include "MindVisionCamera.h" // 使用MindVision相机
@@ -20,48 +22,64 @@
// Send gimbal control data to the TTL serial device (with enable control).
// ballistic_point may be nullptr (no target this frame) — nothing is sent.
// Sends on every frame when use_ttl is true and a target exists.
void output_control_data(const cv::Point2f* ballistic_point,
                         const std::string& target_color,
                         TTLCommunicator* ttl_communicator,
                         const cv::Point2f& img_center,
                         bool use_ttl) {
    // Only send data when TTL is enabled and has valid target
    if (use_ttl && ballistic_point != nullptr) {
        std::ostringstream send_str;
        // Pixel offset from the image center, scaled by an empirical gain of
        // 1.9 and sign-flipped to match the gimbal's yaw/pitch conventions.
        int ballistic_offset_yaw = 1.9*-static_cast<int>(ballistic_point->x - img_center.x);
        if ( abs(ballistic_offset_yaw) > 320){
            // NOTE(review): threshold is 320 but the clamp magnitude is 220 —
            // confirm whether 220 is intentional or should match the threshold.
            ballistic_offset_yaw = ( ballistic_offset_yaw / abs( ballistic_offset_yaw ) ) * 220 ;
        }
        int ballistic_offset_pitch = 1.9*-static_cast<int>(img_center.y - ballistic_point->y);
        if ( abs(ballistic_offset_pitch) > 180 ) {
            // NOTE(review): clamping to 180*1.9 = 342 is LARGER than the 180
            // threshold, so this "clamp" can increase the magnitude — looks
            // like a bug; verify the intended pitch limit.
            ballistic_offset_pitch = ( ballistic_offset_pitch / abs( ballistic_offset_pitch ) ) * 180*1.9 ;
        }
        // Color simplification mapping ("red" -> "r", "blue" -> "b")
        std::string simplified_color = target_color;
        if (target_color == "red") simplified_color = "r";
        else if (target_color == "blue") simplified_color = "b";
        // Construct send string (original "#s ... *" framed protocol format)
        send_str << "#s " << simplified_color << " " << std::to_string(ballistic_offset_yaw) << " " << std::to_string(ballistic_offset_pitch) << "*\n";
        // Send data
        if (ttl_communicator != nullptr) {
            ttl_communicator->send_data(send_str.str());
        }else{
            std::cerr << "Error: TTLCommunicator is a null pointer!" << std::endl;
        }
    }
}
void set_camera_resolution(MindVisionCamera& , int width, int height) {
void set_camera_resolution(MindVisionCamera& camera, int width, int height) {
// The resolution is set during camera initialization in MindVision
// We need to implement a method in MindVisionCamera to change resolution
// For now, we'll just log the intended change
std::cout << "Setting camera resolution to: " << width << "x" << height << std::endl;
if (camera.set_resolution(width, height)){
std::cout << "Successfully set camera resolution to: " << width << "x" << height << std::endl;
} else {
std::cerr << "Failed to set camera resolution to: " << width << "x" << height << std::endl;
}
}
int main(int /*argc*/, char* /*argv*/[]) {
static int Numbe = 0;
std::string target_color = "red";
int cam_id = 0;
cv::Size default_resolution(640, 480);
bool use_ttl = true; // Set to false to disable TTL communication
cv::Size default_resolution(640, 480); // Changed to 640x480 for consistency with SJTU project
bool use_ttl = false; // Set to false to disable TTL communication
if (Numbe == 0) {
// 执行 shell 命令(注意安全风险!)
std::system("sudo chmod 777 /dev/tty*");
Numbe++;
}
// Define optional resolution list (adjust based on camera support)
std::vector<cv::Size> resolutions = {
@@ -117,6 +135,13 @@ int main(int /*argc*/, char* /*argv*/[]) {
KalmanFilter kalman_tracker;
Visualizer visualizer;
// Initialize KCF tracker
cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
bool is_tracking = false;
cv::Rect2d tracking_roi;
int tracking_frame_count = 0;
const int MAX_TRACKING_FRAMES = 100; // Maximum frames to track before returning to search
int frame_counter = 0; // Counter to control output frequency
int max_consecutive_predicts = 20; // Maximum consecutive prediction times
int consecutive_predicts = 0; // Current consecutive prediction count
@@ -125,6 +150,7 @@ int main(int /*argc*/, char* /*argv*/[]) {
try {
while (true) {
// 使用新的颜色过滤方法同时获取图像和原始掩码
cv::Mat raw_mask;
if (!camera.read_frame_with_color_filter(frame, raw_mask, target_color)) {
std::cout << "Cannot read from MindVision camera, exiting!HERERER" << std::endl;
@@ -142,30 +168,90 @@ int main(int /*argc*/, char* /*argv*/[]) {
cv::Mat color_only_frame;
frame.copyTo(color_only_frame, raw_mask);
// Armor plate detection
// Initialize tracking center
cv::Point2f* tracking_center = nullptr;
std::unique_ptr<cv::Point2f> tracking_point = nullptr;
if (is_tracking) {
// Update tracker
bool success = tracker->update(frame, tracking_roi);
if (success && tracking_roi.area() > 0) {
// Calculate center of tracked ROI
tracking_point = std::make_unique<cv::Point2f>(
tracking_roi.x + tracking_roi.width / 2.0f,
tracking_roi.y + tracking_roi.height / 2.0f
);
tracking_center = tracking_point.get();
tracking_frame_count++;
// If tracking for too long or detection is available, search for armor again
if (tracking_frame_count > MAX_TRACKING_FRAMES) {
is_tracking = false;
tracking_frame_count = 0;
}
} else {
// Tracking failed, return to detection mode
is_tracking = false;
tracking_frame_count = 0;
}
}
// Armor plate detection - only when not tracking or need to verify tracking
std::vector<LightBar> valid_light_bars;
std::vector<ArmorPlate> armor_plates;
if (!is_tracking || tracking_frame_count % 10 == 0) { // Detect every 10 frames when tracking
detector.detect(mask, target_color, valid_light_bars, armor_plates);
}
// Kalman filter tracking (modified part: get target speed)
cv::Point2f* current_center = nullptr;
cv::Point2f* detection_center = nullptr;
if (!armor_plates.empty()) {
current_center = &(armor_plates[0].center);
detection_center = &(armor_plates[0].center);
}
// Use smart pointer for safer memory management
std::unique_ptr<cv::Point2f> predicted_center = nullptr;
if (current_center != nullptr) {
// Priority: detection -> tracking -> Kalman prediction
if (detection_center != nullptr) {
// Has detection result: update Kalman filter, reset consecutive prediction count
kalman_tracker.update(*current_center);
kalman_tracker.update(*detection_center);
predicted_center = std::make_unique<cv::Point2f>(kalman_tracker.predict());
// Get velocity from the Kalman filter
target_speed = cv::Point2f(0.0f, 0.0f); // Kalman filter in OpenCV doesn't directly expose velocity
// Start tracking if successfully detected
if (!is_tracking && !armor_plates.empty()) {
cv::Rect2d armor_rect = cv::boundingRect(std::vector<cv::Point2f>{
armor_plates[0].corners_2d[0],
armor_plates[0].corners_2d[1],
armor_plates[0].corners_2d[2],
armor_plates[0].corners_2d[3]
});
// Expand the bounding box slightly for better tracking
cv::Rect2d expanded_rect = cv::Rect2d(
armor_rect.x - armor_rect.width * 0.1,
armor_rect.y - armor_rect.height * 0.1,
armor_rect.width * 1.2,
armor_rect.height * 1.2
);
// Ensure the rectangle is within frame bounds
expanded_rect = expanded_rect & cv::Rect2d(0, 0, frame.cols, frame.rows);
tracker = cv::TrackerKCF::create(); // Create new tracker instance
tracker->init(frame, expanded_rect);
tracking_roi = expanded_rect;
is_tracking = true;
tracking_frame_count = 0;
}
consecutive_predicts = 0;
} else if (is_tracking && tracking_center != nullptr) {
// Use tracking result
kalman_tracker.update(*tracking_center);
predicted_center = std::make_unique<cv::Point2f>(kalman_tracker.predict());
consecutive_predicts = 0;
} else {
// No detection result: only use Kalman prediction, limit consecutive predictions
// No detection or tracking result: only use Kalman prediction, limit consecutive predictions
consecutive_predicts++;
if (consecutive_predicts < max_consecutive_predicts) {
cv::Point2f temp_pred = kalman_tracker.predict();
@@ -179,11 +265,14 @@ int main(int /*argc*/, char* /*argv*/[]) {
}
// Determine display center
cv::Point2f* display_center = current_center;
cv::Point2f* display_center = detection_center; // Give priority to detection results
if (display_center == nullptr && is_tracking && tracking_center != nullptr) {
display_center = tracking_center;
}
if (display_center == nullptr && predicted_center != nullptr) {
display_center = predicted_center.get();
}
bool is_predicted = (display_center != nullptr) && (current_center == nullptr);
bool is_predicted = (display_center != nullptr) && (detection_center == nullptr && (!is_tracking || tracking_center == nullptr));
// Calculate ballistic prediction point
cv::Point2f* ballistic_point = ballistic_predictor.predict_ballistic_point(
@@ -197,16 +286,21 @@ int main(int /*argc*/, char* /*argv*/[]) {
if (!armor_plates.empty()) {
visualizer.draw_armor_plate(frame, armor_plates[0]);
}
// Draw tracking rectangle if tracking
if (is_tracking) {
cv::rectangle(frame, tracking_roi, cv::Scalar(0, 255, 0), 2);
}
visualizer.draw_offset_text(frame, display_center, target_color, is_predicted);
visualizer.draw_ballistic_point(frame, ballistic_point);
// Output control data to TTL (passing use_ttl to control whether to send)
output_control_data(display_center, target_color, frame_counter, ttl, img_center, use_ttl);
// Now sending on every frame for smoother control
output_control_data(display_center, target_color, ttl, img_center, use_ttl);
// Display windows
cv::imshow("Armor Detection", frame);
cv::imshow(target_color + " Mask", mask);
cv::imshow(target_color + " Only", color_only_frame);
cv::imshow("Armor Detection", frame);
// Exit on 'q' key press
if (cv::waitKey(1) == 'q') {