Compare commits

31 Commits

Author SHA1 Message Date
e5557309ad 面积大小输出 2025-12-11 17:14:22 +08:00
79654f44ff data had upgrade 2025-12-06 14:42:59 +08:00
lyf
bf709ea616 增加PID调节注释在readme 2025-12-06 10:17:48 +08:00
f2d2f875ec 11 2025-12-05 18:31:35 +08:00
87a2722aa0 11 2025-12-05 18:30:32 +08:00
3f7cc22f95 11 2025-12-05 18:22:50 +08:00
865da57ebc 123 2025-12-05 18:08:48 +08:00
953da321bf 123 2025-12-05 18:07:42 +08:00
1f0a1e3a7c 修改报错 2025-12-05 18:03:03 +08:00
7c77c55031 修正(Segmentationfault) 2025-12-05 17:40:32 +08:00
lyf
b5ad1fa7f8 123 2025-12-05 14:35:59 +08:00
97f962c2ae 参数 2025-12-05 14:30:17 +08:00
9e3cd439f0 修改了参数 2025-12-05 14:28:33 +08:00
ac566a0cf6 添加了pid以及gui 2025-12-05 14:22:47 +08:00
lyf
8f5ced6be3 1 2025-12-05 13:48:14 +08:00
601e248cd6 删了垃圾文件 2025-12-05 13:43:53 +08:00
93935889e0 修改了一点参数 2025-12-05 13:27:42 +08:00
b4c795f77d 参考上海交大 2025-12-05 11:24:23 +08:00
3f60b1f564 参考上海交通大学 2025-12-05 11:20:41 +08:00
f6e7d37da9 参考上海交通大学代码进行修改 2025-12-05 11:13:37 +08:00
593cb37cf7 修正了每次ttl权限不足的问题 2025-12-04 19:58:14 +08:00
lyf
79c07e85bb 添加一个SDK获取代码 2025-12-04 13:04:31 +08:00
3aff16a9e0 解决了画面不全的问题,并且提高了分辨率,画面更全 2025-12-02 21:28:58 +08:00
lyf
eb32ca121d Merge pull request '对于视频流获取方面新加了一个用Mindvision官方说明的获取方式,测试了代码运行是内存占用高达1G' (#3) from Update into main
Reviewed-on: #3
2025-12-02 14:25:24 +08:00
lyf
f833a2aa5f 对于视频流获取方面新加了一个用Mindvision官方说明的获取方式,测试了代码运行是内存占用高达1G 2025-12-02 14:22:17 +08:00
lyf
241b7bc4dc Merge pull request 'TTL-Send_solver' (#2) from TTL-Send_solver into main
Reviewed-on: #2
2025-12-01 18:51:37 +08:00
lyf
c2be6378b7 粘包问题已解决。 2025-12-01 18:41:43 +08:00
lyf
3053029f14 这里对于ttl发送的字符形式做出改变。 2025-11-30 21:26:07 +08:00
lyf
fce57b59c0 Merge pull request '改到能跑' (#1) from Pro into main
Reviewed-on: #1
2025-11-20 19:42:42 +08:00
lyf
c3fc8c3c4f 项目结构大变 2025-11-20 19:38:34 +08:00
lyf
a8791ab3df 项目结构大变 2025-11-20 19:29:00 +08:00
21 changed files with 983 additions and 503 deletions

3
.gitignore vendored
View File

@@ -34,4 +34,5 @@
# BUILD
build/
.cache/
.cache/
.vscode/

View File

@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.10)
project(armor_detector_mdv)
# Set C++ standard
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Find OpenCV package
@@ -15,7 +15,7 @@ include_directories(./inc)
# Add executable for MindVision version
add_executable(armor_detector_mdv
src/main.cpp
src/MindVisionMain.cpp
src/MindVisionCamera.cpp
src/ImagePreprocessor.cpp
src/ArmorDetector.cpp
@@ -23,13 +23,13 @@ add_executable(armor_detector_mdv
src/Visualizer.cpp
src/BallisticPredictor.cpp
src/TTLCommunicator.cpp
src/PidController.cpp
)
# Link OpenCV libraries
target_link_libraries(armor_detector_mdv ${OpenCV_LIBS} -lutil -lpthread)
# Link MindVision SDK library
link_directories("/lib")
target_link_libraries(armor_detector_mdv MVSDK)
# Additional flags for better compilation

View File

@@ -75,4 +75,29 @@ make -j$(nproc)
1. 检查相机是否正确连接
2. 确认MindVision相机驱动是否正确安装
3. 验证相机ID是否正确
4. 检查权限可能需要将用户添加到video组
4. 检查权限可能需要将用户添加到video组
## MindVision-SDK
`Linux`: >**wget https://www.mindvision.com.cn/wp-content/uploads/2023/08/linuxSDK_V2.1.0.37.tar.gz**
## PID设置原理及其方式
- **PID初始真实值 = g_pitch(/yaw)_kp`位于23行` * 相关系数(`位于410行`)**
- **PID调节滑块含义** `46行`轨迹条回调函数为上下限。例如 pos / 100:如果 pos 范围为 0~1000(`位于400行`),则滑块调节范围为 0~10。
### Kp比例系数的作用
- 响应速度Kp值越大系统对误差的响应越快
- 控制力度:直接根据当前误差大小产生控制输出
### Ki积分系数的作用
- 消除稳态误差:累积历史误差,清除长期存在的小偏差
- 系统偏移补偿:补偿机械系统的小偏差和静态误差
### Kd微分系数的作用
- 预测变化趋势:根据误差的变化率进行调节
- 抑制振荡:在系统接近目标时减缓调节速度
**参数调整建议**
- Kp先调首先调整Kp获得基本响应特性
- Kd次调增加Kd减少振荡
- Ki后调最后调节Ki消除稳态误差

View File

@@ -23,6 +23,23 @@ struct ArmorPlate {
double confidence;
std::pair<LightBar, LightBar> pair;
std::vector<cv::Point2f> corners_2d; // Can be computed later for 3D pose
// Axis-aligned bounding rectangle of the armor plate.
// When at least four corner points are available, the rectangle is the
// min/max envelope of *all* corners; otherwise a fixed 60x30 pixel box
// centred on `center` is returned as a fallback.
cv::Rect2d getBoundingRect() const {
if (corners_2d.size() >= 4) {
// Seed the extremes with the first corner, then scan every corner.
cv::Point2f min_pt = corners_2d[0];
cv::Point2f max_pt = corners_2d[0];
for (const auto& pt : corners_2d) {
min_pt.x = std::min(min_pt.x, pt.x);
min_pt.y = std::min(min_pt.y, pt.y);
max_pt.x = std::max(max_pt.x, pt.x);
max_pt.y = std::max(max_pt.y, pt.y);
}
return cv::Rect2d(min_pt.x, min_pt.y, max_pt.x - min_pt.x, max_pt.y - min_pt.y);
}
// Fallback: fixed-size box around the armor centre (sizes in pixels).
return cv::Rect2d(center.x - 30, center.y - 15, 60, 30);
}
};
class ArmorDetector {

View File

@@ -6,21 +6,22 @@
class BallisticPredictor {
public:
BallisticPredictor(float bullet_speed = 16.0f);
~BallisticPredictor(); // Added destructor to properly manage memory
// Predict ballistic point considering target movement and flight time
cv::Point2f* predict_ballistic_point(const cv::Point2f* predicted_center,
const cv::Point2f& img_center,
cv::Point2f* predict_ballistic_point(const cv::Point2f* predicted_center,
const cv::Point2f& img_center,
const cv::Point2f& target_speed);
// Get last ballistic point
const cv::Point2f* get_last_ballistic_point() const { return last_ballistic_point; }
private:
float bullet_speed; // Bullet speed (m/s)
float armor_half_width; // Armor half width (converted to meters)
float armor_half_height; // Armor half height (converted to meters)
cv::Point2f* last_ballistic_point; // Last ballistic prediction
// Calculate distance to target
float calculate_distance(const cv::Point2f& armor_center, const cv::Point2f& img_center, float focal_length = 800.0f);
};

View File

@@ -8,16 +8,12 @@ class ImagePreprocessor {
public:
ImagePreprocessor();
~ImagePreprocessor();
// Get mask based on target color
cv::Mat get_mask(const cv::Mat& frame, const std::string& target_color);
// Get frame with only the target color
cv::Mat get_color_only_frame(const cv::Mat& frame, const std::string& target_color);
// Combined function to return both mask and color-only frame
void process_frame(const cv::Mat& frame, const std::string& target_color,
cv::Mat& mask, cv::Mat& color_only_frame);
// Apply morphological operations to a mask
void apply_morphology(const cv::Mat& input_mask, cv::Mat& output_mask);
// Apply Gaussian blur to reduce noise
void apply_gaussian_blur(const cv::Mat& input, cv::Mat& output, int kernel_size = 6);
};
#endif // IMAGEPREPROCESSOR_H

View File

@@ -6,12 +6,16 @@
class KalmanFilter {
public:
KalmanFilter();
void update(const cv::Point2f& measurement);
cv::Point2f predict();
// Method to update the process noise based on target movement characteristics
void updateProcessNoise(const cv::Point2f& current_measurement);
cv::Point2f get_last_measurement() const { return last_measurement; }
cv::Point2f get_last_prediction() const { return last_prediction; }
cv::Point2f get_current_velocity() const; // New method to get current velocity
bool is_initialized() const { return initialized; }
private:
@@ -19,8 +23,16 @@ private:
bool initialized;
cv::Point2f last_measurement;
cv::Point2f last_prediction;
cv::Point2f prev_measurement;
bool has_prev_measurement;
// Store previous states for velocity and acceleration estimation
cv::Point2f prev_velocity;
double time_elapsed; // Time in seconds between measurements
bool has_prev_velocity;
void init_params();
cv::Point2f estimate_velocity(const cv::Point2f& current, const cv::Point2f& previous);
};
#endif // KALMANFILTER_H

View File

@@ -3,6 +3,7 @@
#include <opencv2/opencv.hpp>
#include <string>
#include <memory>
// MindVision SDK 头文件 - 可能需要根据实际SDK文件调整
extern "C" {
@@ -11,21 +12,29 @@ extern "C" {
class MindVisionCamera {
public:
int camera_handle; // MindVision SDK中的相机句柄
CameraHandle camera_handle; // MindVision SDk中的相机句柄
bool is_opened;
std::string target_color;
int width;
int height;
int fps;
unsigned char* g_pRgbBuffer; // 处理后数据缓存区
tSdkCameraCapbility capability; // 相机能力信息
tSdkImageResolution image_resolution; // 分辨率信息
MindVisionCamera(int cam_id = 0, const std::string& target_color = "red");
~MindVisionCamera();
void set_cam_params();
bool set_cam_params();
bool read_frame(cv::Mat& frame);
bool read_frame_with_color_filter(cv::Mat& frame, cv::Mat& mask, const std::string& target_color);
void release();
bool switch_color(const std::string& target_color);
int get_width() const { return width; }
int get_height() const { return height; }
bool set_resolution(int width, int height);
private:
void set_camera_parameters();
bool initialize_camera(int cam_id);

46
inc/PidController.h Normal file
View File

@@ -0,0 +1,46 @@
#ifndef PID_CONTROLLER_H
#define PID_CONTROLLER_H
#include <opencv2/opencv.hpp>
// Single-axis PID controller used for pitch/yaw aiming control.
class PidController {
public:
// Constructor: all gains default to zero.
PidController(float kp = 0.0f, float ki = 0.0f, float kd = 0.0f);
// Run one PID step and return the control output for the given
// target value (setpoint), measured value and time step dt (seconds).
float update(float setpoint, float measured_value, float dt);
// Reset the controller's internal state (integral, last error, ...).
void reset();
// Setters for the individual PID gains, plus a combined setter.
void setKp(float kp);
void setKi(float ki);
void setKd(float kd);
void setParameters(float kp, float ki, float kd);
// Getters for the PID gains.
float getKp() const;
float getKi() const;
float getKd() const;
// Clamp the controller output to [min, max].
void setOutputLimits(float min, float max);
// Last computed output value.
float getOutput() const;
// Last error value.
float getError() const;
private:
float kp_, ki_, kd_; // PID gains
float last_error_; // error from the previous update
float integral_; // accumulated integral term
float output_; // last computed output
float output_min_, output_max_; // output clamp limits
bool first_iteration_; // true until the first update() call
};
#endif // PID_CONTROLLER_H

View File

@@ -3,17 +3,11 @@
#include <string>
// Placeholder for TTL communication class
// In a real implementation, you would use a C++ serial library like:
// - serial (https://github.com/wjwwood/serial)
// - Boost.Asio
// - or a system-specific approach
class TTLCommunicator {
public:
TTLCommunicator(int baudrate = 115200);
TTLCommunicator(const std::string& port_name = "/dev/ttyUSB0", int baudrate = 115200);
~TTLCommunicator();
bool connect();
void close();
bool send_data(const std::string& data);
@@ -23,8 +17,9 @@ private:
std::string port_name;
int baudrate;
bool connected;
// These would be implemented with actual serial communication code
int serial_fd;
bool open_serial_port();
void close_serial_port();
};

View File

@@ -28,14 +28,17 @@ const float HORIZONTAL_ANGLE_THRESHOLD_RAD = HORIZONTAL_ANGLE_THRESHOLD * CV_PI
const float NEARBY_LIGHT_BAR_THRESHOLD = 500.0f; // Light bar merging distance threshold (pixels)
const float LIGHT_BAR_IOU_THRESHOLD = 0.05f; // Light bar merging IOU threshold
const float ARMOR_DISTANCE_RATIO_MIN = 0.6f; // Armor light bar distance ratio range
const float ARMOR_DISTANCE_RATIO_MAX = 3.0f;
const float ARMOR_DISTANCE_RATIO_MAX = 4.0f;
const float ARMOR_LENGTH_DIFF_RATIO = 0.5f; // Armor light bar length difference ratio
const float ARMOR_ANGLE_DIFF_THRESHOLD = 20.0f * CV_PI / 180.0f; // Armor light bar angle difference threshold (radians)
const float ARMOR_ANGLE_DIFF_THRESHOLD = 15.0f * CV_PI / 180.0f; // Armor light bar angle difference threshold (radians)
// Kalman filter parameters
const float KF_PROCESS_NOISE = 0.02f; // Process noise covariance
const float KF_MEASUREMENT_NOISE = 0.5f; // Measurement noise covariance
// Focal length in pixels (from camera calibration)
const double FOCAL_PIXAL = 600.0; // This should match your actual camera calibration
// TTL communication settings
const int TTL_BAUDRATE = 115200;

View File

@@ -1,4 +1,5 @@
#include "ArmorDetector.h"
#include <iostream>
#include <cmath>
#include <algorithm>
@@ -143,8 +144,10 @@ std::vector<LightBar> ArmorDetector::filter_valid_light_bars(const std::vector<s
float bar_width = width > height ? height : width;
float main_angle = rect.angle;
if (width <= height) {
main_angle += 90.0;
if (width > height) {
// 保持原始角度不变
} else {
main_angle += 90;
}
// Normalize angle to [-90, 90]

View File

@@ -2,20 +2,29 @@
#include "config.h"
#include <cmath>
BallisticPredictor::BallisticPredictor(float bullet_speed)
BallisticPredictor::BallisticPredictor(float bullet_speed)
: bullet_speed(bullet_speed), last_ballistic_point(nullptr) {
armor_half_width = ARMOR_WIDTH / 2000.0f; // Convert to meters
armor_half_height = ARMOR_HEIGHT / 2000.0f; // Convert to meters
}
cv::Point2f* BallisticPredictor::predict_ballistic_point(const cv::Point2f* predicted_center,
const cv::Point2f& img_center,
BallisticPredictor::~BallisticPredictor() {
if (last_ballistic_point != nullptr) {
delete last_ballistic_point;
last_ballistic_point = nullptr;
}
}
cv::Point2f* BallisticPredictor::predict_ballistic_point(const cv::Point2f* predicted_center,
const cv::Point2f& img_center,
const cv::Point2f& target_speed) {
// Clean up the last ballistic point before creating a new one
if (last_ballistic_point != nullptr) {
delete last_ballistic_point;
last_ballistic_point = nullptr;
}
if (predicted_center == nullptr) {
if (last_ballistic_point != nullptr) {
delete last_ballistic_point;
last_ballistic_point = nullptr;
}
return nullptr;
}
@@ -36,9 +45,6 @@ cv::Point2f* BallisticPredictor::predict_ballistic_point(const cv::Point2f* pred
cv::Point2f ballistic_point(ballistic_x, ballistic_y);
// Update last ballistic point
if (last_ballistic_point != nullptr) {
delete last_ballistic_point;
}
last_ballistic_point = new cv::Point2f(ballistic_point);
return last_ballistic_point;

View File

@@ -1,4 +1,5 @@
#include "ImagePreprocessor.h"
#include <algorithm>
#include <cctype>
ImagePreprocessor::ImagePreprocessor() {
@@ -9,47 +10,16 @@ ImagePreprocessor::~ImagePreprocessor() {
// Destructor implementation
}
cv::Mat ImagePreprocessor::get_mask(const cv::Mat& frame, const std::string& target_color) {
cv::Mat hsv_frame;
cv::cvtColor(frame, hsv_frame, cv::COLOR_BGR2HSV);
cv::Mat mask;
if (target_color == "red") {
// Red color range in HSV
cv::Mat mask1, mask2;
cv::inRange(hsv_frame, cv::Scalar(0, 43, 49), cv::Scalar(25, 255, 255), mask1);
cv::inRange(hsv_frame, cv::Scalar(156, 46, 49), cv::Scalar(180, 255, 255), mask2);
mask = mask1 | mask2;
} else if (target_color == "blue") {
// Blue color range in HSV
cv::inRange(hsv_frame, cv::Scalar(83, 200, 44), cv::Scalar(130, 255, 255), mask);
} else {
// Default to empty mask if color not recognized
mask = cv::Mat::zeros(frame.size(), CV_8UC1);
}
void ImagePreprocessor::apply_morphology(const cv::Mat& input_mask, cv::Mat& output_mask) {
// Apply morphological operations to reduce noise
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
cv::morphologyEx(mask, mask, cv::MORPH_OPEN, kernel);
cv::morphologyEx(mask, mask, cv::MORPH_CLOSE, kernel);
return mask;
cv::morphologyEx(input_mask, output_mask, cv::MORPH_OPEN, kernel);
cv::morphologyEx(output_mask, output_mask, cv::MORPH_CLOSE, kernel);
}
cv::Mat ImagePreprocessor::get_color_only_frame(const cv::Mat& frame, const std::string& target_color) {
cv::Mat mask = get_mask(frame, target_color);
// 不再反转掩膜,直接使用原始掩膜
// 目标颜色区域为白色(255),非目标颜色区域为黑色(0)
cv::Mat color_only_frame;
frame.copyTo(color_only_frame, mask); // 使用掩膜复制原始帧,只保留目标颜色部分
return color_only_frame;
}
void ImagePreprocessor::process_frame(const cv::Mat& frame, const std::string& target_color,
cv::Mat& mask, cv::Mat& color_only_frame) {
mask = get_mask(frame, target_color);
color_only_frame = get_color_only_frame(frame, target_color);
void ImagePreprocessor::apply_gaussian_blur(const cv::Mat& input, cv::Mat& output, int kernel_size) {
// Apply Gaussian blur to reduce noise
// Ensure kernel size is odd for Gaussian blur
int adjusted_kernel_size = kernel_size % 2 == 0 ? kernel_size + 1 : kernel_size;
cv::GaussianBlur(input, output, cv::Size(adjusted_kernel_size, adjusted_kernel_size), 0);
}

View File

@@ -1,38 +1,90 @@
#include "KalmanFilter.h"
#include "config.h"
#include <iostream>
#include <chrono>
KalmanFilter::KalmanFilter() {
// 4 state variables (x, y, dx, dy), 2 measurements (x, y)
kf.init(4, 2, 0);
// 6 state variables (x, y, vx, vy, ax, ay), 2 measurements (x, y)
kf.init(6, 2, 0);
init_params();
initialized = false;
has_prev_measurement = false;
has_prev_velocity = false;
time_elapsed = 1.0 / 100.0; // Assuming 100 FPS as default
last_measurement = cv::Point2f(0, 0);
last_prediction = cv::Point2f(0, 0);
prev_measurement = cv::Point2f(0, 0);
prev_velocity = cv::Point2f(0, 0);
}
void KalmanFilter::init_params() {
// Transition matrix: x(k+1) = x(k) + dx(k), y(k+1) = y(k) + dy(k), dx(k+1) = dx(k), dy(k+1) = dy(k)
kf.transitionMatrix = (cv::Mat_<float>(4, 4) <<
1, 0, 1, 0,
0, 1, 0, 1,
0, 0, 1, 0,
0, 0, 0, 1
// Transition matrix for constant acceleration model
// x(k+1) = x(k) + vx(k)*dt + 0.5*ax(k)*dt^2
// y(k+1) = y(k) + vy(k)*dt + 0.5*ay(k)*dt^2
// vx(k+1) = vx(k) + ax(k)*dt
// vy(k+1) = vy(k) + ay(k)*dt
// ax(k+1) = ax(k)
// ay(k+1) = ay(k)
double dt = time_elapsed;
kf.transitionMatrix = (cv::Mat_<float>(6, 6) <<
1, 0, dt, 0, 0.5*dt*dt, 0,
0, 1, 0, dt, 0, 0.5*dt*dt,
0, 0, 1, 0, dt, 0,
0, 0, 0, 1, 0, dt,
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
);
// Measurement matrix: measuring x and y
kf.measurementMatrix = (cv::Mat_<float>(2, 4) <<
1, 0, 0, 0,
0, 1, 0, 0
// Measurement matrix: measuring x and y only
kf.measurementMatrix = (cv::Mat_<float>(2, 6) <<
1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0
);
// Process noise covariance
kf.processNoiseCov = cv::Mat::eye(4, 4, CV_32F) * KF_PROCESS_NOISE;
// Process noise covariance - higher for acceleration components
kf.processNoiseCov = cv::Mat::zeros(6, 6, CV_32F);
kf.processNoiseCov.at<float>(0, 0) = 1.0f; // x position noise
kf.processNoiseCov.at<float>(1, 1) = 1.0f; // y position noise
kf.processNoiseCov.at<float>(2, 2) = 4.0f; // x velocity noise
kf.processNoiseCov.at<float>(3, 3) = 4.0f; // y velocity noise
kf.processNoiseCov.at<float>(4, 4) = 8.0f; // x acceleration noise
kf.processNoiseCov.at<float>(5, 5) = 8.0f; // y acceleration noise
// Measurement noise covariance
kf.measurementNoiseCov = cv::Mat::eye(2, 2, CV_32F) * KF_MEASUREMENT_NOISE;
kf.measurementNoiseCov = cv::Mat::eye(2, 2, CV_32F) * 0.1f; // Lower measurement noise
// Error covariance post
kf.errorCovPost = cv::Mat::eye(4, 4, CV_32F);
kf.errorCovPost = cv::Mat::eye(6, 6, CV_32F) * 0.1f;
}
cv::Point2f KalmanFilter::estimate_velocity(const cv::Point2f& current, const cv::Point2f& previous) {
    // Finite-difference velocity: displacement between the two samples
    // divided by the elapsed time stored in `time_elapsed` (seconds).
    cv::Point2f displacement = current - previous;
    displacement.x /= (float)time_elapsed;
    displacement.y /= (float)time_elapsed;
    return displacement;
}
// Adapt the process-noise covariance to the target's manoeuvring:
// estimates acceleration from consecutive velocity samples and scales
// the two acceleration-noise diagonal entries so the filter trusts its
// motion model less during rapid manoeuvres. No-op until both a previous
// measurement and a previous velocity estimate are available.
void KalmanFilter::updateProcessNoise(const cv::Point2f& current_measurement) {
if (has_prev_measurement && has_prev_velocity) {
// Finite-difference velocity from the last two measurements.
cv::Point2f current_velocity = estimate_velocity(current_measurement, prev_measurement);
// Acceleration = change in velocity over the elapsed time.
cv::Point2f acceleration = current_velocity - prev_velocity;
acceleration.x /= (float)time_elapsed;
acceleration.y /= (float)time_elapsed;
// Adjust process noise based on estimated acceleration (higher acceleration = higher uncertainty)
float acc_magnitude = std::sqrt(acceleration.x * acceleration.x + acceleration.y * acceleration.y);
// Increase process noise for rapid maneuvers; note the factor is
// unbounded, so a measurement glitch produces a very large value.
float noise_factor = 1.0f + 0.5f * acc_magnitude;
kf.processNoiseCov.at<float>(4, 4) = 8.0f * noise_factor; // ax noise
kf.processNoiseCov.at<float>(5, 5) = 8.0f * noise_factor; // ay noise
}
}
void KalmanFilter::update(const cv::Point2f& measurement) {
@@ -40,19 +92,65 @@ void KalmanFilter::update(const cv::Point2f& measurement) {
if (!initialized) {
// Initialize state if not done yet
kf.statePost = (cv::Mat_<float>(4, 1) <<
measurement.x,
measurement.y,
0.0f,
0.0f);
kf.statePost = (cv::Mat_<float>(6, 1) <<
measurement.x, // x position
measurement.y, // y position
0.0f, // x velocity
0.0f, // y velocity
0.0f, // x acceleration
0.0f // y acceleration
);
initialized = true;
} else {
// Update process noise based on target movement characteristics
updateProcessNoise(measurement);
// Perform correction step
kf.correct(measurement_mat);
}
// Update previous state for next iteration
if (has_prev_measurement) {
// Calculate velocity based on position change
cv::Point2f current_velocity = estimate_velocity(measurement, prev_measurement);
if (has_prev_velocity) {
// Estimate acceleration based on velocity change
cv::Point2f acceleration = current_velocity - prev_velocity;
acceleration.x /= (float)time_elapsed;
acceleration.y /= (float)time_elapsed;
// Update state with estimated acceleration
kf.statePost.at<float>(4) = acceleration.x; // x acceleration
kf.statePost.at<float>(5) = acceleration.y; // y acceleration
}
// Update previous velocity
prev_velocity = current_velocity;
has_prev_velocity = true;
} else {
// Initialize previous velocity as zero
prev_velocity = cv::Point2f(0, 0);
has_prev_velocity = true;
}
// Update stored measurements
prev_measurement = last_measurement;
has_prev_measurement = true;
last_measurement = measurement;
}
cv::Point2f KalmanFilter::get_current_velocity() const {
    // Velocity lives in state-vector slots 2 (vx) and 3 (vy).
    // Before the first measurement the filter reports zero velocity.
    if (!initialized) {
        return cv::Point2f(0.0f, 0.0f);
    }
    return cv::Point2f(kf.statePost.at<float>(2),
                       kf.statePost.at<float>(3));
}
cv::Point2f KalmanFilter::predict() {
if (!initialized) {
return cv::Point2f(0, 0);

View File

@@ -1,20 +1,27 @@
#include "MindVisionCamera.h"
#include <iostream>
#include <memory>
MindVisionCamera::MindVisionCamera(int cam_id, const std::string& target_color)
: camera_handle(-1), is_opened(false), width(640), height(480), fps(100), g_pRgbBuffer(nullptr) {
MindVisionCamera::MindVisionCamera(int cam_id, const std::string& target_color)
: camera_handle(-1), is_opened(false), width(640), height(480), fps(30) {
if (!initialize_camera(cam_id)) {
// 即使初始化失败,也要清理可能已分配的资源
release();
throw std::runtime_error("Cannot initialize MindVision camera!");
}
is_opened = true;
this->target_color = target_color;
if (this->target_color != "red" && this->target_color != "blue") {
release(); // 清理已初始化的资源
throw std::invalid_argument("Only 'red' or 'blue' colors are supported");
}
set_cam_params();
if (!set_cam_params()) { // 修改set_cam_params现在返回bool值
release(); // 清理已初始化的资源
throw std::runtime_error("Failed to set camera parameters!");
}
}
bool MindVisionCamera::initialize_camera(int cam_id) {
@@ -52,25 +59,19 @@ bool MindVisionCamera::initialize_camera(int cam_id) {
}
// 获取相机能力
tSdkCameraCapbility capability;
iStatus = CameraGetCapability(camera_handle, &capability);
if (iStatus != CAMERA_STATUS_SUCCESS) {
std::cerr << "Failed to get camera capability! Error code: " << iStatus << std::endl;
return false;
}
// 设置输出格式
if (capability.sIspCapacity.bMonoSensor) {
CameraSetIspOutFormat(camera_handle, CAMERA_MEDIA_TYPE_MONO8);
} else {
CameraSetIspOutFormat(camera_handle, CAMERA_MEDIA_TYPE_BGR8);
}
// 让SDK进入工作模式
// 设置输出格式 - 保持与原始工作项目一致直接设置为BGR8格式
CameraSetIspOutFormat(camera_handle, CAMERA_MEDIA_TYPE_BGR8);
// 让SDK进入工作模式 - 根据原始OpenCv项目应该在设置格式前调用
CameraPlay(camera_handle);
// 获取并设置分辨率为 640x480
tSdkImageResolution image_resolution;
int status = CameraGetImageResolution(camera_handle, &image_resolution);
std::cout << "Default resolution query returned: " << status << std::endl;
@@ -95,59 +96,62 @@ bool MindVisionCamera::initialize_camera(int cam_id) {
width = image_resolution.iWidth;
height = image_resolution.iHeight;
// 开启自动曝光
CameraSetAeState(camera_handle, true);
// 分配处理后的数据缓存区 - 使用最大分辨率,与原始项目一致
g_pRgbBuffer = (unsigned char *)malloc(capability.sResolutionRange.iHeightMax *
capability.sResolutionRange.iWidthMax * 3);
// 设置帧率
CameraSetFrameSpeed(camera_handle, 10); // 设置为中等帧率
// 设置帧率(虽然稍后 set_cam_params 会再次设置,但这里先设定)
CameraSetFrameSpeed(camera_handle, 2); // 设置为较高帧率
std::cout << "Initialized camera with resolution: " << width << "x" << height << std::endl;
if (!g_pRgbBuffer) {
std::cerr << "Failed to allocate global RGB buffer" << std::endl;
return false;
}
return true;
}
MindVisionCamera::~MindVisionCamera() {
release();
try {
if (g_pRgbBuffer) {
free(g_pRgbBuffer);
g_pRgbBuffer = nullptr;
}
release();
} catch (...) {
// 析构函数中捕获异常但不重新抛出
// 确保即使在异常情况下也能正确清理资源
}
}
void MindVisionCamera::set_camera_parameters() {
// 先设置为手动曝光模式
CameraSetAeState(camera_handle, false);
// 根据颜色设置曝光参数
// 根据颜色设置曝光参数,以优化装甲板检测效果
if (target_color == "red") {
CameraSetExposureTime(camera_handle, 10000); // 5ms曝光时间
CameraSetExposureTime(camera_handle, 5000); // 红色目标曝光时间5ms (5000微秒)
} else if (target_color == "blue") {
CameraSetExposureTime(camera_handle, 4000); // 8ms曝光时间
CameraSetExposureTime(camera_handle, 8000); // 蓝色目标曝光时间8ms (8000微秒)
}
// 设置白平衡
CameraSetWbMode(camera_handle, false); // 关闭自动白平衡
CameraSetOnceWB(camera_handle); // 一次性白平衡
// 设置其他参数
CameraSetTriggerMode(camera_handle, 0); // 连续采集模式
// 设置帧率为100FPS
CameraSetFrameSpeed(camera_handle, 2); // 2代表较高帧率具体值可能需要调整
}
void MindVisionCamera::set_cam_params() {
bool MindVisionCamera::set_cam_params() {
// Apply the camera parameters (exposure, white balance, ...); the
// resolution is then forced back to 640x480 below.
set_camera_parameters();
// Ensure resolution remains 640x480 after setting other parameters
tSdkImageResolution image_resolution;
image_resolution.iIndex = 0xFF; // 0xFF selects a custom (non-preset) resolution
image_resolution.iWidth = 640;
image_resolution.iHeight = 480;
image_resolution.iWidthFOV = image_resolution.iWidth;
image_resolution.iHeightFOV = image_resolution.iHeight;
image_resolution.iHOffsetFOV = 0;
image_resolution.iVOffsetFOV = 0;
int status = CameraSetImageResolution(camera_handle, &image_resolution);
if (status != CAMERA_STATUS_SUCCESS) {
std::cout << "Failed to maintain resolution at 640x480 in set_cam_params" << std::endl;
} else {
std::cout << "Confirmed camera resolution: " << image_resolution.iWidth << "x" << image_resolution.iHeight << std::endl;
}
// NOTE(review): returns true even when CameraSetImageResolution failed —
// the failure is only logged. Confirm whether callers (constructor,
// switch_color) should instead see false here.
return true;
}
bool MindVisionCamera::read_frame(cv::Mat& frame) {
@@ -157,39 +161,137 @@ bool MindVisionCamera::read_frame(cv::Mat& frame) {
tSdkFrameHead sFrameInfo;
BYTE* pbyBuffer;
// 获取一帧数据
if (CameraGetImageBuffer(camera_handle, &sFrameInfo, &pbyBuffer, 1000) == CAMERA_STATUS_SUCCESS) {
// 处理图像数据
unsigned char* g_pRgbBuffer = new unsigned char[sFrameInfo.iWidth * sFrameInfo.iHeight * 3];
CameraImageProcess(camera_handle, pbyBuffer, g_pRgbBuffer, &sFrameInfo);
// 创建OpenCV Mat对象
frame = cv::Mat(
cv::Size(sFrameInfo.iWidth, sFrameInfo.iHeight),
sFrameInfo.uiMediaType == CAMERA_MEDIA_TYPE_MONO8 ? CV_8UC1 : CV_8UC3,
g_pRgbBuffer
).clone(); // clone()确保数据被复制,而不是共享指针
// 直接使用CameraGetImageBufferEx获取处理后的RGB数据提高效率
INT width, height;
unsigned char* pData = CameraGetImageBufferEx(camera_handle, &width, &height, 2000);
if(pData != NULL) {
// 创建OpenCV Mat对象,直接使用获取的数据
cv::Mat temp_mat(height, width, CV_8UC3, pData);
frame = temp_mat.clone(); // clone()确保数据被复制,而不是共享指针
// CameraGetImageBufferEx方式不需要手动释放缓冲区
return true;
}
// 使用全局缓冲区处理图像数据 - 与原始项目一致
int status = CameraImageProcess(camera_handle, pbyBuffer, g_pRgbBuffer, &sFrameInfo);
if (status != CAMERA_STATUS_SUCCESS) {
std::cerr << "Failed to process image: error code " << status << std::endl;
CameraReleaseImageBuffer(camera_handle, pbyBuffer);
return false;
}
// 创建OpenCV Mat对象 - 使用全局缓冲区固定为CV_8UC3格式
cv::Mat temp_mat(sFrameInfo.iHeight, sFrameInfo.iWidth, CV_8UC3, g_pRgbBuffer);
frame = temp_mat.clone(); // clone()确保数据被复制,而不是共享指针
// 释放原始缓冲区
CameraReleaseImageBuffer(camera_handle, pbyBuffer);
// 释放临时缓冲区
delete[] g_pRgbBuffer;
return true;
}
return false;
}
// Grab one frame and simultaneously build a binary mask of the target
// colour ("red" or "blue"); any other colour string yields an all-zero
// mask. Returns false if the camera is closed or no frame is available.
bool MindVisionCamera::read_frame_with_color_filter(cv::Mat& frame, cv::Mat& raw_mask, const std::string& target_color) {
if (!is_opened) {
return false;
}
// Preferred path: CameraGetImageBufferEx returns already-processed data.
INT width, height;
unsigned char* pData = CameraGetImageBufferEx(camera_handle, &width, &height, 2000);
if(pData != NULL) {
// Wrap the SDK buffer in a Mat over the returned data.
cv::Mat original_img(height, width, CV_8UC3, pData);
frame = original_img.clone(); // clone() copies the pixels instead of sharing the SDK buffer
// Convert to HSV for colour thresholding.
cv::Mat hsv_img;
cv::cvtColor(frame, hsv_img, cv::COLOR_BGR2HSV);
// Build the raw mask for the requested colour.
if (target_color == "red") {
// Red wraps around the hue axis, so two ranges are combined.
cv::Mat mask1, mask2;
cv::inRange(hsv_img, cv::Scalar(0, 43, 49), cv::Scalar(25, 255, 255), mask1);
cv::inRange(hsv_img, cv::Scalar(156, 46, 49), cv::Scalar(180, 255, 255), mask2);
raw_mask = mask1 | mask2;
} else if (target_color == "blue") {
// Blue color range in HSV
cv::inRange(hsv_img, cv::Scalar(83, 200, 44), cv::Scalar(130, 255, 255), raw_mask);
} else {
raw_mask = cv::Mat::zeros(hsv_img.size(), CV_8UC1);
}
// Buffers from CameraGetImageBufferEx need no manual release.
return true;
}
// Fallback path when CameraGetImageBufferEx fails: raw capture plus
// CameraImageProcess into the preallocated global buffer.
tSdkFrameHead sFrameInfo;
BYTE* pbyBuffer;
// Grab one raw frame (1000 ms timeout).
if (CameraGetImageBuffer(camera_handle, &sFrameInfo, &pbyBuffer, 1000) == CAMERA_STATUS_SUCCESS) {
// Process the raw data into the global RGB buffer (matches the
// original reference project's approach).
int status = CameraImageProcess(camera_handle, pbyBuffer, g_pRgbBuffer, &sFrameInfo);
if (status != CAMERA_STATUS_SUCCESS) {
std::cerr << "Failed to process image: error code " << status << std::endl;
CameraReleaseImageBuffer(camera_handle, pbyBuffer);
return false;
}
// Wrap the processed buffer; format is fixed to CV_8UC3 here.
cv::Mat original_img(sFrameInfo.iHeight, sFrameInfo.iWidth, CV_8UC3, g_pRgbBuffer);
// Copy so later operations cannot clobber the shared global buffer.
frame = original_img.clone();
// Convert to HSV for colour thresholding.
cv::Mat hsv_img;
cv::cvtColor(frame, hsv_img, cv::COLOR_BGR2HSV);
// Build the raw mask for the requested colour (same logic as above).
if (target_color == "red") {
// Red color range in HSV
cv::Mat mask1, mask2;
cv::inRange(hsv_img, cv::Scalar(0, 43, 49), cv::Scalar(25, 255, 255), mask1);
cv::inRange(hsv_img, cv::Scalar(156, 46, 49), cv::Scalar(180, 255, 255), mask2);
raw_mask = mask1 | mask2;
} else if (target_color == "blue") {
// Blue color range in HSV
cv::inRange(hsv_img, cv::Scalar(83, 200, 44), cv::Scalar(130, 255, 255), raw_mask);
} else {
raw_mask = cv::Mat::zeros(hsv_img.size(), CV_8UC1);
}
// Release the SDK's raw capture buffer.
CameraReleaseImageBuffer(camera_handle, pbyBuffer);
return true;
}
return false;
}
void MindVisionCamera::release() {
if (is_opened && camera_handle >= 0) {
// 停止采集
if (camera_handle != 0) {
// 停止采集不管is_opened状态如何都尝试释放
CameraPause(camera_handle);
CameraUnInit(camera_handle);
camera_handle = -1;
is_opened = false;
}
if(g_pRgbBuffer) {
free(g_pRgbBuffer);
g_pRgbBuffer = nullptr;
}
is_opened = false;
}
bool MindVisionCamera::switch_color(const std::string& new_color) {
@@ -201,7 +303,32 @@ bool MindVisionCamera::switch_color(const std::string& new_color) {
if ((lower_color == "red" || lower_color == "blue") && lower_color != target_color) {
target_color = lower_color;
set_cam_params();
return set_cam_params(); // 返回set_cam_params的结果
}
return false;
}
bool MindVisionCamera::set_resolution(int width, int height){
if (!is_opened) {
return false;
}
tSdkImageResolution res;
int status = CameraGetImageResolution(camera_handle, &res);
res.iIndex = 0xFF;
res.iWidth = width;
res.iHeight = height;
res.iWidthFOV = capability.sResolutionRange.iWidthMax;
res.iHeightFOV = capability.sResolutionRange.iHeightMax;
res.iHOffsetFOV = 0;
res.iVOffsetFOV = 0;
status = CameraSetImageResolution(camera_handle, &res);
if (status == CAMERA_STATUS_SUCCESS) {
this->width = width;
this->height = height;
image_resolution = res;
return true;
}
return false;

View File

@@ -1,10 +1,14 @@
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <thread>
#include <memory>
#include <unistd.h>
#include <math.h>
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include "config.h"
#include "MindVisionCamera.h" // 使用MindVision相机
@@ -14,54 +18,133 @@
#include "Visualizer.h"
#include "BallisticPredictor.h"
#include "TTLCommunicator.h"
#include "PidController.h"
// Global PID gains. Plain float globals are used (instead of a config
// object) to avoid static-initialisation problems.
float g_pitch_kp = 0.5217f;
float g_pitch_ki = 0.006f;
float g_pitch_kd = 0.0773f;
float g_yaw_kp = 0.6419f;
float g_yaw_ki = 0.2409f;
float g_yaw_kd = 0.4372f;
// PID controller instances, seeded with the default gains above.
PidController pitch_pid(g_pitch_kp, g_pitch_ki, g_pitch_kd);
PidController yaw_pid(g_yaw_kp, g_yaw_ki, g_yaw_kd);
// Push the latest global gain values into the two PID controllers.
// The very first call is skipped — presumably to keep the gains the
// controllers were constructed with until the trackbars have actually
// been moved (TODO confirm intent).
void update_pid_controllers() {
static bool first_call = true;
if (first_call) {
first_call = false;
return;
}
pitch_pid.setParameters(g_pitch_kp, g_pitch_ki, g_pitch_kd);
yaw_pid.setParameters(g_yaw_kp, g_yaw_ki, g_yaw_kd);
}
// Trackbar callbacks: each maps the integer slider position onto one
// of the float PID gain globals.
void on_pitch_kp_trackbar(int slider_pos, void*) {
    // One slider step corresponds to a Kp step of 0.0001.
    g_pitch_kp = static_cast<float>(slider_pos) / 10000.0f;
}
// Trackbar callback for the pitch Ki gain.
void on_pitch_ki_trackbar(int slider_pos, void*) {
    // One slider step corresponds to a Ki step of 0.00001.
    g_pitch_ki = static_cast<float>(slider_pos) / 100000.0f;
}
// Trackbar callback for the pitch Kd gain.
void on_pitch_kd_trackbar(int slider_pos, void*) {
    // One slider step corresponds to a Kd step of 0.00001.
    g_pitch_kd = static_cast<float>(slider_pos) / 100000.0f;
}
// Trackbar callback for the yaw Kp gain.
void on_yaw_kp_trackbar(int slider_pos, void*) {
    // One slider step corresponds to a Kp step of 0.0001.
    g_yaw_kp = static_cast<float>(slider_pos) / 10000.0f;
}
// Trackbar callback for the yaw Ki gain.
// Bug fix: the divisor was 100000.1f — clearly a typo, since every other
// Ki/Kd callback divides by 100000.0f. The old value skewed the mapping
// from slider position to gain by a factor of 100000.1/100000.
void on_yaw_ki_trackbar(int pos, void*) {
    g_yaw_ki = pos / 100000.0f;
}
// Trackbar callback for the yaw Kd gain.
void on_yaw_kd_trackbar(int slider_pos, void*) {
    // One slider step corresponds to a Kd step of 0.00001.
    g_yaw_kd = static_cast<float>(slider_pos) / 100000.0f;
}
// Function to output control data to TTL device (with enable control)
void output_control_data(const cv::Point2f* ballistic_point,
const std::string& target_color,
int frame_counter,
TTLCommunicator* ttl_communicator,
const cv::Point2f& img_center,
bool use_ttl) {
// Only send data when TTL is enabled and meets frame interval
if (use_ttl && frame_counter % 5 == 0) {
std::string send_str;
// Only send data when TTL is enabled and has valid target
if (use_ttl && ballistic_point != nullptr) {
std::ostringstream send_str;
if (ballistic_point != nullptr) {
// Calculate offset (based on actual image center)
int ballistic_offset_x = static_cast<int>(ballistic_point->x - img_center.x);
int ballistic_offset_y = static_cast<int>(ballistic_point->y - img_center.y);
// Calculate offset (based on actual image center)
float raw_offset_x = -(ballistic_point->x - img_center.x); // yaw error
float raw_offset_y = -(img_center.y - ballistic_point->y ); // pitch error
// Color simplification mapping
std::string simplified_color = target_color;
if (target_color == "red") simplified_color = "r";
else if (target_color == "blue") simplified_color = "b";
// Calculate time delta
static auto last_time = std::chrono::high_resolution_clock::now();
auto current_time = std::chrono::high_resolution_clock::now();
float dt = std::chrono::duration<float>(current_time - last_time).count();
if (dt <= 0) dt = 0.01f; // Minimum dt to avoid division by zero
last_time = current_time;
// Construct send string
send_str = "s " + simplified_color + " " + std::to_string(ballistic_offset_x) + " " + std::to_string(ballistic_offset_y);
} else {
// Format when no detection result
send_str = "s u 0 0";
}
// Update PID controllers with latest parameters
update_pid_controllers();
// Apply PID control to the pitch (vertical) component
float pid_pitch_output = pitch_pid.update(0.0f, raw_offset_y, dt); // Setpoint is 0, error is raw_offset_y
// Apply PID control to the yaw (horizontal) component
float pid_yaw_output = yaw_pid.update(0.0f, raw_offset_x, dt); // Setpoint is 0, error is raw_offset_x
// Convert PID outputs to the expected format
// The PID output might be large, so we might need to scale it
int ballistic_offset_yaw = 1.4*(-static_cast<int>(pid_yaw_output));
int ballistic_offset_pitch = 2*(-static_cast<int>(pid_pitch_output));
// Color simplification mapping
std::string simplified_color = target_color;
if (target_color == "red") simplified_color = "r";
else if (target_color == "blue") simplified_color = "b";
// Construct send string (original format)
send_str << "#s " << simplified_color << " " << std::to_string(ballistic_offset_yaw) << " " << std::to_string(ballistic_offset_pitch) << "*\n";
// Send data
if (ttl_communicator != nullptr) {
ttl_communicator->send_data(send_str);
ttl_communicator->send_data(send_str.str());
}else{
std::cerr << "Error: TTLCommunicator is a null pointer!" << std::endl;
}
}
}
void set_camera_resolution(MindVisionCamera& , int width, int height) {
void set_camera_resolution(MindVisionCamera& camera, int width, int height) {
// The resolution is set during camera initialization in MindVision
// We need to implement a method in MindVisionCamera to change resolution
// For now, we'll just log the intended change
std::cout << "Setting camera resolution to: " << width << "x" << height << std::endl;
if (camera.set_resolution(width, height)){
std::cout << "Successfully set camera resolution to: " << width << "x" << height << std::endl;
} else {
std::cerr << "Failed to set camera resolution to: " << width << "x" << height << std::endl;
}
}
int main(int /*argc*/, char* /*argv*/[]) {
std::string target_color = "blue";
static int Numbe = 0;
std::string target_color = "red";
int cam_id = 0;
cv::Size default_resolution(640, 480);
bool use_ttl = false; // Set to false to disable TTL communication
cv::Size default_resolution(1280, 720); // Changed to 640x480 for consistency with SJTU project
bool use_ttl = true; // Set to false to disable TTL communication
if (Numbe == 0) {
// 执行 shell 命令(注意安全风险!)
std::system("sudo chmod 777 /dev/tty*");
Numbe++;
}
// Define optional resolution list (adjust based on camera support)
std::vector<cv::Size> resolutions = {
@@ -83,9 +166,14 @@ int main(int /*argc*/, char* /*argv*/[]) {
// Initialize TTL communication (only when enabled)
TTLCommunicator* ttl = nullptr;
if (use_ttl) {
ttl = new TTLCommunicator(TTL_BAUDRATE);
if (!ttl->connect()) {
std::cout << "Warning: Cannot establish TTL connection, will continue running but not send data" << std::endl;
// On Linux, the port would typically be /dev/ttyUSB0, /dev/ttyACM0, etc.
ttl = new TTLCommunicator("/dev/ttyUSB0", TTL_BAUDRATE);
if (ttl != nullptr) {
if (!ttl->connect()) {
std::cout << "Warning: Cannot establish TTL connection, will continue running but not send data" << std::endl;
}
} else {
std::cout << "Error: Failed to create TTL communicator instance" << std::endl;
}
} else {
std::cout << "TTL communication disabled" << std::endl;
@@ -112,6 +200,13 @@ int main(int /*argc*/, char* /*argv*/[]) {
KalmanFilter kalman_tracker;
Visualizer visualizer;
// Initialize KCF tracker
cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
bool is_tracking = false;
cv::Rect tracking_roi;
int tracking_frame_count = 0;
const int MAX_TRACKING_FRAMES = 100; // Maximum frames to track before returning to search
int frame_counter = 0; // Counter to control output frequency
int max_consecutive_predicts = 20; // Maximum consecutive prediction times
int consecutive_predicts = 0; // Current consecutive prediction count
@@ -119,46 +214,118 @@ int main(int /*argc*/, char* /*argv*/[]) {
cv::Mat frame;
try {
while (true) {
if (!camera.read_frame(frame)) {
std::cout << "Cannot read from MindVision camera, exiting!" << std::endl;
// 使用新的颜色过滤方法同时获取图像和原始掩码
cv::Mat raw_mask;
if (!camera.read_frame_with_color_filter(frame, raw_mask, target_color)) {
std::cout << "Cannot read from MindVision camera, exiting!HERERER" << std::endl;
break;
}
// Get actual image size and calculate center
cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
// Preprocessing
cv::Mat mask, color_only_frame;
preprocessor.process_frame(frame, target_color, mask, color_only_frame);
// 使用OpenCV进行形态学处理
cv::Mat mask;
preprocessor.apply_morphology(raw_mask, mask);
// Armor plate detection
std::vector<LightBar> valid_light_bars;
std::vector<ArmorPlate> armor_plates;
detector.detect(mask, target_color, valid_light_bars, armor_plates);
// 生成彩色图像(仅显示目标颜色)
cv::Mat color_only_frame;
frame.copyTo(color_only_frame, raw_mask);
// Kalman filter tracking (modified part: get target speed)
cv::Point2f* current_center = nullptr;
if (!armor_plates.empty()) {
current_center = &(armor_plates[0].center);
// Initialize tracking center
cv::Point2f* tracking_center = nullptr;
std::unique_ptr<cv::Point2f> tracking_point = nullptr;
if (is_tracking) {
// Update tracker
bool success = tracker->update(frame, tracking_roi);
if (success && tracking_roi.area() > 0) {
// Calculate center of tracked ROI
tracking_point = std::make_unique<cv::Point2f>(
tracking_roi.x + tracking_roi.width / 2.0f,
tracking_roi.y + tracking_roi.height / 2.0f
);
tracking_center = tracking_point.get();
tracking_frame_count++;
// If tracking for too long or detection is available, search for armor again
if (tracking_frame_count > MAX_TRACKING_FRAMES) {
is_tracking = false;
tracking_frame_count = 0;
}
} else {
// Tracking failed, return to detection mode
is_tracking = false;
tracking_frame_count = 0;
}
}
cv::Point2f* predicted_center = nullptr;
// Armor plate detection - only when not tracking or need to verify tracking
std::vector<LightBar> valid_light_bars;
std::vector<ArmorPlate> armor_plates;
if (current_center != nullptr) {
if (!is_tracking || tracking_frame_count % 10 == 0) { // Detect every 10 frames when tracking
detector.detect(mask, target_color, valid_light_bars, armor_plates);
}
// Kalman filter tracking (modified part: get target speed)
cv::Point2f* detection_center = nullptr;
if (!armor_plates.empty()) {
detection_center = &(armor_plates[0].center);
}
// Use smart pointer for safer memory management
std::unique_ptr<cv::Point2f> predicted_center = nullptr;
// Priority: detection -> tracking -> Kalman prediction
if (detection_center != nullptr) {
// Has detection result: update Kalman filter, reset consecutive prediction count
kalman_tracker.update(*current_center);
predicted_center = new cv::Point2f(kalman_tracker.predict());
kalman_tracker.update(*detection_center);
predicted_center = std::make_unique<cv::Point2f>(kalman_tracker.predict());
// Get velocity from the Kalman filter
target_speed = cv::Point2f(0.0f, 0.0f); // Kalman filter in OpenCV doesn't directly expose velocity
// Start tracking if successfully detected
if (!is_tracking && !armor_plates.empty() && armor_plates[0].corners_2d.size() >= 4) {
// Only start tracking if we have all 4 corners
cv::Rect2d armor_rect = cv::boundingRect(std::vector<cv::Point2f>{
armor_plates[0].corners_2d[0],
armor_plates[0].corners_2d[1],
armor_plates[0].corners_2d[2],
armor_plates[0].corners_2d[3]
});
// Expand the bounding box slightly for better tracking
cv::Rect2d expanded_rect = cv::Rect2d(
armor_rect.x - armor_rect.width * 0.1,
armor_rect.y - armor_rect.height * 0.1,
armor_rect.width * 1.2,
armor_rect.height * 1.2
);
// Ensure the rectangle is within frame bounds
expanded_rect = expanded_rect & cv::Rect2d(0, 0, frame.cols, frame.rows);
// Initialize tracker only if the rectangle has positive area
if (expanded_rect.area() > 0) {
tracker = cv::TrackerKCF::create(); // Create new tracker instance
tracker->init(frame, expanded_rect);
tracking_roi = expanded_rect;
is_tracking = true;
tracking_frame_count = 0;
}
}
consecutive_predicts = 0;
} else if (is_tracking && tracking_center != nullptr) {
// Use tracking result
kalman_tracker.update(*tracking_center);
predicted_center = std::make_unique<cv::Point2f>(kalman_tracker.predict());
consecutive_predicts = 0;
} else {
// No detection result: only use Kalman prediction, limit consecutive predictions
// No detection or tracking result: only use Kalman prediction, limit consecutive predictions
consecutive_predicts++;
if (consecutive_predicts < max_consecutive_predicts) {
cv::Point2f temp_pred = kalman_tracker.predict();
if (temp_pred.x != 0 || temp_pred.y != 0) { // Check if prediction is valid
predicted_center = new cv::Point2f(temp_pred);
predicted_center = std::make_unique<cv::Point2f>(temp_pred);
}
} else {
predicted_center = nullptr;
@@ -167,34 +334,80 @@ int main(int /*argc*/, char* /*argv*/[]) {
}
// Determine display center
cv::Point2f* display_center = current_center;
if (display_center == nullptr && predicted_center != nullptr) {
display_center = predicted_center;
cv::Point2f* display_center = detection_center; // Give priority to detection results
if (display_center == nullptr && is_tracking && tracking_center != nullptr) {
display_center = tracking_center;
}
bool is_predicted = (display_center != nullptr) && (current_center == nullptr);
if (display_center == nullptr && predicted_center != nullptr) {
display_center = predicted_center.get();
}
bool is_predicted = (display_center != nullptr) && (detection_center == nullptr && (!is_tracking || tracking_center == nullptr));
// Calculate ballistic prediction point
cv::Point2f* ballistic_point = ballistic_predictor.predict_ballistic_point(
predicted_center, img_center, target_speed
);
cv::Point2f* ballistic_point = nullptr;
if (predicted_center) {
ballistic_point = ballistic_predictor.predict_ballistic_point(
predicted_center.get(), img_center, target_speed
);
}
auto current_time = std::chrono::high_resolution_clock::now();
// Visualization
visualizer.draw_light_bars(frame, valid_light_bars, target_color);
if (!armor_plates.empty()) {
// Only draw armor plate if it has valid data
visualizer.draw_armor_plate(frame, armor_plates[0]);
}
// Draw tracking rectangle if tracking
if (is_tracking) {
cv::rectangle(frame, tracking_roi, cv::Scalar(0, 255, 0), 2);
}
visualizer.draw_offset_text(frame, display_center, target_color, is_predicted);
visualizer.draw_ballistic_point(frame, ballistic_point);
if (ballistic_point != nullptr) {
visualizer.draw_ballistic_point(frame, ballistic_point);
}
// Output control data to TTL (passing use_ttl to control whether to send)
output_control_data(display_center, target_color, frame_counter, ttl, img_center, use_ttl);
// Now sending on every frame for smoother control
// Only send if ballistic_point is not null and we have a valid display center
if (display_center != nullptr && ballistic_point != nullptr) {
output_control_data(display_center, target_color, ttl, img_center, use_ttl);
}
// Create trackbars for PID parameter tuning
static bool initialized_trackbars = false;
if (!initialized_trackbars) {
cv::namedWindow("PID Tuning", cv::WINDOW_AUTOSIZE);
// Create trackbars for pitch PID parameters
cv::createTrackbar("Pitch Kp", "PID Tuning", nullptr, 10000, on_pitch_kp_trackbar);
cv::createTrackbar("Pitch Ki", "PID Tuning", nullptr, 10000, on_pitch_ki_trackbar);
cv::createTrackbar("Pitch Kd", "PID Tuning", nullptr, 10000, on_pitch_kd_trackbar);
// Create trackbars for yaw PID parameters
cv::createTrackbar("Yaw Kp", "PID Tuning", nullptr, 10000, on_yaw_kp_trackbar);
cv::createTrackbar("Yaw Ki", "PID Tuning", nullptr, 10000, on_yaw_ki_trackbar);
cv::createTrackbar("Yaw Kd", "PID Tuning", nullptr, 10000, on_yaw_kd_trackbar);
// Set initial positions
cv::setTrackbarPos("Pitch Kp", "PID Tuning", static_cast<int>(g_pitch_kp * 10000));
cv::setTrackbarPos("Pitch Ki", "PID Tuning", static_cast<int>(g_pitch_ki * 10000));
cv::setTrackbarPos("Pitch Kd", "PID Tuning", static_cast<int>(g_pitch_kd * 10000));
cv::setTrackbarPos("Yaw Kp", "PID Tuning", static_cast<int>(g_yaw_kp * 10000));
cv::setTrackbarPos("Yaw Ki", "PID Tuning", static_cast<int>(g_yaw_ki * 10000));
cv::setTrackbarPos("Yaw Kd", "PID Tuning", static_cast<int>(g_yaw_kd * 10000));
initialized_trackbars = true;
}
// Display windows
cv::imshow("Armor Detection", frame);
cv::imshow(target_color + " Mask", mask);
cv::imshow(target_color + " Only", color_only_frame);
//cv::imshow(target_color + " Mask", mask);
//cv::imshow(target_color + " Only", color_only_frame);
// Only call cv::waitKey to keep GUI responsive
cv::waitKey(1);
// Exit on 'q' key press
if (cv::waitKey(1) == 'q') {
@@ -203,6 +416,17 @@ int main(int /*argc*/, char* /*argv*/[]) {
frame_counter++;
float area = 0.0;
if (!armor_plates.empty()) {
// 获取装甲板配对的第一个灯条
const LightBar& light_bar = armor_plates[0].pair.first;
// 直接使用灯条的area即rect_area赋值
area = light_bar.area;
// 额外校验:确保灯条面积有效(避免负数/0值
if (area <= 0) {
area = 0.0f;
}
}
// Control max frame rate (100 FPS)
auto end_time = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - current_time).count();
@@ -210,27 +434,39 @@ int main(int /*argc*/, char* /*argv*/[]) {
std::this_thread::sleep_for(std::chrono::milliseconds(10 - elapsed));
}
// Clean up dynamically allocated memory
if (predicted_center != nullptr) {
delete predicted_center;
predicted_center = nullptr;
}
// Smart pointers automatically handle memory cleanup
}
}
catch (const std::exception& e) {
std::cerr << "Error: " << e.what() << std::endl;
std::cerr << "Error in main loop: " << e.what() << std::endl;
}
catch (...) {
std::cerr << "Unknown error occurred in main loop" << std::endl;
}
// Cleanup
camera.release();
cv::destroyAllWindows();
try {
camera.release();
} catch (...) {
std::cerr << "Error during camera release" << std::endl;
}
try {
cv::destroyAllWindows();
} catch (...) {
std::cerr << "Error during window destruction" << std::endl;
}
// Only close TTL connection when enabled and initialized
if (use_ttl && ttl != nullptr) {
ttl->close();
try {
ttl->close();
} catch (...) {
std::cerr << "Error during TTL close" << std::endl;
}
delete ttl;
ttl = nullptr;
}
return 0;
}
}

87
src/PidController.cpp Normal file
View File

@@ -0,0 +1,87 @@
#include "PidController.h"
#include <algorithm>
#include <cmath>
// Constructs a PID controller with the given gains. All accumulated state
// starts cleared; the output is clamped to [-400, 400] by default (see
// setOutputLimits() to change the range).
PidController::PidController(float kp, float ki, float kd)
    : kp_(kp), ki_(ki), kd_(kd), last_error_(0.0f), integral_(0.0f), output_(0.0f),
      output_min_(-400.0f), output_max_(400.0f), first_iteration_(true) {}
float PidController::update(float setpoint, float measured_value, float dt) {
float error = setpoint - measured_value;
// 处理积分饱和
integral_ += error * dt;
// 计算微分(使用前向差分)
float derivative = 0.0f;
if (!first_iteration_) {
derivative = (error - last_error_) / dt;
} else {
first_iteration_ = false;
}
// PID计算
float proportional = kp_ * error;
float integral_contribution = ki_ * integral_;
float derivative_contribution = kd_ * derivative;
output_ = proportional + integral_contribution + derivative_contribution;
// 限制输出
output_ = std::max(output_min_, std::min(output_max_, output_));
// 保存当前误差用于下次计算微分
last_error_ = error;
return output_;
}
// Clears all accumulated controller state so the next update() behaves like
// the very first call (no integral memory, no derivative kick).
void PidController::reset() {
    integral_ = 0.0f;
    last_error_ = 0.0f;
    output_ = 0.0f;
    first_iteration_ = true;
}
// Sets the proportional gain.
void PidController::setKp(float kp) {
    kp_ = kp;
}
// Sets the integral gain.
void PidController::setKi(float ki) {
    ki_ = ki;
}
// Sets the derivative gain.
void PidController::setKd(float kd) {
    kd_ = kd;
}
// Sets all three gains at once. Accumulated state (integral, last error)
// is intentionally preserved across gain changes.
void PidController::setParameters(float kp, float ki, float kd) {
    kp_ = kp;
    ki_ = ki;
    kd_ = kd;
}
// Returns the proportional gain.
float PidController::getKp() const {
    return kp_;
}
// Returns the integral gain.
float PidController::getKi() const {
    return ki_;
}
// Returns the derivative gain.
float PidController::getKd() const {
    return kd_;
}
// Sets the clamping range applied to the controller output.
void PidController::setOutputLimits(float min, float max) {
    output_min_ = min;
    output_max_ = max;
}
// Returns the most recently computed (clamped) output.
float PidController::getOutput() const {
    return output_;
}
// Returns the error from the most recent update() call (not recomputed).
float PidController::getError() const {
    return last_error_;
}

View File

@@ -1,9 +1,17 @@
#include "TTLCommunicator.h"
#include <iostream>
TTLCommunicator::TTLCommunicator(int baudrate) : baudrate(baudrate), connected(false) {
// Default to a common serial port name, though this will need to be configured for the specific system
port_name = "/dev/ttyUSB0"; // Common for Linux/Ubuntu
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
// Constructs a communicator bound to the given serial device path and baud
// rate. No connection is opened until connect() is called.
TTLCommunicator::TTLCommunicator(const std::string& port, int baudrate)
    : port_name(port), baudrate(baudrate), connected(false), serial_fd(-1) {}
TTLCommunicator::~TTLCommunicator() {
@@ -11,19 +19,16 @@ TTLCommunicator::~TTLCommunicator() {
}
bool TTLCommunicator::connect() {
// Placeholder implementation - in a real implementation, you would open the serial port
std::cout << "Attempting to connect to TTL device on port: " << port_name << " at baud rate: " << baudrate << std::endl;
// For now, simulate a connection attempt
// In real implementation, use system calls to open the serial port
connected = open_serial_port();
if (connected) {
std::cout << "TTL connection established." << std::endl;
} else {
std::cout << "Warning: Failed to establish TTL connection." << std::endl;
}
return connected;
}
@@ -37,23 +42,103 @@ void TTLCommunicator::close() {
bool TTLCommunicator::send_data(const std::string& data) {
if (!connected) {
std::cerr << "Error: Cannot send data, TTL not connected." << std::endl;
return false;
}
// Placeholder for sending data over serial
// In real implementation, write to the serial port
std::cout << "Sending TTL data: " << data << std::endl;
// Simulate successful transmission
return true;
if (serial_fd != -1) {
ssize_t bytes_written = write(serial_fd, data.c_str(), data.length());
if (bytes_written == -1) {
std::cerr << "Error writing to serial port: " << strerror(errno) << std::endl;
return false;
} else {
std::cout << "Sent " << bytes_written << " bytes: " << data;
fsync(serial_fd); // Ensure data is sent
return true;
}
}
return false;
}
// Opens and configures the serial device named by port_name:
// 8N1 frame (8 data bits, no parity, 1 stop bit), no hardware or software
// flow control, raw (non-canonical) mode, non-blocking reads with a 0.5 s
// timeout. Returns true on success; on any failure the descriptor is closed,
// serial_fd is reset to -1, and false is returned.
bool TTLCommunicator::open_serial_port() {
    // O_NOCTTY: do not let this device become the controlling terminal.
    serial_fd = open(port_name.c_str(), O_RDWR | O_NOCTTY);
    if (serial_fd == -1) {
        std::cerr << "Error opening serial port " << port_name << ": " << strerror(errno) << std::endl;
        return false;
    }
    struct termios tty;
    // Start from the device's current attributes.
    if (tcgetattr(serial_fd, &tty) != 0) {
        std::cerr << "Error getting serial attributes: " << strerror(errno) << std::endl;
        ::close(serial_fd);
        serial_fd = -1;  // BUGFIX: don't leave a stale (closed) descriptor behind
        return false;
    }
    // Map the numeric baud rate onto the termios speed constant.
    speed_t baud_rate;
    switch (baudrate) {
        case 9600:   baud_rate = B9600;   break;
        case 19200:  baud_rate = B19200;  break;
        case 38400:  baud_rate = B38400;  break;
        case 57600:  baud_rate = B57600;  break;
        case 115200: baud_rate = B115200; break;
        case 230400: baud_rate = B230400; break;
        case 460800: baud_rate = B460800; break;
        case 921600: baud_rate = B921600; break;
        default:
            std::cerr << "Unsupported baud rate: " << baudrate << std::endl;
            ::close(serial_fd);
            serial_fd = -1;
            return false;
    }
    cfsetispeed(&tty, baud_rate);
    cfsetospeed(&tty, baud_rate);
    // 8N1 frame, no hardware flow control, receiver on, modem lines ignored.
    tty.c_cflag &= ~PARENB;        // no parity
    tty.c_cflag &= ~CSTOPB;        // 1 stop bit
    tty.c_cflag &= ~CSIZE;         // clear data-bit mask
    tty.c_cflag |= CS8;            // 8 data bits
    tty.c_cflag &= ~CRTSCTS;       // no RTS/CTS flow control
    tty.c_cflag |= CREAD | CLOCAL; // enable reading, ignore control lines
    // Raw input: no canonical line buffering, echo, or signal characters.
    tty.c_lflag &= ~ICANON;
    tty.c_lflag &= ~ECHO;
    tty.c_lflag &= ~ECHOE;
    tty.c_lflag &= ~ISIG;
    // No software flow control, no output post-processing.
    tty.c_iflag &= ~(IXON | IXOFF | IXANY);
    tty.c_oflag &= ~OPOST;
    // Non-blocking reads with a 0.5 second timeout.
    tty.c_cc[VMIN] = 0;
    tty.c_cc[VTIME] = 5;
    // Apply the configuration immediately.
    if (tcsetattr(serial_fd, TCSANOW, &tty) != 0) {
        std::cerr << "Error setting serial attributes: " << strerror(errno) << std::endl;
        ::close(serial_fd);
        serial_fd = -1;
        return false;
    }
    // Discard anything pending in both directions before first use.
    tcflush(serial_fd, TCIOFLUSH);
    return true;
}
// Drops the link state and releases the serial descriptor if one is held.
void TTLCommunicator::close_serial_port() {
    connected = false;
    if (serial_fd == -1) {
        return;  // nothing to release
    }
    ::close(serial_fd);
    serial_fd = -1;
}

View File

@@ -43,7 +43,7 @@ cv::Mat& Visualizer::draw_offset_text(cv::Mat& frame, const cv::Point2f* display
cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
int offset_x = static_cast<int>(display_center->x - img_center.x);
int offset_y = static_cast<int>(display_center->y - img_center.y);
int offset_y = static_cast<int>(img_center.y - display_center->y);
std::stringstream ss;
ss << "Offset: (" << offset_x << ", " << offset_y << ")";

View File

@@ -1,237 +0,0 @@
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <thread>
#include <opencv2/opencv.hpp>
#include "config.h"
#include "MindVisionCamera.h" // 使用MindVision相机
#include "ImagePreprocessor.h"
#include "ArmorDetector.h"
#include "KalmanFilter.h"
#include "Visualizer.h"
#include "BallisticPredictor.h"
#include "TTLCommunicator.h"
// Function to output control data to TTL device (with enable control)
// Sends the current aiming offset to the TTL device.
// Transmission only happens when TTL output is enabled, the 5-frame interval
// is hit, armor was actually detected, and a valid ballistic point exists;
// otherwise nothing is sent at all.
void output_control_data(const cv::Point2f* ballistic_point,
                         const std::string& target_color,
                         int frame_counter,
                         TTLCommunicator* ttl_communicator,
                         const cv::Point2f& img_center,
                         bool use_ttl,
                         bool armor_detected) {
    // Guard clauses: bail out unless every sending precondition holds.
    if (!use_ttl || frame_counter % 5 != 0 || !armor_detected) {
        return;
    }
    if (ballistic_point == nullptr) {
        // No valid ballistic point -> no TTL data is sent (by requirement).
        return;
    }

    // Pixel offset of the ballistic point from the actual image center.
    const int offset_x = static_cast<int>(ballistic_point->x - img_center.x);
    const int offset_y = static_cast<int>(ballistic_point->y - img_center.y);

    // Shorten the color name to a single-character code.
    std::string color_code = target_color;
    if (target_color == "red") {
        color_code = "r";
    } else if (target_color == "blue") {
        color_code = "b";
    }

    // Wire format: "s <color> <dx> <dy>".
    const std::string payload =
        "s " + color_code + " " + std::to_string(offset_x) + " " + std::to_string(offset_y);
    if (ttl_communicator != nullptr) {
        ttl_communicator->send_data(payload);
    }
}
void set_camera_resolution(MindVisionCamera, int width, int height) {
// The resolution is set during camera initialization in MindVision
// We already set it in MindVisionCamera constructor, so this is for logging only
std::cout << "Camera resolution already set to: " << width << "x" << height << " during initialization" << std::endl;
}
int main(int /*argc*/, char* /*argv*/[]) {
std::string target_color = "red";
int cam_id = 0;
cv::Size default_resolution(640, 480); // This will be set by MindVisionCamera constructor
bool use_ttl = true; // Set to false to disable TTL communication
// Define optional resolution list (adjust based on camera support)
std::vector<cv::Size> resolutions = {
cv::Size(320, 240), // Low resolution, high frame rate
cv::Size(640, 480), // Standard resolution
cv::Size(1280, 720), // HD resolution
cv::Size(1920, 1080) // Full HD resolution
};
// Find the index of default resolution
int res_index = 1; // Default to index 1 (640x480)
for (size_t i = 0; i < resolutions.size(); i++) {
if (resolutions[i] == default_resolution) {
res_index = static_cast<int>(i);
break;
}
}
// Initialize TTL communication (only when enabled)
TTLCommunicator* ttl = nullptr;
if (use_ttl) {
ttl = new TTLCommunicator(TTL_BAUDRATE);
if (!ttl->connect()) {
std::cout << "Warning: Cannot establish TTL connection, will continue running but not send data" << std::endl;
}
} else {
std::cout << "TTL communication disabled" << std::endl;
}
// Initialize visual processing modules with MindVision camera
MindVisionCamera camera(cam_id, target_color);
if (!camera.is_opened) {
std::cerr << "Cannot open MindVision camera!" << std::endl;
return -1;
}
// Initialize ballistic predictor (adjustable bullet speed, e.g., 16m/s)
BallisticPredictor ballistic_predictor(16.0f);
// Record target speed (obtained from Kalman filter)
cv::Point2f target_speed(0.0f, 0.0f);
// Set initial resolution
set_camera_resolution(camera, resolutions[res_index].width, resolutions[res_index].height);
std::cout << "Initial camera resolution: " << resolutions[res_index].width << "x" << resolutions[res_index].height << std::endl;
ImagePreprocessor preprocessor;
ArmorDetector detector;
KalmanFilter kalman_tracker;
Visualizer visualizer;
int frame_counter = 0; // Counter to control output frequency
int max_consecutive_predicts = 20; // Maximum consecutive prediction times
int consecutive_predicts = 0; // Current consecutive prediction count
cv::Mat frame;
try {
while (true) {
//if (!camera.read_frame(frame)) {
// std::cout << "Cannot read from MindVision camera, exiting!" << std::endl;
// break;
//}
// Get actual image size and calculate center
cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
// Preprocessing
cv::Mat mask, color_only_frame;
preprocessor.process_frame(frame, target_color, mask, color_only_frame);
// Armor plate detection
std::vector<LightBar> valid_light_bars;
std::vector<ArmorPlate> armor_plates;
detector.detect(mask, target_color, valid_light_bars, armor_plates);
// Kalman filter tracking (modified part: get target speed)
cv::Point2f* current_center = nullptr;
if (!armor_plates.empty()) {
current_center = &(armor_plates[0].center);
}
cv::Point2f* predicted_center = nullptr;
if (current_center != nullptr) {
// Has detection result: update Kalman filter, reset consecutive prediction count
kalman_tracker.update(*current_center);
predicted_center = new cv::Point2f(kalman_tracker.predict());
// Get velocity from the Kalman filter
target_speed = cv::Point2f(0.0f, 0.0f); // Kalman filter in OpenCV doesn't directly expose velocity
consecutive_predicts = 0;
} else {
// No detection result: only use Kalman prediction, limit consecutive predictions
consecutive_predicts++;
if (consecutive_predicts < max_consecutive_predicts) {
cv::Point2f temp_pred = kalman_tracker.predict();
if (temp_pred.x != 0 || temp_pred.y != 0) { // Check if prediction is valid
predicted_center = new cv::Point2f(temp_pred);
}
} else {
predicted_center = nullptr;
target_speed = cv::Point2f(0.0f, 0.0f);
}
}
// Determine display center
cv::Point2f* display_center = current_center;
if (display_center == nullptr && predicted_center != nullptr) {
display_center = predicted_center;
}
bool is_predicted = (display_center != nullptr) && (current_center == nullptr);
// Calculate ballistic prediction point
cv::Point2f* ballistic_point = ballistic_predictor.predict_ballistic_point(
predicted_center, img_center, target_speed
);
auto current_time = std::chrono::high_resolution_clock::now();
// Visualization
visualizer.draw_light_bars(frame, valid_light_bars, target_color);
if (!armor_plates.empty()) {
visualizer.draw_armor_plate(frame, armor_plates[0]);
}
visualizer.draw_offset_text(frame, display_center, target_color, is_predicted);
visualizer.draw_ballistic_point(frame, ballistic_point);
// Output control data to TTL (passing use_ttl to control whether to send)
// Only send TTL data if actual armor plates were detected (not just predicted)
bool armor_detected = !armor_plates.empty();
// Only send TTL data when we have an actual detection, not just prediction
bool should_send_ttl = armor_detected && (display_center != nullptr);
output_control_data(display_center, target_color, frame_counter, ttl, img_center, use_ttl, should_send_ttl);
// Display windows
cv::imshow("Armor Detection", frame);
cv::imshow(target_color + " Mask", mask);
cv::imshow(target_color + " Only", color_only_frame);
// Exit on 'q' key press
if (cv::waitKey(1) == 'q') {
break;
}
frame_counter++;
// Control max frame rate (100 FPS)
auto end_time = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - current_time).count();
if (elapsed < 10) { // 100 FPS = 10ms per frame
std::this_thread::sleep_for(std::chrono::milliseconds(30 - elapsed));
}
// Clean up dynamically allocated memory
if (predicted_center != nullptr) {
delete predicted_center;
predicted_center = nullptr;
}
}
}
catch (const std::exception& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
// Cleanup
camera.release();
cv::destroyAllWindows();
// Only close TTL connection when enabled and initialized
if (use_ttl && ttl != nullptr) {
ttl->close();
delete ttl;
ttl = nullptr;
}
return 0;
}