// ---------------------------------------------------------------------------
// Non-code residue from a GitHub page copy/paste, preserved as comments:
//   Files: Catalyst-MDVS/Catalyst-MDVS2/src/MindVisionMain.cpp
//   354 lines, 14 KiB, C++ — Raw / Blame / History
//   GitHub warning: "This file contains ambiguous Unicode characters. This
//   file contains Unicode characters that might be confused with other
//   characters. If you think that this is intentional, you can safely ignore
//   this warning. Use the Escape button to reveal them."
// ---------------------------------------------------------------------------

// C++ standard library
#include <algorithm> // std::clamp
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <sstream>   // std::ostringstream (used in output_control_data)
#include <string>
#include <thread>
#include <vector>
// POSIX
#include <unistd.h>
// Third-party: OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
// Project headers
#include "config.h"
#include "MindVisionCamera.h" // Using the MindVision camera
#include "ImagePreprocessor.h"
#include "ArmorDetector.h"
#include "KalmanFilter.h"
#include "Visualizer.h"
#include "BallisticPredictor.h"
#include "TTLCommunicator.h"
// Function to output control data to TTL device (with enable control)
void output_control_data(const cv::Point2f* ballistic_point,
const std::string& target_color,
TTLCommunicator* ttl_communicator,
const cv::Point2f& img_center,
bool use_ttl) {
// Only send data when TTL is enabled and has valid target
if (use_ttl && ballistic_point != nullptr) {
std::ostringstream send_str;
// Calculate offset (based on actual image center)
int ballistic_offset_x = -static_cast<int>(ballistic_point->x - img_center.x);
if ( abs(ballistic_offset_x) > 320){
ballistic_offset_x = ( ballistic_offset_x / abs( ballistic_offset_x ) ) * 320 ;
}
int ballistic_offset_y = -static_cast<int>(img_center.y - ballistic_point->y);
if ( abs(ballistic_offset_y) > 180 ) {
ballistic_offset_y = ( ballistic_offset_x / abs( ballistic_offset_x ) ) * 180 ;
}
// Color simplification mapping
std::string simplified_color = target_color;
if (target_color == "red") simplified_color = "r";
else if (target_color == "blue") simplified_color = "b";
// Construct send string (original format)
send_str << "#s " << simplified_color << " " << std::to_string(ballistic_offset_x) << " " << std::to_string(ballistic_offset_y) << "*\n";
// Send data
if (ttl_communicator != nullptr) {
ttl_communicator->send_data(send_str.str());
}else{
std::cerr << "Error: TTLCommunicator is a null pointer!" << std::endl;
}
}
}
// Ask the MindVision camera driver to switch to the requested capture
// resolution and log whether the driver accepted the change.
void set_camera_resolution(MindVisionCamera& camera, int width, int height) {
    const bool accepted = camera.set_resolution(width, height);
    if (!accepted) {
        std::cerr << "Failed to set camera resolution to: " << width << "x" << height << std::endl;
        return;
    }
    std::cout << "Successfully set camera resolution to: " << width << "x" << height << std::endl;
}
// Entry point: capture frames from the MindVision camera, detect/track the
// enemy armor plate (detector -> KCF tracker -> Kalman prediction, in that
// priority order), compute a ballistic aim point, visualize everything, and
// optionally stream gimbal offsets over a TTL serial link.
int main(int /*argc*/, char* /*argv*/[]) {
    // Run-once guard for the chmod below. NOTE(review): main() executes only
    // once per process, so this static counter is redundant as written.
    static int Numbe = 0;
    std::string target_color = "red"; // color of the enemy armor to hunt
    int cam_id = 0;                   // MindVision camera index
    cv::Size default_resolution(640, 480); // Changed to 640x480 for consistency with SJTU project
    bool use_ttl = false; // Set to false to disable TTL communication
    if (Numbe == 0) {
        // Execute a shell command to open serial-device permissions
        // (note: security risk — sudo + chmod 777 on every tty!).
        std::system("sudo chmod 777 /dev/tty*");
        Numbe++;
    }
    // Define optional resolution list (adjust based on camera support)
    std::vector<cv::Size> resolutions = {
        cv::Size(320, 240),  // Low resolution, high frame rate
        cv::Size(640, 480),  // Standard resolution
        cv::Size(1280, 720), // HD resolution
        cv::Size(1920, 1080) // Full HD resolution
    };
    // Find the index of default resolution
    int res_index = 1; // Default to index 1 (640x480)
    for (size_t i = 0; i < resolutions.size(); i++) {
        if (resolutions[i] == default_resolution) {
            res_index = static_cast<int>(i);
            break;
        }
    }
    // Initialize TTL communication (only when enabled)
    TTLCommunicator* ttl = nullptr;
    if (use_ttl) {
        // On Linux, the port would typically be /dev/ttyUSB0, /dev/ttyACM0, etc.
        ttl = new TTLCommunicator("/dev/ttyUSB0", TTL_BAUDRATE);
        if (ttl != nullptr) {
            // Connection failure is non-fatal: the loop still runs, it just
            // won't send data (output_control_data guards on use_ttl/ttl).
            if (!ttl->connect()) {
                std::cout << "Warning: Cannot establish TTL connection, will continue running but not send data" << std::endl;
            }
        } else {
            // NOTE(review): plain `new` throws std::bad_alloc rather than
            // returning nullptr, so this branch is unreachable as written.
            std::cout << "Error: Failed to create TTL communicator instance" << std::endl;
        }
    } else {
        std::cout << "TTL communication disabled" << std::endl;
    }
    // Initialize visual processing modules with MindVision camera
    MindVisionCamera camera(cam_id, target_color);
    if (!camera.is_opened) {
        std::cerr << "Cannot open MindVision camera!" << std::endl;
        return -1;
    }
    // Initialize ballistic predictor (adjustable bullet speed, e.g., 16m/s)
    BallisticPredictor ballistic_predictor(16.0f);
    // Record target speed (obtained from Kalman filter)
    cv::Point2f target_speed(0.0f, 0.0f);
    // Set initial resolution
    set_camera_resolution(camera, resolutions[res_index].width, resolutions[res_index].height);
    std::cout << "Initial camera resolution: " << resolutions[res_index].width << "x" << resolutions[res_index].height << std::endl;
    ImagePreprocessor preprocessor;
    ArmorDetector detector;
    KalmanFilter kalman_tracker;
    Visualizer visualizer;
    // Initialize KCF tracker
    cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
    bool is_tracking = false;
    cv::Rect2d tracking_roi;
    int tracking_frame_count = 0;
    const int MAX_TRACKING_FRAMES = 100; // Maximum frames to track before returning to search
    int frame_counter = 0; // Counter to control output frequency
    int max_consecutive_predicts = 20; // Maximum consecutive prediction times
    int consecutive_predicts = 0; // Current consecutive prediction count
    cv::Mat frame;
    try {
        // Main capture/detect/track/aim loop; exits on read failure or 'q'.
        while (true) {
            // Use the color-filter read to fetch the frame and the raw color
            // mask in a single call.
            cv::Mat raw_mask;
            if (!camera.read_frame_with_color_filter(frame, raw_mask, target_color)) {
                std::cout << "Cannot read from MindVision camera, exiting!HERERER" << std::endl;
                break;
            }
            // Get actual image size and calculate center
            cv::Point2f img_center(frame.cols / 2.0f, frame.rows / 2.0f);
            // Morphological clean-up of the raw mask using OpenCV
            cv::Mat mask;
            preprocessor.apply_morphology(raw_mask, mask);
            // Build a color image that shows only the target color
            cv::Mat color_only_frame;
            frame.copyTo(color_only_frame, raw_mask);
            // Initialize tracking center. tracking_point owns the storage;
            // tracking_center is a non-owning alias used below.
            cv::Point2f* tracking_center = nullptr;
            std::unique_ptr<cv::Point2f> tracking_point = nullptr;
            if (is_tracking) {
                // Update tracker
                bool success = tracker->update(frame, tracking_roi);
                if (success && tracking_roi.area() > 0) {
                    // Calculate center of tracked ROI
                    tracking_point = std::make_unique<cv::Point2f>(
                        tracking_roi.x + tracking_roi.width / 2.0f,
                        tracking_roi.y + tracking_roi.height / 2.0f
                    );
                    tracking_center = tracking_point.get();
                    tracking_frame_count++;
                    // If tracking for too long or detection is available, search for armor again
                    if (tracking_frame_count > MAX_TRACKING_FRAMES) {
                        is_tracking = false;
                        tracking_frame_count = 0;
                    }
                } else {
                    // Tracking failed, return to detection mode
                    is_tracking = false;
                    tracking_frame_count = 0;
                }
            }
            // Armor plate detection - only when not tracking or need to verify tracking
            std::vector<LightBar> valid_light_bars;
            std::vector<ArmorPlate> armor_plates;
            if (!is_tracking || tracking_frame_count % 10 == 0) { // Detect every 10 frames when tracking
                detector.detect(mask, target_color, valid_light_bars, armor_plates);
            }
            // Kalman filter tracking (modified part: get target speed)
            // NOTE(review): only armor_plates[0] is ever used; multiple
            // detections are silently ignored beyond the first.
            cv::Point2f* detection_center = nullptr;
            if (!armor_plates.empty()) {
                detection_center = &(armor_plates[0].center);
            }
            // Use smart pointer for safer memory management
            std::unique_ptr<cv::Point2f> predicted_center = nullptr;
            // Priority: detection -> tracking -> Kalman prediction
            if (detection_center != nullptr) {
                // Has detection result: update Kalman filter, reset consecutive prediction count
                kalman_tracker.update(*detection_center);
                predicted_center = std::make_unique<cv::Point2f>(kalman_tracker.predict());
                // Start tracking if successfully detected
                if (!is_tracking && !armor_plates.empty()) {
                    cv::Rect2d armor_rect = cv::boundingRect(std::vector<cv::Point2f>{
                        armor_plates[0].corners_2d[0],
                        armor_plates[0].corners_2d[1],
                        armor_plates[0].corners_2d[2],
                        armor_plates[0].corners_2d[3]
                    });
                    // Expand the bounding box slightly for better tracking
                    cv::Rect2d expanded_rect = cv::Rect2d(
                        armor_rect.x - armor_rect.width * 0.1,
                        armor_rect.y - armor_rect.height * 0.1,
                        armor_rect.width * 1.2,
                        armor_rect.height * 1.2
                    );
                    // Ensure the rectangle is within frame bounds
                    expanded_rect = expanded_rect & cv::Rect2d(0, 0, frame.cols, frame.rows);
                    tracker = cv::TrackerKCF::create(); // Create new tracker instance
                    tracker->init(frame, expanded_rect);
                    tracking_roi = expanded_rect;
                    is_tracking = true;
                    tracking_frame_count = 0;
                }
                consecutive_predicts = 0;
            } else if (is_tracking && tracking_center != nullptr) {
                // Use tracking result
                kalman_tracker.update(*tracking_center);
                predicted_center = std::make_unique<cv::Point2f>(kalman_tracker.predict());
                consecutive_predicts = 0;
            } else {
                // No detection or tracking result: only use Kalman prediction, limit consecutive predictions
                consecutive_predicts++;
                if (consecutive_predicts < max_consecutive_predicts) {
                    cv::Point2f temp_pred = kalman_tracker.predict();
                    if (temp_pred.x != 0 || temp_pred.y != 0) { // Check if prediction is valid
                        predicted_center = std::make_unique<cv::Point2f>(temp_pred);
                    }
                } else {
                    // Prediction budget exhausted: drop the target entirely.
                    predicted_center = nullptr;
                    target_speed = cv::Point2f(0.0f, 0.0f);
                }
            }
            // Determine display center
            cv::Point2f* display_center = detection_center; // Give priority to detection results
            if (display_center == nullptr && is_tracking && tracking_center != nullptr) {
                display_center = tracking_center;
            }
            if (display_center == nullptr && predicted_center != nullptr) {
                display_center = predicted_center.get();
            }
            // is_predicted: true only when the shown point came from the
            // Kalman prediction alone (no detection, no live track).
            bool is_predicted = (display_center != nullptr) && (detection_center == nullptr && (!is_tracking || tracking_center == nullptr));
            // Calculate ballistic prediction point
            cv::Point2f* ballistic_point = ballistic_predictor.predict_ballistic_point(
                predicted_center.get(), img_center, target_speed
            );
            auto current_time = std::chrono::high_resolution_clock::now();
            // Visualization
            visualizer.draw_light_bars(frame, valid_light_bars, target_color);
            if (!armor_plates.empty()) {
                visualizer.draw_armor_plate(frame, armor_plates[0]);
            }
            // Draw tracking rectangle if tracking
            if (is_tracking) {
                cv::rectangle(frame, tracking_roi, cv::Scalar(0, 255, 0), 2);
            }
            visualizer.draw_offset_text(frame, display_center, target_color, is_predicted);
            visualizer.draw_ballistic_point(frame, ballistic_point);
            // Output control data to TTL (passing use_ttl to control whether to send)
            // Now sending on every frame for smoother control
            // NOTE(review): display_center is sent here, not ballistic_point,
            // even though ballistic_point is what gets drawn — confirm intent.
            output_control_data(display_center, target_color, ttl, img_center, use_ttl);
            // Display windows
            cv::imshow("Armor Detection", frame);
            cv::imshow(target_color + " Mask", mask);
            cv::imshow(target_color + " Only", color_only_frame);
            // Exit on 'q' key press
            if (cv::waitKey(1) == 'q') {
                break;
            }
            frame_counter++;
            // Control max frame rate (100 FPS)
            auto end_time = std::chrono::high_resolution_clock::now();
            auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - current_time).count();
            if (elapsed < 10) { // 100 FPS = 10ms per frame
                std::this_thread::sleep_for(std::chrono::milliseconds(10 - elapsed));
            }
            // Smart pointers automatically handle memory cleanup
        }
    }
    catch (const std::exception& e) {
        std::cerr << "Error in main loop: " << e.what() << std::endl;
    }
    catch (...) {
        std::cerr << "Unknown error occurred in main loop" << std::endl;
    }
    // Cleanup
    try {
        camera.release();
    } catch (...) {
        std::cerr << "Error during camera release" << std::endl;
    }
    try {
        cv::destroyAllWindows();
    } catch (...) {
        std::cerr << "Error during window destruction" << std::endl;
    }
    // Only close TTL connection when enabled and initialized
    if (use_ttl && ttl != nullptr) {
        try {
            ttl->close();
        } catch (...) {
            std::cerr << "Error during TTL close" << std::endl;
        }
        delete ttl;
        ttl = nullptr;
    }
    return 0;
}