
Yahboom microROS Car - Native Ubuntu Support Series: 22 Object Recognition and Tracking

Background

This is similar to the color tracking in the previous article: it is also based on OpenCV, but there are several tracking algorithms to choose from:

  • BOOSTING: works on a principle similar to Haar cascades (AdaBoost) and is a very old algorithm. It is slow and not very accurate.
  • MIL: slightly more accurate than BOOSTING.
  • KCF: faster than BOOSTING and MIL, but, like them, it does not handle occlusion well.
  • CSRT: more accurate than KCF, but slightly slower.
  • MedianFlow: performs poorly on fast-moving targets and targets whose appearance changes quickly.
  • TLD: produces a relatively large number of false positives.
  • MOSSE: extremely fast, but less accurate than KCF and CSRT; a good choice when speed matters most.
  • GOTURN: the only deep-learning-based tracker bundled with OpenCV; it requires downloading a model file before it can run.
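
To make this concrete, here is a minimal sketch of how these named trackers are typically instantiated through the OpenCV API (the Tracker wrapper shown further below presumably does something similar in its constructor). It assumes opencv-contrib-python is installed; the create_tracker helper name is my own, and the exact constructor locations vary by version (most contrib trackers moved under cv2.legacy in OpenCV 4.5+):

import cv2 as cv

def create_tracker(tracker_type):
    """Create an OpenCV tracker by name (hypothetical helper)."""
    legacy = getattr(cv, 'legacy', cv)  # contrib trackers live here in OpenCV 4.5+
    creators = {
        'BOOSTING':   getattr(legacy, 'TrackerBoosting_create', None),
        'MIL':        getattr(cv, 'TrackerMIL_create', None),
        'KCF':        getattr(cv, 'TrackerKCF_create', None),
        'CSRT':       getattr(cv, 'TrackerCSRT_create', None),
        'MEDIANFLOW': getattr(legacy, 'TrackerMedianFlow_create', None),
        'TLD':        getattr(legacy, 'TrackerTLD_create', None),
        'MOSSE':      getattr(legacy, 'TrackerMOSSE_create', None),
    }
    creator = creators.get(tracker_type)
    if creator is None:
        raise ValueError("Tracker %s is not available in this OpenCV build" % tracker_type)
    return creator()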

If you are interested in the principles behind these algorithms, I recommend this write-up:

几个目标跟踪算法_视频目标跟踪算法-CSDN博客

The tracking itself is handled by a small Tracker wrapper class (presumably provided by the astra_common module of the yahboom_esp32ai_car package); its two key methods are shown below:


    def initWorking(self, frame, box):
        '''
        Tracker work initialization
        frame: initial frame for the tracker
        box: region (ROI) to track
        '''
        if not self.tracker: raise Exception("Tracker is not initialized")
        status = self.tracker.init(frame, box)
        #if not status: raise Exception("Failed to initialize tracker job")
        self.coord = box
        self.isWorking = True

    def track(self, frame):
        if self.isWorking:  # update the tracker
            status, self.coord = self.tracker.update(frame)
            if status:  # tracking succeeded: the box (x, y, w, h) is drawn from (x, y) to (x+w, y+h)
                p1 = (int(self.coord[0]), int(self.coord[1]))
                p2 = (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3]))
                cv.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                return frame, p1, p2
            else:
                # tracking failed
                cv.putText(frame, "Tracking failure detected", (100, 80), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                return frame, (0, 0), (0, 0)
        else: return frame, (0, 0), (0, 0)

#!/usr/bin/env python3
# encoding: utf-8
import getpass
import threading
from yahboom_esp32ai_car.astra_common import *
from sensor_msgs.msg import CompressedImage,Image
from std_msgs.msg import Int32, Bool,UInt16
import rclpy
from rclpy.node import Node
from cv_bridge import CvBridge

from rclpy.time import Time
import datetime
from ament_index_python.packages import get_package_share_directory  # get the absolute path of the package share directory


class mono_Tracker(Node):
    def __init__(self,name):
        super().__init__(name)
        self.pub_Servo1 = self.create_publisher(Int32,"servo_s1" , 10)  # servo S1 (pan) control
        self.pub_Servo2 = self.create_publisher(Int32,"servo_s2" , 10)  # servo S2 (tilt) control
        # initialize parameters
        self.declare_param()
        self.target_servox = 0   # target servo S1 angle (keeps the target centered in x)
        self.target_servoy = 10  # target servo S2 angle (keeps the target centered in y)
        self.point_pose = (0, 0, 0)  # target point position (x, y, z)
        self.circle = (0, 0, 0)      # target circle info (x, y, radius)
        self.hsv_range = ()          # HSV color range
        self.dyn_update = True
        self.select_flags = False
        self.gTracker_state = False
        self.windows_name = 'frame'
        self.cols, self.rows = 0, 0  # mouse-selected region coordinates
        self.Mouse_XY = (0, 0)       # mouse click coordinates
        self.index = 2
        self.end = 0
        self.color = color_follow()
    
        self.tracker_types = ['BOOSTING', 'MIL', 'KCF']
        self.tracker_type = 'KCF'
        self.VideoSwitch = True
        self.img_flip = False

        self.last_stamp = None
        self.new_seconds = 0
        self.fps_seconds = 1

        ser1_angle = Int32()  # initial servo angles
        ser1_angle.data = int(self.target_servox)
        ser2_angle = Int32()
        ser2_angle.data = int(self.target_servoy)

        # make sure the gimbal starts at the center position
        for i in range(10):
            self.pub_Servo1.publish(ser1_angle)
            self.pub_Servo2.publish(ser2_angle)
            time.sleep(0.1)
        


        self.hsv_text = get_package_share_directory('yahboom_esp32ai_car')+'/resource/colorHSV.text'
        self.mono_PID = (12, 0, 0.9)
        self.scale = 1000
        self.PID_init()

        print("OpenCV Version: ",cv.__version__)
        self.gTracker = Tracker(tracker_type=self.tracker_type)
        self.tracker_type = self.tracker_types[self.index]
        self.Track_state = 'init'

        # USB camera (unused in this example)
        #self.capture = cv.VideoCapture(0)
        #self.timer = self.create_timer(0.001, self.on_timer)

        # ESP32 camera over Wi-Fi
        self.bridge = CvBridge()
        self.sub_img = self.create_subscription(
            CompressedImage, '/espRos/esp32camera', self.handleTopic, 1)  # images published by the ESP32


    def declare_param(self):
        # PID parameters
        self.declare_parameter("Kp",12)
        self.Kp = self.get_parameter('Kp').get_parameter_value().integer_value
        self.declare_parameter("Ki",0)
        self.Ki = self.get_parameter('Ki').get_parameter_value().integer_value
        self.declare_parameter("Kd",0.9)
        self.Kd = self.get_parameter('Kd').get_parameter_value().double_value
    
    def get_param(self):
        self.Kd = self.get_parameter('Kd').get_parameter_value().double_value
        self.Ki = self.get_parameter('Ki').get_parameter_value().integer_value
        self.Kp = self.get_parameter('Kp').get_parameter_value().integer_value
        self.mono_PID = (self.Kp,self.Ki,self.Kd)
        

    def cancel(self):
        self.Reset()
        if self.VideoSwitch==False: self.destroy_subscription(self.sub_img)
        cv.destroyAllWindows()

    # USB
    # def on_timer(self):
    #     self.get_param()
    #     ret, frame = self.capture.read()
    #     action = cv.waitKey(10) & 0xFF
    #     frame, binary =self.process(frame, action)
    #     start = time.time()
    #     fps = 1 / (start - self.end)
    #     text = "FPS : " + str(int(fps))
    #     self.end = start
    #     cv.putText(frame, text, (20, 30), cv.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 1)
    #     if len(binary) != 0: cv.imshow('frame', ManyImgs(1, ([frame, binary])))
    #     else:cv.imshow('frame', frame)
    #     if action == ord('q') or action == 113:
    #         self.capture.release()
    #         cv.destroyAllWindows()

    # ESP32 Wi-Fi image callback
    def handleTopic(self, msg):
        self.last_stamp = msg.header.stamp  
        if self.last_stamp:
            total_secs = Time(nanoseconds=self.last_stamp.nanosec, seconds=self.last_stamp.sec).nanoseconds
            delta = datetime.timedelta(seconds=total_secs * 1e-9)
            seconds = delta.total_seconds()*100

            if self.new_seconds != 0:
                self.fps_seconds = seconds - self.new_seconds

            self.new_seconds = seconds  # keep this frame's value

        self.get_param()
        start = time.time()
        frame = self.bridge.compressed_imgmsg_to_cv2(msg)  # convert the ROS image to OpenCV format
        frame = cv.resize(frame, (640, 480))

        action = cv.waitKey(10) & 0xFF  # get keyboard event
        frame, binary = self.process(frame, action)  # core processing logic
        
        
        end = time.time()
        fps = 1 / ((end - start)+self.fps_seconds)
        
        text = "FPS : " + str(int(fps))
        cv.putText(frame, text, (10,20), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2)
        cv.imshow('frame', frame)

        if action == ord('q') or action == 113:
            cv.destroyAllWindows()




    def Reset(self):
        self.hsv_range = ()
        self.circle = (0, 0, 0)
        self.Mouse_XY = (0, 0)
        self.Track_state = 'init'
        self.target_servox = 0
        self.target_servoy = 10
    
    # servo control
    def execute(self, point_x, point_y):
        # rospy.loginfo("point_x: {}, point_y: {}".format(point_x, point_y))
        # PID computes the servo adjustment from the offset between the target and the image center (320, 240)
        [x_Pid, y_Pid] = self.PID_controller.update([point_x - 320, point_y - 240])
        if self.img_flip == True:  # adjust the direction according to the image-flip flag
            self.target_servox -= x_Pid
            self.target_servoy += y_Pid
        else:
            self.target_servox -= x_Pid
            self.target_servoy += y_Pid
        # clamp the angles to protect the servos
        if self.target_servox >= 45:
            self.target_servox = 45
        elif self.target_servox <= -45:
            self.target_servox = -45
        if self.target_servoy >= 20:
            self.target_servoy = 20
        elif self.target_servoy <= -90:
            self.target_servoy = -90
        print("servo1",self.target_servox)
        servo1_angle = Int32()
        servo1_angle.data = int(self.target_servox)
        servo2_angle = Int32()
        servo2_angle.data = int(self.target_servoy)
        self.pub_Servo1.publish(servo1_angle)
        self.pub_Servo2.publish(servo2_angle)

    def dynamic_reconfigure_callback(self, config, level):
        self.scale = config['scale']
        self.mono_PID = (config['Kp'], config['Ki'], config['Kd'])
        self.hsv_range = ((config['Hmin'], config['Smin'], config['Vmin']),
                          (config['Hmax'], config['Smax'], config['Vmax']))
        self.PID_init()
        return config

    def PID_init(self):
        self.PID_controller = simplePID(
            [0, 0],
            [self.mono_PID[0] / float(self.scale), self.mono_PID[0] / float(self.scale)],
            [self.mono_PID[1] / float(self.scale), self.mono_PID[1] / float(self.scale)],
            [self.mono_PID[2] / float(self.scale), self.mono_PID[2] / float(self.scale)])
    # mouse callback; all arguments except param are supplied automatically
    def onMouse(self, event, x, y, flags, param):
        if event == 1:  # left button pressed
            self.Track_state = 'init'
            self.select_flags = True
            self.Mouse_XY = (x,y)
        if event == 4:  # left button released
            self.select_flags = False
            self.Track_state = 'identify'
        if self.select_flags == True:
            self.cols = min(self.Mouse_XY[0], x), min(self.Mouse_XY[1], y)
            self.rows = max(self.Mouse_XY[0], x), max(self.Mouse_XY[1], y)
            self.Roi_init = (self.cols[0], self.cols[1], self.rows[0], self.rows[1])
    # main image-processing pipeline
    def process(self, rgb_img, action):
        # param action: [113 or 'q': quit], [114 or 'r': reset], [105 or 'i': identify], [32 (space): start tracking]
        rgb_img = cv.resize(rgb_img, (640, 480))
        binary = []
        if self.img_flip == True: rgb_img = cv.flip(rgb_img, 1)  # flip the image
        # keyboard handling
        if action == 32: self.Track_state = 'tracking'
        elif action == ord('i') or action == 105: self.Track_state = "identify"
        elif action == ord('r') or action == 114: self.Reset()
        elif action == ord('q') or action == 113: self.cancel()
        if self.Track_state == 'init':  # initialization / selection state
            cv.namedWindow(self.windows_name, cv.WINDOW_AUTOSIZE)
            cv.setMouseCallback(self.windows_name, self.onMouse, 0)
            if self.select_flags == True:  # draw the selected region
                cv.line(rgb_img, self.cols, self.rows, (255, 0, 0), 2)
                cv.rectangle(rgb_img, self.cols, self.rows, (0, 255, 0), 2)
                if self.Roi_init[0] != self.Roi_init[2] and self.Roi_init[1] != self.Roi_init[3]:
                    if self.tracker_type == "color": rgb_img, self.hsv_range = self.color.Roi_hsv(rgb_img, self.Roi_init)  # extract the HSV range of the ROI
                    self.gTracker_state = True
                    self.dyn_update = True
                else: self.Track_state = 'init'
        if self.Track_state != 'init':  # tracking mode
            if self.tracker_type == "color" and len(self.hsv_range) != 0:
                rgb_img, binary, self.circle = self.color.object_follow(rgb_img, self.hsv_range)  # color follow
                if self.dyn_update == True:
                    params = {'Hmin': self.hsv_range[0][0], 'Hmax': self.hsv_range[1][0],
                              'Smin': self.hsv_range[0][1], 'Smax': self.hsv_range[1][1],
                              'Vmin': self.hsv_range[0][2], 'Vmax': self.hsv_range[1][2]}
                    self.dyn_client.update_configuration(params)  # update dynamic parameters
                    self.dyn_update = False
            if self.tracker_type != "color":  # OpenCV tracker mode
                if self.gTracker_state == True:
                    Roi = (self.Roi_init[0], self.Roi_init[1], self.Roi_init[2] - self.Roi_init[0], self.Roi_init[3] - self.Roi_init[1])
                    self.gTracker = Tracker(tracker_type=self.tracker_type)
                    self.gTracker.initWorking(rgb_img, Roi)
                    self.gTracker_state = False
                rgb_img, (targBegin_x, targBegin_y), (targEnd_x, targEnd_y) = self.gTracker.track(rgb_img)  # run the tracker
                center_x = targEnd_x / 2 + targBegin_x / 2
                center_y = targEnd_y / 2 + targBegin_y / 2
                width = targEnd_x - targBegin_x
                high = targEnd_y - targBegin_y
                self.point_pose = (center_x, center_y, min(width, high))
        if self.Track_state == 'tracking':  # drive the servos toward the target
            if self.circle[2] != 0: threading.Thread(target=self.execute, args=(self.circle[0], self.circle[1])).start()
            if self.point_pose[0] != 0 and self.point_pose[1] != 0: threading.Thread(target=self.execute, args=(self.point_pose[0], self.point_pose[1])).start()
        if self.tracker_type != "color": cv.putText(rgb_img, " Tracker", (260, 20), cv.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        return rgb_img, binary

class simplePID:
    '''very simple discrete PID controller'''

    def __init__(self, target, P, I, D):
        '''Create a discrete PID controller
        each of the parameters may be a vector if they have the same length
        Args:
        target (double) -- the target value(s)
        P, I, D (double)-- the PID parameter
        '''
        # check if parameter shapes are compatible
        if (not (np.size(P) == np.size(I) == np.size(D)) or ((np.size(target) == 1) and np.size(P) != 1) or (
                np.size(target) != 1 and (np.size(P) != np.size(target) and (np.size(P) != 1)))):
            raise TypeError('input parameters shape is not compatible')
       # rospy.loginfo('P:{}, I:{}, D:{}'.format(P, I, D))
        self.Kp = np.array(P)
        self.Ki = np.array(I)
        self.Kd = np.array(D)
        self.last_error = 0
        self.integrator = 0
        self.timeOfLastCall = None
        self.setPoint = np.array(target)
        self.integrator_max = float('inf')

    def update(self, current_value):
        '''Updates the PID controller.
        Args:
            current_value (double): vector/number of the same length as the target given in the constructor
        Returns:
            control signal (double): vector of the same length as the target
        '''
        current_value = np.array(current_value)
        if (np.size(current_value) != np.size(self.setPoint)):
            raise TypeError('current_value and target do not have the same shape')
        if (self.timeOfLastCall is None):
            # the PID was called for the first time; we don't know deltaT yet,
            # so no control signal is applied
            self.timeOfLastCall = time.perf_counter()
            return np.zeros(np.size(current_value))
        error = self.setPoint - current_value  # compute the error
        P = error
        currentTime = time.perf_counter()
        deltaT = (currentTime - self.timeOfLastCall)
        # integral of the error is current error * time since last update (I term)
        self.integrator = self.integrator + (error * deltaT)
        I = self.integrator
        # derivative is difference in error / time since last update (D term)
        D = (error - self.last_error) / deltaT
        self.last_error = error
        self.timeOfLastCall = currentTime
        # return the control signal (output)
        return self.Kp * P + self.Ki * I + self.Kd * D


def main():
    rclpy.init()
    mono_tracker = mono_Tracker("monoIdentify")
    try:
        rclpy.spin(mono_tracker)
    except KeyboardInterrupt:
        pass
    finally:
        mono_tracker.destroy_node()
        rclpy.shutdown()
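
ros2 run launches this node through the package's console_scripts entry point, which calls main() directly, so the usual __main__ guard is not strictly required; if you also want to run the file directly with python3, you can append it:

if __name__ == '__main__':
    main()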
    
    

Key features:

  • Supports multiple tracking algorithms (KCF/BOOSTING/MIL)

  • The color-tracking mode can extract the HSV range automatically

  • Multithreaded servo control keeps the image pipeline smooth

  • Exception handling for tracker initialization and failure

  • Real-time FPS display

This system can automatically track a moving target or an object of a specific color, using a PID controller to keep the target at the center of the image. Typical applications include smart surveillance and robot visual following. It works much like the color tracking in the previous article: 亚博microros小车-原生ubuntu支持系列:21 颜色追踪-CSDN博客

In both cases, the offset between the target's center coordinates and the image center is used to compute the rotation angles of servos S1 and S2, which are then published to the chassis.
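
As a quick sanity check of that computation, here is a small standalone sketch of a single control step using the same gains as the node (mono_PID = (12, 0, 0.9) scaled by 1000); the detected target position (400, 200) is just an assumed example value:

# Example: target detected at pixel (400, 200) in a 640x480 frame.
Kp, Ki, Kd = 12, 0, 0.9
scale = 1000                           # same scaling as PID_init()
kp = Kp / scale                        # 0.012

point_x, point_y = 400, 200            # hypothetical detection
offset_x, offset_y = point_x - 320, point_y - 240   # (80, -40) offset from the image center

# simplePID's setpoint is (0, 0), so error = -offset; with Ki = 0 and the
# derivative ignored for this single step, the output is purely proportional:
x_Pid = kp * (0 - offset_x)            # -0.96
y_Pid = kp * (0 - offset_y)            #  0.48

# execute() then applies target_servox -= x_Pid and target_servoy += y_Pid,
# nudging the pan servo by about +0.96 deg and the tilt servo by +0.48 deg.
print(round(x_Pid, 3), round(y_Pid, 3))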

Program description

After the program starts, select the object to track with the mouse and press the space bar; the car's pan-tilt servos then enter tracking mode. The gimbal follows the tracked object and keeps it at the center of the image at all times.

Test

Start the car's micro-ROS agent:

 sudo docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host microros/micro-ros-agent:humble udp4 --port 8090 -v4

Start the image agent:

docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host microros/micro-ros-agent:humble udp4 --port 9999 -v4

(Optional) Adjust the servos:

ros2 run yahboom_esp32_mediapipe control_servo

Run the tracking node:

ros2 run yahboom_esp32ai_car mono_Tracker

After startup, the program enters selection mode. Use the mouse to select the area where the target is located, as shown in the figure below; releasing the mouse button starts recognition.

I brought out my treasured little yellow duck as the target.

Keyboard controls:

[r]: selection mode; use the mouse to select the region of the target to identify, as shown above.

[q]: quit the program.

[Space]: target tracking; move the target slowly while it is being followed, since moving it too fast will lose the target.

After pressing the space bar you can see the servo angles computed by the PID controller:

ohu@bohu-TM1701:~/yahboomcar/yahboomcar_ws$ ros2 run yahboom_esp32ai_car mono_Tracker 
OpenCV Version:  4.11.0
servo1 0.0
servo1 1.6740220783505821
servo1 2.981003373069086
servo1 4.277003373069086
servo1 5.418249937784921
servo1 6.018073510048205
servo1 6.0159973543603105
servo1 5.71860954302583
servo1 4.775629931904761
servo1 3.8326030635678214
servo1 2.9206030635678215
servo1 1.9081850712547614
servo1 0.7822878210318385
servo1 0.18899249274645924
servo1 -0.2834846440358629
servo1 -0.7540859436953311
servo1 -1.234085943695331
servo1 -1.6484508720535367
servo1 -2.1135512406099104
servo1 -2.5695512406099104
servo1 -3.058080514019669
servo1 -3.538080514019669
servo1 -4.171699309150052
servo1 -4.530007470444837
servo1 -4.800294859267008
servo1 -4.8453324923276115
servo1 -4.865291304517047
servo1 -4.94373365090143
servo1 -5.136952220684027
servo1 -5.469228000644295
servo1 -6.027853281852513
servo1 -6.710800306564447
servo1 -7.6552821486406
servo1 -8.052589780593427
servo1 -9.223112314977138
servo1 -10.319161974550278
servo1 -11.591078854236214
servo1 -12.416221902691895
servo1 -13.360740214236708
servo1 -14.51497408279646
servo1 -14.780085181765932
servo1 -14.310165362425591
servo1 -14.15717444041632
servo1 -14.435688173088373
servo1 -15.32025424018754
servo1 -16.531503721252605
servo1 -17.836778372955706
servo1 -19.017197794387748
servo1 -20.28310463824062
servo1 -21.166754300006183
servo1 -21.242287643352327
servo1 -21.1565011124945
servo1 -21.707825159200315
servo1 -23.013047473734893
servo1 -24.58221911876973
servo1 -26.81213675078672
servo1 -28.374622533573962
servo1 -29.871810297867736
servo1 -31.217257596323204
servo1 -32.31216336848393
servo1 -33.601930296065284
servo1 -34.03160970476731
Viewing the node-topic communication graph

You can use rqt to view the topic communication between the nodes.
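
For example, the graph can be opened with the standard tool:

ros2 run rqt_graph rqt_graph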

Compare it with the graph from the color-tracking example.

Shortcomings:

If the target moves even slightly too fast, the tracker loses it.

That's all.

