Yahboom microROS Car on Native Ubuntu, Part 24: Line Following
This post is similar to the earlier color-recognition article: Yahboom microROS Car on Native Ubuntu, Part 21: Color Tracking (CSDN blog).
1. Program Functionality
After the program starts, adjust the camera's pitch by tilting it downward so that it can see the line. Click the image window and press the r key to enter color-selection mode. Then, within the line region of the frame, drag a box over the color you want to follow; when you release the mouse, the processed image is loaded automatically. Finally, press the space bar to start line following. While driving, the car stops and the buzzer sounds whenever it encounters an obstacle.
2. Startup Commands
Start the car agent and the camera agent:
# Car agent
sudo docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host microros/micro-ros-agent:humble udp4 --port 8090 -v4
# Camera agent (start the agent first, then power on the car)
docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host microros/micro-ros-agent:humble udp4 --port 9999 -v4
Then enter in a terminal:
ros2 run yahboom_esp32ai_car follow_line
Then press the r/R key to enter color-selection mode and drag a box over a region with the mouse (the region must contain only one color).
Release the mouse once the region is selected;
the right-hand side then shows the processed image, which highlights the red line. Press the space bar to start line following.
3. Node Communication Graph
Code
Create a new file follow_common.py in the src/yahboom_esp32ai_car/yahboom_esp32ai_car/ directory:
#!/usr/bin/env python
# encoding: utf-8
import time
import cv2 as cv
import numpy as np
from std_msgs.msg import Bool
from geometry_msgs.msg import Twist
def write_HSV(wf_path, value):
with open(wf_path, "w") as wf:
wf_str = str(value[0][0]) + ', ' + str(
value[0][1]) + ', ' + str(value[0][2]) + ', ' + str(
value[1][0]) + ', ' + str(value[1][1]) + ', ' + str(
value[1][2])
wf.write(wf_str)
wf.flush()
def read_HSV(rf_path):
rf = open(rf_path, "r+")
line = rf.readline()
if len(line) == 0: return ()
list = line.split(',')
if len(list) != 6: return ()
hsv = ((int(list[0]), int(list[1]), int(list[2])),
(int(list[3]), int(list[4]), int(list[5])))
rf.flush()
return hsv
# 定义函数,第一个参数是缩放比例,第二个参数是需要显示的图片组成的元组或者列表
# Define the function, the first parameter is the zoom ratio, and the second parameter is a tuple or list of pictures to be displayed
def ManyImgs(scale, imgarray):
rows = len(imgarray) # 元组或者列表的长度 Length of tuple or list
cols = len(imgarray[0]) # 如果imgarray是列表,返回列表里第一幅图像的通道数,如果是元组,返回元组里包含的第一个列表的长度
# If imgarray is a list, return the number of channels of the first image in the list, if it is a tuple, return the length of the first list contained in the tuple
# print("rows=", rows, "cols=", cols)
# 判断imgarray[0]的类型是否是list
# 是list,表明imgarray是一个元组,需要垂直显示
# Determine whether the type of imgarray[0] is list
# It is a list, indicating that imgarray is a tuple and needs to be displayed vertically
rowsAvailable = isinstance(imgarray[0], list)
# 第一张图片的宽高
# The width and height of the first picture
width = imgarray[0][0].shape[1]
height = imgarray[0][0].shape[0]
# print("width=", width, "height=", height)
# 如果传入的是一个元组
# If the incoming is a tuple
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
# 遍历元组,如果是第一幅图像,不做变换
# Traverse the tuple, if it is the first image, do not transform
if imgarray[x][y].shape[:2] == imgarray[0][0].shape[:2]:
imgarray[x][y] = cv.resize(imgarray[x][y], (0, 0), None, scale, scale)
# 将其他矩阵变换为与第一幅图像相同大小,缩放比例为scale
# Transform other matrices to the same size as the first image, and the zoom ratio is scale
else:
imgarray[x][y] = cv.resize(imgarray[x][y], (imgarray[0][0].shape[1], imgarray[0][0].shape[0]), None,
scale, scale)
# 如果图像是灰度图,将其转换成彩色显示
# If the image is grayscale, convert it to color display
if len(imgarray[x][y].shape) == 2:
imgarray[x][y] = cv.cvtColor(imgarray[x][y], cv.COLOR_GRAY2BGR)
# 创建一个空白画布,与第一张图片大小相同
# Create a blank canvas, the same size as the first picture
imgBlank = np.zeros((height, width, 3), np.uint8)
hor = [imgBlank] * rows # 与第一张图片大小相同,与元组包含列表数相同的水平空白图像
# The same size as the first picture, and the same number of horizontal blank images as the tuple contains the list
for x in range(0, rows):
# 将元组里第x个列表水平排列
# Arrange the x-th list in the tuple horizontally
hor[x] = np.hstack(imgarray[x])
ver = np.vstack(hor) # 将不同列表垂直拼接
# Concatenate different lists vertically
# 如果传入的是一个列表
# If the incoming is a list
else:
# 变换操作,与前面相同
# Transformation operation, same as before
for x in range(0, rows):
if imgarray[x].shape[:2] == imgarray[0].shape[:2]:
imgarray[x] = cv.resize(imgarray[x], (0, 0), None, scale, scale)
else:
imgarray[x] = cv.resize(imgarray[x], (imgarray[0].shape[1], imgarray[0].shape[0]), None, scale, scale)
if len(imgarray[x].shape) == 2:
imgarray[x] = cv.cvtColor(imgarray[x], cv.COLOR_GRAY2BGR)
# 将列表水平排列
# Arrange the list horizontally
hor = np.hstack(imgarray)
ver = hor
return ver
class color_follow:
def __init__(self):
'''
初始化一些参数
Initialize some parameters
'''
self.binary = None
self.Center_x = 0
self.Center_y = 0
self.Center_r = 0
def line_follow(self, rgb_img, hsv_msg):
height, width = rgb_img.shape[:2]
img = rgb_img.copy()
img[0:int(height / 2), 0:width] = 0
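# Note: the line above blacks out the top half of the frame, so only the lower half
# (the road area closest to the car) is searched for the line color.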
# 将图像转换为HSV。
# Convert the image to HSV.
hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
lower = np.array(hsv_msg[0], dtype="uint8")
upper = np.array(hsv_msg[1], dtype="uint8")
# 根据特定颜色范围创建mask
# Create a mask based on a specific color range
mask = cv.inRange(hsv_img, lower, upper)
color_mask = cv.bitwise_and(hsv_img, hsv_img, mask=mask)
# 将图像转为灰度图
# Convert the image to grayscale
gray_img = cv.cvtColor(color_mask, cv.COLOR_RGB2GRAY)
# 获取不同形状的结构元素
# Get structure elements of different shapes
kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
# 形态学闭操作
# Morphological closed operation
gray_img = cv.morphologyEx(gray_img, cv.MORPH_CLOSE, kernel)
# 图像二值化操作
# Image binarization operation
ret, binary = cv.threshold(gray_img, 10, 255, cv.THRESH_BINARY)
# 获取轮廓点集(坐标)
# Get the set of contour points (coordinates)
find_contours = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
if len(find_contours) == 3: contours = find_contours[1]
else: contours = find_contours[0]
if len(contours) != 0:
areas = []
for c in range(len(contours)): areas.append(cv.contourArea(contours[c]))
max_id = areas.index(max(areas))
max_rect = cv.minAreaRect(contours[max_id])
max_box = cv.boxPoints(max_rect)
max_box = np.int0(max_box)
# 获取盒⼦顶点
# Get box vertices
box = cv.boxPoints(max_rect)
# 转成long类型
# Convert to long type
box = np.int0(box)
# 绘制最小矩形
# Draw the smallest rectangle
cv.drawContours(rgb_img, [box], 0, (255, 0, 0), 2)
(color_x, color_y), color_radius = cv.minEnclosingCircle(max_box)
# 将检测到的颜色用原形线圈标记出来
# Mark the center of the detected color region with a circle
self.Center_x = int(color_x)
self.Center_y = int(color_y)
self.Center_r = int(color_radius)
cv.circle(rgb_img, (self.Center_x, self.Center_y), 5, (255, 0, 255), -1)
else:
self.Center_x = 0
self.Center_y = 0
self.Center_r = 0
return rgb_img, binary, (self.Center_x, self.Center_y, self.Center_r)
def Roi_hsv(self, img, Roi):
'''
Get the HSV range within a given region
:param img: color image
:param Roi: (x_min, y_min, x_max, y_max), e.g. Roi=(290,280,350,340)
:return: the annotated image and the HSV range, e.g. (0,0,90)(177,40,150)
'''
H = [];S = [];V = []
# img = cv.resize(img, (640, 480), )
# 将彩色图转成HSV
# Convert color image to HSV
HSV = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# 画矩形框
# Draw a rectangular frame
# cv.rectangle(img, (Roi[0], Roi[1]), (Roi[2], Roi[3]), (0, 255, 0), 2)
# 依次取出每行每列的H,S,V值放入容器中
# Take out the H, S, V values of each row and each column in turn and put them into the container
for i in range(Roi[0], Roi[2]):
for j in range(Roi[1], Roi[3]):
H.append(HSV[j, i][0])
S.append(HSV[j, i][1])
V.append(HSV[j, i][2])
# 分别计算出H,S,V的最大最小
# Calculate the maximum and minimum of H, S, and V respectively
H_min = min(H); H_max = max(H)
S_min = min(S); S_max = 253
V_min = min(V); V_max = 255
# HSV范围调整
# HSV range adjustment
if H_max + 5 > 255: H_max = 255
else: H_max += 5
if H_min - 5 < 0: H_min = 0
else: H_min -= 5
if S_min - 20 < 0: S_min = 0
else: S_min -= 20
if V_min - 20 < 0: V_min = 0
else: V_min -= 20
lowerb = 'lowerb : (' + str(H_min) + ' ,' + str(S_min) + ' ,' + str(V_min) + ')'
upperb = 'upperb : (' + str(H_max) + ' ,' + str(S_max) + ' ,' + str(V_max) + ')'
txt1 = 'Learning ...'
txt2 = 'OK !!!'
if S_min < 5 or V_min < 5:
cv.putText(img, txt1, (30, 50), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
else:
cv.putText(img, txt2, (30, 50), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv.putText(img, lowerb, (150, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
cv.putText(img, upperb, (150, 50), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
hsv_range = ((int(H_min), int(S_min), int(V_min)), (int(H_max), int(S_max), int(V_max)))
return img, hsv_range
class simplePID:
'''very simple discrete PID controller'''
def __init__(self, target, P, I, D):
'''Create a discrete PID controller
each of the parameters may be a vector if they have the same length
Args:
target (double) -- the target value(s)
P, I, D (double)-- the PID parameter
'''
# check if parameter shapes are compatible.
if (not (np.size(P) == np.size(I) == np.size(D)) or ((np.size(target) == 1) and np.size(P) != 1) or (
np.size(target) != 1 and (np.size(P) != np.size(target) and (np.size(P) != 1)))):
raise TypeError('input parameters shape is not compatable')
self.Kp = np.array(P)
self.Ki = np.array(I)
self.Kd = np.array(D)
self.last_error = 0
self.integrator = 0
self.timeOfLastCall = None
self.setPoint = np.array(target)
self.integrator_max = float('inf')
def update(self, current_value):
'''Updates the PID controller.
Args:
current_value (double): vector/number of same length as the target given in the constructor
Returns:
control signal (double): vector of same length as the target
'''
current_value = np.array(current_value)
if (np.size(current_value) != np.size(self.setPoint)):
raise TypeError('current_value and target do not have the same shape')
if (self.timeOfLastCall is None):
# the PID was called for the first time. we don't know the deltaT yet
# no control signal is applied
self.timeOfLastCall = time.perf_counter()
return np.zeros(np.size(current_value))
error = self.setPoint - current_value
P = error
currentTime = time.perf_counter()
deltaT = (currentTime - self.timeOfLastCall)
# integral of the error is current error * time since last update
self.integrator = self.integrator + (error * deltaT)
I = self.integrator
# derivative is difference in error / time since last update
D = (error - self.last_error) / deltaT
self.last_error = error
self.timeOfLastCall = currentTime
# return controll signal
return self.Kp * P + self.Ki * I + self.Kd * D
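Before moving on to the node itself, this module can be sanity-checked on its own. The snippet below is a minimal standalone sketch (my own, not part of the Yahboom sources; the image file test_line.jpg is a hypothetical example) that runs line_follow() on a saved frame with the node's default HSV range and steps the PID once:
import time
import cv2 as cv
from yahboom_esp32ai_car.follow_common import color_follow, simplePID

follower = color_follow()
frame = cv.imread("test_line.jpg")          # hypothetical test image (640x480 works best)
assert frame is not None, "test image not found"
hsv_range = ((0, 85, 126), (9, 253, 253))   # the node's default HSV range (see declare_param below)

marked, binary, (cx, cy, r) = follower.line_follow(frame, hsv_range)
print("line center:", (cx, cy), "radius:", r)

# same shape as the node's PID_init(): target (0, 0), gains divided by scale = 1000
pid = simplePID([0, 0], [50 / 1000.0, 0], [0.0, 0], [5 / 1000.0, 0])
pid.update([0, 0])                          # the first call only records the time stamp
time.sleep(0.05)
z, _ = pid.update([(cx - 320) / 16.0, 0])   # 320 = image center x
print("angular.z would be:", -z)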
Create a new file follow_line.py in the src/yahboom_esp32ai_car/yahboom_esp32ai_car/ directory:
# ros lib
import rclpy
from rclpy.node import Node
from std_msgs.msg import Bool
from geometry_msgs.msg import Twist
from sensor_msgs.msg import CompressedImage, LaserScan, Image
from cv_bridge import CvBridge
# common lib
import os
import threading
import math
from yahboom_esp32ai_car.follow_common import *
from std_msgs.msg import Int32, Bool, UInt16
from rclpy.time import Time
import datetime
from ament_index_python.packages import get_package_share_directory  # get the absolute path of the package share directory
RAD2DEG = 180 / math.pi  # radians-to-degrees conversion factor
print("import finish")
cv_edition = cv.__version__
print("cv_edition: ", cv_edition)
class LineDetect(Node):
def __init__(self, name):
super().__init__(name)
# create a publisher
self.pub_cmdVel = self.create_publisher(Twist, "/cmd_vel", 1) # 控制小车速度
self.pub_Buzzer = self.create_publisher(UInt16, "/beep", 1)
self.pub_Servo1 = self.create_publisher(Int32, "/servo_s1", 10) #舵机1角度控制
self.pub_Servo2 = self.create_publisher(Int32, "/servo_s2", 10) # 舵机2角度控制
# create a subscriber
self.sub_JoyState = self.create_subscription(
Bool, "/JoyState", self.JoyStateCallback, 1)
self.sub_scan = self.create_subscription(
LaserScan, "/scan", self.registerScan, 1) # 激光雷达数据
self.Joy_active = False
self.img = None # 存储当前帧图像
self.circle = () # 存储检测到的目标圆信息
self.hsv_range = () # 颜色跟踪的HSV范
self.Roi_init = () # ROI区域坐标
self.warning = 1 #障碍物警告计数器
self.end = 0
self.Start_state = True
self.dyn_update = False
self.Buzzer_state = False
self.select_flags = False
self.Track_state = 'identify' # 跟踪状态机:初始化/识别/跟踪
self.windows_name = 'frame'
self.cols, self.rows = 0, 0
self.Mouse_XY = (0, 0)
self.hsv_text = get_package_share_directory('yahboom_esp32ai_car')+'/resource/colorHSV.text'
# initialize the vision and control helper modules
self.color = color_follow()
self.scale = 1000
self.FollowLinePID = (50, 0, 5) #60,0,20
self.linear = 0.15
self.LaserAngle = 30 # 激光雷达检测角度范围(度)
self.ResponseDist = 0.35 # 障碍物响应距离(米)
# initial servo angles
self.PWMServo_X = 0 # 水平舵机初始角度
self.PWMServo_Y = -45 # 垂直舵机初始角度
self.s1_init_angle = Int32()
self.s1_init_angle.data = self.PWMServo_X
self.s2_init_angle = Int32()
self.s2_init_angle.data = self.PWMServo_Y
# declare and initialize parameters
self.PID_init()
self.declare_param()
self.img_flip = True
self.refresh = False
self.last_stamp = None
self.new_seconds = 0
self.fps_seconds = 1
# usb
# self.capture = cv.VideoCapture(0)
# self.timer = self.create_timer(0.001, self.on_timer)
# esp32_wifi
self.bridge = CvBridge()
self.sub_img = self.create_subscription(
CompressedImage, '/espRos/esp32camera', self.handleTopic, 1) # 获取esp32传来的图像
# make sure the camera servos are at the line-following angles
for i in range(10):
self.pub_Servo1.publish(self.s1_init_angle)
self.pub_Servo2.publish(self.s2_init_angle)
time.sleep(0.1)
def declare_param(self):
# declare dynamic parameters on the ROS parameter server
# HSV color-range parameters
self.declare_parameter("Hmin", 0)
self.Hmin = self.get_parameter(
'Hmin').get_parameter_value().integer_value
self.declare_parameter("Smin", 85)
self.Smin = self.get_parameter(
'Smin').get_parameter_value().integer_value
self.declare_parameter("Vmin", 126)
self.Vmin = self.get_parameter(
'Vmin').get_parameter_value().integer_value
self.declare_parameter("Hmax", 9)
self.Hmax = self.get_parameter(
'Hmax').get_parameter_value().integer_value
self.declare_parameter("Smax", 253)
self.Smax = self.get_parameter(
'Smax').get_parameter_value().integer_value
self.declare_parameter("Vmax", 253)
self.Vmax = self.get_parameter(
'Vmax').get_parameter_value().integer_value
# PID
self.declare_parameter("Kp", 50)
self.Kp = self.get_parameter('Kp').get_parameter_value().integer_value
self.declare_parameter("Ki", 0)
self.Ki = self.get_parameter('Ki').get_parameter_value().integer_value
self.declare_parameter("Kd", 5)
self.Kd = self.get_parameter('Kd').get_parameter_value().integer_value
# other
self.declare_parameter("scale", 1000)
self.scale = self.get_parameter(
'scale').get_parameter_value().integer_value
self.declare_parameter("LaserAngle", 30)
self.LaserAngle = self.get_parameter(
'LaserAngle').get_parameter_value().integer_value
self.declare_parameter("linear", 0.15)
self.linear = self.get_parameter(
'linear').get_parameter_value().double_value
self.declare_parameter("ResponseDist", 0.35)
self.ResponseDist = self.get_parameter(
'ResponseDist').get_parameter_value().double_value
self.declare_parameter('refresh', False)
self.refresh = self.get_parameter(
'refresh').get_parameter_value().bool_value
self.pub_Servo2.publish(self.s2_init_angle)
self.pub_Servo1.publish(self.s1_init_angle)
def PID_init(self):
self.PID_controller = simplePID(
[0, 0],
[self.FollowLinePID[0] / 1.0 / (self.scale), 0],
[self.FollowLinePID[1] / 1.0 / (self.scale), 0],
[self.FollowLinePID[2] / 1.0 / (self.scale), 0])
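# Note: the raw gains above (Kp=50, Ki=0, Kd=5 by default) are divided by self.scale
# (1000), so the controller effectively runs with Kp=0.05 and Kd=0.005 on the x-offset
# error. The second element of each gain list is an unused placeholder channel; only
# the first output drives angular.z in execute().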
# mouse callback for selecting the ROI; all arguments except param are supplied by OpenCV
def onMouse(self, event, x, y, flags, param):
if event == 1: # 左键按下
self.Track_state = 'init'
self.select_flags = True
self.Mouse_XY = (x, y)
if event == 4: # 左键释放
self.select_flags = False
self.Track_state = 'mouse'
if self.select_flags == True: # 计算矩形区域坐标
self.cols = min(self.Mouse_XY[0], x), min(self.Mouse_XY[1], y)
self.rows = max(self.Mouse_XY[0], x), max(self.Mouse_XY[1], y)
self.Roi_init = (self.cols[0], self.cols[1],
self.rows[0], self.rows[1])
def execute(self, point_x, color_radius):
if self.Joy_active == True:
if self.Start_state == True:
self.PID_init()
self.Start_state = False
return
self.Start_state = True
if color_radius == 0: # 未检测到目标,则停止
print("Not Found")
self.pub_cmdVel.publish(Twist())
else:
twist = Twist()
b = UInt16() # PID计算角速度:320是中心点的X坐标的值,通过得到的图像的X值与320的偏差,可以计算出“我现在距离中心有多远”,然后计算角速度的值
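# 320 is the x coordinate of the image center; the PID converts how far the detected
# line center is from 320 into the angular-velocity command.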
[z_Pid, _] = self.PID_controller.update([(point_x - 320)*1.0/16, 0])
# [z_Pid, _] = self.PID_controller.update([(point_x - 10)*1.0/16, 0])
if self.img_flip == True: # 根据图像翻转状态调整转向方向
twist.angular.z = -z_Pid # -z_Pid
# else: twist.angular.z = (twist.angular.z+z_Pid)*0.2
else:
twist.angular.z = +z_Pid
# point_x = point_x
# twist.angular.z=-(point_x-320)*1.0/128.0
twist.linear.x = self.linear
if self.warning > 10:#有障碍物
print("Obstacles ahead !!!")
self.pub_cmdVel.publish(Twist())
self.Buzzer_state = True
b.data = 1
self.pub_Buzzer.publish(b)
else:
if self.Buzzer_state == True:
b.data = 0
for i in range(3):
self.pub_Buzzer.publish(b)
self.Buzzer_state = False
# stop steering once the line is close to the image center
if abs(point_x-320) < 40:
# if abs(point_x-30)>40:
twist.angular.z = 0.0
if self.Joy_active == False: # 发布控制指令
self.pub_cmdVel.publish(twist)
else:
twist.angular.z = 0.0
def process(self, rgb_img, action):
binary = []
rgb_img = cv.resize(rgb_img, (640, 480))
if self.img_flip == True:
rgb_img = cv.flip(rgb_img, 1) # 水平翻转
# keyboard-controlled state switching
if action == 32: # 空格键开始跟踪
self.Track_state = 'tracking'
elif action == ord('i') or action == 105:
self.Track_state = "identify"
elif action == ord('r') or action == 114:
self.Reset()
elif action == ord('q') or action == 113:
self.cancel()
# init mode: select the ROI region
if self.Track_state == 'init':
cv.namedWindow(self.windows_name, cv.WINDOW_AUTOSIZE)
cv.setMouseCallback(self.windows_name, self.onMouse, 0)
if self.select_flags == True: # 绘制选择区域
cv.line(rgb_img, self.cols, self.rows, (255, 0, 0), 2)
cv.rectangle(rgb_img, self.cols, self.rows, (0, 255, 0), 2)
if self.Roi_init[0] != self.Roi_init[2] and self.Roi_init[1] != self.Roi_init[3]:
rgb_img, self.hsv_range = self.color.Roi_hsv(
rgb_img, self.Roi_init)
self.dyn_update = True
else:
self.Track_state = 'init'
elif self.Track_state == "identify": # 识别模式:读取预设HSV值
# print(self.circle[0])
if os.path.exists(self.hsv_text):
self.hsv_range = read_HSV(self.hsv_text)
else: # 无预设文件则进入初始化
self.Track_state = 'init'
if self.Track_state != 'init' and len(self.hsv_range) != 0: # 执行颜色跟踪
rgb_img, binary, self.circle = self.color.line_follow(
rgb_img, self.hsv_range)
if self.dyn_update == True: # 更新动态参数到ROS参数服务器
write_HSV(self.hsv_text, self.hsv_range)
self.Hmin = rclpy.parameter.Parameter(
'Hmin', rclpy.Parameter.Type.INTEGER, self.hsv_range[0][0])
self.Smin = rclpy.parameter.Parameter(
'Smin', rclpy.Parameter.Type.INTEGER, self.hsv_range[0][1])
self.Vmin = rclpy.parameter.Parameter(
'Vmin', rclpy.Parameter.Type.INTEGER, self.hsv_range[0][2])
self.Hmax = rclpy.parameter.Parameter(
'Hmax', rclpy.Parameter.Type.INTEGER, self.hsv_range[1][0])
self.Smax = rclpy.parameter.Parameter(
'Smax', rclpy.Parameter.Type.INTEGER, self.hsv_range[1][1])
self.Vmax = rclpy.parameter.Parameter(
'Vmax', rclpy.Parameter.Type.INTEGER, self.hsv_range[1][2])
all_new_parameters = [self.Hmin, self.Smin,
self.Vmin, self.Hmax, self.Smax, self.Vmax]
self.set_parameters(all_new_parameters)
self.dyn_update = False
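# Note: the learned range is also persisted to resource/colorHSV.text via write_HSV(),
# so 'identify' mode (the 'i' key) can later reload it with read_HSV() instead of
# requiring a new ROI selection.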
if self.Track_state == 'tracking': # 跟踪模式:执行控制指令
if len(self.circle) != 0:
threading.Thread(target=self.execute, args=(
self.circle[0], self.circle[2])).start()
else:
if self.Start_state == True:
# self.pub_cmdVel.publish(Twist())
self.Start_state = False
return rgb_img, binary
# esp32-wifi image subscription callback
def handleTopic(self, msg):
self.last_stamp = msg.header.stamp
if self.last_stamp:
total_secs = Time(nanoseconds=self.last_stamp.nanosec, seconds=self.last_stamp.sec).nanoseconds
delta = datetime.timedelta(seconds=total_secs * 1e-9)
seconds = delta.total_seconds()*100
if self.new_seconds != 0:
self.fps_seconds = seconds - self.new_seconds
self.new_seconds = seconds#保留这次的值
self.get_param()
start = time.time()
frame = self.bridge.compressed_imgmsg_to_cv2(msg) # 转换图像格式
frame = cv.resize(frame, (640, 480))
action = cv.waitKey(10) & 0xFF #获取键盘输入
frame, binary = self.process(frame, action) #处理图像,主流程
end = time.time()
fps = 1 / ((end - start)+self.fps_seconds)
text = "FPS : " + str(int(fps))
cv.putText(frame, text, (10, 20),
cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
if len(binary) != 0:#显示
cv.imshow('frame', ManyImgs(1, ([frame, binary])))
else:
cv.imshow('frame', frame)
if action == ord('q') or action == 113:
cv.destroyAllWindows()
#usb
# def on_timer(self):
# self.get_param()
# ret, frame = self.capture.read()
# action = cv.waitKey(10) & 0xFF
# frame, binary = self.process(frame, action)
# start = time.time()
# fps = 1 / (start - self.end)
# text = "FPS : " + str(int(fps))
# self.end = start
# cv.putText(frame, text, (30, 30), cv.FONT_HERSHEY_SIMPLEX,
# 0.6, (100, 200, 200), 1)
# if len(binary) != 0:
# cv.imshow('frame', ManyImgs(1, ([frame, binary])))
# else:
# cv.imshow('frame', frame)
# if action == ord('q') or action == 113:
# self.capture.release()
# cv.destroyAllWindows()
def JoyStateCallback(self, msg):
if not isinstance(msg, Bool):
return
self.Joy_active = msg.data
# self.pub_cmdVel.publish(Twist())
# lidar scan callback
def registerScan(self, scan_data):
self.warning = 1
if not isinstance(scan_data, LaserScan):
return
# record the laser scan and track the position of the nearest object
ranges = np.array(scan_data.ranges)
# sort by distance to check whether the points, from nearest to farthest, are real objects
# if we already have a last scan to compare to:
for i in range(len(ranges)):
angle = (scan_data.angle_min +
scan_data.angle_increment * i) * RAD2DEG
# if angle > 90: print "i: {},angle: {},dist: {}".format(i, angle, scan_data.ranges[i])
# keep only valid data by discarding the sectors we do not need (only the forward sector is checked)
if abs(angle) < 80:
if ranges[i] > 0.2:
if ranges[i] < self.ResponseDist:
self.warning += 1
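# Note: the sector check above is hard-coded to +/-80 degrees; the LaserAngle
# parameter (default 30) is declared and refreshed in get_param() but is not used
# in this callback. Every scan point closer than ResponseDist (0.35 m by default)
# increments self.warning, and execute() treats warning > 10 as an obstacle.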
def Reset(self):
self.PID_init()
self.Track_state = 'init'
self.hsv_range = ()
self.Joy_active = False
self.Mouse_XY = (0, 0)
self.pub_cmdVel.publish(Twist())
print("Reset succes!!!")
def get_param(self):
# hsv
self.Hmin = self.get_parameter(
'Hmin').get_parameter_value().integer_value
self.Smin = self.get_parameter(
'Smin').get_parameter_value().integer_value
self.Vmin = self.get_parameter(
'Vmin').get_parameter_value().integer_value
self.Hmax = self.get_parameter(
'Hmax').get_parameter_value().integer_value
self.Smax = self.get_parameter(
'Smax').get_parameter_value().integer_value
self.Vmax = self.get_parameter(
'Vmax').get_parameter_value().integer_value
# PID gains
self.Kd = self.get_parameter('Kd').get_parameter_value().integer_value
self.Ki = self.get_parameter('Ki').get_parameter_value().integer_value
self.Kp = self.get_parameter('Kp').get_parameter_value().integer_value
self.FollowLinePID = (self.Kp, self.Ki, self.Kd)
#
self.scale = self.get_parameter(
'scale').get_parameter_value().integer_value
self.LaserAngle = self.get_parameter(
'LaserAngle').get_parameter_value().integer_value
self.linear = self.get_parameter(
'linear').get_parameter_value().double_value
self.ResponseDist = self.get_parameter(
'ResponseDist').get_parameter_value().double_value
self.refresh = self.get_parameter(
'refresh').get_parameter_value().bool_value
def main():
rclpy.init()
linedetect = LineDetect("follow_line")
print("start it")
try:
rclpy.spin(linedetect)
except KeyboardInterrupt:
pass
finally:
linedetect.destroy_node()
rclpy.shutdown()
The detection step is the same as in the earlier color-tracking article: findContours locates the line region. After that, the node:
- computes the offset between the line's center coordinate and the image center,
- converts that offset into an angular velocity value, and
- publishes the velocity command to drive the car (a condensed sketch of this mapping follows the list).
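A condensed sketch of that offset-to-command mapping (my own summary of the execute() method above, reusing simplePID with the default gains; not a separate node):
from geometry_msgs.msg import Twist
from yahboom_esp32ai_car.follow_common import simplePID

pid = simplePID([0, 0], [50 / 1000.0, 0], [0.0, 0], [5 / 1000.0, 0])

def offset_to_twist(center_x, linear=0.15, img_flip=True):
    """Map the detected line-center x coordinate to a velocity command."""
    twist = Twist()
    z, _ = pid.update([(center_x - 320) / 16.0, 0])  # error relative to the 320-px image center
    twist.angular.z = float(-z) if img_flip else float(z)
    twist.linear.x = linear
    if abs(center_x - 320) < 40:                     # already near the center: drive straight
        twist.angular.z = 0.0
    return twist
Note that the first update() call only initializes the controller's timing, so the very first command after starting is straight ahead.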
In actual testing: driving straight is no problem, but the car tends to lose the line in turns.