# Third-party packages plus helper functions and data from other project files
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from utils.general import non_max_suppression, scale_coords, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import time_sync
from utils.augmentations import letterbox
from parameter import size, left_map1, left_map2, right_map1, right_map2, SGBM_stereo, imgsz, stride, device, half, \
    model, line_thickness, names, hide_conf, hide_labels, minDisparity, numDisparities, Q, A, B, C
from deep_sort.utils.parser import get_config
from deep_sort.deep_sort import DeepSort
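
# Pipeline overview: grab a side-by-side stereo frame, split it into left/right views,
# rectify both and run SGBM stereo matching for per-pixel depth, run YOLOv5 detection
# on the left view, track detections with DeepSORT, estimate each target's distance
# from the depth map, and flag targets whose distance is shrinking at an accelerating rate.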
def crash(id, xyxy, CM_distance):
    # Update the distance history for this track id
    if mess.get(id) is None:
        mess[id] = []
    mess[id].append([xyxy, CM_distance])
    if len(mess[id]) == 8:
        t = mess.get(id)
        x = [1, 2, 3, 4, 5, 6, 7, 8]  # frame indices of the 8 samples
        y = [m[1] for m in t]         # distances (cm) of the 8 samples
        # Fit distance(t) = a*t^2 + b*t + c; lowercase names avoid shadowing the
        # calibration coefficients A, B, C imported from parameter
        a, b, c = np.polyfit(x, y, 2)
        if a < -1:
            print('Collision warning!')
        del mess[id]
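
# The heuristic above fits a quadratic distance(t) = a*t^2 + b*t + c to the last 8
# distance samples of a track. A strongly negative curvature (a < -1) means the gap is
# closing at an accelerating rate, which is treated as an imminent-collision signal;
# the -1 threshold appears to be an empirically chosen constant, not a derived value.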
def run():
    # START
    cap = cv2.VideoCapture(0)  # open camera 0 (side-by-side stereo feed)
    assert cap.isOpened(), 'Failed to open camera!'  # verify the camera opened
    cudnn.benchmark = True  # let cuDNN pick the fastest convolution algorithms
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 2560)  # set frame width
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 960)  # set frame height
    cap.set(cv2.CAP_PROP_FPS, 60)  # set frame rate
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))   # actual frame width
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # actual frame height
    cfg = get_config()
    cfg.merge_from_file("deep_sort/configs/deep_sort.yaml")
    deepsort = DeepSort(model_type='osnet_x0_25',
                        max_dist=cfg.DEEPSORT.MAX_DIST,
                        max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                        max_age=cfg.DEEPSORT.MAX_AGE,
                        n_init=cfg.DEEPSORT.N_INIT,
                        nn_budget=cfg.DEEPSORT.NN_BUDGET,
                        use_cuda=False)
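    # Note: use_cuda=False keeps the DeepSORT ReID model (osnet_x0_25) on the CPU;
    # with a CUDA device available, use_cuda=True should move it to the GPU
    # (assuming this DeepSort wrapper follows the common deep_sort_pytorch API).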
    # main loop
    while True:
        t0 = time_sync()  # start-of-frame timestamp
        success, img = cap.read()  # grab one frame from the camera
        if not success:
            print('Failed to read frame')
            break
        # Split the side-by-side stereo frame into left and right images
        img_L = img[0:frame_h, 0:int(frame_w / 2)]        # left image: used for YOLO detection
        img_R = img[0:frame_h, int(frame_w / 2):frame_w]  # right image: used for stereo matching
        # Resize both images to the calibration size
        img_L = cv2.resize(img_L, dsize=size)
        img_R = cv2.resize(img_R, dsize=size)
        # ================================================SGBM==============================================================
        # Rectify both images with the calibration maps
        img1_rectified = cv2.remap(img_L, left_map1, left_map2, cv2.INTER_LINEAR)
        img2_rectified = cv2.remap(img_R, right_map1, right_map2, cv2.INTER_LINEAR)
        # Convert to grayscale (BGR -> GRAY)
        imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
        imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)
        disp = SGBM_stereo.compute(imgL, imgR).astype(np.float32) / 16.0  # disparity map (SGBM returns fixed-point *16)
        dislist = cv2.reprojectImageTo3D(disp, Q)  # per-pixel 3D coordinates for depth lookup
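        # Note: cv2.reprojectImageTo3D maps each pixel's disparity through the 4x4
        # reprojection matrix Q (from stereo calibration) to (X, Y, Z) in the left
        # rectified camera frame, so the last channel read further down is the raw
        # depth Z. Its unit depends on the calibration, which is presumably why the
        # A/B/C polynomial below converts the averaged depth to centimeters.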
        # ================================================yolo v5===========================================================
        img0 = img_L.copy()  # copy of the left image for drawing and display
        img_L = [letterbox(img_L, imgsz, stride=stride)[0]]  # resize while keeping aspect ratio; pad the rest with gray
        # Stack
        img_L = np.stack(img_L, 0)
        # Convert
        img_L = img_L[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
        img_L = np.ascontiguousarray(img_L)  # make the array contiguous in memory for faster processing
        img_L = torch.from_numpy(img_L).to(device)  # create a tensor from the numpy.ndarray
        img_L = img_L.half() if half else img_L.float()  # uint8 to fp16/32
        img_L = img_L / 255.0  # 0 - 255 to 0.0 - 1.0
        t1 = time_sync()
        pred = model(img_L)[0]  # inference
        det = non_max_suppression(prediction=pred, conf_thres=0.3, iou_thres=0.45, max_det=20)[0]  # NMS
        t2 = time_sync()
        s = 'cam: '
        s += '%gx%g ' % img_L.shape[2:]  # print string
        gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
        annotator = Annotator(img0, line_width=line_thickness, example=str(names))
        if len(det):  # check the NMS output, not the raw predictions
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(img_L.shape[2:], det[:, :4], img0.shape).round()
            # Print results
            for c in det[:, -1].unique():
                n = (det[:, -1] == c).sum()  # detections per class
                s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
            xywhs = xyxy2xywh(det[:, 0:4])
            confs = det[:, 4]
            clss = det[:, 5]
            # pass yolov5 detections to deepsort
            t3 = time_sync()
            outputs = deepsort.update(xywhs.cpu(), confs.cpu(), clss.cpu(), img0)
            t4 = time_sync()
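            # Each row of outputs is [x1, y1, x2, y2, track_id, class_id]
            # (as unpacked below), one row per confirmed track.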
            if len(outputs) > 0:
                for j, (output, conf) in enumerate(zip(outputs, confs)):
                    xyxy = output[0:4]
                    id = output[4]
                    cls = output[5]
                    c = int(cls)  # integer class
                    # Sample a 3x3 grid of points inside the box to estimate target distance
                    w_step = (xyxy[2] - xyxy[0]) / 4
                    h_step = (xyxy[3] - xyxy[1]) / 4
                    point_list = [(int(xyxy[0] + i * w_step), int(xyxy[1] + k * h_step))
                                  for i in (1, 2, 3) for k in (1, 2, 3)]
                    distance = []
                    for point in point_list:
                        dist = (dislist[point[1]][point[0]] / 5)[-1]  # depth (Z) at this pixel, scaled down by 5
                        cv2.circle(img=img0, center=point, radius=1, color=(0, 0, 225), thickness=1)
                        if 0 <= dist < 10000:
                            distance.append(dist)
                    distance.sort()
                    # Build the label (box, class name, confidence, distance)
                    if len(distance) <= 3:
                        CM_distance = 0
                        label = None if hide_labels else (names[c] if hide_conf else f'{id} {names[c]} {conf:.0%} null')
                    else:
                        # Average the three nearest valid depths, then map raw depth to
                        # centimeters with the calibration polynomial from parameter
                        dist = (distance[0] + distance[1] + distance[2]) / 3
                        CM_distance = A * dist ** 2 + B * dist + C
                        label = None if hide_labels else (
                            names[c] if hide_conf else f'{id} {names[c]} {conf:.0%} {int(CM_distance)}cm')
                    # Draw the prediction box
                    annotator.box_label(xyxy, label, color=colors(c, True))
                    # Collision check
                    if CM_distance > 0:
                        crash(id, xyxy, CM_distance)
            # print(f'{s}Done')
        else:
            deepsort.increment_ages()
            print('No detections')
        t5 = time_sync()  # end-of-frame timestamp t5
        # Print time (inference-only)
        # print(f'{s}Done. [detection: {t2 - t1:.3f}s][tracking: {t4 - t3:.3f}s][total: {t5 - t0:.3f}s]')
        cv2.imshow('Demo', img0)
        # x = (disp - minDisparity) / numDisparities
        # cv2.imshow('SGNM_disparity', x)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # waitKey returns an int, so compare against ord('q')
            cap.release()
            cv2.destroyAllWindows()
            break
# ==================================================================================================================
if __name__ == '__main__':
    mess = {}  # per-track distance history used by crash() for collision detection
    run()
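
# Usage (a sketch, assuming a side-by-side stereo camera is attached as device 0 and
# parameter.py holds the matching calibration data):
#   python main.py   # hypothetical filename; substitute this script's actual name
# Press 'q' in the 'Demo' window to release the camera and exit.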