"""
Safety-helmet wearing detection with YOLOv5 / YOLOv8 / YOLOv10.

Part 1: dataset download and analysis.

Background: construction sites are high-risk areas and missing helmets are a
leading cause of fatal head injuries; manual patrols are costly, leave blind
spots and react only after an accident. Real-time object detection can flag
workers without helmets automatically, and the YOLO family is the usual
industrial choice (fast, accurate, easy to deploy).

Dataset: the public Safety-Helmet-Wearing-Dataset (7581 images, VOC XML
annotations, two classes — wearing a helmet vs. bare head).
"""
import os
import urllib.request
import xml.etree.ElementTree as ET
import zipfile


def download_dataset(
    url="https://github.com/njvisionpower/Safety-Helmet-Wearing-Dataset/archive/master.zip",
    archive="dataset.zip",
    target="helmet_dataset",
):
    """Download the dataset archive, unpack it and rename the root folder.

    Replace `url` with a mirror if the GitHub link is unavailable.
    """
    urllib.request.urlretrieve(url, archive)
    with zipfile.ZipFile(archive, "r") as zip_ref:
        zip_ref.extractall("./")
    os.rename("Safety-Helmet-Wearing-Dataset-master", target)


def analyze_dataset(annotation_dir):
    """Scan VOC XML annotations and gather basic statistics.

    Args:
        annotation_dir: directory containing the ``*.xml`` annotation files.

    Returns:
        (class_count, image_sizes) where class_count maps class name to the
        number of annotated objects and image_sizes is a list of (w, h) pairs.

    Dataset convention: objects named 'person' are counted under 'head'
    (worker without a helmet); unknown class names are tallied under their
    own key so surprises in the data remain visible.
    """
    class_count = {"helmet": 0, "head": 0}
    image_sizes = []
    for xml_file in os.listdir(annotation_dir):
        if not xml_file.endswith(".xml"):
            continue
        tree = ET.parse(os.path.join(annotation_dir, xml_file))
        root = tree.getroot()
        # Record the image size if the <size> element is present.
        size = root.find("size")
        if size is not None:
            w = int(size.find("width").text)
            h = int(size.find("height").text)
            image_sizes.append((w, h))
        # Tally object classes.
        for obj in root.findall("object"):
            name = obj.find("name").text
            if name == "person":
                class_count["head"] += 1
            elif name == "helmet":
                class_count["helmet"] += 1
            else:
                class_count[name] = class_count.get(name, 0) + 1
    return class_count, image_sizes


def plot_size_distribution(sizes):
    """Histogram of image widths/heights (matplotlib imported lazily so the
    module can be imported on machines without a plotting backend)."""
    import matplotlib.pyplot as plt

    widths, heights = zip(*sizes)
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.hist(widths, bins=30, alpha=0.7, label="Width")
    plt.hist(heights, bins=30, alpha=0.7, label="Height")
    plt.legend()
    plt.title("Image Size Distribution")
    plt.show()


if __name__ == "__main__":
    download_dataset()
    class_count, sizes = analyze_dataset("helmet_dataset/Annotations")
    print(f"类别统计: {class_count}")
    plot_size_distribution(sizes)
"""
Part 2: VOC -> YOLO label conversion, train/val/test split, and YOLOv5
training / evaluation / inference.

YOLO txt label format, one object per line:
    <class_id> <x_center> <y_center> <width> <height>
with all coordinates normalised to [0, 1] by the image size.
"""
import os
import random
import shutil
import xml.etree.ElementTree as ET


def convert_voc_to_yolo(xml_dir, txt_dir, classes):
    """Convert VOC XML annotations to YOLO txt files.

    Args:
        xml_dir: directory of VOC ``*.xml`` annotation files.
        txt_dir: output directory for the ``*.txt`` label files (created).
        classes: class-name list; list position defines the 0-based class id,
            e.g. ``["helmet", "head"]``. Objects with other names are skipped.
    """
    os.makedirs(txt_dir, exist_ok=True)
    # The original script wrapped this loop in tqdm purely for a progress bar.
    for xml_file in os.listdir(xml_dir):
        if not xml_file.endswith(".xml"):
            continue
        tree = ET.parse(os.path.join(xml_dir, xml_file))
        root = tree.getroot()
        size = root.find("size")
        img_w = int(size.find("width").text)
        img_h = int(size.find("height").text)
        txt_filename = xml_file.replace(".xml", ".txt")
        with open(os.path.join(txt_dir, txt_filename), "w") as f:
            for obj in root.findall("object"):
                name = obj.find("name").text
                if name not in classes:
                    continue
                class_id = classes.index(name)
                bbox = obj.find("bndbox")
                xmin = int(bbox.find("xmin").text)
                ymin = int(bbox.find("ymin").text)
                xmax = int(bbox.find("xmax").text)
                ymax = int(bbox.find("ymax").text)
                # Corner box -> normalised centre/size.
                x_center = (xmin + xmax) / 2.0 / img_w
                y_center = (ymin + ymax) / 2.0 / img_h
                width = (xmax - xmin) / img_w
                height = (ymax - ymin) / img_h
                f.write(f"{class_id} {x_center:.6f} {y_center:.6f} "
                        f"{width:.6f} {height:.6f}\n")


def copy_files(file_list, src_img_dir, src_label_dir, dst_img_dir, dst_label_dir):
    """Copy images and their matching YOLO label files into a split directory.

    A missing label file is tolerated (image without annotations).
    """
    os.makedirs(dst_img_dir, exist_ok=True)
    os.makedirs(dst_label_dir, exist_ok=True)
    for fname in file_list:
        shutil.copy(os.path.join(src_img_dir, fname),
                    os.path.join(dst_img_dir, fname))
        # Fix over the original: it only mapped .jpg/.png, so .jpeg images
        # (accepted by the split below) never found their label file.
        label_file = (fname.replace(".jpeg", ".txt")
                      .replace(".jpg", ".txt")
                      .replace(".png", ".txt"))
        src_label = os.path.join(src_label_dir, label_file)
        if os.path.exists(src_label):
            shutil.copy(src_label, os.path.join(dst_label_dir, label_file))


def split_dataset(base_dir="helmet_yolo_dataset", seed=42):
    """Shuffle and split the images 80/10/10 into train/val/test and copy
    them (plus labels) into the YOLO directory layout under `base_dir`."""
    from sklearn.model_selection import train_test_split  # deferred third-party

    image_files = [f for f in os.listdir("helmet_dataset/JPEGImages")
                   if f.endswith((".jpg", ".png", ".jpeg"))]
    random.shuffle(image_files)
    # 80% train, then split the remaining 20% in half for val/test.
    train_files, val_files = train_test_split(
        image_files, test_size=0.2, random_state=seed)
    val_files, test_files = train_test_split(
        val_files, test_size=0.5, random_state=seed)
    for files, split in ((train_files, "train"), (val_files, "val"),
                         (test_files, "test")):
        copy_files(files, "helmet_dataset/JPEGImages", "helmet_dataset/labels",
                   f"{base_dir}/images/{split}", f"{base_dir}/labels/{split}")
    print(f"训练集: {len(train_files)}张")
    print(f"验证集: {len(val_files)}张")
    print(f"测试集: {len(test_files)}张")


# --- YOLOv5 setup -----------------------------------------------------------
# Environment:  git clone https://github.com/ultralytics/yolov5.git
#               cd yolov5 && pip install -r requirements.txt
# Dataset config (helmet.yaml):
#   path: ../helmet_yolo_dataset
#   train: images/train
#   val: images/val
#   test: images/test
#   nc: 2
#   names: [helmet, head]
# Model config: copy models/yolov5s.yaml to models/helmet_yolov5s.yaml and
# set nc: 2 (depth_multiple 0.33 / width_multiple 0.50 stay unchanged).


def train_yolov5():
    """Fine-tune YOLOv5s on the helmet dataset (needs the yolov5 package)."""
    import torch
    from yolov5 import train

    train.run(
        weights="yolov5s.pt",                 # pretrained checkpoint
        cfg="models/helmet_yolov5s.yaml",     # 2-class model config
        data="helmet.yaml",                   # dataset config
        epochs=100,
        batch_size=16,
        imgsz=640,
        device=0 if torch.cuda.is_available() else "cpu",
        project="runs/train/helmet_yolov5",
        name="exp",
        exist_ok=True,
        pretrained=True,
    )


def validate_yolov5():
    """Evaluate the best checkpoint on the validation split."""
    from yolov5 import val

    val.run(
        data="helmet.yaml",
        weights="runs/train/helmet_yolov5/exp/weights/best.pt",
        batch_size=16,
        imgsz=640,
        device=0,
        project="runs/val/helmet_yolov5",
    )


def detect_yolov5():
    """Run inference on the held-out test images and save annotated results."""
    from yolov5 import detect

    detect.run(
        weights="runs/train/helmet_yolov5/exp/weights/best.pt",
        source="../helmet_yolo_dataset/images/test",
        data="helmet.yaml",
        imgsz=640,
        conf_thres=0.25,   # confidence threshold
        iou_thres=0.45,    # NMS IoU threshold
        max_det=1000,
        device=0,
        project="runs/detect/helmet_yolov5",
        name="test",
        exist_ok=True,
        save_txt=True,     # save result labels
        save_conf=True,    # save confidences
    )


def plot_yolov5_results(csv_path="runs/train/helmet_yolov5/exp/results.csv"):
    """Plot loss, precision/recall and mAP curves from YOLOv5's results.csv."""
    import matplotlib.pyplot as plt
    import pandas as pd

    results = pd.read_csv(csv_path)
    plt.figure(figsize=(15, 10))
    plt.subplot(2, 3, 1)
    plt.plot(results["epoch"], results["train/box_loss"], label="Box Loss")
    plt.plot(results["epoch"], results["train/obj_loss"], label="Obj Loss")
    plt.plot(results["epoch"], results["train/cls_loss"], label="Cls Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.title("Training Loss")
    plt.subplot(2, 3, 2)
    plt.plot(results["epoch"], results["metrics/precision"], label="Precision")
    plt.plot(results["epoch"], results["metrics/recall"], label="Recall")
    plt.xlabel("Epoch")
    plt.ylabel("Score")
    plt.legend()
    plt.title("Precision & Recall")
    plt.subplot(2, 3, 3)
    plt.plot(results["epoch"], results["metrics/mAP_0.5"], label="mAP@0.5")
    plt.plot(results["epoch"], results["metrics/mAP_0.5:0.95"],
             label="mAP@0.5:0.95")
    plt.xlabel("Epoch")
    plt.ylabel("mAP")
    plt.legend()
    plt.title("mAP")
    plt.tight_layout()
    plt.savefig("yolov5_training_results.png")
    plt.show()


if __name__ == "__main__":
    convert_voc_to_yolo("helmet_dataset/Annotations",
                        "helmet_dataset/labels", ["helmet", "head"])
    split_dataset()
    train_yolov5()
    validate_yolov5()
    detect_yolov5()
    plot_yolov5_results()
"""
Part 3: YOLOv8 and YOLOv10 — training, evaluation, inference, export, plus a
side-by-side comparison of the three trained models.

YOLOv8:  pip install ultralytics
YOLOv10: proposed by the THU-MIG team; NMS-free training via consistent dual
         assignments and an efficiency/accuracy-driven model design.
         Install: git clone https://github.com/THU-MIG/yolov10 && pip install -e .
         Pretrained weights yolov10{n,s,m,b,l,x}.pt are published under the
         repository's v1.1 release.
"""

# Dataset config shared by YOLOv8 (helmet_v8.yaml) and YOLOv10 (helmet_v10.yaml);
# the directory layout is identical to the one YOLOv5 used.
DATASET_YAML = """path: ../helmet_yolo_dataset
train: images/train
val: images/val
test: images/test
nc: 2
names: [helmet, head]
"""


def write_dataset_yaml(path="helmet_v8.yaml"):
    """Write the ultralytics dataset config file used by both v8 and v10."""
    with open(path, "w") as f:
        f.write(DATASET_YAML)


def train_yolov8():
    """Fine-tune YOLOv8n with the article's full hyper-parameter set."""
    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")  # yolov8s/m/l/x.pt also work
    return model.train(
        data="helmet_v8.yaml",
        epochs=100, imgsz=640, batch=16,
        device=0,          # GPU id
        workers=8,         # data-loader threads
        project="runs/train/helmet_yolov8", name="exp", pretrained=True,
        optimizer="Adam",
        lr0=0.01, lrf=0.01,                # initial LR / final LR factor
        momentum=0.937, weight_decay=0.0005,
        warmup_epochs=3, warmup_momentum=0.8, warmup_bias_lr=0.1,
        box=7.5, cls=0.5, dfl=1.5,         # loss gains
        # augmentation
        hsv_h=0.015, hsv_s=0.7, hsv_v=0.4,
        degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0,
        flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0,
    )


def evaluate_yolov8():
    """Validate the best YOLOv8 checkpoint and print the headline metrics."""
    from ultralytics import YOLO

    model = YOLO("runs/train/helmet_yolov8/exp/weights/best.pt")
    metrics = model.val(data="helmet_v8.yaml", split="val", imgsz=640,
                        batch=16, device=0,
                        project="runs/val/helmet_yolov8", name="exp")
    print(f"Precision: {metrics.box.mp:.4f}")
    print(f"Recall: {metrics.box.mr:.4f}")
    print(f"mAP50: {metrics.box.map50:.4f}")
    print(f"mAP50-95: {metrics.box.map:.4f}")
    return metrics


def predict_yolov8():
    """Run YOLOv8 inference on the test images and print every detection."""
    from ultralytics import YOLO

    model = YOLO("runs/train/helmet_yolov8/exp/weights/best.pt")
    results = model(
        source="../helmet_yolo_dataset/images/test",
        conf=0.25, iou=0.45, imgsz=640, device=0,
        save=True, save_txt=True, save_conf=True,
        project="runs/detect/helmet_yolov8", name="test", exist_ok=True,
    )
    for r in results:
        for box in r.boxes:
            cls = int(box.cls[0])
            conf = float(box.conf[0])
            xyxy = box.xyxy[0].tolist()  # [x1, y1, x2, y2]
            print(f"类别: {model.names[cls]}, 置信度: {conf:.2f}, 坐标: {xyxy}")


def export_yolov8(model):
    """Optional deployment exports for a trained ultralytics model."""
    model.export(format="onnx", imgsz=640)
    model.export(format="engine", imgsz=640)    # TensorRT
    model.export(format="openvino", imgsz=640)


def train_yolov10():
    """Fine-tune YOLOv10n; same recipe as v8 but AdamW plus a few extra knobs."""
    from ultralytics import YOLO

    model = YOLO("yolov10n.pt")
    return model.train(
        data="helmet_v10.yaml",
        epochs=100, imgsz=640, batch=16, device=0, workers=8,
        project="runs/train/helmet_yolov10", name="exp", pretrained=True,
        optimizer="AdamW",
        lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005,
        warmup_epochs=3, warmup_momentum=0.8, warmup_bias_lr=0.1,
        box=7.5, cls=0.5, dfl=1.5,
        overlap_mask=False, mask_ratio=4, dropout=0.0,
        val=True,     # validate after each epoch
        plots=True,   # save training plots
    )


def evaluate_yolov10():
    """Validate the best YOLOv10 checkpoint; print overall + per-class stats."""
    from ultralytics import YOLO

    model = YOLO("runs/train/helmet_yolov10/exp/weights/best.pt")
    metrics = model.val(data="helmet_v10.yaml", split="val", imgsz=640,
                        batch=16, device=0,
                        project="runs/val/helmet_yolov10", name="exp")
    print(f"类平均精度: {metrics.box.mp:.4f}")
    print(f"类平均召回: {metrics.box.mr:.4f}")
    print(f"mAP50: {metrics.box.map50:.4f}")
    print(f"mAP50-95: {metrics.box.map:.4f}")
    # NOTE(review): the article reads per-class values via
    # metrics.box.ap_class / ap50_class — verify these attribute names against
    # the installed ultralytics version (recent releases expose
    # metrics.box.ap50 and class_result(i) instead).
    for i, name in enumerate(["helmet", "head"]):
        print(f"\n类别 {name}:")
        print(f"  Precision: {metrics.box.ap_class[i]:.4f}")
        print(f"  mAP50: {metrics.box.ap50_class[i]:.4f}")
    return metrics


def predict_yolov10():
    """Run YOLOv10 inference on the test images, saving annotated outputs."""
    from ultralytics import YOLO

    model = YOLO("runs/train/helmet_yolov10/exp/weights/best.pt")
    return model(
        source="../helmet_yolo_dataset/images/test",
        conf=0.25, iou=0.45, imgsz=640, device=0,
        save=True, save_txt=True, save_conf=True,
        project="runs/detect/helmet_yolov10", name="test", exist_ok=True,
    )


def run_webcam_demo(weights="runs/train/helmet_yolov10/exp/weights/best.pt"):
    """Real-time helmet detection from the default camera; press 'q' to quit."""
    import cv2
    from ultralytics import YOLO

    model = YOLO(weights)
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        results = model(frame, conf=0.25, iou=0.45)[0]
        cv2.imshow("YOLOv10 Helmet Detection", results.plot())
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()


def compare_models():
    """Bar-chart comparison of YOLOv5s / YOLOv8n / YOLOv10n on the same test set.

    The numbers are the article's measurements. Its conclusions:
    YOLOv5s — mature and well documented, but larger/slower (stable
    industrial deployments); YOLOv8n — best speed/accuracy balance (general
    use); YOLOv10n — highest accuracy and NMS-free, but a newer model with a
    smaller community (accuracy-critical scenarios).
    """
    import matplotlib.pyplot as plt
    import pandas as pd

    comparison_data = {
        "Model": ["YOLOv5s", "YOLOv8n", "YOLOv10n"],
        "mAP50": [0.936, 0.942, 0.951],
        "mAP50-95": [0.687, 0.695, 0.712],
        "Precision": [0.921, 0.928, 0.935],
        "Recall": [0.904, 0.911, 0.923],
        "FPS (GPU)": [142, 168, 175],
        "Model Size (MB)": [14.4, 6.2, 5.6],
        "FLOPs (G)": [16.5, 8.7, 8.2],
    }
    df = pd.DataFrame(comparison_data)
    print(df.to_string(index=False))

    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    metrics = ["mAP50", "Precision", "Recall",
               "FPS (GPU)", "Model Size (MB)", "FLOPs (G)"]
    for i, metric in enumerate(metrics):
        ax = axes[i // 3, i % 3]
        ax.bar(df["Model"], df[metric],
               color=["#FF6B6B", "#4ECDC4", "#45B7D1"])
        ax.set_title(metric)
        ax.set_ylabel(metric)
        for j, v in enumerate(df[metric]):
            ax.text(j, v + 0.01, str(v), ha="center")
    plt.tight_layout()
    plt.savefig("model_comparison.png")
    plt.show()


if __name__ == "__main__":
    write_dataset_yaml("helmet_v8.yaml")
    write_dataset_yaml("helmet_v10.yaml")
    train_yolov8()
    evaluate_yolov8()
    predict_yolov8()
    train_yolov10()
    evaluate_yolov10()
    predict_yolov10()
    compare_models()
"""
Part 4: PyQt5 desktop GUI for the helmet-detection models.

Lets the user pick a trained model (YOLOv5s / YOLOv8n / YOLOv10n) and an
input source (camera / image / video), shows annotated frames and live
statistics (people total, helmets, violations).
"""
import sys

import cv2
import numpy as np
import torch
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from ultralytics import YOLO


class HelmetDetectionGUI(QMainWindow):
    """Main window: left control panel (model / params / source / stats),
    right video display, driven by a QTimer for camera/video streams."""

    def __init__(self):
        super().__init__()
        self.model = None
        self.camera = None
        self.timer = QTimer()
        self.initUI()
        self.initModel()

    def initUI(self):
        """Build the widgets and wire up all signals."""
        self.setWindowTitle("安全帽检测系统 v2.0")
        self.setGeometry(100, 100, 1200, 800)
        # Dark theme for the whole window.
        self.setStyleSheet("""
            QMainWindow { background-color: #2b2b2b; }
            QLabel { color: white; font-size: 14px; }
            QPushButton {
                background-color: #4CAF50; border: none; color: white;
                padding: 10px 24px; font-size: 16px; border-radius: 4px;
            }
            QPushButton:hover { background-color: #45a049; }
            QPushButton:disabled { background-color: #cccccc; }
            QGroupBox {
                color: white; font-size: 16px; border: 2px solid #555;
                border-radius: 5px; margin-top: 10px;
            }
            QGroupBox::title {
                subcontrol-origin: margin; left: 10px; padding: 0 5px 0 5px;
            }
            QComboBox {
                background-color: #3c3c3c; color: white; padding: 5px;
                border: 1px solid #555;
            }
        """)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        main_layout = QHBoxLayout(central_widget)

        # ---- left control panel -------------------------------------------
        left_panel = QWidget()
        left_layout = QVBoxLayout(left_panel)
        left_panel.setMaximumWidth(300)

        # Model selection.
        model_group = QGroupBox("模型配置")
        model_layout = QVBoxLayout()
        self.model_combo = QComboBox()
        self.model_combo.addItems(["YOLOv5s", "YOLOv8n", "YOLOv10n"])
        self.model_combo.currentTextChanged.connect(self.change_model)
        model_layout.addWidget(QLabel("选择模型:"))
        model_layout.addWidget(self.model_combo)
        self.load_model_btn = QPushButton("加载模型")
        self.load_model_btn.clicked.connect(self.load_model)
        model_layout.addWidget(self.load_model_btn)
        model_group.setLayout(model_layout)
        left_layout.addWidget(model_group)

        # Detection thresholds.
        param_group = QGroupBox("检测参数")
        param_layout = QFormLayout()
        self.conf_thres = QDoubleSpinBox()
        self.conf_thres.setRange(0.01, 1.0)
        self.conf_thres.setValue(0.25)
        self.conf_thres.setSingleStep(0.05)
        param_layout.addRow("置信度阈值:", self.conf_thres)
        self.iou_thres = QDoubleSpinBox()
        self.iou_thres.setRange(0.01, 1.0)
        self.iou_thres.setValue(0.45)
        self.iou_thres.setSingleStep(0.05)
        param_layout.addRow("IoU阈值:", self.iou_thres)
        param_group.setLayout(param_layout)
        left_layout.addWidget(param_group)

        # Input source.
        input_group = QGroupBox("输入源")
        input_layout = QVBoxLayout()
        self.source_combo = QComboBox()
        self.source_combo.addItems(["摄像头", "图片文件", "视频文件"])
        self.source_combo.currentTextChanged.connect(self.change_source)
        input_layout.addWidget(QLabel("选择输入源:"))
        input_layout.addWidget(self.source_combo)
        self.file_path = QLineEdit()
        self.file_path.setPlaceholderText("请选择文件...")
        self.file_path.setReadOnly(True)
        input_layout.addWidget(self.file_path)
        self.browse_btn = QPushButton("浏览")
        self.browse_btn.clicked.connect(self.browse_file)
        self.browse_btn.setEnabled(False)
        input_layout.addWidget(self.browse_btn)
        input_group.setLayout(input_layout)
        left_layout.addWidget(input_group)

        # Start / stop / save controls.
        control_group = QGroupBox("控制")
        control_layout = QVBoxLayout()
        self.start_btn = QPushButton("开始检测")
        self.start_btn.clicked.connect(self.start_detection)
        control_layout.addWidget(self.start_btn)
        self.stop_btn = QPushButton("停止检测")
        self.stop_btn.clicked.connect(self.stop_detection)
        self.stop_btn.setEnabled(False)
        control_layout.addWidget(self.stop_btn)
        self.save_btn = QPushButton("保存结果")
        self.save_btn.clicked.connect(self.save_results)
        self.save_btn.setEnabled(False)
        control_layout.addWidget(self.save_btn)
        control_group.setLayout(control_layout)
        left_layout.addWidget(control_group)

        # Live statistics.
        stats_group = QGroupBox("检测统计")
        stats_layout = QFormLayout()
        self.total_people = QLabel("0")
        self.helmet_count = QLabel("0")
        self.no_helmet_count = QLabel("0")
        self.violation_rate = QLabel("0%")
        stats_layout.addRow("总人数:", self.total_people)
        stats_layout.addRow("佩戴安全帽:", self.helmet_count)
        stats_layout.addRow("未佩戴安全帽:", self.no_helmet_count)
        stats_layout.addRow("违规率:", self.violation_rate)
        stats_group.setLayout(stats_layout)
        left_layout.addWidget(stats_group)
        left_layout.addStretch()

        # ---- right display area -------------------------------------------
        right_panel = QWidget()
        right_layout = QVBoxLayout(right_panel)
        self.video_label = QLabel()
        self.video_label.setAlignment(Qt.AlignCenter)
        self.video_label.setMinimumSize(800, 600)
        self.video_label.setStyleSheet(
            "border: 2px solid #555; background-color: black;")
        right_layout.addWidget(self.video_label)
        self.status_label = QLabel("就绪")
        self.status_label.setStyleSheet("color: #4CAF50;")
        right_layout.addWidget(self.status_label)

        main_layout.addWidget(left_panel)
        main_layout.addWidget(right_panel, 1)

        # Frame pump for camera / video sources.
        self.timer.timeout.connect(self.update_frame)

    def initModel(self):
        """Reset model state — nothing loaded yet."""
        self.model = None
        self.current_model = None

    def change_model(self, model_name):
        """Remember the newly selected model and allow (re)loading it."""
        self.current_model = model_name
        self.load_model_btn.setEnabled(True)

    def load_model(self):
        """Load the selected checkpoint: YOLOv5 via torch.hub, v8/v10 via
        ultralytics. Errors are surfaced in a dialog + status label."""
        try:
            self.status_label.setText(f"正在加载 {self.current_model} 模型...")
            self.status_label.setStyleSheet("color: orange;")
            QApplication.processEvents()  # keep the UI responsive while loading
            if self.current_model == "YOLOv5s":
                self.model = torch.hub.load(
                    "ultralytics/yolov5", "custom",
                    path="runs/train/helmet_yolov5/exp/weights/best.pt",
                    force_reload=True)
            elif self.current_model == "YOLOv8n":
                self.model = YOLO("runs/train/helmet_yolov8/exp/weights/best.pt")
            elif self.current_model == "YOLOv10n":
                self.model = YOLO("runs/train/helmet_yolov10/exp/weights/best.pt")
            self.status_label.setText(f"{self.current_model} 模型加载成功")
            self.status_label.setStyleSheet("color: #4CAF50;")
            self.start_btn.setEnabled(True)
        except Exception as e:
            QMessageBox.critical(self, "错误", f"模型加载失败: {str(e)}")
            self.status_label.setText("模型加载失败")
            self.status_label.setStyleSheet("color: red;")

    def change_source(self, source):
        """File browsing only makes sense for image/video sources."""
        self.browse_btn.setEnabled(source != "摄像头")
        self.file_path.clear()

    def browse_file(self):
        """Pick an image or video file depending on the selected source."""
        source = self.source_combo.currentText()
        if source == "图片文件":
            file_path, _ = QFileDialog.getOpenFileName(
                self, "选择图片", "", "图片文件 (*.jpg *.jpeg *.png *.bmp)")
        elif source == "视频文件":
            file_path, _ = QFileDialog.getOpenFileName(
                self, "选择视频", "", "视频文件 (*.mp4 *.avi *.mov)")
        else:
            return
        if file_path:
            self.file_path.setText(file_path)

    def start_detection(self):
        """Start detecting on the chosen source (camera / image / video)."""
        if self.model is None:
            QMessageBox.warning(self, "警告", "请先加载模型")
            return
        source = self.source_combo.currentText()
        if source == "摄像头":
            self.camera = cv2.VideoCapture(0)
            if not self.camera.isOpened():
                QMessageBox.critical(self, "错误", "无法打开摄像头")
                return
            self.timer.start(30)  # ~30 fps
        elif source in ("图片文件", "视频文件"):
            file_path = self.file_path.text()
            if not file_path:
                QMessageBox.warning(self, "警告", "请先选择文件")
                return
            if source == "图片文件":
                # One-shot detection on a still image.
                frame = cv2.imread(file_path)
                self.process_frame(frame)
                self.display_frame(frame)
            else:
                self.camera = cv2.VideoCapture(file_path)
                if not self.camera.isOpened():
                    QMessageBox.critical(self, "错误", "无法打开视频文件")
                    return
                self.timer.start(30)
        self.start_btn.setEnabled(False)
        self.stop_btn.setEnabled(True)
        self.save_btn.setEnabled(True)

    def stop_detection(self):
        """Stop the frame pump and release the capture device."""
        self.timer.stop()
        if self.camera:
            self.camera.release()
            self.camera = None
        self.start_btn.setEnabled(True)
        self.stop_btn.setEnabled(False)
        self.save_btn.setEnabled(False)
        self.status_label.setText("检测已停止")

    def update_frame(self):
        """Timer slot: grab, process and show the next frame; auto-stop at EOF."""
        if self.camera and self.camera.isOpened():
            ret, frame = self.camera.read()
            if ret:
                self.process_frame(frame)
                self.display_frame(frame)
            else:
                self.stop_detection()

    def process_frame(self, frame):
        """Run detection on one BGR frame in place: draw boxes, update stats,
        stamp the current time onto the frame."""
        if self.model is None:
            return
        if self.current_model == "YOLOv5s":
            # torch.hub model — results come back as a pandas DataFrame.
            # NOTE(review): this path ignores the conf/IoU spin boxes, matching
            # the original article; consider setting model.conf / model.iou.
            results = self.model(frame)
            detections = results.pandas().xyxy[0]
            helmet_count = len(detections[detections["name"] == "helmet"])
            head_count = len(detections[detections["name"] == "head"])
            for _, row in detections.iterrows():
                x1, y1 = int(row["xmin"]), int(row["ymin"])
                x2, y2 = int(row["xmax"]), int(row["ymax"])
                conf = row["confidence"]
                cls = row["name"]
                color = (0, 255, 0) if cls == "helmet" else (0, 0, 255)
                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                label = f"{cls} {conf:.2f}"
                cv2.putText(frame, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
        else:
            # YOLOv8 / YOLOv10 via ultralytics.
            results = self.model(frame, conf=self.conf_thres.value(),
                                 iou=self.iou_thres.value())[0]
            helmet_count = 0
            head_count = 0
            for box in results.boxes:
                cls = int(box.cls[0])
                conf = float(box.conf[0])
                x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
                if cls == 0:  # helmet → green box
                    helmet_count += 1
                    color = (0, 255, 0)
                else:         # bare head → red box
                    head_count += 1
                    color = (0, 0, 255)
                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                label = f"{results.names[cls]} {conf:.2f}"
                cv2.putText(frame, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        # Update the statistics panel.
        total = helmet_count + head_count
        self.total_people.setText(str(total))
        self.helmet_count.setText(str(helmet_count))
        self.no_helmet_count.setText(str(head_count))
        if total > 0:
            violation = (head_count / total) * 100
            self.violation_rate.setText(f"{violation:.1f}%")
        else:
            self.violation_rate.setText("0%")

        # Timestamp overlay.
        cv2.putText(frame,
                    QDateTime.currentDateTime().toString("yyyy-MM-dd hh:mm:ss"),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    def display_frame(self, frame):
        """Convert a BGR frame to QPixmap and show it scaled to the label."""
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_frame.shape
        bytes_per_line = ch * w
        qt_image = QImage(rgb_frame.data, w, h, bytes_per_line,
                          QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qt_image)
        scaled_pixmap = pixmap.scaled(self.video_label.size(),
                                      Qt.KeepAspectRatio,
                                      Qt.SmoothTransformation)
        self.video_label.setPixmap(scaled_pixmap)

    def save_results(self):
        """Save the currently displayed (annotated) frame to an image file."""
        if self.video_label.pixmap():
            file_path, _ = QFileDialog.getSaveFileName(
                self, "保存结果", "", "图片文件 (*.jpg *.png)")
            if file_path:
                self.video_label.pixmap().save(file_path)
                QMessageBox.information(self, "提示", "结果保存成功")

    def closeEvent(self, event):
        """Release the camera cleanly when the window is closed."""
        self.stop_detection()
        event.accept()


def main():
    """Application entry point."""
    app = QApplication(sys.argv)
    gui = HelmetDetectionGUI()
    gui.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()