yolov8-用自己的数据集+前端界面

Likeandno 2024-06-29 10:03:03 阅读 73

目录

前言

一、图像处理

1.1存放说明

1.2数据获取

1.3图像标注

1.4json转为xml文件 

二、数据处理

2.1划分数据集

2.2提取数据

三、利用YOLOv8并导出模型

 3.1下载模型

 3.2配置文件

3.2开始训练

3.3查看检测效果

四、前端

4.1内容介绍

4.2界面布局

按键

视图

源码


前言

Yolo(You Only Look Once)是一种one-stage目标检测算法,即仅需要 “看” 一次就可以识别出图片中物体的class类别和边界框。Yolov8是Ultralytics公司最新推出的Yolo系列目标检测算法,可以用于图像分类、物体检测和实例分割等任务。此次我们根据yolov8,在此基础上延伸到“特定的检测目标”。yolo在此文章不做过多说明,详细的说明可以看官方文档YOLOv8 - Ultralytics YOLO Docs

运行环境:Python 3.11.9

一、图像处理

1.1存放说明

存放格式如下:

其中images存放的是我们图片的路径,data_annotated为用labelme将要标注出来的json

dataSet

dataSet/images

dataSet/data_annotated

1.2数据获取

数据根据我们自己所需要的数据集用python进行爬取或者自行添加就可以了这里不过多赘述。

1.3图像标注

我们用labelme进行标注,对于目标伤员进行标注,并设置标签为fall。并导出格式为json。以下为标注时的参考:

1.4json转为xml文件 

首先我们在其运行labelme相同的根目录下添加labelme2voc.py文件,在GitHub也有,一共有三个labelme2voc.py。但我们只需用转为xml的即可以下为labelme2voc.py的代码。

#!/usr/bin/env python

from __future__ import print_function

import argparse

import glob

import os

import os.path as osp

import sys

import imgviz

import labelme

try:

import lxml.builder

import lxml.etree

except ImportError:

print("Please install lxml:\n\n pip install lxml\n")

sys.exit(1)

def main():
    """Convert a labelme-annotated directory into a Pascal-VOC style dataset.

    Command line:
        labelme2voc.py INPUT_DIR OUTPUT_DIR --labels LABELS_FILE [--noviz]

    Produces OUTPUT_DIR/{JPEGImages,Annotations[,AnnotationsVisualization]}
    plus class_names.txt. Exits with status 1 if OUTPUT_DIR already exists.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("input_dir", help="input annotated directory")
    parser.add_argument("output_dir", help="output dataset directory")
    parser.add_argument("--labels", help="labels file", required=True)
    parser.add_argument("--noviz", help="no visualization", action="store_true")
    args = parser.parse_args()

    # Refuse to overwrite an existing dataset.
    if osp.exists(args.output_dir):
        print("Output directory already exists:", args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, "JPEGImages"))
    os.makedirs(osp.join(args.output_dir, "Annotations"))
    if not args.noviz:
        os.makedirs(osp.join(args.output_dir, "AnnotationsVisualization"))
    print("Creating dataset:", args.output_dir)

    # Build the class-name table.  By labelme convention line 0 must be
    # "__ignore__" (id -1) and line 1 "_background_" (id 0).
    class_names = []
    class_name_to_id = {}
    # FIX: the original iterated open(...).readlines() and never closed
    # the file; iterate the handle inside a `with` block instead.
    with open(args.labels) as labels_file:
        for i, line in enumerate(labels_file):
            class_id = i - 1  # starts with -1
            class_name = line.strip()
            class_name_to_id[class_name] = class_id
            if class_id == -1:
                assert class_name == "__ignore__"
                continue
            elif class_id == 0:
                assert class_name == "_background_"
            class_names.append(class_name)
    class_names = tuple(class_names)
    print("class_names:", class_names)

    out_class_names_file = osp.join(args.output_dir, "class_names.txt")
    with open(out_class_names_file, "w") as f:
        f.writelines("\n".join(class_names))
    print("Saved class_names:", out_class_names_file)

    for filename in glob.glob(osp.join(args.input_dir, "*.json")):
        print("Generating dataset from:", filename)
        label_file = labelme.LabelFile(filename=filename)

        base = osp.splitext(osp.basename(filename))[0]
        out_img_file = osp.join(args.output_dir, "JPEGImages", base + ".jpg")
        out_xml_file = osp.join(args.output_dir, "Annotations", base + ".xml")
        if not args.noviz:
            out_viz_file = osp.join(
                args.output_dir, "AnnotationsVisualization", base + ".jpg"
            )

        # Re-encode the image embedded in the labelme JSON as a JPEG.
        img = labelme.utils.img_data_to_arr(label_file.imageData)
        imgviz.io.imsave(out_img_file, img)

        maker = lxml.builder.ElementMaker()
        xml = maker.annotation(
            maker.folder(),
            maker.filename(base + ".jpg"),
            maker.database(),  # e.g., The VOC2007 Database
            maker.annotation(),  # e.g., Pascal VOC2007
            maker.image(),  # e.g., flickr
            maker.size(
                maker.height(str(img.shape[0])),
                maker.width(str(img.shape[1])),
                maker.depth(str(img.shape[2])),
            ),
            maker.segmented(),
        )

        bboxes = []
        labels = []
        for shape in label_file.shapes:
            # Only rectangle annotations can be expressed as VOC bndboxes.
            if shape["shape_type"] != "rectangle":
                print(
                    "Skipping shape: label={label}, "
                    "shape_type={shape_type}".format(**shape)
                )
                continue

            class_name = shape["label"]
            class_id = class_names.index(class_name)

            (xmin, ymin), (xmax, ymax) = shape["points"]
            # swap if min is larger than max.
            xmin, xmax = sorted([xmin, xmax])
            ymin, ymax = sorted([ymin, ymax])

            bboxes.append([ymin, xmin, ymax, xmax])
            labels.append(class_id)

            xml.append(
                maker.object(
                    maker.name(shape["label"]),
                    maker.pose(),
                    maker.truncated(),
                    maker.difficult(),
                    maker.bndbox(
                        maker.xmin(str(xmin)),
                        maker.ymin(str(ymin)),
                        maker.xmax(str(xmax)),
                        maker.ymax(str(ymax)),
                    ),
                )
            )

        if not args.noviz:
            captions = [class_names[label] for label in labels]
            viz = imgviz.instances2rgb(
                image=img,
                labels=labels,
                bboxes=bboxes,
                captions=captions,
                font_size=15,
            )
            imgviz.io.imsave(out_viz_file, viz)

        with open(out_xml_file, "wb") as f:
            f.write(lxml.etree.tostring(xml, pretty_print=True))


if __name__ == "__main__":
    main()

然后我们在存放labelme2voc.py的根目录下,打开命令窗口输入我们转换的命令:

python labelme2voc.py datasets/data_annotated datasets/data_dataset_voc --labels datasets/label.txt#将路径改为绝对路径

执行完毕后会生成datasets/data_dataset_voc文件夹,文件夹内的data_dataset_voc\Annotations就是我们要的xml文件。然后再将其Annotations文件夹移动到datasets文件夹下

二、数据处理

2.1划分数据集

对于我们的数据划分为4个txt文件(ImageSets文件夹自行创建)

ImageSets\trainval.txt,ImageSets\test.txt,ImageSets\train.txt,ImageSets\val.txt

参考代码如下:

import os
import random

# Split ratios: first trainval vs. test, then train vs. val inside trainval.
trainval_percent = 0.9  # fraction of the whole dataset used for train+val
train_percent = 0.9     # fraction of trainval used for training

total_xml = os.listdir("Annotations")  # all annotation (.xml) file names
txtsavepath = 'ImageSets'              # directory receiving the split lists

num = len(total_xml)
indices = range(num)                   # renamed: `list` shadowed the builtin
tv = int(num * trainval_percent)       # number of train+val samples
tr = int(tv * train_percent)           # number of train samples

trainval = random.sample(indices, tv)  # indices going to train+val
train = random.sample(trainval, tr)    # subset of those going to train
# Sets make the membership tests in the loop O(1) instead of O(n).
trainval_set = set(trainval)
train_set = set(train)

# BUG FIX: the original opened 'ImageSets\trainval.txt' etc. as plain
# strings, where '\t' is a TAB escape and silently corrupts the path
# ('\v' likewise in val.txt).  os.path.join is portable and safe, and
# `with` guarantees the files are closed even on error.
with open(os.path.join(txtsavepath, 'trainval.txt'), 'w') as ftrainval, \
     open(os.path.join(txtsavepath, 'test.txt'), 'w') as ftest, \
     open(os.path.join(txtsavepath, 'train.txt'), 'w') as ftrain, \
     open(os.path.join(txtsavepath, 'val.txt'), 'w') as fval:
    # Route every xml base name into the txt file(s) of its split.
    for i in indices:
        name = total_xml[i][:-4] + '\n'  # strip the '.xml' extension
        if i in trainval_set:
            ftrainval.write(name)
            if i in train_set:
                ftrain.write(name)
            else:
                fval.write(name)
        else:
            ftest.write(name)

2.2提取数据

我们利用提取出来的xml文件,提取目标值和图像特征,供yolov8处理,参考代码、及某个图片提取的特征如下:

# Convert labelme polygon/rectangle annotations from JSON to YOLO txt.
import json
import os

# Label-name -> class-id table; edit to match your own dataset.
name2id = {'fall': 0}


def convert(img_size, box):
    """Convert an absolute corner box to normalized YOLO center format.

    Args:
        img_size: (width, height) of the image in pixels.
        box: (x1, y1, x2, y2) corner coordinates in pixels.

    Returns:
        (x_center, y_center, width, height), each scaled to [0, 1].
    """
    scale_w = 1. / (img_size[0])
    scale_h = 1. / (img_size[1])
    center_x = (box[0] + box[2]) / 2.0 * scale_w
    center_y = (box[1] + box[3]) / 2.0 * scale_h
    norm_w = abs(box[2] - box[0]) * scale_w
    norm_h = abs(box[3] - box[1]) * scale_h
    return (center_x, center_y, norm_w, norm_h)

def decode_json(json_floder_path, txt_outer_path, json_name):
    """Convert one labelme JSON annotation file into a YOLO .txt label file.

    Args:
        json_floder_path: folder containing the labelme JSON files.
        txt_outer_path: output folder for the .txt files; must end with a
            path separator because it is joined by string concatenation.
        json_name: name of the JSON file to convert (must end in ".json").
    """
    txt_name = txt_outer_path + json_name[:-5] + '.txt'  # strip ".json"
    with open(txt_name, 'w') as f:
        json_path = os.path.join(json_floder_path, json_name)
        # NOTE(review): 'gb2312' is an unusual encoding for labelme JSON
        # (normally UTF-8); kept from the original -- confirm for your data.
        # FIX: the original leaked this file handle via json.load(open(...)).
        with open(json_path, 'r', encoding='gb2312', errors='ignore') as jf:
            data = json.load(jf)
        img_w = data['imageWidth']   # image width  (original comments had width/height swapped)
        img_h = data['imageHeight']  # image height
        if data['shapes']:
            # Diagnostic print of the first shape's type, as in the original.
            print(data['shapes'][0]['shape_type'])
        for shape in data['shapes']:
            label_name = shape['label']  # class name assigned in labelme
            if shape['shape_type'] == 'polygon':
                # Collapse the polygon to its axis-aligned bounding box.
                xs = [float(p[0]) for p in shape['points']]
                ys = [float(p[1]) for p in shape['points']]
                bb = (min(xs), max(ys), max(xs), min(ys))
            elif shape['shape_type'] == 'rectangle':
                # Two corner points are already a box; no conversion needed.
                x1 = float(shape['points'][0][0])
                y1 = float(shape['points'][0][1])
                x2 = float(shape['points'][1][0])
                y2 = float(shape['points'][1][1])
                bb = (x1, y1, x2, y2)
            else:
                # BUG FIX: the original fell through here with `bb` either
                # undefined (NameError) or stale from a previous shape.
                continue
            bbox = convert((img_w, img_h), bb)
            # BUG FIX: the original wrapped this write in a bare
            # `try/except: pass`, hiding every error; the only expected
            # failure is an unknown label, so test for it explicitly.
            if label_name in name2id:
                f.write(str(name2id[label_name]) + " " + " ".join([str(a) for a in bbox]) + '\n')

if __name__ == "__main__":
    # Absolute paths: folder of input JSON files and folder for output txt.
    json_floder_path = 'E:/datasets/data_annotated'
    txt_outer_path = 'E:/datasets/labels/'
    json_names = os.listdir(json_floder_path)
    total = len(json_names)
    print("共有:{}个文件待转化".format(total))
    # Convert each file, reporting how many remain after every one.
    for done, json_name in enumerate(json_names, start=1):
        decode_json(json_floder_path, txt_outer_path, json_name)
        print("还剩下{}个文件未转化".format(total - done))
    print('转化全部完毕')

读取数据集路径存放进txt,参考代码如下:

import xml.etree.ElementTree as ET

import pickle

import os

from os import listdir, getcwd

from os.path import join

sets = ['train', 'val', 'test']

# FIX: the labels directory check/creation is loop-invariant -- the
# original re-ran it once per split.  Raw string avoids accidental
# backslash escapes in the Windows path.
if not os.path.exists(r'E:\datasets\labels/'):
    os.makedirs(r'E:\datasets\labels/')

for image_set in sets:
    # Read the image ids (one base name per line) listed in
    # ImageSets/<split>.txt.  FIX: the original never closed this handle.
    with open(r'E:\datasets\ImageSets\%s.txt' % (image_set)) as ids_file:
        image_ids = ids_file.read().strip().split()
    # Write the full .jpg path of every id into E:\datasets\<split>.txt.
    with open(r'E:\datasets\%s.txt' % (image_set), 'w') as list_file:
        for image_id in image_ids:
            list_file.write('E:/datasets/img_data/%s.jpg\n' % (image_id))

三、利用YOLOv8并导出模型

 3.1下载模型

由于懒得安装yolov8,只安装我们需要的模型yolov8n.pt即可,对于各模型的说明可以去官方文档查看,下载链接如下,需要的自行下载:

#用于目标检测

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt

#语义分割

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt

#人体姿态

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt

#用于分类任务

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt

https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt

 3.2配置文件

配置文件(fall.yaml)放到与datasets/下即可,配置文件内容如下:

# Please change these to absolute paths.
# BUG FIX: the original had "train:dataSet\train.txt" with no space after
# the colon, which YAML does not parse as a key/value mapping.
train: dataSet\train.txt
val: dataSet\val.txt
test: dataSet\test.txt

# number of classes
nc: 1

# class names
names: ['fall']

3.3开始训练

由于条件限制这边使用“CPU”训练(较久),有“GPU”的建议使用“GPU”,其他参数调配自行添加,参考代码如下:

import os

# Training configuration passed through to the YOLO CLI.
task = "detect"        # task type
mode = "train"         # run mode
model = "yolov8n.pt"   # pretrained weights to fine-tune
data = "fall.yaml"     # dataset configuration file
batch_size = 32        # batch size
epochs = 100           # number of training epochs
imgsz = 640            # input image size
workers = 16           # dataloader worker threads
device = 'cpu'         # training device ('cpu' or a GPU index)

# Assemble the command-line invocation and hand it to the shell.
command = (
    f"yolo task={task} mode={mode} model={model} data={data} "
    f"batch={batch_size} epochs={epochs} imgsz={imgsz} "
    f"workers={workers} device={device}"
)
os.system(command)

3.4导出可视化图表

在这也就是看一看,模型的召回、精确率等的可视化参数,自行观看,这里不做解释说明,以下为参考代码:

import os

# Validation configuration passed through to the YOLO CLI.
task = "detect"                               # task type
mode = "val"                                  # run validation
model = "runs/detect/train3/weights/best.pt"  # trained weights to evaluate
data = "fall.yaml"                            # dataset configuration file
device = 'cpu'                                # evaluation device
plots = True                                  # whether to draw metric plots

# Assemble the validation command and run it.
command = (
    f"yolo task={task} mode={mode} model={model} "
    f"data={data} device={device} plots={plots}"
)
os.system(command)

import torch
from ultralytics import YOLO

# Reload the trained weights and run a prediction pass over a folder on CPU.
model = YOLO('runs/detect/train3/weights/best.pt')
results = model.predict(source='e:\youguan\data\images', device=torch.device('cpu'))
print(results)

3.5查看检测效果

import cv2
from ultralytics import YOLO

# Run the exported model on one image and draw boxes + class labels.
model = YOLO('runs/detect/train3/weights/best.onnx')
classnameList = model.names  # mapping of class id -> class name
print(classnameList)

# Read the test image.
image_path = 'cs2.webp'
img = cv2.imread(image_path)

results = model.predict(img, stream=True)
for result in results:
    boxes = result.boxes.cpu().numpy()
    for box in boxes:
        r = box.xyxy[0].astype(int)  # box corners as [x1, y1, x2, y2]
        # FIX: pass plain int tuples -- recent OpenCV releases reject
        # numpy array slices as point arguments.
        cv2.rectangle(img, tuple(r[:2]), tuple(r[2:]), (255, 255, 255), 2)
        # FIX: box.cls holds floats; cast to int so the lookup matches the
        # integer keys of model.names reliably.
        classID = int(box.cls[0])
        label = classnameList[classID]
        # Place the label slightly above the top-left corner of the box.
        x = r[0] - 10
        y = r[1] - 10
        print(x, y)
        cv2.putText(img, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

cv2.imshow('test', img)
cv2.waitKey(0)

效果:

四、前端

4.1内容介绍

这个前端是一个基于PyQt5的图像检测应用程序,它具有选择模型、图片检测、批量图片检测等功能。用户可以通过界面上的按钮进行相应的操作,如选择模型文件、选择要检测的图片或文件夹等。程序会根据用户的选择进行相应的处理,并将结果显示在界面上。

4.2界面布局

4.2.1按键

按键是用户与前端应用程序交互的重要方式之一。在这个基于PyQt5的图像检测应用程序中,设计了一系列按键,以满足用户的不同需求和操作。

模型选择按键允许用户加载预训练的YOLO模型文件。一旦用户点击该按键,将弹出一个文件对话框,用户可以从中选择所需的模型文件。加载成功后,图片检测和批量图片检测按键将被启用,以便用户进行后续操作。

图片检测按键用于单个图片的检测任务。用户点击后,将弹出文件对话框,选择需要检测的图片文件。应用程序将使用已加载的模型对所选图片进行检测,并实时显示检测结果。

批量图片检测按键面向需要检测多张图片的用户。点击后,用户需选择一个包含多张图片的文件夹。程序将自动检测该文件夹中所有符合条件的图片,并将结果显示在界面上。

为了方便用户查看不同图片的检测结果,提供了上一张和下一张图片按键。这两个按键使用户能够在检测完成的图片之间轻松切换,进行对比分析。

退出按键则允许用户安全地关闭应用程序,确保所有操作得到妥善处理。

这些按键的设计旨在提高用户体验,使图像检测过程更加直观、便捷。通过简单的点击操作,用户即可完成复杂的图像检测任务,无需深入了解背后的技术细节。

下面是图片展示

4.2.2视图

在基于PyQt5的图像检测应用程序中,图像的显示采用了一种直观的分割方式,左边原图右边检测图。原始图像显示在左侧,而经过YOLO模型检测后的图像则展示在右侧。这种并排呈现的方式,允许用户轻松对比原始场景与算法识别后的差异。左侧的原始图像展示了未处理的图片,用户可以直接查看图片的原始状态,没有附加任何检测标记或注释。相对应的,右侧的检测图像则通过矩形框、标签和连接线,清晰地标注出模型识别的对象及其类别。

这种左右分割的显示模式强化了用户对检测结果的理解和分析,使得每一处检测都清晰可见,每个标注都易于辨认。这样的设计不仅提升了用户体验,还有助于专业人士或研究者评估模型的检测准确性和改进方向。

下面是图片展示

4.2.3前端完整源码

前端参考代码如下:

import sys

import os

from PyQt5.QtCore import Qt

from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget, QPushButton, QHBoxLayout, QMessageBox, QFileDialog, QSplitter

from PyQt5.QtGui import QImage, QPixmap

import cv2

from ultralytics import YOLO

from PyQt5.QtWidgets import QLabel, QVBoxLayout, QWidget, QPushButton, QHBoxLayout, QMessageBox, QFileDialog, QSplitter

class Worker:
    """Wraps a YOLO model: interactive loading plus image detection helpers."""

    def __init__(self):
        # The YOLO instance; stays None until load_model() succeeds.
        self.model = None

    def load_model(self):
        """Ask the user for a .pt file and load it.

        Returns True when a model is available afterwards, else False.
        """
        model_path, _ = QFileDialog.getOpenFileName(None, "选择模型文件", "", "模型文件 (*.pt)")
        if model_path:
            self.model = YOLO(model_path)
        return bool(self.model)

    def detect_image(self, image_path):
        """Run the model on a single image file.

        Returns the prediction results, or None if the file is unreadable.
        """
        image = cv2.imread(image_path)
        if image is None:
            return None
        return self.model.predict(image)

    def detect_images(self, image_paths):
        """Detect every image in `image_paths`; unreadable files are dropped."""
        detections = []
        for path in image_paths:
            outcome = self.detect_image(path)
            if outcome:
                detections.append(outcome)
        return detections

    def detect_images_in_folder(self, folder_path):
        """Detect all images found directly inside `folder_path`.

        NOTE(review): the extension filter is case-sensitive and, unlike
        MainWindow.batch_detect_folder, omits '.webp' -- confirm intended.
        """
        candidates = [
            os.path.join(folder_path, name)
            for name in os.listdir(folder_path)
            if name.endswith(('.jpg', '.jpeg', '.png'))
        ]
        return self.detect_images(candidates)

class MainWindow(QMainWindow):
    """Main window: original/detected image panes plus control buttons."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Numb753")
        self.setGeometry(100, 100, 1200, 500)

        self.label = QLabel()
        self.label.setAlignment(Qt.AlignCenter)

        self.worker = Worker()
        self.image_paths = []          # paths of the current batch
        self.results = []              # detection results, parallel to image_paths
        # FIX: initialize here; the original only set this in
        # batch_detect_folder, so navigating first raised AttributeError.
        self.current_image_index = -1  # -1 means nothing shown yet

        # Main layout: image panes on top, button row below.
        main_layout = QVBoxLayout()

        # Original image (left) and detection result (right), side by side.
        hbox_images = QHBoxLayout()
        self.original_image_label = QLabel()
        self.detected_image_label = QLabel()
        hbox_images.addWidget(self.original_image_label)
        hbox_images.addWidget(self.detected_image_label)
        main_layout.addLayout(hbox_images)

        hbox_buttons = QHBoxLayout()

        # Model selection button.
        self.load_model_button = QPushButton("模型选择")
        self.load_model_button.clicked.connect(self.load_model)
        hbox_buttons.addWidget(self.load_model_button)

        # Single-image detection (disabled until a model is loaded).
        self.image_detect_button = QPushButton("图片检测")
        self.image_detect_button.clicked.connect(self.detect_image)
        self.image_detect_button.setEnabled(False)
        hbox_buttons.addWidget(self.image_detect_button)

        # Batch detection over a folder (disabled until a model is loaded).
        self.batch_detect_button = QPushButton("批量图片检测")
        self.batch_detect_button.clicked.connect(self.batch_detect_folder)
        self.batch_detect_button.setEnabled(False)
        hbox_buttons.addWidget(self.batch_detect_button)

        # Previous / next navigation through batch results.
        self.previous_image_button = QPushButton("上一张")
        self.previous_image_button.clicked.connect(self.show_previous_image)
        self.previous_image_button.setEnabled(False)
        hbox_buttons.addWidget(self.previous_image_button)

        self.next_image_button = QPushButton("下一张")
        self.next_image_button.clicked.connect(self.show_next_image)
        self.next_image_button.setEnabled(False)
        hbox_buttons.addWidget(self.next_image_button)

        # Exit button.
        self.exit_button = QPushButton("退出")
        self.exit_button.clicked.connect(self.exit_application)
        hbox_buttons.addWidget(self.exit_button)

        main_layout.addLayout(hbox_buttons)

        # FIX: the original built a first central widget and immediately
        # replaced it with this one; a single central widget suffices.
        central_widget = QWidget()
        central_widget.setLayout(main_layout)
        self.setCentralWidget(central_widget)

    def _show_annotated(self, image_path, annotated_image):
        """Show `image_path` on the left and the annotated BGR array on the right.

        Extracted because detect_image / show_previous_image /
        show_next_image all repeated this QImage/QPixmap dance verbatim.
        """
        height, width, channel = annotated_image.shape
        bytesPerLine = 3 * width
        qimage = QImage(annotated_image.data, width, height, bytesPerLine, QImage.Format_BGR888)
        pixmap = QPixmap.fromImage(qimage)
        self.detected_image_label.setPixmap(
            pixmap.scaled(self.detected_image_label.size(), Qt.KeepAspectRatio))
        self.original_image_label.setPixmap(
            QPixmap.fromImage(QImage(image_path)).scaled(
                self.original_image_label.size(), Qt.KeepAspectRatio))

    def load_model(self):
        """Load a model; enable the detection buttons on success."""
        if self.worker.load_model():
            self.image_detect_button.setEnabled(True)
            self.batch_detect_button.setEnabled(True)

    def detect_image(self):
        """Pick one image file, run detection on it and show the result."""
        image_path, _ = QFileDialog.getOpenFileName(None, "选择图片文件", "", "图片文件 (*.jpg *.jpeg *.png)")
        if not image_path:
            QMessageBox.critical(self, "错误", "请选择图片文件")
            return
        results = self.worker.detect_image(image_path)
        if results:
            self._show_annotated(image_path, results[0].plot())
        else:
            QMessageBox.critical(self, "错误", "无法检测图片")

    def batch_detect_folder(self):
        """Pick a folder, detect all images in it and show the first result."""
        folder_path = QFileDialog.getExistingDirectory(None, "选择文件夹")
        if not folder_path:
            QMessageBox.critical(self, "错误", "请选择一个文件夹")
            return
        self.image_paths = [
            os.path.join(folder_path, f) for f in os.listdir(folder_path)
            if f.endswith(('.jpg', '.jpeg', '.png', '.webp'))
        ]
        self.results = self.worker.detect_images(self.image_paths)
        # BUG FIX: the original set the index to 0 and then called
        # show_next_image(), which advanced to index 1 and silently
        # skipped the first image. Starting at -1 shows index 0.
        self.current_image_index = -1
        self.show_next_image()
        self.previous_image_button.setEnabled(True)
        self.next_image_button.setEnabled(True)

    def show_previous_image(self):
        """Step back one image in the batch results."""
        if self.current_image_index > 0:
            self.current_image_index -= 1
            result = self.results[self.current_image_index]
            self._show_annotated(self.image_paths[self.current_image_index], result[0].plot())
        else:
            QMessageBox.information(self, "提示", "已经是第一张图片了")

    def show_next_image(self):
        """Step forward one image in the batch results."""
        if self.current_image_index < len(self.image_paths) - 1:
            self.current_image_index += 1
            result = self.results[self.current_image_index]
            self._show_annotated(self.image_paths[self.current_image_index], result[0].plot())
        else:
            QMessageBox.information(self, "提示", "已经是最后一张图片了")

    def exit_application(self):
        """Terminate the application."""
        sys.exit()

if __name__ == "__main__":
    # Standard Qt bootstrap: build the application, show the main window,
    # then hand control to the event loop until the user quits.
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    sys.exit(app.exec_())

参考文献

YOLOv8教程系列:一、使用自定义数据集训练YOLOv8模型(详细版教程,你只看一篇->调参攻略),包含环境搭建/数据准备/模型训练/预测/验证/导出等_yolov8训练自己的数据集-CSDN博客文章浏览阅读10w+次,点赞544次,收藏4.5k次。YOLOv8教程系列:一、使用自定义数据集训练YOLOv8模型(详细版教程,你只看一篇->调参攻略),包含环境搭建/数据准备/模型训练/预测/验证/导出等_yolov8训练自己的数据集

https://blog.csdn.net/weixin_45921929/article/details/128673338?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522171858533416800226562079%2522%252C%2522scm%2522%253A%252220140713.130102334..%2522%257D&request_id=171858533416800226562079&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~top_positive~default-2-128673338-null-null.142%5Ev100%5Epc_search_result_base7&utm_term=yolov8%E8%AE%AD%E7%BB%83%E8%87%AA%E5%B7%B1%E7%9A%84%E6%95%B0%E6%8D%AE%E9%9B%86&spm=1018.2226.3001.4187

(如有侵权请联系本作者) 



声明

本文内容仅代表作者观点,或转载于其他网站,本站不以此文作为商业用途
如有涉及侵权,请联系本站进行删除
转载本站原创文章,请注明来源及作者。