目录
环境配置
其实还是按照detectron2官方文档来的:
Installation — detectron2 0.6 documentation
准备torch环境
我这里硬件是2060 ubuntu18.04 cuda是最新的11.6,torch安装要按照torch官网给的命令装,官网给的命令可以帮你选好相关组件的版本:
我这里命令最终是这样:
因为我这里是11.6,只能选最高的CUDA11.3,命令拿出来在conda虚拟环境执行安装即可。
pip3 install torch==1.10.2+cu113 torchvision==0.11.3+cu113 torchaudio==0.10.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
detectron2环境安装配置
git clone https://github.com/facebookresearch/detectron2.git
#不要进入detectron2目录
python -m pip install -e detectron2
pip install opencv-python
有一次报:No such file or directory: ':/usr/local/cuda-11.1:/usr/local/cuda-11.1/bin/nvcc'(看起来是把整个 PATH 串当成了 nvcc 路径)
虽然之前/etc/profile和~/.bashrc中都配置了并且另外一个虚拟环境正常使用detectron2的,说明cuda没问题,但是这里突然报找不到cuda有些懵。
我这样执行了一下命令可以安装了:
#执行一下
export CUDA_HOME=/usr/local/cuda-11.1
测试demo.py
因为我们要训练faster rcnn模型,先下载对应的预训练模型先测试一下,注意对应一下config-file,模型下载地址在detectron2 的github中:
detectron2/MODEL_ZOO.md at main · facebookresearch/detectron2 · GitHub
测试命令:
cd demo
python demo.py --config-file ../configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml --input 2.jpg --opts MODEL.WEIGHTS ../model_final_280758.pkl
我这里ssh远程的,不能imshow,因此屏蔽了相关show代码,如不屏蔽会报QT平台不支持问题。
数据集准备
我这里用之前labelimg打好标签的数据集:
labelimg制作VOC数据集并用yolov5训练目标检测模型_RayChiu757374816的博客-CSDN博客_labelimg yolo格式
然后将这种VOC格式的转为COCO格式的:
import os
import random
import shutil
import sys
import json
import glob
import xml.etree.ElementTree as ET
"""
You only need to set the following three parts
1.val_files_num : num of validation samples from your all samples
2.test_files_num = num of test samples from your all samples
3.voc_annotations : path to your VOC dataset Annotations
"""
# 只需要该下边四个值即可
val_files_num = 100
test_files_num = 100
voc_annotations = '../VOCData/Annotations/' # remember to modify the path
main_path = '../'
split = voc_annotations.split('/')
coco_name = split[-3]
del split[-3]
del split[-2]
del split[-1]
del split[0]
# print(split)
for i in split:
main_path += '/' + i
main_path = main_path + '/'
# print(main_path)
coco_path = os.path.join(main_path, coco_name + '_COCO/')
coco_images = os.path.join(main_path, coco_name + '_COCO/images')
coco_json_annotations = os.path.join(main_path, coco_name + '_COCO/annotations/')
xml_val = os.path.join(main_path, 'xml', 'xml_val/')
xml_test = os.path.join(main_path, 'xml/', 'xml_test/')
xml_train = os.path.join(main_path, 'xml/', 'xml_train/')
voc_images = os.path.join(main_path, coco_name, 'JPEGImages/')
# from https://www.php.cn/python-tutorials-424348.html
# from https://www.php.cn/python-tutorials-424348.html
def mkdir(path):
    """Create *path* (including missing parents) if it does not exist.

    Returns:
        True when the folder was created, False when it already existed.

    Uses EAFP (try/makedirs/except) instead of the original
    exists()-then-makedirs() check, which had a race window between the
    check and the creation.
    """
    path = path.strip()
    # Drop a trailing Windows-style separator, as the original did.
    path = path.rstrip("\\")
    try:
        os.makedirs(path)
    except FileExistsError:
        print(path + ' ----- folder existed')
        return False
    print(path + ' ----- folder created')
    return True
# foler to make, please enter full path
# foler to make, please enter full path
# Create the COCO-style output layout and the per-split XML working dirs.
mkdir(coco_path)
mkdir(coco_images)
mkdir(coco_json_annotations)
mkdir(xml_val)
mkdir(xml_test)
mkdir(xml_train)

# Copy the VOC images into the COCO images folder.
# (Fixed: os.path.join was being called with a single pre-concatenated
# argument, defeating its purpose; pass the components separately.)
for name in os.listdir(voc_images):
    shutil.copy(os.path.join(voc_images, name), coco_images)

# Copy all VOC annotation XMLs into the training pool; the val/test
# splits are moved out of this folder by the loops below.
for name in os.listdir(voc_annotations):
    shutil.copy(os.path.join(voc_annotations, name), xml_train)
print("\n\n %s files copied to %s" % (val_files_num, xml_val))
for i in range(val_files_num):
if len(os.listdir(xml_train)) > 0:
random_file = random.choice(os.listdir(xml_train))
# print("%d) %s"%(i+1,random_file))
source_file = "%s/%s" % (xml_train, random_file)
if random_file not in os.listdir(xml_val):
shutil.move(source_file, xml_val)
else:
random_file = random.choice(os.listdir(xml_train))
source_file = "%s/%s" % (xml_train, random_file)
shutil.move(source_file, xml_val)
else:
print('The folders are empty, please make sure there are enough %d file to move' % (val_files_num))
break
# Move `test_files_num` randomly chosen XMLs from the training pool into the
# test folder (same collision-safe selection as the validation split).
for _ in range(test_files_num):
    remaining = os.listdir(xml_train)
    if not remaining:
        # Bug fix: the original message interpolated val_files_num here.
        print('The folders are empty, please make sure there are enough %d file to move' % (test_files_num))
        break
    existing = set(os.listdir(xml_test))
    candidates = [f for f in remaining if f not in existing]
    if not candidates:
        candidates = remaining  # last resort: behave like the original overwrite
    random_file = random.choice(candidates)
    shutil.move("%s/%s" % (xml_train, random_file), xml_test)

print("\n\n" + "*" * 27 + "[ Done ! Go check your file ]" + "*" * 28)
# !/usr/bin/python
# pip install lxml
# COCO annotation ids are conventionally 1-based: first bbox id to assign.
START_BOUNDING_BOX_ID = 1
# When None, the category->id mapping is generated from the XML files
# (sorted class names, ids starting at 0 — see get_categories below).
PRE_DEFINE_CATEGORIES = None
# If necessary, pre-define category and its id
# PRE_DEFINE_CATEGORIES = {"aeroplane": 1, "bicycle": 2, "bird": 3, "boat": 4,
# "bottle":5, "bus": 6, "car": 7, "cat": 8, "chair": 9,
# "cow": 10, "diningtable": 11, "dog": 12, "horse": 13,
# "motorbike": 14, "person": 15, "pottedplant": 16,
# "sheep": 17, "sofa": 18, "train": 19, "tvmonitor": 20}
"""
main code below are from
https://github.com/Tony607/voc2coco
"""
def get(root, name):
    """Return all matching subelements of *root* named *name*."""
    return root.findall(name)
def get_and_check(root, name, length):
    """Find subelements *name* under *root* and validate how many matched.

    Raises ValueError when nothing is found, or when *length* > 0 and the
    match count differs from *length*.  Returns the single element when
    *length* is exactly 1, otherwise the list of matches.
    """
    found = root.findall(name)
    if not found:
        raise ValueError("Can not find %s in %s." % (name, root.tag))
    if 0 < length != len(found):
        raise ValueError(
            "The size of %s is supposed to be %d, but is %d."
            % (name, length, len(found))
        )
    return found[0] if length == 1 else found
def get_filename_as_int(filename):
    """Return the numeric stem of *filename* as an int.

    Example: 'imgs\\0123.jpg' -> 123.  Raises ValueError when the stem is
    not an integer (COCO image ids are derived from the filename here).
    """
    # Normalize Windows separators so basename() works on either style.
    filename = filename.replace("\\", "/")
    stem = os.path.splitext(os.path.basename(filename))[0]
    try:
        return int(stem)
    except ValueError:
        # Narrowed from the original bare `except:`, which swallowed every
        # exception type (even KeyboardInterrupt) and hid the real cause.
        raise ValueError("Filename %s is supposed to be an integer." % (stem))
def get_categories(xml_files):
    """Generate category name to id mapping from a list of xml files.

    Arguments:
        xml_files {list} -- A list of xml file paths.
    Returns:
        dict -- category name to id mapping (sorted names, ids from 0).
    """
    names = set()
    for path in xml_files:
        root = ET.parse(path).getroot()
        # The first child of each <object> element is its <name> tag.
        names.update(obj[0].text for obj in root.findall("object"))
    return {name: idx for idx, name in enumerate(sorted(names))}
def convert(xml_files, json_file):
    """Convert VOC XML annotation files into a single COCO-format JSON file.

    Arguments:
        xml_files {list} -- paths of the VOC XML files to convert.
        json_file {str}  -- output path for the COCO JSON.

    Image ids come from the numeric file stem (see get_filename_as_int), so
    image filenames must be integers.  Boxes are converted from VOC's
    1-based corner coordinates to COCO's [x, y, width, height].

    Fixes over the original: the output file is written via a context
    manager (handle was never closed on error), makedirs no longer fails
    when json_file has no directory component, and a pre-defined category
    dict is copied instead of being mutated across calls.
    """
    json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
    if PRE_DEFINE_CATEGORIES is not None:
        # Copy so that discovering an unknown category below does not
        # mutate the module-level mapping between convert() calls.
        categories = dict(PRE_DEFINE_CATEGORIES)
    else:
        categories = get_categories(xml_files)
    bnd_id = START_BOUNDING_BOX_ID
    for xml_file in xml_files:
        root = ET.parse(xml_file).getroot()
        path = get(root, "path")
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, "filename", 1).text
        else:
            raise ValueError("%d paths found in %s" % (len(path), xml_file))
        ## The filename must be a number
        image_id = get_filename_as_int(filename)
        size = get_and_check(root, "size", 1)
        width = int(get_and_check(size, "width", 1).text)
        height = int(get_and_check(size, "height", 1).text)
        json_dict["images"].append({
            "file_name": filename,
            "height": height,
            "width": width,
            "id": image_id,
        })
        ## Currently we do not support segmentation.
        for obj in get(root, "object"):
            category = get_and_check(obj, "name", 1).text
            if category not in categories:
                # Category absent from the mapping: append it with the next id.
                categories[category] = len(categories)
            category_id = categories[category]
            bndbox = get_and_check(obj, "bndbox", 1)
            # VOC coordinates are 1-based; shift the top-left corner to 0-based.
            xmin = int(get_and_check(bndbox, "xmin", 1).text) - 1
            ymin = int(get_and_check(bndbox, "ymin", 1).text) - 1
            xmax = int(get_and_check(bndbox, "xmax", 1).text)
            ymax = int(get_and_check(bndbox, "ymax", 1).text)
            assert xmax > xmin
            assert ymax > ymin
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            json_dict["annotations"].append({
                "area": o_width * o_height,
                "iscrowd": 0,
                "image_id": image_id,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "id": bnd_id,
                "ignore": 0,
                "segmentation": [],
            })
            bnd_id = bnd_id + 1
    for cate, cid in categories.items():
        json_dict["categories"].append({"supercategory": "none", "id": cid, "name": cate})
    # dirname is '' for a bare filename, and makedirs('') raises — guard it.
    out_dir = os.path.dirname(json_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(json_file, "w") as json_fp:
        json.dump(json_dict, json_fp)
# Convert each split's XML folder into its COCO annotation json.
for xml_dir, json_name in (
    (xml_val, 'val2017.json'),
    (xml_test, 'test2017.json'),
    (xml_train, 'train2017.json'),
):
    split_files = glob.glob(os.path.join(xml_dir, "*.xml"))
    convert(split_files, coco_json_annotations + json_name)
训练
训练脚本:
fruitsnuts_data.py
from detectron2.data.datasets import register_coco_instances
from detectron2.data import MetadataCatalog
import os
# Class names; order must match the category ids in the annotation jsons.
# Avoid non-ASCII names here: the visualizer cannot render Chinese labels.
CLASS_NAMES = ["defect", "normal"]
# Dataset root directory.
DATASET_ROOT = r'./VOCData_COCO/'
# Annotation folder.
ANN_ROOT = os.path.join(DATASET_ROOT, 'annotations')
# All three splits share the same image folder.
TRAIN_PATH = os.path.join(DATASET_ROOT, 'images')
VAL_PATH = os.path.join(DATASET_ROOT, 'images')
TEST_PATH = os.path.join(DATASET_ROOT, 'images')
# Per-split COCO annotation files.
TRAIN_JSON = os.path.join(ANN_ROOT, 'train2017.json')
VAL_JSON = os.path.join(ANN_ROOT, 'val2017.json')
TEST_JSON = os.path.join(ANN_ROOT, 'test2017.json')

# Register each split with detectron2 and attach its metadata
# (thing_classes for visualization, 'coco' evaluator for evaluation).
for _name, _json, _root in (
    ("my_train", TRAIN_JSON, TRAIN_PATH),
    ("my_val", VAL_JSON, VAL_PATH),
    ("my_test", TEST_JSON, TEST_PATH),
):
    register_coco_instances(_name, {}, _json, _root)
    MetadataCatalog.get(_name).set(thing_classes=CLASS_NAMES,
                                   evaluator_type='coco',
                                   json_file=_json,
                                   image_root=_root)
train.py
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.logger import setup_logger
import os
setup_logger()  # configure detectron2's default logging before training starts
import fruitsnuts_data  # side-effect import: registers the my_train/my_val/my_test datasets
if __name__ == "__main__":
    # Build the training config from the bundled Faster R-CNN R50-FPN yaml.
    cfg = get_cfg()
    cfg.merge_from_file(
        "./configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("my_train",)
    # NOTE(review): DATASETS.VAL is not a key detectron2 itself reads;
    # evaluation during training uses DATASETS.TEST — confirm this key
    # is intentional.
    cfg.DATASETS.VAL = ("my_val",)
    cfg.DATASETS.TEST = ("my_test",)  # may be left empty if unused
    cfg.DATALOADER.NUM_WORKERS = 2
    # Pre-trained weights file; download from the detectron2 model zoo
    # if not present locally.
    cfg.MODEL.WEIGHTS = r"./model_final_280758.pkl"
    # Or resume from a checkpoint of your own:
    # cfg.MODEL.WEIGHTS = "../tools/output/model_0003191.pth"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.0025
    # Maximum number of training iterations.
    cfg.SOLVER.MAX_ITER = (20000)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (128)  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2  # matches CLASS_NAMES = ["defect", "normal"]
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
命令:
python utilSelf/train.py
测试脚本:
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
import random
import datetime
import time
import os
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode
def Predict():
    """Run the trained Faster R-CNN on one image and save the visualization.

    The training set is registered only so the visualizer can recover the
    class-name metadata; inference runs on a single hard-coded image path.
    """
    # NOTE(review): register_coco_instances raises if "custom" is already
    # registered, so Predict() can only be called once per process.
    register_coco_instances("custom", {}, "./VOCData_COCO/annotations/train2017.json", "./VOCData_COCO/images")
    custom_metadata = MetadataCatalog.get("custom")
    DatasetCatalog.get("custom")
    # im = cv2.imread("./VOCData_COCO/images/1.bmp")
    im = cv2.imread("./utilSelf/002.jpg")
    print(type(im))
    print(im.shape)
    cfg = get_cfg()
    cfg.merge_from_file("configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
    cfg.DATASETS.TEST = ("custom", )
    # Weights produced by train.py land in cfg.OUTPUT_DIR ("./output" by default).
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    # Only keep detections with confidence >= 0.95.
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.95
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (
        128
    )
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    predictor = DefaultPredictor(cfg)
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1],
                   metadata=custom_metadata,
                   scale=1,
                   instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels
                   )
    # print(outputs["instances"])
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    # Headless (ssh) environment: imshow would fail with a Qt platform
    # error, so the rendering is written to disk instead.
    # cv2.imshow('Result',v.get_image()[:, :, ::-1])
    # cv2.waitKey()
    cv2.imwrite('Result.jpg',v.get_image()[:, :, ::-1])


if __name__ == "__main__":
    Predict()