import glob
import os

import cv2
import numpy as np
import torch
import torchvision
from PIL import Image
from shapely.geometry import Polygon
from torchvision import transforms

from grasp_detect_singlebox import *

# Pre-processing for input images: convert a PIL image to a float CHW tensor in [0, 1].
transform = torchvision.transforms.Compose([
    transforms.ToTensor(),
])
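# Illustrative note (not part of the original script): for a 640x480 RGB image,
# transform(img) yields a tensor of shape (3, 480, 640) with values in [0, 1];
# the unsqueeze(dim=0) call further below adds the batch dimension the network expects.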


def grasp_to_point(predict_grasp, radian=False):
    """Convert a grasp (x, y, w, h, theta) into the four corner points of its
    rotated rectangle. If `radian` is True, theta is given in radians and is
    converted to degrees before being passed to OpenCV."""
    x = predict_grasp[0].item()
    y = predict_grasp[1].item()
    w = predict_grasp[2].item()
    h = predict_grasp[3].item()
    theta = predict_grasp[4].item()
    center = (x, y)
    size = (w, h)
    if radian:
        angle = theta / 3.1415927 * 180
    else:
        angle = theta
    # cv2.boxPoints expands ((cx, cy), (w, h), angle_in_degrees) into a (4, 2)
    # array of corner coordinates.
    box = cv2.boxPoints((center, size, angle))
    return box
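# Illustrative example (not executed): cv2.boxPoints(((50, 50), (20, 10), 0))
# returns the four corners of an axis-aligned 20x10 box centred at (50, 50),
# i.e. the points (40, 45), (60, 45), (60, 55) and (40, 55) in OpenCV's corner order.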


def intersection(g, p):
    """Jaccard index (intersection over union) of two grasp rectangles given as
    corner points, either flat 8-vectors or (4, 2) arrays."""
    g = np.asarray(g)
    p = np.asarray(p)
    g = Polygon(g[:8].reshape((4, 2)))
    p = Polygon(p[:8].reshape((4, 2)))
    if not g.is_valid or not p.is_valid:
        return 0
    inter = g.intersection(p).area
    union = g.area + p.area - inter
    if union == 0:
        return 0
    return inter / union
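# Worked example (illustrative only): for the unit square (0,0)-(1,1) and the same
# square shifted right by 0.5, the overlap area is 0.5 and the union is 1.5, so
# intersection(...) returns 0.5 / 1.5 = 1/3.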


def judge_availabel(predict_grasp, ground_truth):
    """Rectangle-metric check of one prediction against one ground-truth grasp:
    the prediction counts as valid when the Jaccard index is at least 0.25 and
    the orientation difference is at most 30 degrees. The predicted angle is
    interpreted as degrees, the ground-truth angle as radians."""
    predict_point = grasp_to_point(predict_grasp)
    ground_truth_point = grasp_to_point(ground_truth, radian=True)
    jaccard = intersection(ground_truth_point, predict_point)
    theta_predict = predict_grasp[-1].item()
    theta_ground_truth = ground_truth[-1] / 3.1415927 * 180

    # Normalise both angles into [0, 180).
    if theta_predict >= 180:
        theta_predict -= 180
    if theta_ground_truth >= 180:
        theta_ground_truth -= 180
    if theta_predict < 0:
        theta_predict += 180
    if theta_ground_truth < 0:
        theta_ground_truth += 180

    distance_of_theta1 = abs(theta_predict - theta_ground_truth)

    # Grasps are symmetric under a 180-degree rotation, so also compare after
    # mapping angles above 90 degrees into (-90, 90].
    if theta_predict > 90:
        theta_predict -= 180
    if theta_ground_truth > 90:
        theta_ground_truth -= 180

    distance_of_theta2 = abs(theta_predict - theta_ground_truth)

    distance_of_theta = min(distance_of_theta1, distance_of_theta2)

    if jaccard >= 0.25 and distance_of_theta <= 30:
        available = 1
    else:
        available = 0
    return available
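# Worked example (illustrative only): a predicted angle of 170 degrees against a
# ground truth of 5 degrees gives a raw difference of 165 degrees, but after the
# symmetry mapping the prediction becomes -10 degrees, so the effective difference
# is 15 degrees and the orientation test passes.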


def judge_picture(picture_path, text_path):
    """Run the detector on one image and return 1 if the predicted grasp matches
    any ground-truth grasp in the corresponding label file, else 0."""
    img = Image.open(picture_path)
    img = img.convert('RGB')
    img = transform(img)
    img = img.unsqueeze(dim=0)
    img = img.to(device)
    predict_grasp = inference_single_image(img)
    # Drop the leading element of the prediction, keeping (x, y, w, h, theta).
    predict_grasp = predict_grasp[1:]

    # ndmin=2 keeps a single-line label file as a 2-D array so the loop below
    # still iterates over rows.
    ground_truth = np.loadtxt(text_path, ndmin=2)
    flag = 0
    for i in range(len(ground_truth)):
        if judge_availabel(predict_grasp, ground_truth[i]) == 1:
            flag = 1
            break
    return flag
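# The label files are assumed to contain one grasp per row in the form
# "x y w h theta", with theta in radians, which is how grasp_to_point(..., radian=True)
# and judge_availabel interpret them.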


def evaluate_grasp(picture_dir_path, text_dir_path):
    """Evaluate every image/label pair in the two directories and report accuracy."""
    # Assumes images and labels pair up one-to-one after sorting by file name.
    text_path_s = glob.glob(os.path.join(text_dir_path, '*.txt'))
    text_path_s.sort(key=lambda p: os.path.splitext(os.path.basename(p))[0])
    img_path_s = glob.glob(os.path.join(picture_dir_path, '*.png'))
    img_path_s.sort(key=lambda p: os.path.splitext(os.path.basename(p))[0])
    yes = 0
    total = 0
    for i in range(len(text_path_s)):
        available = judge_picture(img_path_s[i], text_path_s[i])
        total += 1
        if available == 1:
            yes += 1
        else:
            print(os.path.basename(img_path_s[i]) + ': False')
    print('Total images evaluated: ' + str(total))
    print('Images with a valid grasp: ' + str(yes))
    print('Accuracy:', yes / total)
    return yes / total


if __name__ == '__main__':
    # Trained weights and the evaluation image/label directories.
    weights_path = r'weights\epoch6_loss_8.045684943666645.pth'
    picture_dir_path = r'J:\experiment_data\0.1 test\single-simple\img'
    text_dir_path = r'J:\experiment_data\0.1 test\single-simple\label'

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    multi_GPU = False

    # Single-image inference callable provided by grasp_detect_singlebox.
    inference_single_image = DetectSingleImage(device=device, weights_path=weights_path)

    evaluate_grasp(picture_dir_path, text_dir_path)