from copy import deepcopy
import sys

+ # Problem: draw_ground_truth_landmark.py shows that the face boxes annotated in the WFLW dataset are not very accurate;
+ # some landmarks fall outside the face box, while the landmark coordinates produced by Caffe-model inference are given relative to the face box.
+ # How should we evaluate, then? The current approach is to add the face-box position to each landmark at evaluation time, turning it into an absolute coordinate in the image.
+
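+ # For example (illustrative numbers): a predicted point (0.5, 0.25), expressed relative to a face box whose
+ # top-left corner is (100, 120) and whose side is 200 px, maps to (100 + 0.5 * 200, 120 + 0.25 * 200) = (200, 170)
+ # in image coordinates.
+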
sys.path.append("./")
- from utils import compute_5points5_ION_NME, compute_98points5_ION_NME, compute_AUC, compute_FAR
+ from utils import compute_5points5_ION_NME, compute_98points5_ION_NME, compute_98points98_ION_NME, compute_AUC, \
+     compute_FAR

+ # SSH-detected face boxes
+ box_txt = "wflw_detect_test.txt"
# Ground-truth landmark file
ground_truth_txt = 'list_98pt_rect_attr_test.txt'
# Detection results
- detect_txt = "caffe_list.txt"
+ detect_txt = "checkpoint_epoch_120_caffe_list.txt"


def load_ground_data(file_list):
@@ -31,11 +38,11 @@ def load_ground_data(file_list):
        box = line[196:200]
        attribute = line[200:206]

-         landmark = np.asarray(landmark, dtype=np.float32).reshape((-1, 98, 2))
+         landmark = np.asarray(landmark, dtype=np.float32).reshape(98, 2)
        attribute = np.asarray(attribute, dtype=np.int32)
        box = np.asarray(box, dtype=np.int32)

-         landmark = landmark - (box[0], box[1])
+         # landmark = landmark - (box[0], box[1])

        filenames.append(path)
        landmarks.append(landmark)
@@ -50,11 +57,27 @@ def load_ground_data(file_list):
    return (filenames, landmarks, attributes, boxs)


- def load_detect_data(detect_txt, point=5):
+ def load_detect_data_by_ssh_detect(detect_txt, point=98):
    """
-     Load the detection data
+     Load the SSH detection data
    :return:
    """
+
+     # Face boxes: the detected landmarks need to be converted back to image coordinates using these boxes
+     boxsizes = []
+     tem = []
+     with open(box_txt) as f:
+         lines = f.readlines()
+     # every second line of box_txt holds "x y w h"; the box is squared around its centre using the longer side
+     for i in range(1, len(lines), 2):
+         x, y, w, h = list(map(int, lines[i].strip().split()))
+         d_boxsize = max(w, h)
+         boxsizes.append(d_boxsize)
+         d_center_x, d_center_y = x + w // 2, y + h // 2
+
+         d_new_x1 = d_center_x - d_boxsize // 2
+         d_new_y1 = d_center_y - d_boxsize // 2
+         tem.append([d_new_x1, d_new_y1])
+
    # Extract the image names and landmarks from the detection results
    with open(detect_txt) as f:
        detect_lines = f.readlines()
@@ -64,7 +87,55 @@ def load_detect_data(detect_txt, point=5):
        detect_filename.append(detect_lines[index].strip())
        landmark = detect_lines[index + 1:index + point + 1]
        for ldmk in landmark:
-             tem_landmark.append(np.asarray(ldmk.split(), dtype=np.float32))
+             x, y = ldmk.strip().split()
+             i = int(index / (point + 1))
+             # map the box-relative prediction to absolute image coordinates
+             x = round(float(x) * boxsizes[i] + tem[i][0], 6)
+             y = round(float(y) * boxsizes[i] + tem[i][1], 6)
+             tem_landmark.append(np.asarray([x, y], dtype=np.float32))
+         detect_landmarks.append(tem_landmark)
+     detect_landmarks = np.asarray(detect_landmarks, dtype=np.float32)
+     return detect_filename, detect_landmarks
+
+
+ def load_detect_data_by_wflw_box(detect_txt, point=98):
+     """
+     Use the WFLW landmarks as the face box, the same way the test set was built, and convert the coordinates accordingly
+     :param detect_txt:
+     :param point:
+     :return:
+     """
+     boxsizes = []
+     tem = []
+     with open(ground_truth_txt) as f:
+         lines = f.readlines()
+     for line in lines:
+         line = line.strip().split()
+         landmark = np.asarray(list(map(float, line[:196])), dtype=np.float32).reshape(-1, 2)
+         xy = np.min(landmark, axis=0).astype(np.int32)
+         zz = np.max(landmark, axis=0).astype(np.int32)
+         wh = zz - xy + 1
+
+         center = (xy + wh / 2).astype(np.int32)
+         boxsize = int(np.max(wh) * 1.2)
+         xy = center - boxsize // 2
+         x1, y1 = xy
+         boxsizes.append(boxsize)
+         tem.append([x1, y1])
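+     # Illustrative example of the box construction above (made-up numbers): if the 98 landmarks span a
+     # 100 x 80 px region centred at (200, 150), then boxsize = int(100 * 1.2) = 120 and the crop's
+     # top-left corner (x1, y1) is (200 - 60, 150 - 60) = (140, 90).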
+
+     # Extract the image names and landmarks from the detection results
+     with open(detect_txt) as f:
+         detect_lines = f.readlines()
+     detect_filename, detect_landmarks = [], []
+     for index in range(0, len(detect_lines), point + 1):
+         tem_landmark = []
+         detect_filename.append(detect_lines[index].strip())
+         landmark = detect_lines[index + 1:index + point + 1]
+         for ldmk in landmark:
+             x, y = ldmk.strip().split()
+             i = int(index / (point + 1))
+             x = round(float(x) * boxsizes[i] + tem[i][0], 6)
+             y = round(float(y) * boxsizes[i] + tem[i][1], 6)
+             tem_landmark.append(np.asarray([x, y], dtype=np.float32))
        detect_landmarks.append(tem_landmark)
    detect_landmarks = np.asarray(detect_landmarks, dtype=np.float32)
    return detect_filename, detect_landmarks
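# Note: load_detect_data_by_ssh_detect converts the predictions with the SSH detector boxes from box_txt,
# while load_detect_data_by_wflw_box rebuilds the landmark-derived square boxes (1.2x the landmark extent),
# matching how the test crops were made; main() below uses the WFLW-box variant.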
@@ -75,9 +146,8 @@ def main():
    filenames, landmarks, attributes, boxs = load_ground_data(ground_truth_txt)

    # Detection data
-     detect_filename, detect_landmarks = load_detect_data(detect_txt, point=98)
+     detect_filename, detect_landmarks = load_detect_data_by_wflw_box(detect_txt, point=98)

-     totle_data = []

    # Indices of the 6 different subsets [false, false, true, ...]
    # 200: pose      0->normal pose    1->large pose
@@ -86,6 +156,9 @@ def main():
    # 203: make-up   0->no make-up     1->make-up
    # 204: occlusion 0->no occlusion   1->occlusion
    # 205: blur      0->clear          1->blur
+
+     totle_data = []
+     # totle_data = [[range(0,10)]]
    for i in range(6):
        totle_data.append(np.where(attributes[:, i] == 1))
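    # after this loop, each entry of totle_data holds the indices of the test samples whose corresponding attribute flag is 1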
@@ -102,7 +175,7 @@ def main():
            tem_ground_truth_landmark.append(landmarks[index])
            tem_detect_landmark.append(detect_landmarks[index])

-         error_per_image, error_per_point, nme = compute_98points5_ION_NME(ground_truth_all=tem_ground_truth_landmark,
+         error_per_image, error_per_point, nme = compute_98points98_ION_NME(ground_truth_all=tem_ground_truth_landmark,
                                                                            detect_landmark_all=tem_detect_landmark)
        _, _, auc = compute_AUC(error_per_point, x_limit=0.1)
        failure_rate = compute_FAR(error_per_image, thresh=0.1)
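        # Note: x_limit=0.1 and thresh=0.1 match the standard WFLW protocol, where the AUC is taken over the
        # cumulative error curve up to a normalized error of 0.1 and the failure rate counts the images whose
        # normalized error exceeds 0.1.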