
cosanlab / py-feat / build 15090929758

19 Oct 2024 05:10AM UTC · coverage: 54.553% · first build

Push build via GitHub (web-flow): Merge pull request #228 from cosanlab/huggingface (WIP: Huggingface Integration)

702 of 1620 new or added lines in 46 files covered (43.33%)
3409 of 6249 relevant lines covered (54.55%)
3.27 hits per line

Source File: /feat/face_detectors/Retinaface/Retinaface_test.py (19.72% covered)
from __future__ import print_function
import os
import torch
import numpy as np
from feat.face_detectors.Retinaface.Retinaface_model import PriorBox, RetinaFace
from feat.face_detectors.Retinaface.Retinaface_utils import decode_landm
from feat.utils import set_torch_device
from feat.utils.io import get_resource_path
from feat.utils.image_operations import (
    convert_color_vector_to_tensor,
    py_cpu_nms,
    decode,
)

model_config = {
    "Retinaface": {
        "name": "mobilenet0.25",
        "min_sizes": [[16, 32], [64, 128], [256, 512]],
        "steps": [8, 16, 32],
        "variance": [0.1, 0.2],
        "clip": False,
        "loc_weight": 2.0,
        "gpu_train": True,
        "batch_size": 32,
        "ngpu": 1,
        "epoch": 250,
        "decay1": 190,
        "decay2": 220,
        "image_size": 640,
        "pretrain": False,
        "return_layers": {"stage1": 1, "stage2": 2, "stage3": 3},
        "in_channel": 32,
        "out_channel": 64,
    }
}
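
# Anchor layout sketch, assuming the standard RetinaFace PriorBox: each entry
# in "min_sizes" pairs with the matching stride in "steps", so a 640x640 input
# produces anchors on 80x80 (640/8), 40x40, and 20x20 feature grids, and
# "variance" rescales the regression offsets inside decode() and decode_landm().
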
class Retinaface:
    def __init__(
        self,
        cfg=model_config["Retinaface"],
        device="auto",
        resize=1,
        detection_threshold=0.5,
        nms_threshold=0.4,
        keep_top_k=750,
        top_k=5000,
        confidence_threshold=0.02,
        pretrained="local",
    ):
        """
        Performs face detection with RetinaFace.

        Args:
            cfg: (dict) model configuration; defaults to model_config["Retinaface"]
            device: (str) torch device to run inference on
            resize: (int) rescaling factor applied to decoded boxes and landmarks
            detection_threshold: (float) minimum score for a final detection
            nms_threshold: (float) IoU threshold for non-maximum suppression
            keep_top_k: (int) maximum number of detections kept after NMS
            top_k: (int) maximum number of candidates kept before NMS
            confidence_threshold: (float) minimum score for a candidate to enter NMS
            pretrained: (str) weight source, either "local" or "huggingface"
        """

        torch.set_grad_enabled(False)

        self.device = set_torch_device(device=device)
        self.cfg = cfg

        # Initialize the model
        if pretrained == "huggingface":
            self.net = RetinaFace(cfg=self.cfg, phase="test")
            # from_pretrained returns the loaded model, so assign the result
            # rather than discarding it (assumes RetinaFace provides it, e.g.
            # via huggingface_hub's PyTorchModelHubMixin)
            self.net = self.net.from_pretrained("py-feat/retinaface")
        elif pretrained == "local":
            self.net = RetinaFace(cfg=self.cfg, phase="test")
            pretrained_dict = torch.load(
                os.path.join(get_resource_path(), "mobilenet0.25_Final.pth"),
                map_location=self.device,
            )
            self.net.load_state_dict(pretrained_dict, strict=False)
        self.net = self.net.to(self.device)
        self.net = self.net.eval()

        # Set cutoff parameters
        (
            self.resize,
            self.detection_threshold,
            self.nms_threshold,
            self.keep_top_k,
            self.top_k,
            self.confidence_threshold,
        ) = (
            resize,
            detection_threshold,
            nms_threshold,
            keep_top_k,
            top_k,
            confidence_threshold,
        )
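
    # Input convention sketch: __call__ expects images as float tensors in
    # (B, C, H, W) layout; no resizing happens here, and the channel means
    # [123, 117, 104] are subtracted from the batch before the forward pass.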
    def __call__(self, img):
        """
        Run face detection on a batch of images.

        Args:
            img: (torch.Tensor) batch of shape (B, C, H, W), where B is the
                batch size, C the channels, H the height, and W the width

        Returns:
            list: one list of [xmin, ymin, xmax, ymax, score] entries per image
        """

        img = torch.sub(img, convert_color_vector_to_tensor(np.array([123, 117, 104])))

        im_height, im_width = img.shape[-2:]
        scale = torch.Tensor([im_height, im_width, im_height, im_width])
        img = img.to(self.device)
        scale = scale.to(self.device)

        loc, conf, landms = self.net(img)  # forward pass
        total_boxes = []
        for i in range(loc.shape[0]):
            tmp_box = self._calculate_boxinfo(
                im_height=im_height,
                im_width=im_width,
                loc=loc[i],
                conf=conf[i],
                landms=landms[i],
                scale=scale,
                img=img,
            )
            total_boxes.append(tmp_box)

        return total_boxes
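
    # Post-processing sketch: _calculate_boxinfo decodes box and landmark
    # regressions against PriorBox anchors, drops candidates below
    # confidence_threshold, keeps the top_k highest scores, runs py_cpu_nms,
    # truncates to keep_top_k, and finally keeps boxes above detection_threshold.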
    def _calculate_boxinfo(self, im_height, im_width, loc, conf, landms, scale, img):
        """
        Helper to decode raw network outputs into scored bounding boxes,
        filtered by confidence and non-maximum suppression.
        """

        priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
        priors = priorbox.forward()
        priors = priors.to(self.device)
        boxes = decode(loc.data.squeeze(0), priors.data, self.cfg["variance"])
        boxes = boxes * scale / self.resize
        boxes = boxes.cpu().numpy()
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(landms.data.squeeze(0), priors.data, self.cfg["variance"])
        # width/height pairs for each of the 5 facial landmarks
        scale1 = torch.Tensor([img.shape[3], img.shape[2]] * 5)
        scale1 = scale1.to(self.device)
        landms = landms * scale1 / self.resize
        landms = landms.cpu().numpy()

        # ignore low scores
        inds = np.where(scores > self.confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]

        # keep top-K before NMS
        order = scores.argsort()[::-1][: self.top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]

        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, self.nms_threshold)
        dets = dets[keep, :]
        landms = landms[keep]

        # keep top-K after NMS
        dets = dets[: self.keep_top_k, :]

        # filter using detection_threshold; rescale box size to be proportional to image size
        scale_x, scale_y = (im_width / im_height, im_height / im_width)
        det_bboxes = []
        for b in dets:
            if b[4] > self.detection_threshold:
                xmin, ymin, xmax, ymax, score = b
                det_bboxes.append(
                    [
                        xmin * scale_x,
                        ymin * scale_y,
                        xmax * scale_x,
                        ymax * scale_y,
                        score,
                    ]
                )

        return det_bboxes
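

# Usage sketch, assuming the default local weights (mobilenet0.25_Final.pth)
# are available under get_resource_path(); the image path is hypothetical.
if __name__ == "__main__":
    from torchvision.io import read_image

    detector = Retinaface(device="cpu", pretrained="local")

    # read_image returns a (C, H, W) uint8 tensor; the detector expects a
    # float (B, C, H, W) batch
    img = read_image("face.jpg").unsqueeze(0).float()

    # one list of [xmin, ymin, xmax, ymax, score] rows per image in the batch
    for boxes in detector(img):
        for xmin, ymin, xmax, ymax, score in boxes:
            print(f"face at ({xmin:.0f}, {ymin:.0f}), score={score:.2f}")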