• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

cosanlab / py-feat / 15090929758

19 Oct 2024 05:10AM UTC coverage: 54.553%. First build
15090929758

push

github

web-flow
Merge pull request #228 from cosanlab/huggingface

WIP: Huggingface Integration

702 of 1620 new or added lines in 46 files covered. (43.33%)

3409 of 6249 relevant lines covered (54.55%)

3.27 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

38.3
/feat/facepose_detectors/img2pose/img2pose_model.py
1
import torch
6✔
2
import torch.nn as nn
6✔
3
from torch.nn import DataParallel
6✔
4
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
6✔
5
from .deps.models import FasterDoFRCNN
6✔
6
from feat.utils import set_torch_device
6✔
7
import warnings
6✔
8
from huggingface_hub import PyTorchModelHubMixin
6✔
9

10

11
"""
Model adapted from https://github.com/vitoralbiero/img2pose
"""
14

15

16
class WrappedModel(nn.Module, PyTorchModelHubMixin):
    """Thin ``nn.Module`` wrapper that exposes an inner model as ``self.module``.

    NOTE(review): the ``.module`` attribute appears intended to mirror the
    layout ``DataParallel`` produces, so checkpoints saved either way load
    interchangeably — confirm against the checkpoint-loading code.
    ``PyTorchModelHubMixin`` adds Hugging Face Hub save/load support.
    """

    def __init__(self, module):
        super().__init__()
        # Keep a reference to the wrapped model under the DataParallel-style name.
        self.module = module

    def forward(self, images, targets=None):
        # Pure delegation: the wrapper adds no computation of its own.
        out = self.module(images, targets)
        return out
23

24

25
class img2poseModel:
    """Face pose estimation model adapted from https://github.com/vitoralbiero/img2pose.

    Builds a ``FasterDoFRCNN`` detector on top of a ResNet-FPN backbone and
    handles device placement: the detector is wrapped in ``DataParallel`` on
    GPU and used directly on CPU.
    """

    def __init__(
        self,
        depth,
        min_size,
        max_size,
        device="auto",
        pose_mean=None,
        pose_stddev=None,
        threed_68_points=None,
        rpn_pre_nms_top_n_test=6000,  # 500
        rpn_post_nms_top_n_test=1000,  # 10,
        bbox_x_factor=1.1,
        bbox_y_factor=1.1,
        expand_forehead=0.3,
    ):
        """Construct the detector and move it to the resolved device.

        Args:
            depth (int): ResNet backbone depth; selects ``f"resnet{depth}"``.
            min_size: minimum image size passed to the detector.
            max_size: maximum image size passed to the detector.
            device (str): "auto", "cpu", or a CUDA spec; resolved by
                ``set_torch_device``.
            pose_mean: optional pose normalization mean (converted to a tensor).
            pose_stddev: optional pose normalization stddev (converted to a
                tensor; only converted when ``pose_mean`` is provided).
            threed_68_points: optional 3D reference landmarks (converted to a
                tensor).
            rpn_pre_nms_top_n_test: RPN proposal count before NMS at test time.
            rpn_post_nms_top_n_test: RPN proposal count after NMS at test time.
            bbox_x_factor: horizontal bounding-box expansion factor.
            bbox_y_factor: vertical bounding-box expansion factor.
            expand_forehead: forehead expansion factor for the detected box.
        """
        self.depth = depth
        self.min_size = min_size
        self.max_size = max_size

        self.device = set_torch_device(device)

        # torchvision >= 0.13 emits a UserWarning when 'weights' is not a
        # weight enum; weights are loaded separately, so suppress it here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            backbone = resnet_fpn_backbone(
                backbone_name=f"resnet{self.depth}", weights=None
            )

        if pose_mean is not None:
            pose_mean = torch.tensor(pose_mean)
            pose_stddev = torch.tensor(pose_stddev)

        if threed_68_points is not None:
            threed_68_points = torch.tensor(threed_68_points)

        # Create the feature pyramid network based detector.
        self.fpn_model = FasterDoFRCNN(
            backbone=backbone,
            num_classes=2,
            min_size=self.min_size,
            max_size=self.max_size,
            pose_mean=pose_mean,
            pose_stddev=pose_stddev,
            threed_68_points=threed_68_points,
            rpn_pre_nms_top_n_test=rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_test=rpn_post_nms_top_n_test,
            bbox_x_factor=bbox_x_factor,
            bbox_y_factor=bbox_y_factor,
            expand_forehead=expand_forehead,
        )

        # On GPU, parallelize across available devices; on CPU use the model
        # directly (a WrappedModel was used here previously — see WrappedModel).
        if self.device.type != "cpu":
            self.fpn_model = DataParallel(self.fpn_model)
        self.fpn_model = self.fpn_model.to(self.device)

    def evaluate(self):
        """Put the detector in eval (inference) mode."""
        self.fpn_model.eval()

    def train(self):
        """Put the detector in training mode."""
        self.fpn_model.train()

    def run_model(self, imgs, targets=None):
        """Run the detector on ``imgs``.

        Args:
            imgs: batch of input images.
            targets: optional ground-truth targets; when given, they are
                forwarded to the detector (training-style call, as in the
                upstream img2pose implementation).

        Returns:
            The detector output (losses when ``targets`` is given, predictions
            otherwise).
        """
        # BUGFIX: run_model previously accepted only `imgs`, so forward()'s
        # call run_model(imgs, targets) raised TypeError. Accept an optional
        # `targets` while keeping the inference call signature unchanged.
        if targets is None:
            return self.fpn_model(imgs)
        return self.fpn_model(imgs, targets)

    def forward(self, imgs, targets):
        """Training-style forward pass; returns the detector's loss output."""
        losses = self.run_model(imgs, targets)
        return losses

    def predict(self, imgs):
        """Run inference on ``imgs`` with gradients disabled.

        The model must already be in eval mode (see ``evaluate``).
        """
        assert self.fpn_model.training is False

        with torch.no_grad():
            predictions = self.run_model(imgs)

        return predictions
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc