• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

cosanlab / py-feat / 15090929758

19 Oct 2024 05:10AM UTC coverage: 54.553%. First build
15090929758

push

github

web-flow
Merge pull request #228 from cosanlab/huggingface

WIP: Huggingface Integration

702 of 1620 new or added lines in 46 files covered. (43.33%)

3409 of 6249 relevant lines covered (54.55%)

3.27 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

75.0
/feat/utils/io.py
1
"""
2
Feat utility and helper functions for inputting and outputting data.
3
"""
4

5
import os
6✔
6
import contextlib
6✔
7
import pandas as pd
6✔
8
import feat
6✔
9
from feat.utils import (
6✔
10
    FEAT_EMOTION_COLUMNS,
11
    FEAT_FACEBOX_COLUMNS,
12
    FEAT_TIME_COLUMNS,
13
    OPENFACE_ORIG_COLUMNS,
14
    openface_AU_columns,
15
    openface_2d_landmark_columns,
16
    openface_facepose_columns,
17
    openface_gaze_columns,
18
    openface_time_columns,
19
    FEAT_FACEPOSE_COLUMNS_6D,
20
    FEAT_IDENTITY_COLUMNS,
21
)
22

23

24
from torchvision.datasets.utils import download_url as tv_download_url
6✔
25
from torchvision.io import read_image, read_video
6✔
26
from torchvision.transforms.functional import to_pil_image
6✔
27
import warnings
6✔
28
import av
6✔
29
import torch
6✔
30
from torch import swapaxes
6✔
31

32
__all__ = [
6✔
33
    "get_resource_path",
34
    "get_test_data_path",
35
    "validate_input",
36
    "download_url",
37
    "read_openface",
38
    "load_pil_img",
39
]
40

41

42
def get_resource_path():
    """Return the absolute path to feat's bundled ``resources`` directory."""
    pkg_root = feat.__path__[0]
    return os.path.join(pkg_root, "resources")
45

46

47
def get_test_data_path():
    """Return the absolute path to feat's test-data directory (``tests/data``)."""
    pkg_root = feat.__path__[0]
    return os.path.join(pkg_root, "tests", "data")
50

51

52
def validate_input(inputFname):
    """
    Given a string filename or list containing string file names, ensures that the
    file(s) exist. Always returns a non-nested list, potentially containing a single element.

    Args:
        inputFname (str or list): file name(s)

    Raises:
        TypeError: if inputFname is neither a string nor a list
        FileNotFoundError: if any file name(s) don't exist

    Returns:
        list: list of file names (even if input was a str)
    """

    # Raise explicitly instead of using `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this validation.
    if not isinstance(inputFname, (str, list)):
        raise TypeError(
            "inputFname must be a string path to image or list of image paths"
        )

    # Normalize a single path to a one-element list so callers always iterate.
    if isinstance(inputFname, str):
        inputFname = [inputFname]

    for inputF in inputFname:
        if not os.path.exists(inputF):
            raise FileNotFoundError(f"File {inputF} not found.")
    return inputFname
78

79

80
def download_url(*args, **kwargs):
    """Thin wrapper around torchvision's ``download_url``.

    Accepts an extra ``verbose`` keyword (default ``True``). When ``verbose`` is
    False, stdout is redirected to ``os.devnull`` while downloading so
    torchvision's progress messages are suppressed; all other arguments are
    forwarded unchanged.
    """
    if kwargs.pop("verbose", True):
        return tv_download_url(*args, **kwargs)

    # Silence torchvision's print output by swallowing stdout for the call.
    with open(os.devnull, "w") as devnull, contextlib.redirect_stdout(devnull):
        return tv_download_url(*args, **kwargs)
91

92

93
def read_feat(fexfile):
    """Read a file produced by the Feat package's Detector into a Fex object.

    Args:
        fexfile: Path to facial expression file.

    Returns:
        Fex of processed facial expressions
    """
    data = pd.read_csv(fexfile)
    # AU columns vary by model, so detect them from the header instead of
    # assuming a fixed list.
    detected_au_columns = [col for col in data.columns if "AU" in col]
    return feat.Fex(
        data,
        filename=fexfile,
        au_columns=detected_au_columns,
        emotion_columns=FEAT_EMOTION_COLUMNS,
        facebox_columns=FEAT_FACEBOX_COLUMNS,
        landmark_columns=openface_2d_landmark_columns,
        facepose_columns=FEAT_FACEPOSE_COLUMNS_6D,
        identity_columns=FEAT_IDENTITY_COLUMNS[1:],
        detector="Feat",
        time_columns=FEAT_TIME_COLUMNS,
    )
117

118

119
def read_openface(openfacefile, features=None):
    """Read an OpenFace-exported facial expression CSV into a Fex object.

    Args:
        openfacefile: path to an OpenFace output file (comma-separated).
        features (list, optional): column names to select. If None, the
            standard OpenFace columns (``OPENFACE_ORIG_COLUMNS``: frame and
            timestamp, confidence/success, gaze vectors, head pose, 2D and 3D
            landmarks, PDM shape parameters, and AU intensity/presence
            columns) are selected when present; if those defaults are missing
            from the file, all columns are kept.

    Raises:
        KeyError: if an explicitly requested feature is not in the file.

    Returns:
        Fex: dataframe of processed facial expressions.
    """
    d = pd.read_csv(openfacefile, sep=",")
    # OpenFace pads its header names with spaces; normalize before selecting.
    d.columns = d.columns.str.strip(" ")

    # Check if features argument is passed and return only those features,
    # else select the default OpenFace column set.
    if isinstance(features, list):
        try:
            d = d[features]
        except Exception as e:
            # Chain the original error so the missing column name is visible.
            raise KeyError([features, "not in openfacefile"]) from e
    elif features is None:
        features = OPENFACE_ORIG_COLUMNS
        try:
            d = d[features]
        except KeyError:
            # Best-effort default: fall back to all columns when the file
            # doesn't contain the full standard set.
            pass
    fex = feat.Fex(
        d,
        filename=openfacefile,
        au_columns=openface_AU_columns,
        emotion_columns=None,
        facebox_columns=None,
        landmark_columns=openface_2d_landmark_columns,
        facepose_columns=openface_facepose_columns,
        gaze_columns=openface_gaze_columns,
        time_columns=openface_time_columns,
        detector="OpenFace",
    )
    fex["input"] = openfacefile
    return fex
212

213

214
def load_pil_img(file_name, frame_id):
    """Helper function to load a PIL image from a picture or video

    Args:
        file_name (str): path to file. Can be image or video
        frame_id (int): if video, load frame

    Returns:
        image: pil image instance
    """

    extension = os.path.basename(file_name).split(".")[-1].lower()
    # NOTE(review): torchvision's read_image does not decode PDFs, so "pdf"
    # in this list looks suspicious — confirm against callers before removing.
    if extension in ["jpg", "jpeg", "png", "bmp", "tiff", "pdf"]:
        pixels = read_image(file_name)  # still image: decode directly
    else:
        # Ignore UserWarning: The pts_unit 'pts' gives wrong results. Please use
        # pts_unit 'sec'. See why it's ok in this issue:
        # https://github.com/pytorch/vision/issues/1931
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            frames, _, _ = read_video(file_name, output_format="TCHW")
        pixels = frames[frame_id, :, :]
    return to_pil_image(pixels)
237

238

239
def video_to_tensor(file_name):
    """Decode an entire video file into a single uint8 tensor.

    Args:
        file_name (str): path to a video file readable by PyAV.

    Returns:
        torch.Tensor: all frames stacked along dim 0 with shape (T, C, H, W)
        in RGB channel order.
    """
    container = av.open(file_name)
    try:
        stream = container.streams.video[0]
        frames = []
        for frame in container.decode(stream):
            rgb = torch.from_numpy(frame.to_ndarray(format="rgb24"))
            # (H, W, C) -> (C, H, W); permute is the idiomatic equivalent of
            # the original double swapaxes.
            frames.append(rgb.permute(2, 0, 1))
    finally:
        # Always release the demuxer, even if decoding raises (the original
        # leaked the open container on error).
        container.close()
    return torch.stack(frames, dim=0)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc