SciKit-Surgery / scikit-surgerycalibration, build 8667492196
12 Apr 2024 07:45PM UTC, coverage: 86.321% (+0.007%) from 86.314%
Triggered by a push via GitHub (web-flow): Merge pull request #63 from SciKit-Surgery/62-reduce-warnings ("62 reduce warnings")

160 of 190 new or added lines in 3 files covered. (84.21%)

4 existing lines in 2 files now uncovered.

1729 of 2003 relevant lines covered (86.32%)

12.93 hits per line

Source file: /sksurgerycalibration/video/video_calibration_cost_functions.py (72.64% covered)
# -*- coding: utf-8 -*-

""" Cost functions for video calibration, used with scipy. """

# pylint:disable=invalid-name

import numpy as np

import sksurgerycalibration.video.video_calibration_metrics as vm
import sksurgerycalibration.video.video_calibration_utils as vu


def stereo_2d_error_for_extrinsics(x_0,
                                   common_object_points,
                                   common_left_image_points,
                                   common_right_image_points,
                                   left_intrinsics,
                                   left_distortion,
                                   right_intrinsics,
                                   right_distortion,
                                   l2r_rmat,
                                   l2r_tvec
                                   ):
    """
    Computes a vector of residuals between projected image points
    and actual image points, for left and right image. x_0 should
    contain left camera extrinsic parameters.
    """
    rvecs = []
    tvecs = []
    number_of_frames = len(common_object_points)
    for i in range(0, number_of_frames):
        rvec = np.zeros((3, 1))
        rvec[0][0] = x_0[6 * i + 0]
        rvec[1][0] = x_0[6 * i + 1]
        rvec[2][0] = x_0[6 * i + 2]
        tvec = np.zeros((3, 1))
        tvec[0][0] = x_0[6 * i + 3]
        tvec[1][0] = x_0[6 * i + 4]
        tvec[2][0] = x_0[6 * i + 5]
        rvecs.append(rvec)
        tvecs.append(tvec)

    residual = vm.compute_stereo_2d_err(l2r_rmat,
                                        l2r_tvec,
                                        common_object_points,
                                        common_left_image_points,
                                        left_intrinsics,
                                        left_distortion,
                                        common_object_points,
                                        common_right_image_points,
                                        right_intrinsics,
                                        right_distortion,
                                        rvecs,
                                        tvecs,
                                        return_residuals=True
                                        )
    return residual


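# Hypothetical usage sketch (not part of the original module): packing one
# rvec/tvec pair per frame into x_0 and handing the residual function to
# scipy.optimize.least_squares. The names initial_rvecs, initial_tvecs and
# calib are illustrative assumptions; the library's own calibration drivers
# may wire this up differently.
def _example_refine_stereo_extrinsics(initial_rvecs, initial_tvecs, calib):
    from scipy.optimize import least_squares

    # x_0 holds 6 parameters per frame: rx, ry, rz, tx, ty, tz.
    x_0 = np.concatenate([np.concatenate([r.flatten(), t.flatten()])
                          for r, t in zip(initial_rvecs, initial_tvecs)])

    result = least_squares(stereo_2d_error_for_extrinsics,
                           x_0,
                           args=(calib['common_object_points'],
                                 calib['common_left_image_points'],
                                 calib['common_right_image_points'],
                                 calib['left_intrinsics'],
                                 calib['left_distortion'],
                                 calib['right_intrinsics'],
                                 calib['right_distortion'],
                                 calib['l2r_rmat'],
                                 calib['l2r_tvec']),
                           method='lm')
    return result.x

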
def mono_proj_err_h2e(x_0,
                      object_points,
                      image_points,
                      intrinsics,
                      distortion,
                      pattern_tracking,
                      device_tracking,
                      pattern2marker_matrix
                      ):
    """
    Computes the SSE of projected
    image points and actual image points, for a single camera,
    where we have a tracked calibration pattern, and assume the
    pattern2marker transform should remain fixed. Therefore we
    only optimise hand-eye. So, x_0 should be of length 6.
    """
    assert len(x_0) == 6

    rvec = np.zeros((3, 1))
    rvec[0][0] = x_0[0]
    rvec[1][0] = x_0[1]
    rvec[2][0] = x_0[2]

    tvec = np.zeros((3, 1))
    tvec[0][0] = x_0[3]
    tvec[1][0] = x_0[4]
    tvec[2][0] = x_0[5]

    h2e = vu.extrinsic_vecs_to_matrix(rvec, tvec)

    number_of_frames = len(object_points)
    rvecs = []
    tvecs = []

    # Computes pattern2camera for each pose
    for i in range(0, number_of_frames):

        p2c = h2e @ np.linalg.inv(device_tracking[i]) @ \
              pattern_tracking[i] @ pattern2marker_matrix

        rvec, tvec = vu.extrinsic_matrix_to_vecs(p2c)

        rvecs.append(rvec)
        tvecs.append(tvec)

    proj, _ = vm.compute_mono_2d_err(object_points,
                                     image_points,
                                     rvecs,
                                     tvecs,
                                     intrinsics,
                                     distortion,
                                     return_residuals=False)
    return proj


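# Hypothetical usage sketch (not part of the original module): because this
# cost returns a scalar SSE rather than a residual vector, it fits
# scipy.optimize.minimize. initial_handeye is an assumed 4x4 matrix; the
# other arguments mirror the signature of mono_proj_err_h2e above.
def _example_refine_handeye(initial_handeye, object_points, image_points,
                            intrinsics, distortion, pattern_tracking,
                            device_tracking, pattern2marker_matrix):
    from scipy.optimize import minimize

    rvec, tvec = vu.extrinsic_matrix_to_vecs(initial_handeye)
    x_0 = np.concatenate([rvec.flatten(), tvec.flatten()])  # length 6

    result = minimize(mono_proj_err_h2e,
                      x_0,
                      args=(object_points, image_points, intrinsics,
                            distortion, pattern_tracking, device_tracking,
                            pattern2marker_matrix),
                      method='Powell')

    # Rebuild the refined hand-eye matrix from the optimised 6-vector.
    return vu.extrinsic_vecs_to_matrix(result.x[0:3].reshape((3, 1)),
                                       result.x[3:6].reshape((3, 1)))

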
def mono_proj_err_p2m_h2e(x_0,
                          object_points,
                          image_points,
                          intrinsics,
                          distortion,
                          pattern_tracking,
                          device_tracking
                          ):
    """
    Computes the SSE between projected
    image points and actual image points, for a single camera,
    where we have a tracked pattern. Both the
    pattern2marker and hand2eye are optimised.
    So, x_0 should be of length 12.
    """
    assert len(x_0) == 12

    rvec = np.zeros((3, 1))
    rvec[0][0] = x_0[0]
    rvec[1][0] = x_0[1]
    rvec[2][0] = x_0[2]

    tvec = np.zeros((3, 1))
    tvec[0][0] = x_0[3]
    tvec[1][0] = x_0[4]
    tvec[2][0] = x_0[5]

    p2m = vu.extrinsic_vecs_to_matrix(rvec, tvec)

    rvec[0][0] = x_0[6]
    rvec[1][0] = x_0[7]
    rvec[2][0] = x_0[8]

    tvec[0][0] = x_0[9]
    tvec[1][0] = x_0[10]
    tvec[2][0] = x_0[11]

    h2e = vu.extrinsic_vecs_to_matrix(rvec, tvec)

    number_of_frames = len(object_points)
    rvecs = []
    tvecs = []

    # Computes pattern2camera for each pose
    for i in range(0, number_of_frames):

        p2c = h2e @ np.linalg.inv(device_tracking[i]) \
              @ pattern_tracking[i] @ p2m

        rvec, tvec = vu.extrinsic_matrix_to_vecs(p2c)

        rvecs.append(rvec)
        tvecs.append(tvec)

    proj, _ = vm.compute_mono_2d_err(object_points,
                                     image_points,
                                     rvecs,
                                     tvecs,
                                     intrinsics,
                                     distortion,
                                     return_residuals=False)
    return proj


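# Hypothetical usage sketch (not part of the original module): the unpacking
# above fixes the parameter order for mono_proj_err_p2m_h2e, so an initial
# guess must be packed the same way: elements 0-5 are the pattern2marker
# rvec/tvec and elements 6-11 the hand2eye rvec/tvec. initial_p2m and
# initial_h2e are assumed 4x4 matrices.
def _example_pack_p2m_h2e(initial_p2m, initial_h2e):
    p2m_rvec, p2m_tvec = vu.extrinsic_matrix_to_vecs(initial_p2m)
    h2e_rvec, h2e_tvec = vu.extrinsic_matrix_to_vecs(initial_h2e)
    return np.concatenate([p2m_rvec.flatten(), p2m_tvec.flatten(),
                           h2e_rvec.flatten(), h2e_tvec.flatten()])

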
def mono_proj_err_h2e_g2w(x_0,
                          object_points,
                          image_points,
                          intrinsics,
                          distortion,
                          device_tracking
                          ):
    """
    Computes the SSE between projected
    image points and actual image points, for a single camera,
    where we have an untracked pattern. Both the
    hand2eye and grid2world are optimised.
    So, x_0 should be of length 12.
    """
    assert len(x_0) == 12

    rvec = np.zeros((3, 1))
    rvec[0][0] = x_0[0]
    rvec[1][0] = x_0[1]
    rvec[2][0] = x_0[2]

    tvec = np.zeros((3, 1))
    tvec[0][0] = x_0[3]
    tvec[1][0] = x_0[4]
    tvec[2][0] = x_0[5]

    h2e = vu.extrinsic_vecs_to_matrix(rvec, tvec)

    rvec[0][0] = x_0[6]
    rvec[1][0] = x_0[7]
    rvec[2][0] = x_0[8]

    tvec[0][0] = x_0[9]
    tvec[1][0] = x_0[10]
    tvec[2][0] = x_0[11]

    g2w = vu.extrinsic_vecs_to_matrix(rvec, tvec)

    number_of_frames = len(object_points)
    rvecs = []
    tvecs = []

    # Computes pattern2camera for each pose
    for i in range(0, number_of_frames):

        p2c = h2e @ np.linalg.inv(device_tracking[i]) @ g2w

        rvec, tvec = vu.extrinsic_matrix_to_vecs(p2c)

        rvecs.append(rvec)
        tvecs.append(tvec)

    proj, _ = vm.compute_mono_2d_err(object_points,
                                     image_points,
                                     rvecs,
                                     tvecs,
                                     intrinsics,
                                     distortion,
                                     return_residuals=False)
    return proj


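# Hypothetical usage sketch (not part of the original module): in the
# untracked-pattern case the chain reduces to
# pattern2camera = hand2eye @ inv(device_tracking) @ grid2world, so x_0
# packs hand2eye first (elements 0-5) and grid2world second (elements 6-11).
# Evaluating the cost once at the initial guess is a cheap sanity check
# before starting an optimiser. initial_h2e and initial_g2w are assumed
# 4x4 matrices.
def _example_check_h2e_g2w_start(initial_h2e, initial_g2w, object_points,
                                 image_points, intrinsics, distortion,
                                 device_tracking):
    h2e_rvec, h2e_tvec = vu.extrinsic_matrix_to_vecs(initial_h2e)
    g2w_rvec, g2w_tvec = vu.extrinsic_matrix_to_vecs(initial_g2w)
    x_0 = np.concatenate([h2e_rvec.flatten(), h2e_tvec.flatten(),
                          g2w_rvec.flatten(), g2w_tvec.flatten()])
    cost = mono_proj_err_h2e_g2w(x_0, object_points, image_points,
                                 intrinsics, distortion, device_tracking)
    return np.isfinite(cost), x_0

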
def mono_proj_err_h2e_int_dist(x_0,
                               object_points,
                               image_points,
                               device_tracking,
                               pattern_tracking,
                               pattern2marker_matrix
                               ):
    """
    Computes the SSE between projected
    image points and actual image points, for a single camera,
    where we have a tracked pattern. The handeye, intrinsics and
    distortion parameters are optimised.
    So, x_0 should be of length 6+4+5 = 15.
    """
    assert len(x_0) == 15

    rvec = np.zeros((3, 1))
    rvec[0][0] = x_0[0]
    rvec[1][0] = x_0[1]
    rvec[2][0] = x_0[2]

    tvec = np.zeros((3, 1))
    tvec[0][0] = x_0[3]
    tvec[1][0] = x_0[4]
    tvec[2][0] = x_0[5]

    h2e = vu.extrinsic_vecs_to_matrix(rvec, tvec)

    intrinsics = np.zeros((3, 3))
    intrinsics[0][0] = x_0[6]
    intrinsics[1][1] = x_0[7]
    intrinsics[0][2] = x_0[8]
    intrinsics[1][2] = x_0[9]

    distortion = np.zeros((1, 5))
    distortion[0][0] = x_0[10]
    distortion[0][1] = x_0[11]
    distortion[0][2] = x_0[12]
    distortion[0][3] = x_0[13]
    distortion[0][4] = x_0[14]

    number_of_frames = len(object_points)
    rvecs = []
    tvecs = []

    # Computes pattern2camera for each pose
    for i in range(0, number_of_frames):

        p2c = h2e @ np.linalg.inv(device_tracking[i]) @ \
              pattern_tracking[i] @ pattern2marker_matrix

        rvec, tvec = vu.extrinsic_matrix_to_vecs(p2c)

        rvecs.append(rvec)
        tvecs.append(tvec)

    proj, _ = vm.compute_mono_2d_err(object_points,
                                     image_points,
                                     rvecs,
                                     tvecs,
                                     intrinsics,
                                     distortion)
    return proj


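# Hypothetical usage sketch (not part of the original module): x_0 for
# mono_proj_err_h2e_int_dist packs the hand2eye rvec/tvec (elements 0-5),
# then fx, fy, cx, cy (elements 6-9), then five distortion coefficients
# (elements 10-14). initial_h2e (4x4), initial_intrinsics (3x3) and
# initial_distortion (1x5) are assumed inputs.
def _example_pack_h2e_int_dist(initial_h2e, initial_intrinsics,
                               initial_distortion):
    h2e_rvec, h2e_tvec = vu.extrinsic_matrix_to_vecs(initial_h2e)
    intrinsic_params = np.array([initial_intrinsics[0][0],   # fx
                                 initial_intrinsics[1][1],   # fy
                                 initial_intrinsics[0][2],   # cx
                                 initial_intrinsics[1][2]])  # cy
    return np.concatenate([h2e_rvec.flatten(),
                           h2e_tvec.flatten(),
                           intrinsic_params,
                           initial_distortion.flatten()])

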
# pylint:disable=too-many-arguments
def stereo_proj_err_h2e(x_0,
                        common_object_points,
                        common_left_image_points,
                        common_right_image_points,
                        left_intrinsics,
                        left_distortion,
                        right_intrinsics,
                        right_distortion,
                        l2r_rmat,
                        l2r_tvec,
                        device_tracking_array,
                        pattern_tracking_array,
                        left_pattern2marker_matrix=None
                        ):
    """
    Computes the SSE of projected image points
    and actual image points for left and right cameras. x_0 should contain
    the 6DOF of hand-to-eye, and if left_pattern2marker_matrix is None,
    then an additional 6DOF of pattern-to-marker. So, x_0 can be either
    length 6 or length 12.

    :param x_0:
    :param common_object_points:
    :param common_left_image_points:
    :param common_right_image_points:
    :param left_intrinsics:
    :param left_distortion:
    :param right_intrinsics:
    :param right_distortion:
    :param l2r_rmat:
    :param l2r_tvec:
    :param device_tracking_array:
    :param pattern_tracking_array:
    :param left_pattern2marker_matrix:
    :return: matrix of residuals for Levenberg-Marquardt optimisation.
    """
    rvecs = []
    tvecs = []
    number_of_frames = len(common_object_points)

    h2e_rvec = np.zeros((3, 1))
    h2e_rvec[0][0] = x_0[0]
    h2e_rvec[1][0] = x_0[1]
    h2e_rvec[2][0] = x_0[2]

    h2e_tvec = np.zeros((3, 1))
    h2e_tvec[0][0] = x_0[3]
    h2e_tvec[1][0] = x_0[4]
    h2e_tvec[2][0] = x_0[5]

    h2e = vu.extrinsic_vecs_to_matrix(h2e_rvec, h2e_tvec)

    if left_pattern2marker_matrix is None:

        p2m_rvec = np.zeros((3, 1))
        p2m_rvec[0][0] = x_0[6]
        p2m_rvec[1][0] = x_0[7]
        p2m_rvec[2][0] = x_0[8]

        p2m_tvec = np.zeros((3, 1))
        p2m_tvec[0][0] = x_0[9]
        p2m_tvec[1][0] = x_0[10]
        p2m_tvec[2][0] = x_0[11]

        p2m = vu.extrinsic_vecs_to_matrix(p2m_rvec, p2m_tvec)

    else:

        p2m = left_pattern2marker_matrix

    for i in range(0, number_of_frames):

        p2c = h2e \
              @ np.linalg.inv(device_tracking_array[i]) \
              @ pattern_tracking_array[i] \
              @ p2m

        rvec, tvec = vu.extrinsic_matrix_to_vecs(p2c)

        rvecs.append(rvec)
        tvecs.append(tvec)

    proj, _ = vm.compute_stereo_2d_err(l2r_rmat,
                                       l2r_tvec,
                                       common_object_points,
                                       common_left_image_points,
                                       left_intrinsics,
                                       left_distortion,
                                       common_object_points,
                                       common_right_image_points,
                                       right_intrinsics,
                                       right_distortion,
                                       rvecs,
                                       tvecs
                                       )
    return proj


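# Hypothetical usage sketch (not part of the original module): x_0 for
# stereo_proj_err_h2e is length 6 when a fixed left_pattern2marker_matrix is
# supplied to the cost function, and length 12 (hand2eye then pattern2marker)
# when that argument is left as None and pattern2marker is optimised too.
# initial_h2e and initial_p2m are assumed 4x4 matrices; pass initial_p2m only
# when the pattern2marker transform is to be optimised.
def _example_pack_stereo_h2e(initial_h2e, initial_p2m=None):
    h2e_rvec, h2e_tvec = vu.extrinsic_matrix_to_vecs(initial_h2e)
    x_0 = np.concatenate([h2e_rvec.flatten(), h2e_tvec.flatten()])
    if initial_p2m is not None:
        p2m_rvec, p2m_tvec = vu.extrinsic_matrix_to_vecs(initial_p2m)
        x_0 = np.concatenate([x_0, p2m_rvec.flatten(), p2m_tvec.flatten()])
    return x_0

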
def stereo_proj_err_h2e_int_dist_l2r(x_0,
                                     common_object_points,
                                     common_left_image_points,
                                     common_right_image_points,
                                     device_tracking_array,
                                     pattern_tracking_array,
                                     left_pattern2marker_matrix
                                     ):
    """
    Computes the SSE of projected image points against actual
    image points. x_0 should be 30 DOF.
    """
    h2e_rvec = np.zeros((3, 1))
    h2e_rvec[0][0] = x_0[0]
    h2e_rvec[1][0] = x_0[1]
    h2e_rvec[2][0] = x_0[2]

    h2e_tvec = np.zeros((3, 1))
    h2e_tvec[0][0] = x_0[3]
    h2e_tvec[1][0] = x_0[4]
    h2e_tvec[2][0] = x_0[5]

    h2e = vu.extrinsic_vecs_to_matrix(h2e_rvec, h2e_tvec)

    l2r_rvec = np.zeros((3, 1))
    l2r_rvec[0][0] = x_0[6]
    l2r_rvec[1][0] = x_0[7]
    l2r_rvec[2][0] = x_0[8]

    l2r_tvec = np.zeros((3, 1))
    l2r_tvec[0][0] = x_0[9]
    l2r_tvec[1][0] = x_0[10]
    l2r_tvec[2][0] = x_0[11]

    l2r = vu.extrinsic_vecs_to_matrix(l2r_rvec, l2r_tvec)

    left_intrinsics = np.zeros((3, 3))
    left_intrinsics[0][0] = x_0[12]
    left_intrinsics[1][1] = x_0[13]
    left_intrinsics[0][2] = x_0[14]
    left_intrinsics[1][2] = x_0[15]

    left_distortion = np.zeros((1, 5))
    left_distortion[0][0] = x_0[16]
    left_distortion[0][1] = x_0[17]
    left_distortion[0][2] = x_0[18]
    left_distortion[0][3] = x_0[19]
    left_distortion[0][4] = x_0[20]

    right_intrinsics = np.zeros((3, 3))
    right_intrinsics[0][0] = x_0[21]
    right_intrinsics[1][1] = x_0[22]
    right_intrinsics[0][2] = x_0[23]
    right_intrinsics[1][2] = x_0[24]

    right_distortion = np.zeros((1, 5))
    right_distortion[0][0] = x_0[25]
    right_distortion[0][1] = x_0[26]
    right_distortion[0][2] = x_0[27]
    right_distortion[0][3] = x_0[28]
    right_distortion[0][4] = x_0[29]

    rvecs = []
    tvecs = []
    number_of_frames = len(common_object_points)

    for i in range(0, number_of_frames):

        p2c = h2e \
              @ np.linalg.inv(device_tracking_array[i]) \
              @ pattern_tracking_array[i] \
              @ left_pattern2marker_matrix

        rvec, tvec = vu.extrinsic_matrix_to_vecs(p2c)

        rvecs.append(rvec)
        tvecs.append(tvec)

    proj, _ = vm.compute_stereo_2d_err(l2r[0:3, 0:3],
                                       l2r[0:3, 3],
                                       common_object_points,
                                       common_left_image_points,
                                       left_intrinsics,
                                       left_distortion,
                                       common_object_points,
                                       common_right_image_points,
                                       right_intrinsics,
                                       right_distortion,
                                       rvecs,
                                       tvecs
                                       )
    return proj
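

# Hypothetical usage sketch (not part of the original module): the 30
# parameters of x_0 for stereo_proj_err_h2e_int_dist_l2r, packed in the
# order the function unpacks them. Initial values are assumed to come from
# an earlier calibration: h2e and l2r as 4x4 matrices, intrinsics as 3x3
# matrices and distortion as 1x5 arrays.
def _example_pack_h2e_int_dist_l2r(initial_h2e, initial_l2r,
                                   left_intrinsics, left_distortion,
                                   right_intrinsics, right_distortion):
    h2e_rvec, h2e_tvec = vu.extrinsic_matrix_to_vecs(initial_h2e)
    l2r_rvec, l2r_tvec = vu.extrinsic_matrix_to_vecs(initial_l2r)
    return np.concatenate([
        h2e_rvec.flatten(), h2e_tvec.flatten(),                      # 0-5:  hand2eye
        l2r_rvec.flatten(), l2r_tvec.flatten(),                      # 6-11: left-to-right
        np.array([left_intrinsics[0][0], left_intrinsics[1][1],
                  left_intrinsics[0][2], left_intrinsics[1][2]]),    # 12-15: left fx, fy, cx, cy
        left_distortion.flatten(),                                   # 16-20: left distortion
        np.array([right_intrinsics[0][0], right_intrinsics[1][1],
                  right_intrinsics[0][2], right_intrinsics[1][2]]),  # 21-24: right fx, fy, cx, cy
        right_distortion.flatten()])                                 # 25-29: right distortion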