SciKit-Surgery / scikit-surgerycalibration, Coveralls build 8667492196 (push, GitHub, web-flow)
12 Apr 2024 07:45PM UTC -- coverage: 86.321% (+0.007%) from 86.314%
Commit: Merge pull request #63 from SciKit-Surgery/62-reduce-warnings (62 reduce warnings)

160 of 190 new or added lines in 3 files covered (84.21%).
4 existing lines in 2 files now uncovered.
1729 of 2003 relevant lines covered (86.32%), 12.93 hits per line.

Source file: /sksurgerycalibration/video/video_calibration_wrapper.py -- 88.08% covered
# -*- coding: utf-8 -*-

""" Video Calibration functions, that wrap OpenCV functions mainly. """

# pylint:disable=too-many-lines

import copy
import logging
from typing import List

import cv2
import numpy as np
import sksurgerycore.transforms.matrix as skcm
from scipy.optimize import least_squares
from scipy.optimize import minimize

import sksurgerycalibration.video.video_calibration_cost_functions as vcf
import sksurgerycalibration.video.video_calibration_hand_eye as he
import sksurgerycalibration.video.video_calibration_metrics as vm
import sksurgerycalibration.video.video_calibration_utils as vu

LOGGER = logging.getLogger(__name__)


def mono_video_calibration(object_points, image_points, image_size, flags=0):
    """
    Calibrates a video camera using Zhang's 2000 method, as implemented in
    OpenCV. We wrap it here, so we have a place to add extra validation code,
    and a space for documentation. The aim is to check everything before
    we pass it to OpenCV, and raise Exceptions consistently for any error
    we can detect before we pass it to OpenCV, as OpenCV just dies
    without throwing exceptions.

      - N = number of images
      - M = number of points for that image
      - rvecs = list of 1x3 Rodrigues rotation parameters
      - tvecs = list of 3x1 translation vectors
      - camera_matrix = [3x3] ndarray containing fx, fy, cx, cy
      - dist_coeffs = [1x5] ndarray, containing distortion coefficients

    :param object_points: Vector (N) of Vector (M) of 1x3 points of type float
    :param image_points: Vector (N) of Vector (M) of 1x2 points of type float
    :param image_size: (x, y) tuple, size in pixels, e.g. (1920, 1080)
    :param flags: OpenCV flags to pass to calibrateCamera().
    :return: RMS projection error, camera_matrix, dist_coeffs, rvecs, tvecs
    """
    if image_size[0] < 1:
        raise ValueError("Image width must be > 0.")
    if image_size[1] < 1:
        raise ValueError("Image height must be > 0.")
    if len(object_points) < 2:
        raise ValueError("Must have at least 2 sets of object points.")
    if len(image_points) < 2:
        raise ValueError("Must have at least 2 sets of image points.")
    if len(object_points) != len(image_points):
        raise ValueError("Image points and object points differ in length.")
    for i, _ in enumerate(object_points):
        if len(object_points[i]) < 3:
            raise ValueError(str(i) + ": Must have at least 3 object points.")
        if len(image_points[i]) < 3:
            raise ValueError(str(i) + ": Must have at least 3 image points.")
        if len(object_points[i]) != len(image_points[i]):
            raise ValueError(str(i) + ": Must have the same number of points.")

    _, camera_matrix, dist_coeffs, rvecs, tvecs \
        = cv2.calibrateCamera(object_points,
                              image_points,
                              image_size,
                              None, None,
                              flags=flags)

    # Recompute this, for consistency with stereo methods.
    # i.e. so we know what the calculation is exactly.
    sse, num = vm.compute_mono_2d_err(object_points,
                                      image_points,
                                      rvecs,
                                      tvecs,
                                      camera_matrix,
                                      dist_coeffs)
    mse = sse / num
    final_rms = np.sqrt(mse)

    return final_rms, camera_matrix, dist_coeffs, rvecs, tvecs
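
A minimal usage sketch for mono_video_calibration, not part of the module: it calibrates against synthetically projected chessboard corners, so every numeric value below (grid size, intrinsics, poses) is an illustrative assumption rather than anything taken from this file. In practice the object and image points would come from a chessboard or ChArUco point detector.

# Illustrative sketch, not part of the module: calibrate against synthetic
# projections of a planar grid. All values are made-up assumptions.
import cv2
import numpy as np
import sksurgerycalibration.video.video_calibration_wrapper as vw

grid = np.zeros((9 * 6, 1, 3), dtype=np.float32)
grid[:, 0, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * 10.0   # 10 mm squares

true_k = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])
true_d = np.zeros((1, 5))

object_points, image_points = [], []
for i in range(5):
    rvec = np.array([[0.1 * i], [0.05 * i], [0.0]])    # vary the pose per view
    tvec = np.array([[5.0 * i], [0.0], [500.0]])
    projected, _ = cv2.projectPoints(grid, rvec, tvec, true_k, true_d)
    object_points.append(grid)
    image_points.append(projected.astype(np.float32))

rms, camera_matrix, dist_coeffs, rvecs, tvecs = vw.mono_video_calibration(
    object_points, image_points, (1920, 1080))
print(rms, camera_matrix)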


# pylint:disable=too-many-arguments,too-many-statements
def mono_handeye_calibration(object_points: List,
                             image_points: List,
                             camera_matrix: np.ndarray,
                             camera_distortion: np.ndarray,
                             device_tracking_array: List,
                             pattern_tracking_array: List,
                             rvecs: List[np.ndarray],
                             tvecs: List[np.ndarray],
                             override_pattern2marker: np.ndarray = None,
                             use_opencv: bool = True,
                             do_bundle_adjust: bool = False):
    """
    Wrapper around handeye calibration functions and reprojection /
    reconstruction error metrics.

    :param object_points: Vector of Vectors of 1x3 object points, float32
    :type object_points: List
    :param image_points: Vector of Vectors of 1x2 image points, float32
    :type image_points: List
    :param camera_matrix: Camera intrinsic matrix
    :type camera_matrix: np.ndarray
    :param camera_distortion: Camera distortion coefficients
    :type camera_distortion: np.ndarray
    :param device_tracking_array: Tracking data for camera (hand)
    :type device_tracking_array: List
    :param pattern_tracking_array: Tracking data for calibration target
    :type pattern_tracking_array: List
    :param rvecs: Vector of 3x1 ndarray, Rodrigues rotations for each camera
    :type rvecs: List[np.ndarray]
    :param tvecs: Vector of [3x1] ndarray, translations for each camera
    :type tvecs: List[np.ndarray]
    :param override_pattern2marker: If provided, a 4x4 pattern2marker matrix
    that is taken as constant.
    :param use_opencv: If True we use OpenCV based methods, if False,
    Guofang Xiao's method.
    :param do_bundle_adjust: If True we do an additional bundle adjustment
    at the end. Needs pattern tracking too.
    :return: Reprojection error, handeye matrix, pattern to marker matrix
    :rtype: float, np.ndarray, np.ndarray
    """

    if not use_opencv and override_pattern2marker is None:

        quat_model2hand_array, trans_model2hand_array = \
            he.set_model2hand_arrays(pattern_tracking_array,
                                     device_tracking_array,
                                     use_quaternions=False)

        handeye_matrix, pattern2marker_matrix = \
            he.guofang_xiao_handeye_calibration(rvecs, tvecs,
                                                quat_model2hand_array,
                                                trans_model2hand_array)

    else:

        pattern2marker_matrix = override_pattern2marker

        if pattern2marker_matrix is None \
                and len(pattern_tracking_array) > 3 \
                and pattern_tracking_array[0] is not None:

            handeye_matrix, pattern2marker_matrix = \
                he.calibrate_hand_eye_and_pattern_to_marker(
                    rvecs,
                    tvecs,
                    device_tracking_array,
                    pattern_tracking_array,
                    method=cv2.CALIB_ROBOT_WORLD_HAND_EYE_SHAH
                    )

            # Now optimise p2m and h2e
            x_0 = np.zeros(12)
            rvec, tvec = vu.extrinsic_matrix_to_vecs(pattern2marker_matrix)
            x_0[0] = rvec[0][0]
            x_0[1] = rvec[1][0]
            x_0[2] = rvec[2][0]
            x_0[3] = tvec[0][0]
            x_0[4] = tvec[1][0]
            x_0[5] = tvec[2][0]

            rvec, tvec = vu.extrinsic_matrix_to_vecs(handeye_matrix)
            x_0[6] = rvec[0][0]
            x_0[7] = rvec[1][0]
            x_0[8] = rvec[2][0]
            x_0[9] = tvec[0][0]
            x_0[10] = tvec[1][0]
            x_0[11] = tvec[2][0]

            res = minimize(vcf.mono_proj_err_p2m_h2e, x_0,
                           args=(object_points,
                                 image_points,
                                 camera_matrix,
                                 camera_distortion,
                                 pattern_tracking_array,
                                 device_tracking_array
                                 ),
                           method='Powell',
                           )

            x_1 = res.x
            rvec[0][0] = x_1[0]
            rvec[1][0] = x_1[1]
            rvec[2][0] = x_1[2]
            tvec[0][0] = x_1[3]
            tvec[1][0] = x_1[4]
            tvec[2][0] = x_1[5]
            pattern2marker_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

            rvec[0][0] = x_1[6]
            rvec[1][0] = x_1[7]
            rvec[2][0] = x_1[8]
            tvec[0][0] = x_1[9]
            tvec[1][0] = x_1[10]
            tvec[2][0] = x_1[11]
            handeye_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

        elif pattern2marker_matrix is not None \
                and len(pattern_tracking_array) > 3 \
                and pattern_tracking_array[0] is not None:

            handeye_matrix, _ = \
                he.calibrate_hand_eye_and_pattern_to_marker(
                    rvecs,
                    tvecs,
                    device_tracking_array,
                    pattern_tracking_array,
                    method=cv2.CALIB_ROBOT_WORLD_HAND_EYE_SHAH
                    )

            # Now optimise just the h2e
            x_0 = np.zeros(6)
            rvec, tvec = vu.extrinsic_matrix_to_vecs(handeye_matrix)
            x_0[0] = rvec[0][0]
            x_0[1] = rvec[1][0]
            x_0[2] = rvec[2][0]
            x_0[3] = tvec[0][0]
            x_0[4] = tvec[1][0]
            x_0[5] = tvec[2][0]

            res = minimize(vcf.mono_proj_err_h2e, x_0,
                           args=(object_points,
                                 image_points,
                                 camera_matrix,
                                 camera_distortion,
                                 pattern_tracking_array,
                                 device_tracking_array,
                                 pattern2marker_matrix
                                 ),
                           method='Powell',
                           )

            x_1 = res.x
            rvec[0][0] = x_1[0]
            rvec[1][0] = x_1[1]
            rvec[2][0] = x_1[2]
            tvec[0][0] = x_1[3]
            tvec[1][0] = x_1[4]
            tvec[2][0] = x_1[5]
            handeye_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

        else:

            handeye_matrix = \
                he.calibrate_hand_eye_using_stationary_pattern(
                    rvecs,
                    tvecs,
                    device_tracking_array,
                    method=cv2.CALIB_HAND_EYE_TSAI)

    if do_bundle_adjust \
            and len(pattern_tracking_array) > 3 \
            and pattern_tracking_array[0] is not None:

        # Now optimise h2e, intrinsics, distortion
        x_0 = np.zeros(15)
        rvec, tvec = vu.extrinsic_matrix_to_vecs(handeye_matrix)
        x_0[0] = rvec[0][0]
        x_0[1] = rvec[1][0]
        x_0[2] = rvec[2][0]
        x_0[3] = tvec[0][0]
        x_0[4] = tvec[1][0]
        x_0[5] = tvec[2][0]
        x_0[6] = camera_matrix[0][0]
        x_0[7] = camera_matrix[1][1]
        x_0[8] = camera_matrix[0][2]
        x_0[9] = camera_matrix[1][2]
        x_0[10] = camera_distortion[0][0]
        x_0[11] = camera_distortion[0][1]
        x_0[12] = camera_distortion[0][2]
        x_0[13] = camera_distortion[0][3]
        x_0[14] = camera_distortion[0][4]

        res = minimize(vcf.mono_proj_err_h2e_int_dist, x_0,
                       args=(object_points,
                             image_points,
                             device_tracking_array,
                             pattern_tracking_array,
                             pattern2marker_matrix
                             ),
                       method='Powell',
                       )
        x_1 = res.x
        rvec[0][0] = x_1[0]
        rvec[1][0] = x_1[1]
        rvec[2][0] = x_1[2]
        tvec[0][0] = x_1[3]
        tvec[1][0] = x_1[4]
        tvec[2][0] = x_1[5]
        handeye_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

        camera_matrix[0][0] = x_1[6]
        camera_matrix[1][1] = x_1[7]
        camera_matrix[0][2] = x_1[8]
        camera_matrix[1][2] = x_1[9]
        camera_distortion[0][0] = x_1[10]
        camera_distortion[0][1] = x_1[11]
        camera_distortion[0][2] = x_1[12]
        camera_distortion[0][3] = x_1[13]
        camera_distortion[0][4] = x_1[14]

    elif do_bundle_adjust and (len(pattern_tracking_array) == 0
                               or pattern_tracking_array[0] is None):
        # To Do: We could still optimise h2e and g2w, for untracked cases?
        raise NotImplementedError("Bundle adjustment isn't implemented for "
                                  "untracked calibration patterns.")

    if len(pattern_tracking_array) > 3 \
            and pattern_tracking_array[0] is not None:

        sse, num_samples = \
            vm.compute_mono_2d_err_handeye(object_points,
                                           image_points,
                                           camera_matrix,
                                           camera_distortion,
                                           device_tracking_array,
                                           pattern_tracking_array,
                                           handeye_matrix,
                                           pattern2marker_matrix
                                           )
    else:

        sse, num_samples = vm.compute_mono_2d_err(object_points,
                                                  image_points,
                                                  rvecs,
                                                  tvecs,
                                                  camera_matrix,
                                                  camera_distortion)
    mse = sse / num_samples
    reproj_err = np.sqrt(mse)

    return reproj_err, handeye_matrix, pattern2marker_matrix
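
Throughout this module, rigid transforms are packed into flat parameter vectors as three Rodrigues rotation components followed by three translation components, six entries per transform, and unpacked again after optimisation. A small sketch of that pattern, using hypothetical helper names that are not part of the module:

# Hypothetical helpers (not in this module) illustrating the 6-DOF packing
# used above: three Rodrigues rotation parameters, then three translations.
import numpy as np
import sksurgerycalibration.video.video_calibration_utils as vu

def pack_transform(matrix_4x4):
    """Flatten a 4x4 rigid transform into [rx, ry, rz, tx, ty, tz]."""
    rvec, tvec = vu.extrinsic_matrix_to_vecs(matrix_4x4)
    return np.concatenate([rvec.reshape(3), tvec.reshape(3)])

def unpack_transform(params_6):
    """Rebuild a 4x4 rigid transform from [rx, ry, rz, tx, ty, tz]."""
    rvec = np.asarray(params_6[0:3], dtype=float).reshape(3, 1)
    tvec = np.asarray(params_6[3:6], dtype=float).reshape(3, 1)
    return vu.extrinsic_vecs_to_matrix(rvec, tvec)

# For example, in the first bundle-adjust branch above, x_0[0:6] corresponds
# to pack_transform(pattern2marker_matrix), and the re-optimised hand-eye
# matrix is unpack_transform(res.x[6:12]).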


# pylint: disable=too-many-locals, too-many-arguments
def stereo_video_calibration(left_ids,
                             left_object_points,
                             left_image_points,
                             right_ids,
                             right_object_points,
                             right_image_points,
                             image_size,
                             flags=cv2.CALIB_USE_INTRINSIC_GUESS,
                             override_left_intrinsics=None,
                             override_left_distortion=None,
                             override_right_intrinsics=None,
                             override_right_distortion=None,
                             override_l2r_rmat=None,
                             override_l2r_tvec=None
                             ):
    """
    Default stereo calibration, using OpenCV methods.

    We wrap it here, so we have a place to add extra validation code,
    and a space for documentation. The aim is to check everything before
    we pass it to OpenCV, and raise Exceptions consistently for any error
    we can detect before we pass it to OpenCV. If all six override
    parameters are provided, the intrinsics, distortion and left-to-right
    transform are kept fixed, and only the camera extrinsics are
    re-optimised.

    :param left_ids: Vector of ndarrays containing integer point ids.
    :param left_object_points: Vector of Vectors of 1x3 object points, float32
    :param left_image_points: Vector of Vectors of 1x2 image points, float32
    :param right_ids: Vector of ndarrays containing integer point ids.
    :param right_object_points: Vector of Vectors of 1x3 object points, float32
    :param right_image_points: Vector of Vectors of 1x2 image points, float32
    :param image_size: (x, y) tuple, size in pixels, e.g. (1920, 1080)
    :param flags: OpenCV flags to pass to stereoCalibrate().
    :param override_left_intrinsics: Optional left camera intrinsic matrix.
    :param override_left_distortion: Optional left distortion coefficients.
    :param override_right_intrinsics: Optional right camera intrinsic matrix.
    :param override_right_distortion: Optional right distortion coefficients.
    :param override_l2r_rmat: Optional [3x3] left-to-right rotation matrix.
    :param override_l2r_tvec: Optional left-to-right translation vector.
    :return: s_reproj, s_recon, l_c, l_d, l_rvecs, l_tvecs,
    r_c, r_d, r_rvecs, r_tvecs, l2r_r, l2r_t, essential, fundamental
    """

    # We only do override if all override params are specified.
    # pylint:disable=too-many-boolean-expressions
    do_override = False
    if override_left_intrinsics is not None \
        and override_left_distortion is not None \
        and override_right_intrinsics is not None \
        and override_right_distortion is not None \
        and override_l2r_rmat is not None \
        and override_l2r_tvec is not None:

        do_override = True

        l_c = override_left_intrinsics
        l_d = override_left_distortion
        r_c = override_right_intrinsics
        r_d = override_right_distortion

    number_of_frames = len(left_object_points)

    l_rvecs = []
    l_tvecs = []
    r_rvecs = []
    r_tvecs = []

    if do_override:

        for i in range(0, number_of_frames):

            _, rvecs, tvecs = cv2.solvePnP(
                left_object_points[i],
                left_image_points[i],
                l_c,
                l_d)
            l_rvecs.append(rvecs)
            l_tvecs.append(tvecs)

            _, rvecs, tvecs = cv2.solvePnP(
                right_object_points[i],
                right_image_points[i],
                r_c,
                r_d)
            r_rvecs.append(rvecs)
            r_tvecs.append(tvecs)

    else:

        _, l_c, l_d, l_rvecs, l_tvecs \
            = cv2.calibrateCamera(left_object_points,
                                  left_image_points,
                                  image_size,
                                  None, None)

        _, r_c, r_d, r_rvecs, r_tvecs \
            = cv2.calibrateCamera(right_object_points,
                                  right_image_points,
                                  image_size,
                                  None, None)

    # For stereo, OpenCV needs common points.
    _, common_object_points, common_left_image_points, \
        common_right_image_points \
        = vu.filter_common_points_all_images(left_ids,
                                             left_object_points,
                                             left_image_points,
                                             right_ids,
                                             right_image_points, 10)

    if do_override:

        # Do OpenCV stereo calibration, using override intrinsics,
        # just so we can get the essential and fundamental matrix out.
        _, l_c, l_d, r_c, r_d, \
            l2r_r, l2r_t, essential, fundamental = cv2.stereoCalibrate(
                common_object_points,
                common_left_image_points,
                common_right_image_points,
                l_c,
                l_d,
                r_c,
                r_d,
                image_size,
                flags=cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_INTRINSIC)

        l2r_r = override_l2r_rmat
        l2r_t = override_l2r_tvec

        assert np.allclose(l_c, override_left_intrinsics)
        assert np.allclose(l_d, override_left_distortion)
        assert np.allclose(r_c, override_right_intrinsics)
        assert np.allclose(r_d, override_right_distortion)

    else:

        # Do OpenCV stereo calibration, using intrinsics from OpenCV mono.
        _, l_c, l_d, r_c, r_d, \
            l2r_r, l2r_t, essential, fundamental = cv2.stereoCalibrate(
                common_object_points,
                common_left_image_points,
                common_right_image_points,
                l_c,
                l_d,
                r_c,
                r_d,
                image_size,
                flags=cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_INTRINSIC)

        # Then do it again, using the passed in flags.
        _, l_c, l_d, r_c, r_d, \
            l2r_r, l2r_t, essential, fundamental = cv2.stereoCalibrate(
                common_object_points,
                common_left_image_points,
                common_right_image_points,
                l_c,
                l_d,
                r_c,
                r_d,
                image_size,
                flags=flags)

    if do_override:

        # Stereo calibration is hard for a laparoscope.
        # In clinical practice, the data may be way too variable.
        # For stereo scopes, they are often fixed focus,
        # i.e. fixed intrinsics, and fixed stereo.
        # So, we may prefer to just do the best possible calibration
        # in the lab, and then keep those values constant.
        # But we then would still want to optimise the camera extrinsics
        # as the camera poses directly affect the hand-eye calibration.

        _, l_rvecs, l_tvecs \
            = stereo_calibration_extrinsics(
                common_object_points,
                common_left_image_points,
                common_right_image_points,
                l_rvecs,
                l_tvecs,
                l_c,
                l_d,
                r_c,
                r_d,
                l2r_r,
                l2r_t
            )

    else:

        # Normal OpenCV stereo calibration optimises intrinsics,
        # distortion, and stereo parameters, but doesn't output pose.
        # So here, we recompute the left camera pose.

        # As of OpenCV 4.5.4.58, rvecs and tvecs are tuples, not lists, and
        # are thus immutable, causing problems if we try to change a member.
        l_rvecs = list(l_rvecs)
        l_tvecs = list(l_tvecs)
        for i in range(0, number_of_frames):
            _, l_rvecs[i], l_tvecs[i] = cv2.solvePnP(
                common_object_points[i],
                common_left_image_points[i],
                l_c,
                l_d)

    # Here, we are computing the right hand side rvecs and tvecs
    # given the new left hand side rvecs, tvecs and the l2r.
    left_to_right = skcm.construct_rigid_transformation(l2r_r, l2r_t)

    r_rvecs = list(r_rvecs)
    r_tvecs = list(r_tvecs)

    for i in range(0, number_of_frames):
        left_chessboard_to_camera = \
            vu.extrinsic_vecs_to_matrix(l_rvecs[i], l_tvecs[i])
        right_chessboard_to_camera = \
            np.matmul(left_to_right, left_chessboard_to_camera)
        r_rvecs[i], r_tvecs[i] = \
            vu.extrinsic_matrix_to_vecs(right_chessboard_to_camera)

    # And recompute stereo projection error, given left camera and l2r.
    # We also use all points, not just common points, for comparison
    # with other methods outside of this function.
    sse, num_samples = \
        vm.compute_stereo_2d_err(l2r_r,
                                 l2r_t,
                                 common_object_points,
                                 common_left_image_points,
                                 l_c,
                                 l_d,
                                 common_object_points,
                                 common_right_image_points,
                                 r_c,
                                 r_d,
                                 l_rvecs,
                                 l_tvecs
                                 )
    mse = sse / num_samples
    s_reproj = np.sqrt(mse)

    sse, num_samples = \
        vm.compute_stereo_3d_error(l2r_r,
                                   l2r_t,
                                   common_object_points,
                                   common_left_image_points,
                                   l_c,
                                   l_d,
                                   common_right_image_points,
                                   r_c,
                                   r_d,
                                   l_rvecs,
                                   l_tvecs
                                   )

    mse = sse / num_samples
    s_recon = np.sqrt(mse)

    LOGGER.info("Stereo Calib: proj=%s, recon=%s",
                str(s_reproj), str(s_recon))

    return s_reproj, s_recon, \
        l_c, l_d, l_rvecs, l_tvecs, \
        r_c, r_d, r_rvecs, r_tvecs, \
        l2r_r, l2r_t, \
        essential, fundamental
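
A usage sketch for stereo_video_calibration along the same lines as the mono example above, again with entirely synthetic, made-up data; the ids arrays simply label each corner so that left and right points can be matched by vu.filter_common_points_all_images. Not part of the module.

# Illustrative sketch, not part of the module: stereo calibration from
# synthetic projections of a planar grid seen by two cameras with an
# assumed identity-rotation, 5 mm-baseline left-to-right transform.
import cv2
import numpy as np
import sksurgerycalibration.video.video_calibration_wrapper as vw

grid = np.zeros((9 * 6, 1, 3), dtype=np.float32)
grid[:, 0, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * 10.0
ids = np.arange(9 * 6).reshape(-1, 1)

k = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])
dist = np.zeros((1, 5))
baseline = np.array([[5.0], [0.0], [0.0]])

left_ids, left_obj, left_img = [], [], []
right_ids, right_obj, right_img = [], [], []
for i in range(5):
    rvec = np.array([[0.1 * i], [0.05 * i], [0.0]])
    tvec = np.array([[5.0 * i], [0.0], [500.0]])
    l_proj, _ = cv2.projectPoints(grid, rvec, tvec, k, dist)
    r_proj, _ = cv2.projectPoints(grid, rvec, tvec + baseline, k, dist)
    left_ids.append(ids)
    left_obj.append(grid)
    left_img.append(l_proj.astype(np.float32))
    right_ids.append(ids)
    right_obj.append(grid)
    right_img.append(r_proj.astype(np.float32))

result = vw.stereo_video_calibration(left_ids, left_obj, left_img,
                                     right_ids, right_obj, right_img,
                                     (1920, 1080))
s_reproj, s_recon = result[0], result[1]
print(s_reproj, s_recon)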


# pylint:disable=too-many-arguments,too-many-statements
def stereo_handeye_calibration(l2r_rmat: np.ndarray,
                               l2r_tvec: np.ndarray,
                               left_ids: List,
                               left_object_points: List,
                               left_image_points: List,
                               right_ids: List,
                               right_image_points: List,
                               left_camera_matrix: np.ndarray,
                               left_camera_distortion: np.ndarray,
                               right_camera_matrix: np.ndarray,
                               right_camera_distortion: np.ndarray,
                               device_tracking_array: List,
                               calibration_tracking_array: List,
                               left_rvecs: List[np.ndarray],
                               left_tvecs: List[np.ndarray],
                               override_pattern2marker=None,
                               use_opencv: bool = True,
                               do_bundle_adjust: bool = False
                               ):
    """
    Wrapper around handeye calibration functions and reprojection /
    reconstruction error metrics.

    :param l2r_rmat: [3x3] ndarray, rotation for l2r transform
    :type l2r_rmat: np.ndarray
    :param l2r_tvec: [3x1] ndarray, translation for l2r transform
    :type l2r_tvec: np.ndarray
    :param left_ids: Vector of ndarrays containing integer point ids.
    :type left_ids: List
    :param left_object_points: Vector of Vector of 1x3 of type float32
    :type left_object_points: List
    :param left_image_points: Vector of Vector of 1x2 of type float32
    :type left_image_points: List
    :param right_ids: Vector of ndarrays containing integer point ids.
    :type right_ids: List
    :param right_image_points: Vector of Vector of 1x2 of type float32
    :type right_image_points: List
    :param left_camera_matrix: Camera intrinsic matrix
    :type left_camera_matrix: np.ndarray
    :param left_camera_distortion: Camera distortion coefficients
    :type left_camera_distortion: np.ndarray
    :param right_camera_matrix: Camera intrinsic matrix
    :type right_camera_matrix: np.ndarray
    :param right_camera_distortion: Camera distortion coefficients
    :type right_camera_distortion: np.ndarray
    :param device_tracking_array: Tracking data for camera (hand)
    :type device_tracking_array: List
    :param calibration_tracking_array: Tracking data for calibration target
    :type calibration_tracking_array: List
    :param left_rvecs: Vector of 3x1 ndarray, Rodrigues rotations for each
    camera
    :type left_rvecs: List[np.ndarray]
    :param left_tvecs: Vector of [3x1] ndarray, translations for each camera
    :type left_tvecs: List[np.ndarray]
    :param override_pattern2marker: If provided, a 4x4 pattern2marker matrix
    that is taken as constant.
    :param use_opencv: If True we use OpenCV based methods, if False,
    Guofang Xiao's method.
    :param do_bundle_adjust: If True we do an additional bundle adjustment
    at the end.
    :return: Reprojection error, reconstruction error, left handeye matrix,
    left pattern to marker matrix, right handeye, right pattern to marker
    :rtype: float, float, np.ndarray, np.ndarray, np.ndarray, np.ndarray
    """

    # First, we do mono calibration, for maximum code re-use.
    _, left_handeye_matrix, left_pattern2marker_matrix = \
        mono_handeye_calibration(
            left_object_points,
            left_image_points,
            left_camera_matrix,
            left_camera_distortion,
            device_tracking_array,
            calibration_tracking_array,
            left_rvecs,
            left_tvecs,
            override_pattern2marker=override_pattern2marker,
            use_opencv=use_opencv,
            do_bundle_adjust=False
        )

    # Filter common image points
    minimum_points = 10
    _, common_object_pts, common_l_image_pts, common_r_image_pts = \
        vu.filter_common_points_all_images(
            left_ids, left_object_points, left_image_points,
            right_ids, right_image_points,
            minimum_points)

    if do_bundle_adjust:

        if override_pattern2marker is None \
                and len(calibration_tracking_array) > 3 \
                and calibration_tracking_array[0] is not None:

            # Now optimise p2m and h2e
            x_0 = np.zeros(12)

            rvec, tvec = vu.extrinsic_matrix_to_vecs(left_handeye_matrix)
            x_0[0] = rvec[0][0]
            x_0[1] = rvec[1][0]
            x_0[2] = rvec[2][0]
            x_0[3] = tvec[0][0]
            x_0[4] = tvec[1][0]
            x_0[5] = tvec[2][0]

            rvec, tvec = vu.extrinsic_matrix_to_vecs(left_pattern2marker_matrix)
            x_0[6] = rvec[0][0]
            x_0[7] = rvec[1][0]
            x_0[8] = rvec[2][0]
            x_0[9] = tvec[0][0]
            x_0[10] = tvec[1][0]
            x_0[11] = tvec[2][0]

            res = minimize(vcf.stereo_proj_err_h2e, x_0,
                           args=(common_object_pts,
                                 common_l_image_pts,
                                 common_r_image_pts,
                                 left_camera_matrix,
                                 left_camera_distortion,
                                 right_camera_matrix,
                                 right_camera_distortion,
                                 l2r_rmat,
                                 l2r_tvec,
                                 device_tracking_array,
                                 calibration_tracking_array
                                 ),
                           method='Powell',
                           )

            LOGGER.info("Stereo Handeye Re-Optimised p2m and h2e: status=%s",
                        str(res.status))
            LOGGER.info("Stereo Handeye Re-Optimised p2m and h2e: success=%s",
                        str(res.success))
            LOGGER.info("Stereo Handeye Re-Optimised p2m and h2e: msg=%s",
                        str(res.message))

            x_1 = res.x

            rvec[0][0] = x_1[0]
            rvec[1][0] = x_1[1]
            rvec[2][0] = x_1[2]
            tvec[0][0] = x_1[3]
            tvec[1][0] = x_1[4]
            tvec[2][0] = x_1[5]
            left_handeye_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

            rvec[0][0] = x_1[6]
            rvec[1][0] = x_1[7]
            rvec[2][0] = x_1[8]
            tvec[0][0] = x_1[9]
            tvec[1][0] = x_1[10]
            tvec[2][0] = x_1[11]
            left_pattern2marker_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

        elif override_pattern2marker is not None \
                and len(calibration_tracking_array) > 3 \
                and calibration_tracking_array[0] is not None:

            # Now optimise just the h2e
            x_0 = np.zeros(6)
            rvec, tvec = vu.extrinsic_matrix_to_vecs(left_handeye_matrix)
            x_0[0] = rvec[0][0]
            x_0[1] = rvec[1][0]
            x_0[2] = rvec[2][0]
            x_0[3] = tvec[0][0]
            x_0[4] = tvec[1][0]
            x_0[5] = tvec[2][0]

            res = minimize(vcf.stereo_proj_err_h2e, x_0,
                           args=(common_object_pts,
                                 common_l_image_pts,
                                 common_r_image_pts,
                                 left_camera_matrix,
                                 left_camera_distortion,
                                 right_camera_matrix,
                                 right_camera_distortion,
                                 l2r_rmat,
                                 l2r_tvec,
                                 device_tracking_array,
                                 calibration_tracking_array,
                                 left_pattern2marker_matrix
                                 ),
                           method='Powell')

            LOGGER.info("Stereo Handeye Re-Optimised h2e: status=%s",
                        str(res.status))
            LOGGER.info("Stereo Handeye Re-Optimised h2e: success=%s",
                        str(res.success))
            LOGGER.info("Stereo Handeye Re-Optimised h2e: msg=%s",
                        str(res.message))

            x_1 = res.x
            rvec[0][0] = x_1[0]
            rvec[1][0] = x_1[1]
            rvec[2][0] = x_1[2]
            tvec[0][0] = x_1[3]
            tvec[1][0] = x_1[4]
            tvec[2][0] = x_1[5]
            left_handeye_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

        # Now, final case, optimise handeye and stereo camera parameters.
        # This means hand-eye (6DOF), left intrinsics (4DOF), left
        # distortion (5DOF), right intrinsics (4DOF), right distortion (5DOF),
        # l2r (6DOF) = 30 DOF.

        x_0 = np.zeros(30)

        rvec, tvec = vu.extrinsic_matrix_to_vecs(left_handeye_matrix)
        x_0[0] = rvec[0][0]
        x_0[1] = rvec[1][0]
        x_0[2] = rvec[2][0]
        x_0[3] = tvec[0][0]
        x_0[4] = tvec[1][0]
        x_0[5] = tvec[2][0]

        l2r = skcm.construct_rigid_transformation(l2r_rmat, l2r_tvec)
        rvec, tvec = vu.extrinsic_matrix_to_vecs(l2r)
        x_0[6] = rvec[0][0]
        x_0[7] = rvec[1][0]
        x_0[8] = rvec[2][0]
        x_0[9] = tvec[0][0]
        x_0[10] = tvec[1][0]
        x_0[11] = tvec[2][0]

        x_0[12] = left_camera_matrix[0][0]
        x_0[13] = left_camera_matrix[1][1]
        x_0[14] = left_camera_matrix[0][2]
        x_0[15] = left_camera_matrix[1][2]
        x_0[16] = left_camera_distortion[0][0]
        x_0[17] = left_camera_distortion[0][1]
        x_0[18] = left_camera_distortion[0][2]
        x_0[19] = left_camera_distortion[0][3]
        x_0[20] = left_camera_distortion[0][4]

        x_0[21] = right_camera_matrix[0][0]
        x_0[22] = right_camera_matrix[1][1]
        x_0[23] = right_camera_matrix[0][2]
        x_0[24] = right_camera_matrix[1][2]
        x_0[25] = right_camera_distortion[0][0]
        x_0[26] = right_camera_distortion[0][1]
        x_0[27] = right_camera_distortion[0][2]
        x_0[28] = right_camera_distortion[0][3]
        x_0[29] = right_camera_distortion[0][4]

        res = minimize(vcf.stereo_proj_err_h2e_int_dist_l2r, x_0,
                       args=(common_object_pts,
                             common_l_image_pts,
                             common_r_image_pts,
                             device_tracking_array,
                             calibration_tracking_array,
                             left_pattern2marker_matrix
                             ),
                       method='Powell')

        LOGGER.info("Stereo Handeye bundle adjustment: status=%s",
                    str(res.status))
        LOGGER.info("Stereo Handeye bundle adjustment: success=%s",
                    str(res.success))
        LOGGER.info("Stereo Handeye bundle adjustment: msg=%s",
                    str(res.message))

        x_1 = res.x
        rvec[0][0] = x_1[0]
        rvec[1][0] = x_1[1]
        rvec[2][0] = x_1[2]
        tvec[0][0] = x_1[3]
        tvec[1][0] = x_1[4]
        tvec[2][0] = x_1[5]
        left_handeye_matrix = vu.extrinsic_vecs_to_matrix(rvec, tvec)

        rvec[0][0] = x_1[6]
        rvec[1][0] = x_1[7]
        rvec[2][0] = x_1[8]
        tvec[0][0] = x_1[9]
        tvec[1][0] = x_1[10]
        tvec[2][0] = x_1[11]
        l2r = vu.extrinsic_vecs_to_matrix(rvec, tvec)
        l2r_rmat = l2r[0:3, 0:3]
        l2r_tvec = l2r[0:3, 3]

        left_camera_matrix[0][0] = x_1[12]
        left_camera_matrix[1][1] = x_1[13]
        left_camera_matrix[0][2] = x_1[14]
        left_camera_matrix[1][2] = x_1[15]
        left_camera_distortion[0][0] = x_1[16]
        left_camera_distortion[0][1] = x_1[17]
        left_camera_distortion[0][2] = x_1[18]
        left_camera_distortion[0][3] = x_1[19]
        left_camera_distortion[0][4] = x_1[20]

        right_camera_matrix[0][0] = x_1[21]
        right_camera_matrix[1][1] = x_1[22]
        right_camera_matrix[0][2] = x_1[23]
        right_camera_matrix[1][2] = x_1[24]
        right_camera_distortion[0][0] = x_1[25]
        right_camera_distortion[0][1] = x_1[26]
        right_camera_distortion[0][2] = x_1[27]
        right_camera_distortion[0][3] = x_1[28]
        right_camera_distortion[0][4] = x_1[29]

    # Ensure right side is consistent.
    l2r_matrix = skcm.construct_rigid_transformation(l2r_rmat, l2r_tvec)
    right_handeye_matrix = l2r_matrix @ left_handeye_matrix
    right_pattern2marker_matrix = copy.deepcopy(left_pattern2marker_matrix)

    # Now compute some output statistics.
    if len(calibration_tracking_array) > 3 \
            and calibration_tracking_array[0] is not None:

        sse, num_samples = vm.compute_stereo_2d_err_handeye(
            common_object_pts,
            common_l_image_pts,
            left_camera_matrix,
            left_camera_distortion,
            common_r_image_pts,
            right_camera_matrix,
            right_camera_distortion,
            device_tracking_array,
            calibration_tracking_array,
            left_handeye_matrix,
            left_pattern2marker_matrix,
            right_handeye_matrix,
            right_pattern2marker_matrix
        )
        mse = sse / num_samples
        reproj_err = np.sqrt(mse)

        sse, num_samples = vm.compute_stereo_3d_err_handeye(
            l2r_rmat,
            l2r_tvec,
            common_object_pts,
            common_l_image_pts,
            left_camera_matrix,
            left_camera_distortion,
            common_r_image_pts,
            right_camera_matrix,
            right_camera_distortion,
            device_tracking_array,
            calibration_tracking_array,
            left_handeye_matrix,
            left_pattern2marker_matrix,
        )
        mse = sse / num_samples
        recon_err = np.sqrt(mse)

    else:

        sse, num_samples = vm.compute_stereo_2d_err(l2r_rmat,
                                                    l2r_tvec,
                                                    common_object_pts,
                                                    common_l_image_pts,
                                                    left_camera_matrix,
                                                    left_camera_distortion,
                                                    common_object_pts,
                                                    common_r_image_pts,
                                                    right_camera_matrix,
                                                    right_camera_distortion,
                                                    left_rvecs,
                                                    left_tvecs
                                                    )
        mse = sse / num_samples
        reproj_err = np.sqrt(mse)

        sse, num_samples = \
            vm.compute_stereo_3d_error(l2r_rmat,
                                       l2r_tvec,
                                       common_object_pts,
                                       common_l_image_pts,
                                       left_camera_matrix,
                                       left_camera_distortion,
                                       common_r_image_pts,
                                       right_camera_matrix,
                                       right_camera_distortion,
                                       left_rvecs,
                                       left_tvecs
                                       )
        mse = sse / num_samples
        recon_err = np.sqrt(mse)

    return reproj_err, recon_err, \
        left_handeye_matrix, left_pattern2marker_matrix, \
        right_handeye_matrix, right_pattern2marker_matrix
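
The right-hand side above is derived by composing the left hand-eye matrix with the left-to-right transform. The sketch below shows one plausible way to chain the outputs with per-frame tracking to obtain a pattern-to-camera transform for reprojection; it assumes tracking matrices are marker-to-tracker and that the hand-eye matrix maps device-marker (hand) coordinates to camera (eye) coordinates. The actual conventions are fixed by the cost functions in video_calibration_cost_functions, so treat this purely as an illustration, not as the module's API.

# Hypothetical helper, under the assumed conventions described above.
import numpy as np

def pattern_to_camera(device_tracking, pattern_tracking,
                      handeye, pattern2marker):
    """Pattern -> pattern marker -> tracker -> device marker -> camera."""
    tracker_to_device = np.linalg.inv(device_tracking)
    return handeye @ tracker_to_device @ pattern_tracking @ pattern2marker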


def stereo_calibration_extrinsics(common_object_points,
                                  common_left_image_points,
                                  common_right_image_points,
                                  l_rvecs,
                                  l_tvecs,
                                  override_left_intrinsics,
                                  override_left_distortion,
                                  override_right_intrinsics,
                                  override_right_distortion,
                                  override_l2r_rmat,
                                  override_l2r_tvec):
    """
    Simply re-optimises the extrinsic parameters.
    :return: error, l_rvecs, l_tvecs
    """
    number_of_frames = len(common_object_points)
    number_of_parameters = 6 * number_of_frames
    x_0 = np.zeros(number_of_parameters)
    for i in range(0, number_of_frames):
        x_0[i * 6 + 0] = l_rvecs[i][0][0]
        x_0[i * 6 + 1] = l_rvecs[i][1][0]
        x_0[i * 6 + 2] = l_rvecs[i][2][0]
        x_0[i * 6 + 3] = l_tvecs[i][0][0]
        x_0[i * 6 + 4] = l_tvecs[i][1][0]
        x_0[i * 6 + 5] = l_tvecs[i][2][0]

    res = least_squares(vcf.stereo_2d_error_for_extrinsics, x_0,
                        args=(common_object_points,
                              common_left_image_points,
                              common_right_image_points,
                              override_left_intrinsics,
                              override_left_distortion,
                              override_right_intrinsics,
                              override_right_distortion,
                              override_l2r_rmat,
                              override_l2r_tvec),
                        method='lm',
                        x_scale='jac',
                        verbose=0)

    LOGGER.info("Stereo Re-Calibration: status=%s", str(res.status))
    LOGGER.info("Stereo Re-Calibration: success=%s", str(res.success))
    LOGGER.info("Stereo Re-Calibration: msg=%s", str(res.message))

    x_1 = res.x
    for i in range(0, number_of_frames):
        l_rvecs[i][0][0] = x_1[i * 6 + 0]
        l_rvecs[i][1][0] = x_1[i * 6 + 1]
        l_rvecs[i][2][0] = x_1[i * 6 + 2]
        l_tvecs[i][0][0] = x_1[i * 6 + 3]
        l_tvecs[i][1][0] = x_1[i * 6 + 4]
        l_tvecs[i][2][0] = x_1[i * 6 + 5]

    return res.fun, l_rvecs, l_tvecs
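
The module uses two different SciPy interfaces: scipy.optimize.minimize with method='Powell', which takes a scalar cost, and scipy.optimize.least_squares with method='lm', which takes a vector of residuals (as in stereo_calibration_extrinsics above). A toy, self-contained illustration of that difference, with made-up cost functions unrelated to this module:

# Toy illustration of the two optimiser interfaces used in this module.
import numpy as np
from scipy.optimize import least_squares, minimize

def residuals(x):
    """Vector of residuals, as required by least_squares."""
    return np.array([x[0] - 1.0, 10.0 * (x[1] - 2.0)])

def scalar_cost(x):
    """Single scalar cost (here, the sum of squares), as required by minimize."""
    return float(np.sum(residuals(x) ** 2))

x0 = np.zeros(2)
lm_result = least_squares(residuals, x0, method='lm')
powell_result = minimize(scalar_cost, x0, method='Powell')
print(lm_result.x, powell_result.x)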