• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

SciKit-Surgery / scikit-surgeryvtk / 4323596638

pending completion
4323596638

push

github-actions

Miguel Xochicale
refactors unit tests skipif messages (#187)

332 of 1997 relevant lines covered (16.62%)

0.66 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

11.49
/sksurgeryvtk/widgets/vtk_lus_simulator.py
1
# -*- coding: utf-8 -*-
2

3
"""
4✔
4
Module to provide a simulator to render a laparoscopic view comprising
5
models of anatomy along with a laparoscopic ultrasound probe.
6
"""
7

8
# pylint: disable=too-many-arguments
9

10
import numpy as np
4✔
11
import vtk
4✔
12
import sksurgerycore.transforms.matrix as cmu
4✔
13
import sksurgeryvtk.widgets.vtk_rendering_generator as rg
4✔
14
import sksurgeryvtk.utils.matrix_utils as vmu
4✔
15

16

17
class VTKLUSSimulator(rg.VTKRenderingGenerator):
    """
    Class derived from VTKRenderingGenerator to provide additional
    functions to set up the position of anatomy and LUS probe with
    respect to a stationary camera, placed at the world origin,
    and pointing along +ve z axis, as per OpenCV conventions.

    Note: The mesh representing the LUS probe body must be called 'probe',
    and there must be at least one other mesh called 'liver'. Any other
    meshes, e.g. gallbladder, arteries etc., will have the same transform
    applied as the liver surface.

    :param models_json_file: JSON file describing VTK models, in SNAPPY format
    :param background_image_file: RGB image to render in background
    :param camera_intrinsics_file: [3x3] matrix in text file, in numpy format
    :param liver2camera_reference_file: [4x4] matrix in text file, numpy format
    :param probe2camera_reference_file: [4x4] matrix in text file, numpy format
    """
    def __init__(self,
                 models_json_file,
                 background_image_file,
                 camera_intrinsics_file,
                 liver2camera_reference_file,
                 probe2camera_reference_file,
                 camera_to_world=None,
                 left_to_right=None,
                 offscreen=False,
                 zbuffer=False,
                 clipping_range=(1, 1000)
                 ):
        super().__init__(models_json_file,
                         background_image_file,
                         camera_intrinsics_file,
                         camera_to_world=camera_to_world,
                         left_to_right=left_to_right,
                         offscreen=offscreen,
                         zbuffer=zbuffer,
                         gaussian_sigma=0,
                         gaussian_window_size=11,
                         clipping_range=clipping_range
                         )

        # Reference poses, loaded once. These must never be mutated after
        # construction: set_pose() takes copies before applying offsets.
        self.reference_l2c = np.loadtxt(liver2camera_reference_file)
        self.reference_p2c = np.loadtxt(probe2camera_reference_file)

        # Cylinder representing the probe shaft/handle. The centre is
        # shifted by half the height, so the cylinder extends from the
        # origin along +y and rotates about its base end.
        self.cyl = vtk.vtkCylinderSource()
        self.cyl.SetResolution(88)
        self.cyl.SetRadius(5)
        self.cyl.SetHeight(1000)
        self.cyl.SetCenter((0, self.cyl.GetHeight() / 2.0, 0))
        self.cyl.Update()

        # Transform filter applied to the cylinder poly-data BEFORE the
        # actor transform; set_pose_with_matrices() drives this to swing
        # the handle about the x axis.
        self.cyl_matrix = vtk.vtkMatrix4x4()
        self.cyl_matrix.Identity()
        self.cyl_trans = vtk.vtkTransform()
        self.cyl_trans.SetMatrix(self.cyl_matrix)
        self.cyl_transform_filter = vtk.vtkTransformPolyDataFilter()
        self.cyl_transform_filter.SetInputData(self.cyl.GetOutput())
        self.cyl_transform_filter.SetTransform(self.cyl_trans)

        self.cyl_mapper = vtk.vtkPolyDataMapper()
        self.cyl_mapper.SetInputConnection(
            self.cyl_transform_filter.GetOutputPort())
        self.cyl_mapper.Update()
        self.cyl_actor = vtk.vtkActor()
        self.cyl_actor.SetMapper(self.cyl_mapper)

        # Match the handle's appearance to the 'probe' model so the two
        # render as one object.
        probe_model = self.model_loader.get_surface_model('probe')
        probe_colour = probe_model.get_colour()
        self.cyl_actor.GetProperty().SetColor(probe_colour)
        if probe_model.get_no_shading():
            self.cyl_actor.GetProperty().SetAmbient(1)
            self.cyl_actor.GetProperty().SetDiffuse(0)
            self.cyl_actor.GetProperty().SetSpecular(0)

        self.overlay.add_vtk_actor(self.cyl_actor)

        self.set_clipping_range(clipping_range[0], clipping_range[1])
        self.setup_camera_extrinsics(camera_to_world, left_to_right)

    def set_pose(self,
                 anatomy_pose_params,
                 probe_pose_params,
                 angle_of_handle,
                 anatomy_location=None
                 ):
        """
        This is the main method to call to setup the pose of all anatomy and
        for the LUS probe, and the handle.
        You can then call get_image() to get the rendered image,
        or call get_masks() to get a set of rendered masks,
        and the relevant pose parameters for ML purposes.
        The liver2camera and probe2camera are returned as 4x4 matrices.
        This is because there are multiple different parameterisations
        that the user might be working in. e.g. Euler angles, Rodrigues etc.
        :param anatomy_pose_params: [rx, ry, rz, tx, ty, tz] in deg/mm
        :param probe_pose_params: [rx, ry, rz, tx, ty, tz] in deg/mm
        :param angle_of_handle: angle in deg
        :param anatomy_location: [1x3] location of random point on liver surface
        :return: [liver2camera4x4, probe2camera4x4, angle, anatomy_location1x3]
        """
        # The 'anatomy_location' picks a point on the surface and moves
        # the LUS probe to have its centroid based there. This is in effect
        # updating the so-called 'reference' position of the probe.
        # Subsequent offsets in [rx, ry, rz, tx, ty, tz] are from this new posn.
        #
        # BUG FIX: copy the reference matrix. The original code aliased
        # self.reference_p2c and then wrote into it below, permanently
        # corrupting the reference pose for every subsequent call.
        p2c = np.copy(self.reference_p2c)

        if anatomy_location is not None:
            # Homogeneous point on the liver surface, in camera coords.
            picked = np.zeros((4, 1))
            picked[0][0] = anatomy_location[0]
            picked[1][0] = anatomy_location[1]
            picked[2][0] = anatomy_location[2]
            picked[3][0] = 1
            picked_point = self.reference_l2c @ picked

            # This p2c then becomes the 'reference_probe2camera'.
            # Index the scalar explicitly: assigning a size-1 array into a
            # scalar slot is an error on NumPy >= 1.25.
            p2c[0][3] = picked_point[0][0]
            p2c[1][3] = picked_point[1][0]
            p2c[2][3] = picked_point[2][0]

        # Compute the transformation for the anatomy.
        # We assume that the anatomy has been normalised (zero-centred).
        rotation_tx = vmu.create_matrix_from_list([anatomy_pose_params[0],
                                                   anatomy_pose_params[1],
                                                   anatomy_pose_params[2],
                                                   0, 0, 0],
                                                  is_in_radians=False)
        translation_tx = vmu.create_matrix_from_list([0, 0, 0,
                                                      anatomy_pose_params[3],
                                                      anatomy_pose_params[4],
                                                      anatomy_pose_params[5]],
                                                     is_in_radians=False)
        anatomy_tx = translation_tx @ self.reference_l2c @ rotation_tx
        full_anatomy_tx_vtk = \
            vmu.create_vtk_matrix_from_numpy(anatomy_tx)

        # Now we compute the position of the probe.
        # We assume that the probe model has been normalised (zero-centred).
        probe_tx = vmu.create_matrix_from_list(probe_pose_params,
                                               is_in_radians=False)
        # probe-to-liver: express the probe offset in liver coordinates, so
        # the probe follows whatever transform is applied to the anatomy.
        p2l = np.linalg.inv(self.reference_l2c) @ p2c
        probe_actor_tx = p2l @ probe_tx

        full_probe_actor_tx = anatomy_tx @ probe_actor_tx
        full_probe_actor_tx_vtk = \
            vmu.create_vtk_matrix_from_numpy(full_probe_actor_tx)

        # Apply the transforms to each actor.
        self.set_pose_with_matrices(full_probe_actor_tx_vtk,
                                    full_anatomy_tx_vtk,
                                    angle_of_handle)

        # Return parameters for final solution, read back from the actors
        # so the caller gets exactly what was rendered.
        liver_model = self.model_loader.get_surface_model('liver')
        final_l2c = \
            vmu.create_numpy_matrix_from_vtk(liver_model.actor.GetMatrix())
        probe_model = self.model_loader.get_surface_model('probe')
        final_p2c = \
            vmu.create_numpy_matrix_from_vtk(probe_model.actor.GetMatrix())

        return [final_l2c, final_p2c, angle_of_handle, anatomy_location]

    def set_pose_with_matrices(self, p2c, l2c, angle_of_handle):
        """
        Method to apply 4x4 transformations to actors.
        :param p2c: 4x4 matrix, either numpy or vtk matrix.
        :param l2c: 4x4 matrix, either numpy or vtk matrix.
        :param angle_of_handle: angle in deg.
        :return: N/A
        """

        # First we can compute the angle of the handle.
        # This applies directly to the data, as it comes out
        # of the vtkTransformPolyDataFilter, before the actor transformation.
        # NOTE(review): these constants look like an empirically-measured
        # pivot offset of the handle within the probe model's coordinate
        # frame — confirm against the probe mesh before changing.
        probe_offset = np.eye(4)
        probe_offset[0][3] = 0.007877540588378196
        probe_offset[1][3] = 36.24640712738037
        probe_offset[2][3] = -3.8626091003417997
        r_x = \
            cmu.construct_rx_matrix(angle_of_handle, is_in_radians=False)
        rotation_about_x = \
            cmu.construct_rigid_transformation(r_x, np.zeros((3, 1)))
        self.cyl_trans.SetMatrix(
            vmu.create_vtk_matrix_from_numpy(probe_offset @ rotation_about_x))
        self.cyl_transform_filter.Update()

        # Check p2c, l2c: if numpy, convert to vtk.
        if isinstance(p2c, np.ndarray):
            p2c = vmu.create_vtk_matrix_from_numpy(p2c)
        if isinstance(l2c, np.ndarray):
            l2c = vmu.create_vtk_matrix_from_numpy(l2c)

        # This is where we apply transforms to each actor.
        # Apply p2c to probe (both the cylinder handle and the probe mesh).
        self.cyl_actor.PokeMatrix(p2c)
        probe_model = self.model_loader.get_surface_model('probe')
        probe_model.actor.PokeMatrix(p2c)
        # Apply l2c to all other organs in the scene.
        for model in self.model_loader.get_surface_models():
            if model.get_name() != 'probe':
                model.actor.PokeMatrix(l2c)

        # Force re-render
        self.overlay.Render()
        self.repaint()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc