• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mkofler96 / DeepSDFStruct / 19530884105

20 Nov 2025 08:46AM UTC coverage: 82.144% (-0.09%) from 82.235%
19530884105

push

github

mkofler96
Address PR review feedback: remove hardcoded version, fix cap_border_dict description, add mmapy reference, update fallback version to 'latest', remove deformation mapping bullet

Co-authored-by: mkofler96 <18218171+mkofler96@users.noreply.github.com>

324 of 385 branches covered (84.16%)

Branch coverage included in aggregate %.

2749 of 3356 relevant lines covered (81.91%)

0.82 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.8
DeepSDFStruct/optimization.py
1
"""
2
Structural Optimization Utilities
3
==================================
4

5
This module provides tools for gradient-based optimization of SDF-based geometries,
6
with a focus on structural design problems. It integrates with TorchFEM for
7
finite element analysis and provides optimization algorithms suitable for
8
constrained design problems.
9

10
Key Features
11
------------
12

13
MMA Optimizer
14
    Implementation of the Method of Moving Asymptotes (MMA), a gradient-based
15
    algorithm well-suited for structural optimization with nonlinear constraints.
16
    MMA is particularly effective for:
17
    - Topology optimization
18
    - Shape optimization with constraints
19
    - Problems with expensive objective evaluations
20
    - Highly nonlinear design spaces
21

22
Finite Element Integration
23
    - Conversion between TorchFEM and PyVista mesh formats
24
    - Support for tetrahedral and hexahedral elements
25
    - Linear and quadratic element types
26
    - Integration with gradient computation
27

28
Mesh Quality Utilities
29
    - Signed volume computation for tetrahedra
30
    - Mesh quality metrics
31
    - Degeneracy detection
32

33
The module is designed to work seamlessly with differentiable SDF representations,
34
enabling gradient-based optimization of complex 3D structures.
35
"""
36

37
import torchfem.materials
1✔
38
import torchfem.solid
1✔
39
from torchfem.elements import Hexa1, Hexa2, Tetra1, Tetra2
1✔
40
import torch
1✔
41
import numpy as np
1✔
42
from mmapy import mmasub
1✔
43
import pyvista
1✔
44
import logging
1✔
45
import DeepSDFStruct
1✔
46

47

48
# Package-level logger: keyed on the DeepSDFStruct package name so all
# submodules emit under one configurable logging namespace.
logger = logging.getLogger(DeepSDFStruct.__name__)
49

50

51
def get_mesh_from_torchfem(Solid: torchfem.Solid) -> pyvista.UnstructuredGrid:
    """Convert a TorchFEM Solid mesh to PyVista UnstructuredGrid.

    This function enables visualization and export of TorchFEM finite element
    meshes using PyVista. It supports both tetrahedral and hexahedral elements
    with linear and quadratic shape functions.

    Parameters
    ----------
    Solid : torchfem.Solid
        TorchFEM solid mesh object containing nodes, elements, and element type.

    Returns
    -------
    pyvista.UnstructuredGrid
        PyVista mesh representation suitable for visualization and I/O.

    Raises
    ------
    NotImplementedError
        If input is not a torchfem.Solid object, or if the element type is
        not one of the supported types listed below.

    Notes
    -----
    Supported element types:
    - Tetra1: 4-node linear tetrahedron
    - Tetra2: 10-node quadratic tetrahedron
    - Hexa1: 8-node linear hexahedron
    - Hexa2: 20-node quadratic hexahedron

    Examples
    --------
    >>> from DeepSDFStruct.optimization import get_mesh_from_torchfem
    >>> import torchfem
    >>>
    >>> # Assume we have a TorchFEM solid mesh
    >>> # solid = torchfem.Solid(...)
    >>>
    >>> # Convert to PyVista for visualization
    >>> pv_mesh = get_mesh_from_torchfem(solid)
    >>> pv_mesh.plot()
    """
    if not isinstance(Solid, torchfem.Solid):
        raise NotImplementedError("Currently only solid mesh is supported.")
    # Map the TorchFEM element type to the corresponding VTK cell type,
    # replicated once per element as PyVista expects.
    if isinstance(Solid.etype, Tetra1):
        cell_types = Solid.n_elem * [pyvista.CellType.TETRA]
    elif isinstance(Solid.etype, Tetra2):
        cell_types = Solid.n_elem * [pyvista.CellType.QUADRATIC_TETRA]
    elif isinstance(Solid.etype, Hexa1):
        cell_types = Solid.n_elem * [pyvista.CellType.HEXAHEDRON]
    elif isinstance(Solid.etype, Hexa2):
        cell_types = Solid.n_elem * [pyvista.CellType.QUADRATIC_HEXAHEDRON]
    else:
        # Previously an unsupported etype fell through and left `cell_types`
        # unbound, producing an opaque NameError below. Fail explicitly.
        raise NotImplementedError(
            f"Unsupported element type: {type(Solid.etype).__name__}. "
            "Supported types: Tetra1, Tetra2, Hexa1, Hexa2."
        )

    # VTK element list: each cell is prefixed with its node count, then the
    # whole array is flattened to the VTK legacy connectivity format.
    el = len(Solid.elements[0]) * torch.ones(Solid.n_elem, dtype=Solid.elements.dtype)
    elements = torch.cat([el[:, None], Solid.elements], dim=1).view(-1).tolist()

    # Node positions (current/deformed configuration as stored on the Solid)
    pos = Solid.nodes

    # Create unstructured mesh
    mesh = pyvista.UnstructuredGrid(elements, cell_types, pos.tolist())
    return mesh
115

116

117
def tet_signed_vol(vertices, tets):
    """Compute signed volumes of tetrahedral elements.

    The signed volume is positive for correctly oriented tetrahedra and
    negative for inverted ones, making it useful for detecting mesh
    degeneracies and enforcing mesh quality constraints.

    Parameters
    ----------
    vertices : torch.Tensor
        Vertex coordinates of shape (N, 3).
    tets : torch.Tensor
        Tetrahedral connectivity of shape (M, 4); each row holds vertex
        indices [v0, v1, v2, v3].

    Returns
    -------
    torch.Tensor
        Signed volumes of shape (M,), one per tetrahedron. Positive volumes
        indicate correctly oriented elements.

    Notes
    -----
    Uses the standard formula
        V = (1/6) * ((v1-v0) x (v2-v0)) . (v3-v0)

    Examples
    --------
    >>> import torch
    >>> from DeepSDFStruct.optimization import tet_signed_vol
    >>>
    >>> vertices = torch.tensor([
    ...     [0.0, 0.0, 0.0],
    ...     [1.0, 0.0, 0.0],
    ...     [0.0, 1.0, 0.0],
    ...     [0.0, 0.0, 1.0]
    ... ])
    >>> tets = torch.tensor([[0, 1, 2, 3]])
    >>> volumes = tet_signed_vol(vertices, tets)
    >>> print(f"Volume: {volumes[0]:.3f}")  # Should be 1/6 ≈ 0.167
    """
    # Gather the four corner points of every tetrahedron at once.
    p0, p1, p2, p3 = (vertices[tets[:, k]] for k in range(4))

    # Edge vectors emanating from the first corner.
    e1 = p1 - p0
    e2 = p2 - p0
    e3 = p3 - p0

    # Scalar triple product (e1 x e2) . e3, batched over rows, divided by 6.
    normal = torch.cross(e1, e2, dim=1)
    return (normal * e3).sum(dim=1) / 6.0
165

166

167
class MMA:
    """Method of Moving Asymptotes (MMA) optimizer for constrained problems.

    MMA is a gradient-based optimization algorithm designed for nonlinear
    constrained problems. It constructs convex subproblems using moving
    asymptotes and is particularly effective for structural optimization.

    The optimizer handles a single objective function and a single constraint,
    with box bounds on design variables. It automatically normalizes the
    objective by its initial value for better numerical behavior.

    Parameters
    ----------
    parameters : torch.Tensor
        Initial design variables (will be optimized in-place).
    bounds : array-like of shape (n, 2)
        Box constraints [[lower_1, upper_1], ..., [lower_n, upper_n]]
        for each design variable.
    max_step : float, default 0.1
        Maximum allowed change in design variables per iteration,
        as a fraction of the bound range.

    Attributes
    ----------
    parameters : torch.Tensor
        Current design variables (updated in-place each iteration).
    loop : int
        Current iteration number.
    ch : float
        Convergence metric: relative change in design variables after the
        most recent call to :meth:`step` (1.0 before the first step).
    x : ndarray
        Current design variables in numpy format.
    xold1, xold2 : ndarray
        Design variables from previous two iterations (for MMA history).

    Methods
    -------
    step(F, dF, G, dG)
        Perform one MMA optimization step given objective, constraint,
        and their gradients.

    Notes
    -----
    MMA was developed by Krister Svanberg and is widely used in topology
    optimization. It is particularly effective for problems where:
    - The objective and constraints are expensive to evaluate
    - Gradients are available (via automatic differentiation)
    - The design space is high-dimensional
    - Strong nonlinearity is present

    The implementation uses the mmapy package for the core MMA algorithm.

    Examples
    --------
    >>> import torch
    >>> from DeepSDFStruct.optimization import MMA
    >>>
    >>> # Define design variables
    >>> params = torch.ones(10, requires_grad=True)
    >>> bounds = [[0.0, 2.0]] * 10
    >>>
    >>> # Create optimizer
    >>> optimizer = MMA(params, bounds, max_step=0.1)
    >>>
    >>> # Optimization loop
    >>> for i in range(100):
    ...     # Compute objective and constraint
    ...     objective = (params ** 2).sum()
    ...     constraint = params.sum() - 5.0
    ...
    ...     # Compute gradients
    ...     dF = torch.autograd.grad(objective, params, create_graph=True)[0]
    ...     dG = torch.autograd.grad(constraint, params, create_graph=True)[0]
    ...
    ...     # MMA step
    ...     optimizer.step(objective, dF, constraint, dG)
    ...
    ...     if optimizer.ch < 1e-3:
    ...         break

    References
    ----------
    .. [1] Svanberg, K. (1987). "The method of moving asymptotes—a new method
           for structural optimization." International Journal for Numerical
           Methods in Engineering, 24(2), 359-373.
    .. [2] mmapy: Python implementation of MMA
           https://github.com/arjendeetman/mmapy
    """

    def __init__(self, parameters, bounds, max_step=0.1):
        self.max_step = max_step
        self.bounds = np.array(bounds)
        self.parameters = parameters
        # One constraint (m) and n design variables, per mmasub's interface.
        self.m = 1
        self.n = len(parameters)
        self.x = parameters.detach().cpu().numpy()
        # MMA uses the two previous iterates to place the moving asymptotes.
        self.xold1 = parameters.detach().cpu().numpy()
        self.xold2 = parameters.detach().cpu().numpy()
        self.low = []
        self.upp = []
        # Standard MMA subproblem coefficients (a0, a, c, d) — large c
        # penalizes constraint violation in the subproblem.
        self.a0_MMA = 1
        self.a_MMA = np.zeros((self.m, 1))
        self.c_MMA = 10000 * np.ones((self.m, 1))
        self.d_MMA = np.zeros((self.m, 1))

        self.loop = 0
        self.ch = 1.0
        # Initial objective value, captured on the first step for normalization.
        self.F0 = None

    def step(self, F, dF, G, dG):
        """Perform one MMA optimization step.

        Updates design variables by solving a convex subproblem constructed
        from the objective, constraint, and their gradients.

        Parameters
        ----------
        F : torch.Tensor
            Objective function value at current design.
        dF : torch.Tensor
            Gradient of objective w.r.t. design variables, shape (n,).
        G : torch.Tensor
            Constraint function value at current design (≤ 0 is feasible).
        dG : torch.Tensor
            Gradient of constraint w.r.t. design variables, shape (n,).

        Notes
        -----
        The method automatically:
        - Normalizes the objective by its initial value
        - Enforces move limits based on max_step
        - Updates MMA history (xold1, xold2)
        - Computes, stores (self.ch), and logs the convergence metric
        - Updates self.parameters in-place

        The convergence metric ch is the relative change in design variables.
        """
        orig_shape = dF.shape  # NOTE(review): currently unused downstream; kept for parity
        F_np = F.detach().cpu().numpy().reshape(-1, 1)
        dFdx_np = dF.detach().cpu().numpy().reshape(-1, 1)
        G_np = G.detach().cpu().numpy().reshape(-1, 1)
        dGdx_np = dG.detach().cpu().numpy().reshape(1, -1)
        # Normalize the objective by its first observed value so mmasub sees
        # an objective of order 1 regardless of problem scaling.
        if self.loop == 0:
            self.F0 = F_np
        F_np = F_np / self.F0
        dFdx_np = dFdx_np / self.F0

        # Move limits: clip the per-iteration box to max_step around the
        # current point, intersected with the global bounds.
        xmin = np.maximum(self.x - self.max_step, self.bounds[:, 0].reshape(-1, 1))
        xmax = np.minimum(self.x + self.max_step, self.bounds[:, 1].reshape(-1, 1))
        self.loop = self.loop + 1
        xmma, ymma, zmma, lam, xsi, eta, muMMA, zet, s, low, upp = mmasub(
            self.m,
            self.n,
            self.loop,
            self.x,
            xmin,
            xmax,
            self.xold1,
            self.xold2,
            F_np,
            dFdx_np,
            G_np,
            dGdx_np,
            self.low,
            self.upp,
            self.a0_MMA,
            self.a_MMA,
            self.c_MMA,
            self.d_MMA,
        )

        # Shift iterate history and accept the subproblem solution.
        self.xold2 = self.xold1.copy()
        self.xold1 = self.x.copy()
        self.x = xmma
        self.upp = upp
        self.low = low

        # Relative change in the design variables.
        # NOTE(review): divides by mean(x) — degenerate if the mean is ~0.
        ch = np.abs(np.mean(self.x.T - self.xold1.T) / np.mean(self.x.T))
        # Store the metric so callers can test `optimizer.ch` for convergence
        # (previously computed but never written back, so self.ch stayed 1.0).
        self.ch = ch
        with torch.no_grad():
            self.parameters.copy_(
                torch.tensor(
                    xmma,
                    dtype=self.parameters.dtype,
                    device=self.parameters.device,
                )
            )
        logger.info(
            "It.: {0:4} | J.: {1:1.3e} | Constr.:  {2:1.3e} | ch.: {3:1.3e}".format(
                self.loop, F_np[0][0], G_np[0][0], ch
            )
        )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc