• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mkofler96 / DeepSDFStruct / 21151427419

19 Jan 2026 09:05PM UTC coverage: 81.612%. Remained the same
21151427419

Pull #16

github

web-flow
Merge aa7e4fb86 into 5946a7c85
Pull Request #16: [pre-commit.ci] pre-commit autoupdate

349 of 415 branches covered (84.1%)

Branch coverage included in aggregate %.

2860 of 3517 relevant lines covered (81.32%)

0.81 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.8
DeepSDFStruct/optimization.py
1
"""
2
Structural Optimization Utilities
3
==================================
4

5
This module provides tools for gradient-based optimization of SDF-based geometries,
6
with a focus on structural design problems. It integrates with TorchFEM for
7
finite element analysis and provides optimization algorithms suitable for
8
constrained design problems.
9

10
Key Features
11
------------
12

13
MMA Optimizer
14
    Implementation of the Method of Moving Asymptotes (MMA), a gradient-based
15
    algorithm well-suited for structural optimization with nonlinear constraints.
16
    MMA is particularly effective for:
17
    - Topology optimization
18
    - Shape optimization with constraints
19
    - Problems with expensive objective evaluations
20
    - Highly nonlinear design spaces
21

22
Finite Element Integration
23
    - Conversion between TorchFEM and PyVista mesh formats
24
    - Support for tetrahedral and hexahedral elements
25
    - Linear and quadratic element types
26
    - Integration with gradient computation
27

28
Mesh Quality Utilities
29
    - Signed volume computation for tetrahedra
30
    - Mesh quality metrics
31
    - Degeneracy detection
32

33
The module is designed to work seamlessly with differentiable SDF representations,
34
enabling gradient-based optimization of complex 3D structures.
35
"""
36

37
import torchfem.materials
1✔
38
import torchfem.solid
1✔
39
from torchfem.elements import Hexa1, Hexa2, Tetra1, Tetra2
1✔
40
import torch
1✔
41
import numpy as np
1✔
42
from mmapy import mmasub
1✔
43
import pyvista
1✔
44
import logging
1✔
45
import DeepSDFStruct
1✔
46

47
logger = logging.getLogger(DeepSDFStruct.__name__)
1✔
48

49

50
def get_mesh_from_torchfem(Solid: torchfem.Solid) -> pyvista.UnstructuredGrid:
    """Convert a TorchFEM Solid mesh to PyVista UnstructuredGrid.

    This function enables visualization and export of TorchFEM finite element
    meshes using PyVista. It supports both tetrahedral and hexahedral elements
    with linear and quadratic shape functions.

    Parameters
    ----------
    Solid : torchfem.Solid
        TorchFEM solid mesh object containing nodes, elements, and element type.

    Returns
    -------
    pyvista.UnstructuredGrid
        PyVista mesh representation suitable for visualization and I/O.

    Raises
    ------
    NotImplementedError
        If input is not a torchfem.Solid object, or if the element type is
        not one of the supported types listed below.

    Notes
    -----
    Supported element types:
    - Tetra1: 4-node linear tetrahedron
    - Tetra2: 10-node quadratic tetrahedron
    - Hexa1: 8-node linear hexahedron
    - Hexa2: 20-node quadratic hexahedron

    Examples
    --------
    >>> from DeepSDFStruct.optimization import get_mesh_from_torchfem
    >>> import torchfem
    >>>
    >>> # Assume we have a TorchFEM solid mesh
    >>> # solid = torchfem.Solid(...)
    >>>
    >>> # Convert to PyVista for visualization
    >>> pv_mesh = get_mesh_from_torchfem(solid)
    >>> pv_mesh.plot()
    """
    if not isinstance(Solid, torchfem.Solid):
        raise NotImplementedError("Currently only solid mesh is supported.")
    # Map the TorchFEM element type to the corresponding VTK cell type,
    # repeated once per element.
    if isinstance(Solid.etype, Tetra1):
        cell_types = Solid.n_elem * [pyvista.CellType.TETRA]
    elif isinstance(Solid.etype, Tetra2):
        cell_types = Solid.n_elem * [pyvista.CellType.QUADRATIC_TETRA]
    elif isinstance(Solid.etype, Hexa1):
        cell_types = Solid.n_elem * [pyvista.CellType.HEXAHEDRON]
    elif isinstance(Solid.etype, Hexa2):
        cell_types = Solid.n_elem * [pyvista.CellType.QUADRATIC_HEXAHEDRON]
    else:
        # Without this branch an unsupported element type would leave
        # `cell_types` unbound and raise a confusing NameError below.
        raise NotImplementedError(
            f"Unsupported element type: {type(Solid.etype).__name__}"
        )

    # Build the flat VTK connectivity list: each cell is prefixed by its
    # node count, i.e. [n, i0, ..., i_{n-1}, n, j0, ...].
    el = len(Solid.elements[0]) * torch.ones(Solid.n_elem, dtype=Solid.elements.dtype)
    elements = torch.cat([el[:, None], Solid.elements], dim=1).view(-1).tolist()

    # Node positions (current/deformed configuration as stored on the Solid)
    pos = Solid.nodes

    # Create unstructured mesh
    mesh = pyvista.UnstructuredGrid(elements, cell_types, pos.tolist())
    return mesh
114

115

116
def tet_signed_vol(vertices, tets):
    """Compute signed volumes of tetrahedral elements.

    Calculates the signed volume of each tetrahedron, which is positive for
    correctly oriented elements and negative for inverted elements. This is
    useful for detecting mesh degeneracies and enforcing mesh quality constraints.

    Parameters
    ----------
    vertices : torch.Tensor
        Vertex coordinates of shape (N, 3).
    tets : torch.Tensor
        Tetrahedral connectivity of shape (M, 4), where each row contains
        vertex indices [v0, v1, v2, v3].

    Returns
    -------
    torch.Tensor
        Signed volumes of shape (M,), one per tetrahedron. Positive volumes
        indicate correctly oriented elements.

    Notes
    -----
    The signed volume is computed as:
        V = (1/6) * ((v1-v0) × (v2-v0)) · (v3-v0)

    Examples
    --------
    >>> import torch
    >>> from DeepSDFStruct.optimization import tet_signed_vol
    >>>
    >>> # Define a simple tetrahedron
    >>> vertices = torch.tensor([
    ...     [0.0, 0.0, 0.0],
    ...     [1.0, 0.0, 0.0],
    ...     [0.0, 1.0, 0.0],
    ...     [0.0, 0.0, 1.0]
    ... ])
    >>> tets = torch.tensor([[0, 1, 2, 3]])
    >>> volumes = tet_signed_vol(vertices, tets)
    >>> print(f"Volume: {volumes[0]:.3f}")  # Should be 1/6 ≈ 0.167
    """
    # Gather the four corner points of every tetrahedron: each is (M, 3).
    a, b, c, d = (vertices[tets[:, k]] for k in range(4))
    # Edge vectors emanating from the first corner.
    ab = b - a
    ac = c - a
    ad = d - a
    # Scalar triple product (ab × ac) · ad, row-wise, divided by 6 gives
    # the signed volume of each element.
    cross_prod = torch.cross(ab, ac, dim=1)
    return (cross_prod * ad).sum(dim=1) / 6.0
164

165

166
class MMA:
    """Method of Moving Asymptotes (MMA) optimizer for constrained problems.

    MMA is a gradient-based optimization algorithm designed for nonlinear
    constrained problems. It constructs convex subproblems using moving
    asymptotes and is particularly effective for structural optimization.

    The optimizer handles a single objective function and a single constraint,
    with box bounds on design variables. It automatically normalizes the
    objective by its initial value for better numerical behavior.

    Parameters
    ----------
    parameters : torch.Tensor
        Initial design variables (will be optimized in-place).
    bounds : array-like of shape (n, 2)
        Box constraints [[lower_1, upper_1], ..., [lower_n, upper_n]]
        for each design variable.
    max_step : float, default 0.1
        Maximum allowed absolute change in design variables per iteration.
        NOTE(review): the move limit is applied additively to the current
        design, not scaled by the bound range — confirm intended semantics.

    Attributes
    ----------
    parameters : torch.Tensor
        Current design variables (updated in-place each iteration).
    loop : int
        Current iteration number.
    ch : float
        Convergence metric: relative change in design variables from the
        last call to :meth:`step` (1.0 before the first step).
    x : ndarray
        Current design variables in numpy format.
    xold1, xold2 : ndarray
        Design variables from previous two iterations (for MMA history).

    Methods
    -------
    step(F, dF, G, dG)
        Perform one MMA optimization step given objective, constraint,
        and their gradients.

    Notes
    -----
    MMA was developed by Krister Svanberg and is widely used in topology
    optimization. It is particularly effective for problems where:
    - The objective and constraints are expensive to evaluate
    - Gradients are available (via automatic differentiation)
    - The design space is high-dimensional
    - Strong nonlinearity is present

    The implementation uses the mmapy package for the core MMA algorithm.

    Examples
    --------
    >>> import torch
    >>> from DeepSDFStruct.optimization import MMA
    >>>
    >>> # Define design variables
    >>> params = torch.ones(10, requires_grad=True)
    >>> bounds = [[0.0, 2.0]] * 10
    >>>
    >>> # Create optimizer
    >>> optimizer = MMA(params, bounds, max_step=0.1)
    >>>
    >>> # Optimization loop
    >>> for i in range(100):
    ...     # Compute objective and constraint
    ...     objective = (params ** 2).sum()
    ...     constraint = params.sum() - 5.0
    ...
    ...     # Compute gradients
    ...     dF = torch.autograd.grad(objective, params, create_graph=True)[0]
    ...     dG = torch.autograd.grad(constraint, params, create_graph=True)[0]
    ...
    ...     # MMA step
    ...     optimizer.step(objective, dF, constraint, dG)
    ...
    ...     if optimizer.ch < 1e-3:
    ...         break

    References
    ----------
    .. [1] Svanberg, K. (1987). "The method of moving asymptotes—a new method
           for structural optimization." International Journal for Numerical
           Methods in Engineering, 24(2), 359-373.
    .. [2] mmapy: Python implementation of MMA
           https://github.com/arjendeetman/mmapy
    """

    def __init__(self, parameters, bounds, max_step=0.1):
        self.max_step = max_step
        self.bounds = np.array(bounds)
        self.parameters = parameters
        # Single constraint (m=1), n design variables.
        self.m = 1
        self.n = len(parameters)
        self.x = parameters.detach().cpu().numpy()
        # MMA needs the two previous designs to place its asymptotes.
        self.xold1 = parameters.detach().cpu().numpy()
        self.xold2 = parameters.detach().cpu().numpy()
        # Asymptotes; empty lists signal mmasub to initialize them itself.
        self.low = []
        self.upp = []
        # Standard MMA subproblem coefficients (see Svanberg 1987):
        # large c penalizes constraint violation in the subproblem.
        self.a0_MMA = 1
        self.a_MMA = np.zeros((self.m, 1))
        self.c_MMA = 10000 * np.ones((self.m, 1))
        self.d_MMA = np.zeros((self.m, 1))

        self.loop = 0
        self.ch = 1.0
        # Initial objective value, captured on first step() for normalization.
        self.F0 = None

    def step(self, F, dF, G, dG):
        """Perform one MMA optimization step.

        Updates design variables by solving a convex subproblem constructed
        from the objective, constraint, and their gradients.

        Parameters
        ----------
        F : torch.Tensor
            Objective function value at current design.
        dF : torch.Tensor
            Gradient of objective w.r.t. design variables, shape (n,).
        G : torch.Tensor
            Constraint function value at current design (≤ 0 is feasible).
        dG : torch.Tensor
            Gradient of constraint w.r.t. design variables, shape (n,).

        Notes
        -----
        The method automatically:
        - Normalizes the objective by its initial value
        - Enforces move limits based on max_step
        - Updates MMA history (xold1, xold2)
        - Computes, stores (as ``self.ch``) and logs the convergence metric
        - Updates self.parameters in-place

        The convergence metric ch is the relative change in design variables.
        """
        F_np = F.detach().cpu().numpy().reshape(-1, 1)
        dFdx_np = dF.detach().cpu().numpy().reshape(-1, 1)
        G_np = G.detach().cpu().numpy().reshape(-1, 1)
        dGdx_np = dG.detach().cpu().numpy().reshape(1, -1)
        # Normalize the objective by its initial value so mmasub sees
        # O(1) magnitudes regardless of problem scaling.
        if self.loop == 0:
            self.F0 = F_np
        F_np = F_np / self.F0
        dFdx_np = dFdx_np / self.F0

        # Move limits: clamp the per-iteration step to max_step while
        # respecting the box bounds.
        xmin = np.maximum(self.x - self.max_step, self.bounds[:, 0].reshape(-1, 1))
        xmax = np.minimum(self.x + self.max_step, self.bounds[:, 1].reshape(-1, 1))
        self.loop = self.loop + 1
        xmma, ymma, zmma, lam, xsi, eta, muMMA, zet, s, low, upp = mmasub(
            self.m,
            self.n,
            self.loop,
            self.x,
            xmin,
            xmax,
            self.xold1,
            self.xold2,
            F_np,
            dFdx_np,
            G_np,
            dGdx_np,
            self.low,
            self.upp,
            self.a0_MMA,
            self.a_MMA,
            self.c_MMA,
            self.d_MMA,
        )

        # Shift design history and carry the updated asymptotes forward.
        self.xold2 = self.xold1.copy()
        self.xold1 = self.x.copy()
        self.x = xmma
        self.upp = upp
        self.low = low

        # Relative design change; stored so callers can test convergence
        # via self.ch (as shown in the class docstring example).
        ch = np.abs(np.mean(self.x.T - self.xold1.T) / np.mean(self.x.T))
        self.ch = ch
        # Push the new design back into the torch parameter tensor without
        # tracking gradients for the copy itself.
        with torch.no_grad():
            self.parameters.copy_(
                torch.tensor(
                    xmma, dtype=self.parameters.dtype, device=self.parameters.device
                )
            )
        logger.info(
            "It.: {0:4} | J.: {1:1.3e} | Constr.:  {2:1.3e} | ch.: {3:1.3e}".format(
                self.loop, F_np[0][0], G_np[0][0], ch
            )
        )
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc