KarlNaumann / MacroStat / 21241273980

22 Jan 2026 08:24AM UTC coverage: 96.528% (+0.09%) from 96.437%

Build 21241273980 · push · github · web-flow
Differentiation tools (#61)

* Add core autodiff and numerical Jacobian infrastructure

* Add LINEAR2D testing model following standard Behavior pattern

* Make apply_parameter_shocks robust to missing vector_sectors

* Add LINEAR2D-based tests for diff Jacobian tools

* Document diff tools and LINEAR2D testing model

301 of 306 branches covered (98.37%)

Branch coverage included in aggregate %.

226 of 244 new or added lines in 11 files covered (92.62%)

2 existing lines in 1 file now uncovered.

2090 of 2171 relevant lines covered (96.27%)

0.96 hits per line

Source File

/src/macrostat/diff/jacobian_numerical.py (89.09% covered)
"""
Numerical (finite-difference) Jacobian computation for MacroStat models.

This module approximates the gradient of a scalar loss with respect to the
parameters of a model's Behavior module using finite differences.
"""

from __future__ import annotations

from typing import Callable, Dict, Literal

import torch

from macrostat.diff.jacobian_base import JacobianBase

LossFn = Callable[[Dict[str, torch.Tensor]], torch.Tensor]


class JacobianNumerical(JacobianBase):
    """
    Compute Jacobians via finite differences.

    Notes
    -----
    - Currently supports differentiation with respect to **parameters only**.
    - The loss function must return a **scalar tensor**.
    """

    def __init__(self, model, scenario: int | str = 0, epsilon: float = 1e-5):
        super().__init__(model=model, scenario=scenario)
        self.epsilon = float(epsilon)

    def compute(
        self,
        loss_fn: LossFn,
        mode: Literal["central", "forward", "backward"] = "central",
    ) -> Dict[str, torch.Tensor]:
        """
        Compute the numerical Jacobian of the loss w.r.t. model parameters.

        Parameters
        ----------
        loss_fn :
            A callable that takes the output dictionary from the model
            (as returned by ``Behavior.forward``) and returns a **scalar**
            ``torch.Tensor`` loss.
        mode :
            Finite-difference scheme to use:

            - ``"central"``: central differences (default).
            - ``"forward"``: forward differences.
            - ``"backward"``: backward differences.

        Returns
        -------
        dict[str, torch.Tensor]
            A dictionary mapping parameter names to gradient tensors of the
            same shape as the corresponding parameters.
        """
        if mode not in {"central", "forward", "backward"}:
            raise ValueError(
                f"Unsupported mode '{mode}'. Use 'central', 'forward', or 'backward'."
            )

        behavior, base_params = self._get_behavior_and_params()

        def compute_loss(params: Dict[str, torch.Tensor]) -> torch.Tensor:
            # We re-use the same behavior instance but override parameters
            from torch.func import functional_call

            with torch.no_grad():
                output = functional_call(behavior, params, ())
                loss = loss_fn(output)
                if loss.ndim != 0:
                    raise ValueError(
                        "JacobianNumerical currently expects loss_fn to return a scalar "
                        f"tensor, but got shape {tuple(loss.shape)}."
                    )
                return loss

        epsilon = self.epsilon

        # Evaluate base loss once
        base_loss = compute_loss(base_params)

        jacobian: Dict[str, torch.Tensor] = {}

        for name, p in base_params.items():
            # Work on a flattened view for simplicity
            p_flat = p.detach().clone().reshape(-1)
            grad_flat = torch.empty_like(p_flat)

            for i in range(p_flat.numel()):
                # Prepare perturbed parameter vectors
                if mode in {"central", "forward"}:
                    p_pos = p_flat.clone()
                    p_pos[i] += epsilon
                    params_pos = dict(base_params)
                    params_pos[name] = p_pos.reshape_as(p)
                    loss_pos = compute_loss(params_pos)

                if mode in {"central", "backward"}:
                    p_neg = p_flat.clone()
                    p_neg[i] -= epsilon
                    params_neg = dict(base_params)
                    params_neg[name] = p_neg.reshape_as(p)
                    loss_neg = compute_loss(params_neg)

                if mode == "central":
                    grad_flat[i] = (loss_pos - loss_neg) / (2.0 * epsilon)
                elif mode == "forward":
                    grad_flat[i] = (loss_pos - base_loss) / epsilon
                else:  # backward
                    grad_flat[i] = (base_loss - loss_neg) / epsilon

            jacobian[name] = grad_flat.reshape_as(p)

        return jacobian
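
As a usage illustration (not part of the covered file), the sketch below drives JacobianNumerical end to end. The LINEAR2D import path, its constructor signature, and the "output" key used in the loss are illustrative assumptions, not confirmed MacroStat API.

import torch

from macrostat.diff.jacobian_numerical import JacobianNumerical
# Hypothetical: LINEAR2D is the testing model mentioned in the commit, but
# this import path and constructor signature are assumed, not confirmed.
from macrostat.models.linear2d import LINEAR2D

model = LINEAR2D()

def loss_fn(output):
    # "output" is an assumed key; use whichever series the Behavior returns.
    return output["output"].pow(2).sum()

jac = JacobianNumerical(model, scenario=0, epsilon=1e-5)
grads = jac.compute(loss_fn, mode="central")

for name, grad in grads.items():
    print(name, tuple(grad.shape))

Each gradient tensor has the same shape as its parameter, so the result can be compared entry by entry against a Jacobian produced by the autodiff infrastructure added in the same commit.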
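
Independent of MacroStat, the central-difference scheme used above can be sanity-checked against PyTorch autograd on a toy quadratic; this standalone snippet uses only standard torch calls.

import torch

def f(x: torch.Tensor) -> torch.Tensor:
    # Toy scalar loss with known gradient 2 * x.
    return (x ** 2).sum()

x = torch.tensor([1.0, -2.0, 3.0], requires_grad=True)
f(x).backward()
autograd_grad = x.grad

eps = 1e-5
fd_grad = torch.empty_like(x)
with torch.no_grad():
    for i in range(x.numel()):
        x_pos, x_neg = x.clone(), x.clone()
        x_pos[i] += eps
        x_neg[i] -= eps
        # Central difference: (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps)
        fd_grad[i] = (f(x_pos) - f(x_neg)) / (2.0 * eps)

print(torch.allclose(autograd_grad, fd_grad, atol=1e-4))  # expect: True

Central differences need two extra evaluations per coordinate but have O(eps^2) truncation error, whereas the forward and backward modes reuse the single base evaluation at O(eps) accuracy.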