KarlNaumann / MacroStat / 21241273980

22 Jan 2026 08:24AM UTC coverage: 96.528% (+0.09%) from 96.437%

push · github · web-flow
Differentiation tools (#61)

* Add core autodiff and numerical Jacobian infrastructure

* Add LINEAR2D testing model following standard Behavior pattern

* Make apply_parameter_shocks robust to missing vector_sectors

* Add LINEAR2D-based tests for diff Jacobian tools

* Document diff tools and LINEAR2D testing model

301 of 306 branches covered (98.37%)

Branch coverage included in aggregate %.

226 of 244 new or added lines in 11 files covered (92.62%)

2 existing lines in 1 file now uncovered.

2090 of 2171 relevant lines covered (96.27%)

0.96 hits per line

Source File

/src/macrostat/diff/checker.py (file coverage: 86.46%)
"""
High-level differentiability checks for MacroStat models.

This module provides a small API to compare:

- Reverse-mode vs forward-mode autograd Jacobians, and
- Autograd vs numerical finite-difference Jacobians

for a user-specified scalar loss function of model outputs.
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import Callable, Dict, Literal, Optional

import torch

from macrostat.diff.jacobian_autograd import JacobianAutograd
from macrostat.diff.jacobian_numerical import JacobianNumerical

LossFn = Callable[[Dict[str, torch.Tensor]], torch.Tensor]


@dataclass
class DifferentiabilityReport:
    """Summary of differentiability checks."""

    passed: bool
    nan_or_inf: bool
    fwd_vs_rev_ok: Optional[bool]
    autodiff_vs_numerical_ok: Optional[bool]
    max_abs_diff_fwd_rev: Optional[float]
    max_abs_diff_autodiff_num: Optional[float]
    rel_err_fwd_rev: Optional[float]
    rel_err_autodiff_num: Optional[float]
    details: Dict[str, dict]

    def summary(self) -> str:
        """Return a short human-readable summary."""
        lines = []
        lines.append(f"Passed: {self.passed}")
        lines.append(f"NaN/Inf gradients: {self.nan_or_inf}")
        if self.fwd_vs_rev_ok is not None:
            lines.append(
                f"Forward vs reverse: rel≈{self.rel_err_fwd_rev:.3e} "
                f"(max abs diff={self.max_abs_diff_fwd_rev})"
            )
        if self.autodiff_vs_numerical_ok is not None:
            lines.append(
                f"Autograd vs numerical: rel≈{self.rel_err_autodiff_num:.3e} "
                f"(max abs diff={self.max_abs_diff_autodiff_num})"
            )
        return "\n".join(lines)


def _max_abs_diff_dict(
    a: Dict[str, torch.Tensor],
    b: Dict[str, torch.Tensor],
) -> float:
    """Compute the maximum absolute difference between two grad dicts."""
    max_diff = 0.0
    for name in a.keys() & b.keys():
        diff = (a[name] - b[name]).abs().max().item()
        max_diff = max(max_diff, float(diff))
    return max_diff


def _max_abs_val_dict(
    grads: Dict[str, torch.Tensor],
) -> float:
    """Maximum absolute value across all gradients in a dict."""
    max_val = 0.0
    for g in grads.values():
        if g.numel() == 0:
            continue
        val = g.abs().max().item()
        max_val = max(max_val, float(val))
    return max_val


def _has_nan_or_inf(grads: Dict[str, torch.Tensor]) -> bool:
    """Return True if any gradient tensor contains NaN or Inf entries."""
    for g in grads.values():
        if not torch.isfinite(g).all():
            return True
    return False


def check_model_differentiability(
    model,
    loss_fn: LossFn,
    scenario: int | str = 0,
    target: Literal["parameters"] = "parameters",
    rtol: float = 1e-5,
    atol: float = 1e-8,
    compare_forward_reverse: bool = True,
    compare_numerical: bool = True,
    numerical_mode: Literal["central", "forward", "backward"] = "central",
    epsilon: float = 1e-5,
    raise_on_failure: Optional[bool] = None,
) -> DifferentiabilityReport:
    """
    Run a suite of differentiability checks on a MacroStat model.

    Parameters
    ----------
    model :
        MacroStat model instance.
    loss_fn :
        Scalar loss function of the model outputs (dict[str, torch.Tensor]).
    scenario :
        Scenario index or name to use.
    target :
        Currently only ``"parameters"`` is supported (placeholder for future targets).
    rtol, atol :
        Relative and absolute tolerances for comparisons.
    compare_forward_reverse :
        If True, compare reverse-mode and forward-mode autograd gradients.
    compare_numerical :
        If True, compare autograd gradients to numerical finite-difference gradients.
    numerical_mode :
        Finite-difference scheme to use when comparing against numerical.
    epsilon :
        Step size for finite differences.
    raise_on_failure :
        If True and checks fail, raise a RuntimeError instead of just returning
        the report. If False or None (the default), never raise.
    """
    if target != "parameters":
        raise NotImplementedError(
            "Only target='parameters' is supported at the moment."
        )

    details: Dict[str, dict] = {}

    # ------------------------------------------------------------------
    # Autograd (reverse-mode) baseline
    # ------------------------------------------------------------------
    auto = JacobianAutograd(model=model, scenario=scenario)
    grads_rev = auto.compute(loss_fn=loss_fn, mode="rev")
    nan_or_inf = _has_nan_or_inf(grads_rev)
    details["autograd_rev"] = {"nan_or_inf": nan_or_inf}

    # ------------------------------------------------------------------
    # Forward vs reverse comparison
    # ------------------------------------------------------------------
    fwd_vs_rev_ok: Optional[bool] = None
    max_abs_diff_fwd_rev: Optional[float] = None
    rel_err_fwd_rev: Optional[float] = None

    if compare_forward_reverse:
        grads_fwd = auto.compute(loss_fn=loss_fn, mode="fwd")
        max_abs_diff_fwd_rev = _max_abs_diff_dict(grads_rev, grads_fwd)
        # Scale tolerance by the typical gradient magnitude
        scale = max(
            _max_abs_val_dict(grads_rev),
            _max_abs_val_dict(grads_fwd),
            1.0,
        )
        rel_err_fwd_rev = max_abs_diff_fwd_rev / scale
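        # Allclose-style acceptance test on the worst-case discrepancy: the
        # forward/reverse check passes when max|g_rev - g_fwd| <= atol + rtol * scale,
        # where scale is the largest gradient magnitude seen (floored at 1.0).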
        fwd_vs_rev_ok = max_abs_diff_fwd_rev <= (atol + rtol * scale)
        details["autograd_fwd"] = {
            "max_abs_diff_fwd_rev": max_abs_diff_fwd_rev,
            "ok": fwd_vs_rev_ok,
            "rel_err": rel_err_fwd_rev,
        }

    # ------------------------------------------------------------------
    # Numerical comparison
    # ------------------------------------------------------------------
    autodiff_vs_numerical_ok: Optional[bool] = None
    max_abs_diff_autodiff_num: Optional[float] = None
    rel_err_autodiff_num: Optional[float] = None

    if compare_numerical:
        num = JacobianNumerical(model=model, scenario=scenario, epsilon=epsilon)
        grads_num = num.compute(loss_fn=loss_fn, mode=numerical_mode)
        max_abs_diff_autodiff_num = _max_abs_diff_dict(grads_rev, grads_num)
        scale = max(
            _max_abs_val_dict(grads_rev),
            _max_abs_val_dict(grads_num),
            1.0,
        )
        rel_err_autodiff_num = max_abs_diff_autodiff_num / scale
        autodiff_vs_numerical_ok = max_abs_diff_autodiff_num <= (atol + rtol * scale)
        details["numerical"] = {
            "max_abs_diff_autodiff_num": max_abs_diff_autodiff_num,
            "ok": autodiff_vs_numerical_ok,
            "rel_err": rel_err_autodiff_num,
        }

    # ------------------------------------------------------------------
    # Overall status
    # ------------------------------------------------------------------
    passed_checks = [not nan_or_inf]
    if fwd_vs_rev_ok is not None:
        passed_checks.append(fwd_vs_rev_ok)
    if autodiff_vs_numerical_ok is not None:
        passed_checks.append(autodiff_vs_numerical_ok)

    passed = all(passed_checks)

    report = DifferentiabilityReport(
        passed=passed,
        nan_or_inf=nan_or_inf,
        fwd_vs_rev_ok=fwd_vs_rev_ok,
        autodiff_vs_numerical_ok=autodiff_vs_numerical_ok,
        max_abs_diff_fwd_rev=max_abs_diff_fwd_rev,
        max_abs_diff_autodiff_num=max_abs_diff_autodiff_num,
        rel_err_fwd_rev=rel_err_fwd_rev,
        rel_err_autodiff_num=rel_err_autodiff_num,
        details=details,
    )

    if raise_on_failure and not passed:
        raise RuntimeError(f"Differentiability check failed:\n{report.summary()}")

    return report
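
Usage sketch (illustrative, not part of checker.py): the snippet below shows how the pieces above fit together. Only the import path, the call signature of check_model_differentiability, and the DifferentiabilityReport attributes come from the code above; the model construction is elided because it depends on the concrete MacroStat model, and the quadratic loss is just one possible LossFn.

import torch

from macrostat.diff.checker import check_model_differentiability


def quadratic_loss(outputs: dict[str, torch.Tensor]) -> torch.Tensor:
    # Any scalar function of the model-output dict satisfies LossFn;
    # here we sum the squared entries of every output tensor.
    return sum(v.pow(2).sum() for v in outputs.values())


model = ...  # construct the MacroStat model to be checked (model-specific)

report = check_model_differentiability(
    model,
    loss_fn=quadratic_loss,
    scenario=0,
    numerical_mode="central",
    raise_on_failure=False,
)

print(report.summary())
if not report.passed:
    print(report.details)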