• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

mkofler96 / DeepSDFStruct / 21431967093

28 Jan 2026 09:07AM UTC coverage: 81.582% (+0.1%) from 81.481%
21431967093

Pull #22

github

web-flow
Merge af7185ae7 into baad0e0f1
Pull Request #22: Add module-level docstrings across codebase

361 of 427 branches covered (84.54%)

Branch coverage included in aggregate %.

2961 of 3645 relevant lines covered (81.23%)

0.81 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

54.55
DeepSDFStruct/deep_sdf/plotting.py
1
"""
2
Training Visualization and Logging
3
==================================
4

5
This module provides utilities for visualizing DeepSDF training progress
6
and generating plots of training metrics.
7

8
Functions
9
---------
10

11
plot_logs
12
    Plot training loss curves with smoothing and optional learning rate overlay.
13
    Useful for monitoring training progress and diagnosing convergence issues.
14

15
plot_reconstruction_loss
16
    Visualize loss during shape reconstruction, showing how well the model
17
    fits to target geometries.
18

19
extract_paths
20
    Helper for extracting file paths from nested data structures.
21

22
running_mean
23
    Compute running average for smoothing noisy loss curves.
24

25
The module integrates with the workspace utilities to load training logs
26
and generate publication-quality plots for analysis and presentation.
27
"""
28

29
import numpy as np
1✔
30
import os
1✔
31
import logging
1✔
32
import torch
1✔
33
import matplotlib.pyplot as plt
1✔
34

35
import DeepSDFStruct.deep_sdf.workspace as ws
1✔
36

37

38
def extract_paths(data, current_path=""):
    """Recursively flatten a nested dict/list structure into path strings.

    Each leaf value becomes ``"<path>/<value>"``, where ``<path>`` is the
    chain of dict keys leading to that leaf, joined by ``/``.  Lists do not
    contribute a path segment; their items share the parent's path.
    """
    if isinstance(data, dict):
        collected = []
        for key, value in data.items():
            prefix = f"{current_path}/{key}" if current_path else key
            collected.extend(extract_paths(value, prefix))
        return collected

    if isinstance(data, list):
        collected = []
        for item in data:
            collected.extend(extract_paths(item, current_path))
        return collected

    # Leaf: emit the accumulated path plus the value itself.
    return [f"{current_path}/{data}"]
54

55

56
def running_mean(x, N):
    """Return the running (moving) average of ``x`` over a window of ``N``.

    The output has ``len(x) - N + 1`` samples; computed via a cumulative-sum
    trick so each window average costs O(1) after one pass over the data.
    """
    padded = np.insert(x, 0, 0)
    totals = np.cumsum(padded)
    window_sums = totals[N:] - totals[:-N]
    return window_sums / float(N)
59

60

61
def plot_logs(experiment_directory, show_lr=False, ax=None, filename=None):
    """Plot training loss (raw and smoothed) from an experiment's saved logs.

    Parameters
    ----------
    experiment_directory : str
        Directory containing the training log file (``ws.logs_filename``).
    show_lr : bool
        If True, also plot the decoder / latent-vector learning rates on a
        second axis.
    ax : matplotlib axes or None
        Axes to draw into.  If None, a new figure is created.  When
        ``show_lr`` is True and ``ax`` is supplied, it must be an indexable
        pair of axes.
    filename : str or None
        If given, save the figure to this path; otherwise the figure is
        shown interactively (only when this function created it).
    """
    logs = torch.load(os.path.join(experiment_directory, ws.logs_filename))

    logging.info("latest epoch is {}".format(logs["epoch"]))

    num_iters = len(logs["loss"])
    # NOTE(review): raises ZeroDivisionError if logs["epoch"] is 0 — assumes
    # at least one completed epoch.
    iters_per_epoch = num_iters / logs["epoch"]

    logging.info("{} iters per epoch".format(iters_per_epoch))

    # A 41-wide running mean is 40 samples shorter than the raw series, hence
    # the x-range np.arange(20, num_iters - 20) below (20 trimmed per side).
    smoothed_loss_41 = running_mean(logs["loss"], 41)

    show_plt = False

    if show_lr:
        if ax is None:
            fig, ax = plt.subplots(2, 1)
            fig.tight_layout()
            show_plt = True
    else:
        if ax is None:
            fig, ax = plt.subplots()
            show_plt = True
        # Wrap the single axes in a list so ax[0] works in both branches.
        ax = [ax]

    ax[0].plot(
        np.arange(num_iters) / iters_per_epoch,
        logs["loss"],
        "#82c6eb",
        np.arange(20, num_iters - 20) / iters_per_epoch,
        smoothed_loss_41,
        "#2a9edd",
    )
    ax[0].set_yscale("log")

    ax[0].set(xlabel="Epoch", ylabel="Loss")
    # Bug fix: only two lines are plotted above, but the legend previously
    # listed three labels, so the smoothed curve was labeled
    # "Loss (Running Mean)" and the intended "Loss (Running Mean 41)" label
    # was dropped.  Use two labels, matching plot_reconstruction_loss.
    ax[0].legend(["Loss", "Loss (Running Mean 41)"])

    if show_lr:
        combined_lrs = np.array(logs["learning_rate"])
        ax[1].plot(
            np.arange(combined_lrs.shape[0]),
            combined_lrs[:, 0],
            np.arange(combined_lrs.shape[0]),
            combined_lrs[:, 1],
        )
        ax[1].set(xlabel="Epoch", ylabel="Learning Rate")
        ax[1].legend(["Decoder", "Latent Vector"])

    for axis in ax:
        axis.grid()
    if filename is not None:
        plt.savefig(filename, bbox_inches="tight")
    elif show_plt:
        plt.show()
117

118

119
def plot_reconstruction_loss(loss_history, iters_per_epoch, filename=None):
    """Plot reconstruction loss (raw and 41-sample running mean).

    Parameters
    ----------
    loss_history : sequence of float
        Per-iteration loss values recorded during reconstruction.
    iters_per_epoch : float
        Iterations per epoch; used to convert the x-axis to epochs.
    filename : str or None
        If given, save the figure to this path; otherwise show it
        interactively.
    """
    losses = np.array(loss_history)
    num_iters = len(losses)

    latest_epoch = num_iters / iters_per_epoch
    logging.info("latest epoch is {}".format(latest_epoch))
    logging.info("{} iters per epoch".format(iters_per_epoch))

    # The 41-wide running mean trims 20 samples from each end, matching the
    # x-range np.arange(20, num_iters - 20) used below.
    smoothed_loss_41 = running_mean(losses, 41)

    fig, ax = plt.subplots()

    ax.plot(
        np.arange(num_iters) / iters_per_epoch,
        losses,
        "#82c6eb",
        np.arange(20, num_iters - 20) / iters_per_epoch,
        smoothed_loss_41,
        "#2a9edd",
    )
    ax.set_yscale("log")
    ax.set(xlabel="Epoch", ylabel="Loss")
    ax.legend(["Loss", "Loss (Running Mean 41)"])
    ax.grid()
    # Bug fix: savefig was previously called unconditionally, so the default
    # filename=None made matplotlib raise.  Mirror plot_logs: save when a
    # filename is given, otherwise show the figure.
    if filename is not None:
        plt.savefig(filename, bbox_inches="tight")
    else:
        plt.show()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc