ContinualAI / avalanche / build 4993189103 (github, pending completion)

Pull Request #1370: Add base elements to support distributed comms. Add supports_distributed plugin flag.
Committer: unknown. Commit message: unknown.

258 of 822 new or added lines in 27 files covered. (31.39%)

80 existing lines in 5 files now uncovered.

15585 of 21651 relevant lines covered (71.98%)

2.88 hits per line

Source File

/avalanche/models/utils.py (76.56% of lines covered)
from avalanche.benchmarks.utils import make_classification_dataset
from avalanche.models.dynamic_modules import MultiTaskModule, DynamicModule
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from collections import OrderedDict

from avalanche.benchmarks.scenarios import CLExperience


def is_multi_task_module(model: nn.Module) -> bool:
    return isinstance(model, MultiTaskModule) or \
        (isinstance(model, DistributedDataParallel) and
         isinstance(model.module, MultiTaskModule))


def avalanche_forward(model, x, task_labels):
    if is_multi_task_module(model):
        return model(x, task_labels)
    else:  # no task labels
        return model(x)


def avalanche_model_adaptation(model: nn.Module, experience: CLExperience):
    if isinstance(model, DistributedDataParallel):
        raise RuntimeError('The model is wrapped in DistributedDataParallel. '
                           'Please unwrap it before calling this method.')
    for module in model.modules():
        if isinstance(module, DynamicModule):
            module.adaptation(experience)


class FeatureExtractorBackbone(nn.Module):
    """
    This PyTorch module allows us to extract features from a backbone network
    given a layer name.
    """

    def __init__(self, model, output_layer_name):
        super(FeatureExtractorBackbone, self).__init__()
        self.model = model
        self.output_layer_name = output_layer_name
        self.output = None  # this will store the layer output
        self.add_hooks(self.model)

    def forward(self, x):
        self.model(x)
        return self.output

    def get_name_to_module(self, model):
        name_to_module = {}
        for m in model.named_modules():
            name_to_module[m[0]] = m[1]
        return name_to_module

    def get_activation(self):
        def hook(model, input, output):
            self.output = output.detach()

        return hook

    def add_hooks(self, model):
        """
        Register a forward hook on the layer named ``self.output_layer_name``.

        :param model: the backbone whose hooked layer output will be stored
            in ``self.output`` every time the model is run.
        :return: None
        """
        name_to_module = self.get_name_to_module(model)
        name_to_module[self.output_layer_name].register_forward_hook(
            self.get_activation()
        )


class Flatten(nn.Module):
    """
    Simple nn.Module to flatten each tensor of a batch of tensors.
    """

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        batch_size = x.shape[0]
        return x.view(batch_size, -1)


class MLP(nn.Module):
    """
    Simple nn.Module to create a multi-layer perceptron
    with BatchNorm and ReLU activations.

    :param hidden_size: An array indicating the number of neurons in each layer.
    :type hidden_size: int[]
    :param last_activation: Indicates whether to add BatchNorm and ReLU
                            after the last layer.
    :type last_activation: Boolean
    """

    def __init__(self, hidden_size, last_activation=True):
        super(MLP, self).__init__()
        q = []
        for i in range(len(hidden_size) - 1):
            in_dim = hidden_size[i]
            out_dim = hidden_size[i + 1]
            q.append(("Linear_%d" % i, nn.Linear(in_dim, out_dim)))
            if (i < len(hidden_size) - 2) or (
                (i == len(hidden_size) - 2) and (last_activation)
            ):
                q.append(("BatchNorm_%d" % i, nn.BatchNorm1d(out_dim)))
                q.append(("ReLU_%d" % i, nn.ReLU(inplace=True)))
        self.mlp = nn.Sequential(OrderedDict(q))

    def forward(self, x):
        return self.mlp(x)


__all__ = ["avalanche_forward", "FeatureExtractorBackbone", "MLP", "Flatten"]
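
The three helpers at the top of this file are the pieces a training loop would combine: avalanche_model_adaptation must be called on the unwrapped model (it raises RuntimeError otherwise), and avalanche_forward only passes task labels when the model, or the module inside a DistributedDataParallel wrapper, is a MultiTaskModule. The sketch below illustrates that ordering with a toy single-process process group and a plain nn.Sequential model; the gloo setup and the toy model are illustrative assumptions, not part of this file.

import os

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel

from avalanche.models.utils import avalanche_forward, is_multi_task_module

# Single-process process group, just to have a real DDP wrapper (assumption:
# gloo backend on CPU; a real run would launch multiple processes).
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10))

# avalanche_model_adaptation(model, experience) would go here, on the raw
# model: once the model is wrapped in DistributedDataParallel the helper
# raises RuntimeError instead of adapting.
ddp_model = DistributedDataParallel(model)

x = torch.randn(8, 32)
task_labels = torch.zeros(8, dtype=torch.long)

# A plain module ignores task labels; a MultiTaskModule (even one hidden
# inside the DDP wrapper) would receive them as the second argument.
print(is_multi_task_module(ddp_model))   # False for this toy model
out = avalanche_forward(ddp_model, x, task_labels)
print(out.shape)                         # torch.Size([8, 10])

dist.destroy_process_group()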
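
FeatureExtractorBackbone registers a forward hook on the named layer and returns that layer's detached output from forward. A minimal usage sketch, assuming torchvision's resnet18 as the backbone (any nn.Module whose named_modules() contains the requested name would work the same way):

import torch
from torchvision.models import resnet18

from avalanche.models.utils import FeatureExtractorBackbone

# Valid layer names are the keys returned by model.named_modules();
# "avgpool" is the global pooling layer of torchvision's resnet18.
backbone = resnet18(weights=None)
feature_extractor = FeatureExtractorBackbone(backbone, "avgpool")

images = torch.randn(4, 3, 224, 224)
features = feature_extractor(images)  # detached output of the hooked layer
print(features.shape)                 # torch.Size([4, 512, 1, 1])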
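
MLP builds one Linear layer per consecutive pair of entries in hidden_size, so the first entry is the input width, and Flatten reshapes a batch to 2-D so it can feed such a head. A minimal sketch with assumed sizes:

import torch

from avalanche.models.utils import MLP, Flatten

# With last_activation=False this builds
# Linear(784, 256) -> BatchNorm1d -> ReLU -> Linear(256, 10);
# the default (True) would append BatchNorm1d and ReLU after the last Linear.
net = torch.nn.Sequential(
    Flatten(),  # (N, 1, 28, 28) -> (N, 784)
    MLP([784, 256, 10], last_activation=False),
)

x = torch.randn(16, 1, 28, 28)
logits = net(x)
print(logits.shape)  # torch.Size([16, 10])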