• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

vanvalenlab / deepcell-label / 4578689396

pending completion
4578689396

Pull #436

github

GitHub
Merge ddb425c30 into 6a993cb7a
Pull Request #436: Model training overhaul: SNGP model, uncertainty visualization, and custom embedding support

462 of 1163 branches covered (39.72%)

Branch coverage included in aggregate %.

20 of 628 new or added lines in 27 files covered. (3.18%)

76 existing lines in 5 files now uncovered.

3248 of 5431 relevant lines covered (59.8%)

543.49 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/frontend/src/Project/service/labels/tensorflow/execute.ts
1
import { dispose, memory, SymbolicTensor, Tensor, util } from '@tensorflow/tfjs';
2
import {
3
  cachedRecipientCounts,
4
  cachedSorted,
5
  ExecutionProbe,
6
  FeedDict,
7
  getTopologicalSortAndRecipientCountsForOneFetch,
8
  RecipientMap,
9
} from '@tensorflow/tfjs-layers/dist/engine/executor';
10
import { InputLayer } from '@tensorflow/tfjs-layers/dist/engine/input_layer';
11
import { toList } from '@tensorflow/tfjs-layers/dist/utils/generic_utils';
12
import { Kwargs } from './spectralNormalizationLayer';
13

14
type RecipientCounts = {
15
  [fetchName: string]: number;
16
};
17

18
function recipientMap2Counts(recipientMap: RecipientMap): RecipientCounts {
NEW
19
  const recipientCounts: RecipientCounts = {};
×
NEW
20
  for (const name in recipientMap) {
×
NEW
21
    recipientCounts[name] = recipientMap[name].size;
×
22
  }
NEW
23
  return recipientCounts;
×
24
}
25

26
/**
27
 * Sort the `SymbolicTensor`s topologically, for an array of fetches.
28
 *
29
 * This function calls getTopologicalSortAndRecipientCountsForOneFetch and
30
 * merges their results.
31
 *
32
 * @param fetch The array of fetches requested. Must be a non-empty array.
33
 * @param feedDict The dictionary of fed values.
34
 * @returns sorted: Topologically-sorted array of SymbolicTensors.
35
 *   recipientCounts: Recipient counts for all SymbolicTensors in `sorted`.
36
 */
37
function getTopologicalSortAndRecipientCounts(
38
  fetches: SymbolicTensor[],
39
  feedDict: FeedDict
40
): { sorted: SymbolicTensor[]; recipientCounts: RecipientCounts } {
NEW
41
  util.assert(fetches != null && fetches.length > 0, () => `Expected at least one fetch, got none`);
×
42

NEW
43
  let finalSorted: SymbolicTensor[] = [];
×
NEW
44
  let finalRecipientMap: RecipientMap = {};
×
NEW
45
  if (fetches.length === 1) {
×
46
    // Special-casing 1 fetch for efficiency.
NEW
47
    const out = getTopologicalSortAndRecipientCountsForOneFetch(fetches[0], feedDict);
×
NEW
48
    finalSorted = out.sorted;
×
NEW
49
    finalRecipientMap = out.recipientMap;
×
50
  } else {
NEW
51
    const visited = new Set<string>();
×
NEW
52
    for (const fetch of fetches) {
×
NEW
53
      const { sorted, recipientMap } = getTopologicalSortAndRecipientCountsForOneFetch(
×
54
        fetch,
55
        feedDict
56
      );
57

58
      // Merge sorted SymbolicTensor Arrays.
NEW
59
      for (const symbolicTensor of sorted) {
×
NEW
60
        if (!visited.has(symbolicTensor.name)) {
×
NEW
61
          finalSorted.push(symbolicTensor);
×
NEW
62
          visited.add(symbolicTensor.name);
×
63
        }
64
      }
65

66
      // Merge recipient maps.
NEW
67
      for (const name in recipientMap) {
×
NEW
68
        if (finalRecipientMap[name] == null) {
×
NEW
69
          finalRecipientMap[name] = new Set<string>();
×
70
        }
NEW
71
        recipientMap[name].forEach((recipient) => finalRecipientMap[name].add(recipient));
×
72
      }
73
    }
74
  }
NEW
75
  return {
×
76
    sorted: finalSorted,
77
    recipientCounts: recipientMap2Counts(finalRecipientMap),
78
  };
79
}
80

81
/**
 * Execute a SymbolicTensor by using concrete feed values.
 *
 * A `SymbolicTensor` object is a node in a computation graph of TF.js
 * Layers. The object is backed by a source layer and input
 * `SymbolicTensor`s to the source layer. This method evaluates
 * the `call()` method of the source layer, using concrete values of the
 * inputs obtained from either
 * * `feedDict`, if the input key exists in `feedDict`, or else,
 * * a recursive call to `execute()` itself.
 *
 * @param x: The `SymbolicTensor` to execute.
 * @param feedDict: The feed values, as base condition of the recursion.
 *   execution.
 * @param kwargs: Optional keyword arguments.
 * @param probe: A probe object (of interface `ExecutionProbe`) used for
 *   testing memory footprint of `execute` calls.
 * @returns Result of the execution.
 * @throws ValueError: If any `SymbolicTensor`s from `InputLayer`s
 *   encountered during the execution lacks a feed value in `feedDict`.
 */
export function execute(
  fetches: SymbolicTensor | SymbolicTensor[],
  feedDict: FeedDict,
  kwargs?: Kwargs,
  probe?: ExecutionProbe
): Tensor | Tensor[] | [Tensor | Tensor[]] {
  // `training` toggles memory bookkeeping: in inference mode intermediate
  // tensors are disposed eagerly; in training mode they are kept alive.
  const training: boolean = kwargs == null ? false : kwargs['training'];

  // Normalize to an array of fetches; remember the original shape so the
  // return value can be unwrapped at the end.
  const arrayFetches = Array.isArray(fetches);
  const fetchArray: SymbolicTensor[] = arrayFetches ? fetches : [fetches];

  const outputNames = fetchArray.map((t) => t.name);
  const finalOutputs: Tensor[] = [];
  const feedNames = feedDict.names();
  // Pre-fill outputs that are directly fed; placeholder null for the rest,
  // to be filled in during graph execution below.
  for (const outputName of outputNames) {
    if (feedNames.indexOf(outputName) !== -1) {
      finalOutputs.push(feedDict.getValue(outputName));
    } else {
      // @ts-ignore
      finalOutputs.push(null);
    }
  }

  if (probe != null) {
    // For optional probing of memory footprint during execution.
    probe.maxNumTensors = -Infinity;
    probe.minNumTensors = Infinity;
  }

  // Check cache. The cache key combines the fetch names and the (sorted)
  // feed names, since both determine the topological sort.
  const fetchAndFeedKey = outputNames.join(',') + '|' + feedDict.names().sort().join(',');
  let sorted: SymbolicTensor[] = cachedSorted.get(fetchAndFeedKey);
  let recipientCounts: { [fetchName: string]: number };
  if (sorted == null) {
    // Cache doesn't contain the desired combination of fetches. Compute
    // topological sort for the combination for the first time.
    const out = getTopologicalSortAndRecipientCounts(fetchArray, feedDict);
    sorted = out.sorted;
    recipientCounts = out.recipientCounts;

    // Store results in cache for future use.
    cachedSorted.put(fetchAndFeedKey, sorted);
    cachedRecipientCounts.put(fetchAndFeedKey, recipientCounts);
  }
  // Take a private, mutable copy of the cached counts (decremented below);
  // left empty in training mode where no disposal bookkeeping is done.
  recipientCounts = {};
  if (!training) {
    Object.assign(recipientCounts, cachedRecipientCounts.get(fetchAndFeedKey));
  }

  const internalFeedDict = new FeedDict(feedDict);

  // Start iterative execution on the topologically-sorted SymbolicTensors.
  for (let i = 0; i < sorted.length; ++i) {
    if (probe != null) {
      // For optional probing of memory usage during execution.
      const numTensors = memory().numTensors;
      // @ts-ignore
      if (numTensors > probe.maxNumTensors) {
        probe.maxNumTensors = numTensors;
      }
      // @ts-ignore
      if (numTensors < probe.minNumTensors) {
        probe.minNumTensors = numTensors;
      }
    }

    const symbolic = sorted[i];
    const srcLayer = symbolic.sourceLayer;
    // InputLayers produce no computation; their values come from the feed.
    if (srcLayer instanceof InputLayer) {
      continue;
    }
    const inputValues: Tensor[] = [];
    const inputMasks: Tensor[] = [];
    const tensorsToDispose: Tensor[] = [];

    let maskExists = false;
    for (const input of symbolic.inputs) {
      const value = internalFeedDict.getValue(input);
      const mask = internalFeedDict.getMask(input);
      inputValues.push(value);
      inputMasks.push(mask);
      if (mask != null) {
        maskExists = true;
      }
      if (!training) {
        // Decrement the remaining-use count; when it hits zero and the
        // tensor is neither fed, a requested output, already disposed, nor
        // owned by a stateful layer, it can be disposed after this step.
        recipientCounts[input.name]--;
        if (
          recipientCounts[input.name] === 0 &&
          !feedDict.hasKey(input) &&
          outputNames.indexOf(input.name) === -1 &&
          !value.isDisposed &&
          input.sourceLayer.stateful !== true
        ) {
          tensorsToDispose.push(value);
        }
      }
    }

    if (maskExists) {
      kwargs = kwargs || {};
      // NOTE(review): only the first input's mask is forwarded — presumably
      // sufficient for the layers used here; confirm for multi-input layers.
      kwargs['mask'] = inputMasks[0];
    }
    const outputTensors = toList(srcLayer.apply(inputValues, kwargs)) as Tensor[];
    let outputMask: Tensor | Tensor[] | undefined = undefined;
    if (srcLayer.supportsMasking) {
      outputMask = srcLayer.computeMask(inputValues, inputMasks);
    }
    const layerOutputs = getNodeOutputs(symbolic);

    const outputSymbolicTensors = Array.isArray(layerOutputs) ? layerOutputs : [layerOutputs];
    for (let i = 0; i < outputSymbolicTensors.length; ++i) {
      if (!internalFeedDict.hasKey(outputSymbolicTensors[i])) {
        internalFeedDict.add(
          outputSymbolicTensors[i],
          outputTensors[i],
          Array.isArray(outputMask) ? outputMask[0] : outputMask
        );
      }
      const index = outputNames.indexOf(outputSymbolicTensors[i].name);
      if (index !== -1) {
        finalOutputs[index] = outputTensors[i];
        // NOTE(review): this diverges from upstream tfjs-layers, which only
        // assigns finalOutputs[index]. It looks like a deliberate capture of
        // an extra, unnamed second output tensor (e.g. an SNGP covariance)
        // into the next slot — but it writes `undefined` when no such tensor
        // exists and could clobber another named output. Confirm intent.
        finalOutputs[index + 1] = outputTensors[i + 1];
      }
    }

    if (!training) {
      // Clean up Tensors that are no longer needed.
      dispose(tensorsToDispose);
    }
  }
  // NOTE(cais): Unlike intermediate tensors, we don't discard mask
  // tensors as we go, because these tensors are sometimes passed over a
  // series of multiple layers, i.e., not obeying the immediate input
  // relations in the graph. If this becomes a memory-usage concern,
  // we can improve this in the future.
  internalFeedDict.disposeMasks();

  return arrayFetches ? finalOutputs : finalOutputs[0];
}
241

242
/**
243
 * Get the symbolic output tensors of the node to which a given fetch belongs.
244
 * @param fetch The fetched symbolic tensor.
245
 * @returns The Array of symbolic tensors output by the node to which `fetch`
246
 *   belongs.
247
 */
248
function getNodeOutputs(fetch: SymbolicTensor): SymbolicTensor | SymbolicTensor[] {
249
  let layerOutputs: SymbolicTensor | SymbolicTensor[];
NEW
250
  if (fetch.sourceLayer.inboundNodes.length === 1) {
×
NEW
251
    layerOutputs = fetch.sourceLayer.output;
×
252
  } else {
253
    // @ts-ignore
NEW
254
    let nodeIndex: number = null;
×
NEW
255
    for (let i = 0; i < fetch.sourceLayer.inboundNodes.length; ++i) {
×
NEW
256
      for (const outputTensor of fetch.sourceLayer.inboundNodes[i].outputTensors) {
×
NEW
257
        if (outputTensor.id === fetch.id) {
×
NEW
258
          nodeIndex = i;
×
NEW
259
          break;
×
260
        }
261
      }
262
    }
NEW
263
    layerOutputs = fetch.sourceLayer.getOutputAt(nodeIndex);
×
264
  }
NEW
265
  return layerOutputs;
×
266
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc