• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

LearnLib / learnlib / 6605620268

22 Oct 2023 06:53PM UTC coverage: 93.218% (+0.9%) from 92.296%
6605620268

push

github

mtf90
cleanup passive integration tests

11546 of 12386 relevant lines covered (93.22%)

1.68 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

87.69
/algorithms/active/ttt-vpa/src/main/java/de/learnlib/algorithm/ttt/vpa/TTTLearnerVPA.java
1
/* Copyright (C) 2013-2023 TU Dortmund
2
 * This file is part of LearnLib, http://www.learnlib.de/.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
package de.learnlib.algorithm.ttt.vpa;
17

18
import java.util.ArrayDeque;
19
import java.util.ArrayList;
20
import java.util.Collections;
21
import java.util.Deque;
22
import java.util.HashMap;
23
import java.util.HashSet;
24
import java.util.List;
25
import java.util.Map;
26
import java.util.Objects;
27
import java.util.Set;
28

29
import com.github.misberner.buildergen.annotations.GenerateBuilder;
30
import com.google.common.collect.Iterables;
31
import de.learnlib.acex.AcexAnalyzer;
32
import de.learnlib.algorithm.observationpack.vpa.OPLearnerVPA;
33
import de.learnlib.algorithm.observationpack.vpa.hypothesis.AbstractHypTrans;
34
import de.learnlib.algorithm.observationpack.vpa.hypothesis.BlockList;
35
import de.learnlib.algorithm.observationpack.vpa.hypothesis.ContextPair;
36
import de.learnlib.algorithm.observationpack.vpa.hypothesis.DTNode;
37
import de.learnlib.algorithm.observationpack.vpa.hypothesis.HypLoc;
38
import de.learnlib.algorithm.observationpack.vpa.hypothesis.TransList;
39
import de.learnlib.api.oracle.MembershipOracle;
40
import de.learnlib.api.query.DefaultQuery;
41
import de.learnlib.datastructure.discriminationtree.SplitData;
42
import net.automatalib.alphabet.VPAlphabet;
43
import net.automatalib.automaton.vpa.StackContents;
44
import net.automatalib.automaton.vpa.State;
45
import net.automatalib.word.Word;
46
import org.checkerframework.checker.nullness.qual.Nullable;
47

48
/**
 * A TTT-style active learning algorithm for visibly push-down automata (VPAs). Extends the
 * observation-pack learner {@link OPLearnerVPA} with temporary discriminators that are tracked in a
 * {@link BlockList} and later finalized (shortened) via splitter search, following the TTT approach
 * of keeping discriminators short.
 *
 * @param <I>
 *         input symbol type
 */
public class TTTLearnerVPA<I> extends OPLearnerVPA<I> {

    // Roots of discrimination-(sub)trees whose discriminators are still temporary and await
    // finalization (see splitState / declareFinal / findSplitterGlobal).
    private final BlockList<I> blockList = new BlockList<>();

    /**
     * Constructor.
     *
     * @param alphabet
     *         the visibly push-down alphabet (call/internal/return partition)
     * @param oracle
     *         the membership oracle answering acceptance queries
     * @param analyzer
     *         the abstract-counterexample analyzer used to locate breakpoints
     */
    @GenerateBuilder
    public TTTLearnerVPA(VPAlphabet<I> alphabet, MembershipOracle<I, Boolean> oracle, AcexAnalyzer analyzer) {
        super(alphabet, oracle, analyzer);
    }

    /**
     * Computes the successor of {@code baseState} under {@code suffix}, first resolving any
     * nondeterminism on the path. The word is simulated over <em>sets</em> of hypothesis locations
     * (non-tree transitions may point at a whole DT subtree); the last position at which the
     * simulation was still deterministic is remembered, and the remaining (nondeterministic) part
     * of the path is determinized before the final deterministic lookup.
     */
    @Override
    protected State<HypLoc<I>> getDefinitiveSuccessor(State<HypLoc<I>> baseState, Word<I> suffix) {
        NonDetState<HypLoc<I>> curr = NonDetState.fromDet(baseState);
        int lastDet = 0;                          // length of the longest prefix after which the state set was a singleton
        NonDetState<HypLoc<I>> lastDetState = curr;
        int i = 0;
        for (I sym : suffix) {
            if (alphabet.isCallSymbol(sym)) {
                // A call pushes (the encodings of) all possible (location, call-symbol) stack
                // symbols and resets to the initial location (standard 1-SEVPA semantics).
                Set<Integer> stackSyms = new HashSet<>();
                for (HypLoc<I> loc : curr.getLocations()) {
                    int stackSym = hypothesis.encodeStackSym(loc, sym);
                    stackSyms.add(stackSym);
                }
                NondetStackContents nsc = NondetStackContents.push(stackSyms, curr.getStack());
                curr = new NonDetState<>(Collections.singleton(hypothesis.getInitialLocation()), nsc);
            } else if (alphabet.isReturnSymbol(sym)) {
                // A return pops; collect successors over every (location, top-of-stack) combination.
                Set<HypLoc<I>> succs = new HashSet<>();
                for (HypLoc<I> loc : curr.getLocations()) {
                    for (int stackSym : curr.getStack().peek()) {
                        AbstractHypTrans<I> trans = hypothesis.getReturnTransition(loc, sym, stackSym);
                        if (trans.isTree()) {
                            succs.add(trans.getTreeTarget());
                        } else {
                            // Non-tree transition: every location in the target DT subtree is possible.
                            Iterables.addAll(succs, trans.getNonTreeTarget().subtreeLocations());
                        }
                    }
                }
                curr = new NonDetState<>(succs, curr.getStack().pop());
            } else {
                // Internal symbol: stack is untouched.
                Set<HypLoc<I>> succs = new HashSet<>();
                for (HypLoc<I> loc : curr.getLocations()) {
                    AbstractHypTrans<I> trans = hypothesis.getInternalTransition(loc, sym);
                    if (trans.isTree()) {
                        succs.add(trans.getTreeTarget());
                    } else {
                        Iterables.addAll(succs, trans.getNonTreeTarget().subtreeLocations());
                    }
                }
                curr = new NonDetState<>(succs, curr.getStack());
            }
            i++;
            if (!curr.isNonDet()) {
                lastDet = i;
                lastDetState = curr;
            }
        }

        if (lastDet < suffix.length()) {
            // Nondeterminism remained past position lastDet; resolve it on the residual suffix.
            determinize(lastDetState.determinize(), suffix.subWord(lastDet));
        }
        return hypothesis.getSuccessor(baseState, suffix);
    }

    /**
     * Performs a single refinement step for the given counterexample query. Returns {@code false}
     * if the hypothesis already agrees with the counterexample output; otherwise splits a state,
     * then repeatedly closes transitions and finalizes temporary discriminators until no output
     * inconsistency remains, and returns {@code true}.
     */
    @Override
    protected boolean refineHypothesisSingle(DefaultQuery<I, Boolean> ceQuery) {
        Word<I> ceWord = ceQuery.getInput();

        Boolean out = computeHypothesisOutput(ceWord);

        if (Objects.equals(out, ceQuery.getOutput())) {
            return false;  // not (or no longer) a counterexample
        }

        OutputInconsistency<I> outIncons = new OutputInconsistency<>(hypothesis.getInitialLocation(),
                                                                     new ContextPair<>(Word.epsilon(), ceWord),
                                                                     ceQuery.getOutput());

        do {
            splitState(outIncons);
            closeTransitions();
            while (finalizeAny()) {
                closeTransitions();
            }

            outIncons = findOutputInconsistency();
        } while (outIncons != null);

        return true;
    }

    /**
     * Computes the hypothesis acceptance for {@code word} by simulating it symbol-by-symbol,
     * resolving each non-tree transition to an arbitrary location of its DT subtree
     * (see {@link #getAnySuccessor(State, Object)}).
     */
    protected boolean computeHypothesisOutput(Word<I> word) {
        State<HypLoc<I>> curr = hypothesis.getInitialState();
        for (I sym : word) {
            curr = getAnySuccessor(curr, sym);
        }
        return hypothesis.isAccepting(curr);
    }

    /**
     * Splits a hypothesis state based on an output inconsistency: derives an abstract
     * counterexample, locates the breakpoint, turns the transition at the breakpoint into a tree
     * transition (new location), and splits the old leaf in the discrimination tree with a new
     * (temporary) discriminator. The split node is marked temporary and, if it starts a new block,
     * registered in {@link #blockList}.
     */
    private void splitState(OutputInconsistency<I> outIncons) {
        PrefixTransformAcex acex = deriveAcex(outIncons);
        int breakpoint = analyzer.analyzeAbstractCounterexample(acex);

        Word<I> acexSuffix = acex.getSuffix();
        Word<I> prefix = acexSuffix.prefix(breakpoint);
        I act = acexSuffix.getSymbol(breakpoint);
        Word<I> suffix = acexSuffix.subWord(breakpoint + 1);

        State<HypLoc<I>> state = hypothesis.getSuccessor(acex.getBaseState(), prefix);
        assert state != null;
        State<HypLoc<I>> succState = hypothesis.getSuccessor(state, act);
        assert succState != null;

        // New discriminator: access sequence of the current stack contents paired with the
        // remaining suffix.
        ContextPair<I> context = new ContextPair<>(transformAccessSequence(succState.getStackContents()), suffix);

        AbstractHypTrans<I> trans = hypothesis.getInternalTransition(state, act);
        assert trans != null;

        HypLoc<I> newLoc = makeTree(trans);
        DTNode<I> oldDtNode = succState.getLocation().getLeaf();
        // All transitions into the old leaf must be re-sifted after the split.
        openTransitions.addAll(oldDtNode.getIncoming());
        DTNode<I>.SplitResult children = oldDtNode.split(context, acex.effect(breakpoint), acex.effect(breakpoint + 1));
        oldDtNode.setTemp(true);
        if (!oldDtNode.getParent().isTemp()) {
            // oldDtNode is the topmost temporary node on its path, i.e., a new block root.
            blockList.add(oldDtNode);
        }
        link(children.nodeOld, newLoc);
        link(children.nodeNew, succState.getLocation());
        initializeLocation(newLoc);
    }

    /**
     * Attempts to finalize one temporary discriminator (of any block). Returns {@code true} if a
     * splitter was found and a discriminator was finalized, {@code false} otherwise.
     * Must only be called with no open (un-sifted) transitions.
     */
    protected boolean finalizeAny() {
        assert openTransitions.isEmpty();

        GlobalSplitter<I> splitter = findSplitterGlobal();
        if (splitter != null) {
            finalizeDiscriminator(splitter.blockRoot, splitter.localSplitter);
            return true;
        }
        return false;
    }

    /**
     * Searches for an output inconsistency: a location whose hypothesis output on
     * {@code prefix · accessSequence · suffix} of some ancestor discriminator disagrees with the
     * outcome recorded in the discrimination tree. Among candidates, shorter total lengths are
     * preferred. Returns {@code null} if the hypothesis is consistent with the tree.
     */
    private @Nullable OutputInconsistency<I> findOutputInconsistency() {
        OutputInconsistency<I> best = null;

        for (HypLoc<I> loc : hypothesis.getLocations()) {
            int locAsLen = loc.getAccessSequence().length();
            DTNode<I> node = loc.getLeaf();
            while (!node.isRoot()) {
                boolean expectedOut = node.getParentOutcome();
                node = node.getParent();
                ContextPair<I> discr = node.getDiscriminator();
                // Only evaluate candidates that could beat the current best (saves simulations).
                if (best == null || discr.getLength() + locAsLen < best.totalLength()) {
                    boolean hypOut = computeHypothesisOutput(discr.getPrefix()
                                                                  .concat(loc.getAccessSequence(), discr.getSuffix()));
                    if (hypOut != expectedOut) {
                        best = new OutputInconsistency<>(loc, discr, expectedOut);
                    }
                }
            }
        }
        return best;
    }

    /**
     * Computes <em>some</em> successor of {@code state} under {@code sym}: tree transitions yield
     * their unique target, non-tree transitions yield an arbitrary location from the target DT
     * subtree. Call symbols push onto, return symbols pop from, the stack contents.
     */
    protected State<HypLoc<I>> getAnySuccessor(State<HypLoc<I>> state, I sym) {
        final VPAlphabet.SymbolType type = alphabet.getSymbolType(sym);
        final StackContents stackContents = state.getStackContents();

        switch (type) {
            case INTERNAL: {
                AbstractHypTrans<I> trans = hypothesis.getInternalTransition(state.getLocation(), sym);
                HypLoc<I> succLoc;
                if (trans.isTree()) {
                    succLoc = trans.getTreeTarget();
                } else {
                    succLoc = trans.getNonTreeTarget().subtreeLocsIterator().next();
                }
                return new State<>(succLoc, stackContents);
            }
            case CALL: {
                int stackSym = hypothesis.encodeStackSym(state.getLocation(), sym);
                return new State<>(hypothesis.getInitialLocation(), StackContents.push(stackSym, stackContents));
            }
            case RETURN: {
                assert stackContents != null;
                AbstractHypTrans<I> trans =
                        hypothesis.getReturnTransition(state.getLocation(), sym, stackContents.peek());
                HypLoc<I> succLoc;
                if (trans.isTree()) {
                    succLoc = trans.getTreeTarget();
                } else {
                    succLoc = trans.getNonTreeTarget().subtreeLocsIterator().next();
                }
                return new State<>(succLoc, stackContents.pop());
            }
            default:
                throw new IllegalStateException("Unhandled type " + type);
        }
    }

    /**
     * Derives a prefix-transform abstract counterexample from an output inconsistency. The two
     * endpoint effects are fixed: position 0 carries the expected output, the last position its
     * negation, so the analyzer can binary-search for the breakpoint in between.
     */
    protected PrefixTransformAcex deriveAcex(OutputInconsistency<I> outIncons) {
        PrefixTransformAcex acex =
                new PrefixTransformAcex(outIncons.location.getAccessSequence(), outIncons.discriminator);
        acex.setEffect(0, outIncons.expectedOut);
        acex.setEffect(acex.getLength() - 1, !outIncons.expectedOut);

        return acex;
    }

    /**
     * Determines a global splitter, i.e., a splitter for any block. This method may (but is not required to) employ
     * heuristics to obtain a splitter with a relatively short suffix length.
     *
     * @return a splitter for any of the blocks
     */
    private @Nullable GlobalSplitter<I> findSplitterGlobal() {
        DTNode<I> bestBlockRoot = null;
        Splitter<I> bestSplitter = null;

        for (DTNode<I> blockRoot : blockList) {
            Splitter<I> splitter = findSplitter(blockRoot);

            // Heuristic: prefer the splitter yielding the shortest new discriminator.
            if (splitter != null && (bestSplitter == null ||
                                     splitter.getNewDiscriminatorLength() < bestSplitter.getNewDiscriminatorLength())) {
                bestSplitter = splitter;
                bestBlockRoot = blockRoot;
            }
        }

        if (bestSplitter == null) {
            return null;
        }

        return new GlobalSplitter<>(bestBlockRoot, bestSplitter);
    }

    /**
     * Finalize a discriminator. Given a block root and a {@link Splitter}, replace the discriminator at the block root
     * by the one derived from the splitter, and update the discrimination tree accordingly.
     *
     * @param blockRoot
     *         the block root whose discriminator to finalize
     * @param splitter
     *         the splitter to use for finalization
     */
    private void finalizeDiscriminator(DTNode<I> blockRoot, Splitter<I> splitter) {
        assert blockRoot.isBlockRoot();

        ContextPair<I> newDiscr = splitter.getNewDiscriminator();

        assert !blockRoot.getDiscriminator().equals(newDiscr);

        ContextPair<I> finalDiscriminator = prepareSplit(blockRoot, splitter);
        // Re-assemble the block: one extracted (reduced) subtree per outcome label.
        Map<Boolean, DTNode<I>> repChildren = new HashMap<>();
        for (Boolean label : blockRoot.getSplitData().getLabels()) {
            repChildren.put(label, extractSubtree(blockRoot, label));
        }
        blockRoot.replaceChildren(repChildren);

        blockRoot.setDiscriminator(finalDiscriminator);

        declareFinal(blockRoot);
    }

    /**
     * Determines a (local) splitter for a given block. This method may (but is not required to) employ heuristics to
     * obtain a splitter with a relatively short suffix.
     *
     * @param blockRoot
     *         the root of the block
     *
     * @return a splitter for this block, or {@code null} if no such splitter could be found.
     */
    private @Nullable Splitter<I> findSplitter(DTNode<I> blockRoot) {
        // "Effective" alphabet size: internals, plus for every (call, return, stack-location)
        // combination two entries (block location as source, and as stack location).
        int alphabetSize =
                alphabet.getNumInternals() + alphabet.getNumCalls() * alphabet.getNumReturns() * hypothesis.size() * 2;

        @SuppressWarnings("unchecked")
        DTNode<I>[] lcas = new DTNode[alphabetSize];

        // Phase 1: for each effective symbol, compute the least common ancestor (in the DT) of the
        // successors of all block locations. Index layout: internals first, then per
        // (return, call, stackLoc) a pair of slots (loc as source / loc as stack symbol).
        for (HypLoc<I> loc : blockRoot.subtreeLocations()) {
            int i = 0;
            for (I intSym : alphabet.getInternalAlphabet()) {
                DTNode<I> currLca = lcas[i];
                AbstractHypTrans<I> trans = hypothesis.getInternalTransition(loc, intSym);
                assert trans.getTargetNode() != null;
                if (currLca == null) {
                    lcas[i] = trans.getTargetNode();
                } else {
                    lcas[i] = dtree.leastCommonAncestor(currLca, trans.getTargetNode());
                }
                i++;
            }
            for (I retSym : alphabet.getReturnAlphabet()) {
                for (I callSym : alphabet.getCallAlphabet()) {
                    for (HypLoc<I> stackLoc : hypothesis.getLocations()) {
                        // loc as the source location of the return transition
                        AbstractHypTrans<I> trans = hypothesis.getReturnTransition(loc, retSym, stackLoc, callSym);
                        DTNode<I> currLca = lcas[i];
                        assert trans.getTargetNode() != null;
                        if (currLca == null) {
                            lcas[i] = trans.getTargetNode();
                        } else {
                            lcas[i] = dtree.leastCommonAncestor(currLca, trans.getTargetNode());
                        }
                        i++;

                        // loc as the stack location of the return transition
                        trans = hypothesis.getReturnTransition(stackLoc, retSym, loc, callSym);
                        currLca = lcas[i];
                        if (currLca == null) {
                            lcas[i] = trans.getTargetNode();
                        } else {
                            lcas[i] = dtree.leastCommonAncestor(currLca, trans.getTargetNode());
                        }
                        i++;
                    }
                }
            }
        }

        // Phase 2: pick, among all symbols whose LCA is a final (non-temporary) inner node, the one
        // with the shortest resulting discriminator. Iteration order mirrors phase 1.
        int shortestLen = Integer.MAX_VALUE;
        Splitter<I> shortestSplitter = null;

        int i = 0;
        for (I intSym : alphabet.getInternalAlphabet()) {
            DTNode<I> currLca = lcas[i];
            if (!currLca.isLeaf() && !currLca.isTemp()) {
                Splitter<I> splitter = new Splitter<>(intSym, currLca);
                int newLen = splitter.getNewDiscriminatorLength();
                if (shortestSplitter == null || shortestLen > newLen) {
                    shortestSplitter = splitter;
                    shortestLen = newLen;
                }
            }
            i++;
        }
        for (I retSym : alphabet.getReturnAlphabet()) {
            for (I callSym : alphabet.getCallAlphabet()) {
                for (HypLoc<I> stackLoc : hypothesis.getLocations()) {
                    DTNode<I> currLca = lcas[i];
                    assert currLca != null;
                    if (!currLca.isLeaf() && !currLca.isTemp()) {
                        Splitter<I> splitter = new Splitter<>(retSym, stackLoc, callSym, false, currLca);
                        int newLen = splitter.getNewDiscriminatorLength();
                        if (shortestSplitter == null || shortestLen > newLen) {
                            shortestSplitter = splitter;
                            shortestLen = newLen;
                        }
                    }
                    i++;

                    currLca = lcas[i];
                    assert currLca != null;
                    if (!currLca.isLeaf() && !currLca.isTemp()) {
                        Splitter<I> splitter = new Splitter<>(callSym, stackLoc, retSym, true, currLca);
                        int newLen = splitter.getNewDiscriminatorLength();
                        if (shortestSplitter == null || shortestLen > newLen) {
                            shortestSplitter = splitter;
                            shortestLen = newLen;
                        }
                    }
                    i++;
                }
            }
        }

        return shortestSplitter;
    }

    /**
     * Prepare a split operation on a block, by marking all the nodes and transitions in the subtree (and annotating
     * them with {@link SplitData} objects).
     *
     * @param node
     *         the block root to be split
     * @param splitter
     *         the splitter to use for splitting the block
     *
     * @return the discriminator to use for splitting
     */
    private ContextPair<I> prepareSplit(DTNode<I> node, Splitter<I> splitter) {
        ContextPair<I> discriminator = splitter.getNewDiscriminator();

        Deque<DTNode<I>> dfsStack = new ArrayDeque<>();

        DTNode<I> succSeparator = splitter.succSeparator;

        dfsStack.push(node);
        assert node.getSplitData() == null;

        while (!dfsStack.isEmpty()) {
            DTNode<I> curr = dfsStack.pop();
            assert curr.getSplitData() == null;

            curr.setSplitData(new SplitData<>(TransList::new));

            // Label every incoming transition by its outcome under the new discriminator.
            // NOTE(review): query(trans, discriminator) is inherited — presumably issues a
            // membership query for the transition under the discriminator; confirm in superclass.
            for (AbstractHypTrans<I> trans : curr.getIncoming()) {
                Boolean outcome = query(trans, discriminator);
                curr.getSplitData().getIncoming(outcome).add(trans);
                markAndPropagate(curr, outcome);
            }

            if (curr.isInner()) {
                for (DTNode<I> child : curr.getChildren()) {
                    dfsStack.push(child);
                }
            } else {
                HypLoc<I> loc = curr.getData();
                assert loc != null;

                // Try to deduct the outcome from the DT target of
                // the respective transition
                AbstractHypTrans<I> trans = getSplitterTrans(loc, splitter);
                Boolean outcome = succSeparator.subtreeLabel(trans.getTargetNode());
                assert outcome != null;
                curr.getSplitData().setStateLabel(outcome);
                markAndPropagate(curr, outcome);
            }

        }

        return discriminator;
    }

    /**
     * Extract a (reduced) subtree containing all nodes with the given label from the subtree given by its root.
     * "Reduced" here refers to the fact that the resulting subtree will contain no inner nodes with only one child. The
     * tree returned by this method (represented by its root) will have as a parent node the root that was passed to
     * this method.
     *
     * @param root
     *         the root of the subtree from which to extract
     * @param label
     *         the label of the nodes to extract
     *
     * @return the extracted subtree
     */
    private DTNode<I> extractSubtree(DTNode<I> root, Boolean label) {
        assert root.getSplitData() != null;
        assert root.getSplitData().isMarked(label);

        Deque<ExtractRecord<I>> stack = new ArrayDeque<>();

        DTNode<I> firstExtracted = new DTNode<>(root, label);

        stack.push(new ExtractRecord<>(root, firstExtracted));
        while (!stack.isEmpty()) {
            ExtractRecord<I> curr = stack.pop();

            DTNode<I> original = curr.original;
            DTNode<I> extracted = curr.extracted;

            moveIncoming(extracted, original, label);

            if (original.isLeaf()) {
                if (Objects.equals(original.getSplitData().getStateLabel(), label)) {
                    // The location itself belongs to this label; keep it at the extracted leaf.
                    link(extracted, original.getData());
                } else {
                    // Leaf has incoming transitions with this label but its location does not
                    // match: a fresh location is required.
                    createNewState(extracted);
                }
                extracted.updateIncoming();
            } else {
                List<DTNode<I>> markedChildren = new ArrayList<>();

                for (DTNode<I> child : original.getChildren()) {
                    if (child.getSplitData().isMarked(label)) {
                        markedChildren.add(child);
                    }
                }

                if (markedChildren.size() > 1) {
                    // Inner node survives in the extracted tree; recurse into each marked child.
                    Map<Boolean, DTNode<I>> childMap = new HashMap<>();
                    for (DTNode<I> c : markedChildren) {
                        Boolean childLabel = c.getParentOutcome();
                        DTNode<I> extractedChild = new DTNode<>(extracted, childLabel);
                        childMap.put(childLabel, extractedChild);
                        stack.push(new ExtractRecord<>(c, extractedChild));
                    }
                    extracted.split(original.getDiscriminator(), childMap);
                    extracted.updateIncoming();
                    extracted.setTemp(true);
                } else if (markedChildren.size() == 1) {
                    // Only one child is marked: skip this inner node ("reduced" subtree).
                    stack.push(new ExtractRecord<>(markedChildren.get(0), extracted));
                } else { // markedChildren.isEmpty()
                    createNewState(extracted);
                    extracted.updateIncoming();
                }
            }

            assert extracted.getSplitData() == null;
        }

        return firstExtracted;
    }

    /**
     * Declares a block root final: clears its temporary status and split data, removes it from the
     * block list, registers its non-trivial (inner) children as new blocks, and re-opens all
     * transitions into the root for sifting.
     */
    protected void declareFinal(DTNode<I> blockRoot) {
        blockRoot.setTemp(false);
        blockRoot.setSplitData(null);

        blockRoot.removeFromBlockList();

        for (DTNode<I> subtree : blockRoot.getChildren()) {
            assert subtree.getSplitData() == null;
            // Register as blocks, if they are non-trivial subtrees
            if (subtree.isInner()) {
                blockList.add(subtree);
            }
        }

        openTransitions.addAll(blockRoot.getIncoming());
    }

    /**
     * Marks a node, and propagates the label up to all nodes on the path from the block root to this node.
     *
     * @param node
     *         the node to mark
     * @param label
     *         the label to mark the node with
     */
    private static <I> void markAndPropagate(DTNode<I> node, Boolean label) {
        DTNode<I> curr = node;

        // Stop at the first node already marked with this label (mark() returning false) or once
        // we leave the block (nodes outside carry no split data).
        while (curr != null && curr.getSplitData() != null) {
            if (!curr.getSplitData().mark(label)) {
                return;
            }
            curr = curr.getParent();
        }
    }

    /**
     * Resolves the hypothesis transition of {@code loc} that corresponds to the given splitter:
     * an internal transition, a return transition with {@code loc} as source, or a return
     * transition with {@code loc} as the stack location (CALL case).
     */
    public AbstractHypTrans<I> getSplitterTrans(HypLoc<I> loc, Splitter<I> splitter) {
        switch (splitter.type) {
            case INTERNAL:
                return hypothesis.getInternalTransition(loc, splitter.symbol);
            case RETURN:
                return hypothesis.getReturnTransition(loc, splitter.symbol, splitter.location, splitter.otherSymbol);
            case CALL:
                return hypothesis.getReturnTransition(splitter.location, splitter.otherSymbol, loc, splitter.symbol);
            default:
                throw new IllegalStateException("Unhandled type " + splitter.type);
        }
    }

    // Moves the incoming transitions labeled with {@code label} from the old node to the new node.
    private static <I> void moveIncoming(DTNode<I> newNode, DTNode<I> oldNode, Boolean label) {
        newNode.getIncoming().addAll(oldNode.getSplitData().getIncoming(label));
    }

    /**
     * Create a new state during extraction on-the-fly. This is required if a node in the DT has an incoming transition
     * with a certain label, but in its subtree there are no leaves with this label as their state label.
     *
     * @param newNode
     *         the extracted node
     */
    private void createNewState(DTNode<I> newNode) {
        AbstractHypTrans<I> newTreeTrans = newNode.getIncoming().chooseMinimal();
        assert newTreeTrans != null;

        HypLoc<I> newLoc = makeTree(newTreeTrans);
        link(newNode, newLoc);
        initializeLocation(newLoc);
    }

    /**
     * Resolves nondeterminism along the path of {@code suffix} starting in {@code state}: for every
     * non-call symbol whose transition is non-tree and points at an inner DT node, the transition's
     * DT target is refined via {@code updateDTTargets}.
     * NOTE(review): updateDTTargets is inherited — presumably sifts the transition further down
     * the discrimination tree; confirm in {@link OPLearnerVPA}.
     */
    protected void determinize(State<HypLoc<I>> state, Word<I> suffix) {
        State<HypLoc<I>> curr = state;
        for (I sym : suffix) {
            if (!alphabet.isCallSymbol(sym)) {
                AbstractHypTrans<I> trans = hypothesis.getInternalTransition(curr, sym);
                assert trans != null;
                if (!trans.isTree() && !trans.getNonTreeTarget().isLeaf()) {
                    updateDTTargets(Collections.singletonList(trans), true);
                }
            }
            curr = hypothesis.getSuccessor(curr, sym);
        }
    }

}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc