idaholab / MontePy / build 13600151525

01 Mar 2025 02:35AM UTC coverage: 97.83% (-0.02%) from 97.845%

Pull Request #673 (github, web-flow): Fixed parsing error with sigma baryons
Merge 357026b0a into df3d0a506

5 of 5 new or added lines in 2 files covered (100.0%).
1 existing line in 1 file is now uncovered.
6583 of 6729 relevant lines covered (97.83%).
0.98 hits per line.
Source File: /montepy/input_parser/parser_base.py (96.28% covered)

Uncovered lines in this file: the gen_wrapper debug loop inside parse(), the ListNode branch of number_sequence(), the no-lineno branch of error(), and the final return in modifier() (the line newly uncovered by this pull request).

# Copyright 2024, Battelle Energy Alliance, LLC All Rights Reserved.
from montepy.input_parser.tokens import MCNP_Lexer
from montepy.input_parser import syntax_node
from sly import Parser
import sly

_dec = sly.yacc._decorator


class MetaBuilder(sly.yacc.ParserMeta):
    """
    Custom MetaClass for allowing subclassing of MCNP_Parser.

    .. versionadded:: 0.2.0
        This was added with the major parser rework.

    Note: overloading functions is not allowed.
    """

    protected_names = {
        "debugfile",
        "errok",
        "error",
        "index_position",
        "line_position",
        "log",
        "parse",
        "restart",
        "tokens",
        "dont_copy",
    }

    def __new__(meta, classname, bases, attributes):
        if classname != "MCNP_Parser":
            for basis in bases:
                MetaBuilder._flatten_rules(classname, basis, attributes)
        cls = super().__new__(meta, classname, bases, attributes)
        return cls

    @staticmethod
    def _flatten_rules(classname, basis, attributes):
        for attr_name in dir(basis):
            if (
                not attr_name.startswith("_")
                and attr_name not in MetaBuilder.protected_names
                and attr_name not in attributes.get("dont_copy", set())
            ):
                func = getattr(basis, attr_name)
                attributes[attr_name] = func
        parent = basis.__bases__
        for par_basis in parent:
            if par_basis != Parser:
                return


class SLY_Supressor:
    """
    This is a fake logger meant to mostly make warnings disappear.

    .. versionadded:: 0.2.0
        This was added with the major parser rework.
    """

    def __init__(self):
        self._parse_fail_queue = []

    def debug(self, msg, *args, **kwargs):
        pass

    info = debug

    warning = debug

    error = debug

    critical = debug

    def parse_error(self, msg, token=None, lineno=0, index=0):
        """
        Adds a SLY parsing error to the error queue so it can be dumped later.

        :param msg: The message to display.
        :type msg: str
        :param token: the token that caused the error, if any.
        :type token: Token
        :param lineno: the current lineno of the error (from SLY, not the file), if any.
        :type lineno: int
        """
        self._parse_fail_queue.append(
            {"message": msg, "token": token, "line": lineno, "index": index}
        )

    def clear_queue(self):
        """
        Clears the error queue and returns all errors.

        Returns a list of dictionaries. Each dictionary has the keys: "message", "token", "line", and "index".

        :returns: A list of the errors since the queue was last cleared.
        :rtype: list
        """
        ret = self._parse_fail_queue
        self._parse_fail_queue = []
        return ret

    def __len__(self):
        return len(self._parse_fail_queue)


class MCNP_Parser(Parser, metaclass=MetaBuilder):
    """
    Base class for all MCNP parsers that provides the basics.

    .. versionadded:: 0.2.0
        This was added with the major parser rework.
    """

    # Remove this if trying to debug issues with the parser
    log = SLY_Supressor()
    tokens = MCNP_Lexer.tokens
    debugfile = None

    def restart(self):
        """
        Clears internal state information about the current parse.

        Should be run before a new object is parsed.
        """
        self.log.clear_queue()
        super().restart()

    def parse(self, token_generator, input=None):
        """
        Parses the token stream and returns a syntax tree.

        If the parsing fails, None will be returned.
        The error queue can be retrieved from ``parser.log.clear_queue()``.

        :param token_generator: the token generator from ``lexer.tokenize``.
        :type token_generator: generator
        :param input: the input that is being lexed and parsed.
        :type input: Input
        :rtype: SyntaxNode
        """
        self._input = input

        # debug every time a token is taken
        def gen_wrapper():
            while True:
                token = next(token_generator, None)
                self._debug_parsing_error(token)
                yield token

        # change to using `gen_wrapper()` to debug
        tree = super().parse(token_generator)
        # treat any previous errors as being fatal even if it recovered.
        if len(self.log) > 0:
            return None
        self.tokens = {}
        return tree

    precedence = (("left", SPACE), ("left", TEXT))

    @_("NUMBER", "NUMBER padding")
    def number_phrase(self, p):
        """
        A non-zero number with or without padding.

        :returns: a float ValueNode
        :rtype: ValueNode
        """
        return self._flush_phrase(p, float)

    @_("NUMBER", "NUMBER padding")
    def identifier_phrase(self, p):
        """
        A non-zero number with or without padding converted to int.

        :returns: an int ValueNode
        :rtype: ValueNode
        """
        return self._flush_phrase(p, int)

    @_(
        "numerical_phrase",
        "shortcut_phrase",
        "number_sequence numerical_phrase",
        "number_sequence shortcut_phrase",
    )
    def number_sequence(self, p):
        """
        A list of numbers.

        :rtype: ListNode
        """
        if len(p) == 1:
            sequence = syntax_node.ListNode("number sequence")
            if type(p[0]) == syntax_node.ListNode:
                return p[0]
            sequence.append(p[0])
        else:
            sequence = p[0]
            if type(p[1]) == syntax_node.ListNode:
                for node in p[1].nodes:
                    sequence.append(node)
            else:
                sequence.append(p[1])
        return sequence

    @_("number_phrase", "null_phrase")
    def numerical_phrase(self, p):
        """
        Any number, including 0, with its padding.

        :returns: a float ValueNode
        :rtype: ValueNode
        """
        return p[0]

    @_("numerical_phrase", "shortcut_phrase")
    def shortcut_start(self, p):
        return p[0]

    @_(
        "shortcut_start NUM_REPEAT",
        "shortcut_start REPEAT",
        "shortcut_start NUM_MULTIPLY",
        "shortcut_start MULTIPLY",
        "shortcut_start NUM_INTERPOLATE padding number_phrase",
        "shortcut_start INTERPOLATE padding number_phrase",
        "shortcut_start NUM_LOG_INTERPOLATE padding number_phrase",
        "shortcut_start LOG_INTERPOLATE padding number_phrase",
        "NUM_JUMP",
        "JUMP",
    )
    def shortcut_sequence(self, p):
        """
        A shortcut (repeat, multiply, interpolate, or jump).

        :returns: the parsed shortcut.
        :rtype: ShortcutNode
        """
        short_cut = syntax_node.ShortcutNode(p)
        if isinstance(p[0], syntax_node.ShortcutNode):
            list_node = syntax_node.ListNode("next_shortcuts")
            list_node.append(p[0])
            list_node.append(short_cut)
            return list_node
        return short_cut

    @_("shortcut_sequence", "shortcut_sequence padding")
    def shortcut_phrase(self, p):
        """
        A complete shortcut, which should be used instead of shortcut_sequence.

        :returns: the parsed shortcut.
        :rtype: ShortcutNode
        """
        sequence = p.shortcut_sequence
        if len(p) == 2:
            sequence.end_padding = p.padding
        return sequence

    @_("NULL", "NULL padding")
    def null_phrase(self, p):
        """
        A zero number with or without its padding.

        :returns: a float ValueNode
        :rtype: ValueNode
        """
        return self._flush_phrase(p, float)

    @_("NULL", "NULL padding")
    def null_ident_phrase(self, p):
        """
        A zero number with or without its padding, for identification.

        :returns: an int ValueNode
        :rtype: ValueNode
        """
        return self._flush_phrase(p, int)

    @_("TEXT", "TEXT padding")
    def text_phrase(self, p):
        """
        A string with or without its padding.

        :returns: a str ValueNode.
        :rtype: ValueNode
        """
        return self._flush_phrase(p, str)

    def _flush_phrase(self, p, token_type):
        """
        Creates a ValueNode.
        """
        if len(p) > 1:
            padding = p[1]
        else:
            padding = None
        return syntax_node.ValueNode(p[0], token_type, padding)

    @_("SPACE", "DOLLAR_COMMENT", "COMMENT")
    def padding(self, p):
        """
        Anything that is not semantically significant: white space and comments.

        :returns: All sequential padding.
        :rtype: PaddingNode
        """
        if hasattr(p, "DOLLAR_COMMENT") or hasattr(p, "COMMENT"):
            is_comment = True
        else:
            is_comment = False
        return syntax_node.PaddingNode(p[0], is_comment)

    @_("padding SPACE", "padding DOLLAR_COMMENT", "padding COMMENT", 'padding "&"')
    def padding(self, p):
        """
        Anything that is not semantically significant: white space and comments.

        :returns: All sequential padding.
        :rtype: PaddingNode
        """
        if hasattr(p, "DOLLAR_COMMENT") or hasattr(p, "COMMENT"):
            is_comment = True
        else:
            is_comment = False
        p[0].append(p[1], is_comment)
        return p[0]

    @_("parameter", "parameters parameter")
    def parameters(self, p):
        """
        A list of the parameters (key, value pairs) for this input.

        :returns: all parameters
        :rtype: ParametersNode
        """
        if len(p) == 1:
            params = syntax_node.ParametersNode()
            param = p[0]
        else:
            params = p[0]
            param = p[1]
        params.append(param)
        return params

    @_(
        "classifier param_seperator number_sequence",
        "classifier param_seperator text_phrase",
    )
    def parameter(self, p):
        """
        A single key-value pair.

        :returns: the parameter.
        :rtype: SyntaxNode
        """
        return syntax_node.SyntaxNode(
            p.classifier.prefix.value,
            {"classifier": p.classifier, "seperator": p.param_seperator, "data": p[2]},
        )

    @_("file_atom", "file_name file_atom")
    def file_name(self, p):
        """
        A file name.

        :rtype: str
        """
        ret = p[0]
        if len(p) > 1:
            ret += p[1]
        return ret

    @_(
        "TEXT",
        "FILE_PATH",
        "NUMBER",
        "PARTICLE",
        "PARTICLE_SPECIAL",
        "INTERPOLATE",
        "JUMP",
        "KEYWORD",
        "LOG_INTERPOLATE",
        "NULL",
        "REPEAT",
        "SURFACE_TYPE",
        "THERMAL_LAW",
        "ZAID",
        "NUMBER_WORD",
    )
    def file_atom(self, p):
        return p[0]

    @_("file_name", "file_name padding")
    def file_phrase(self, p):
        """
        A file name with or without its padding.

        :returns: a str ValueNode.
        :rtype: ValueNode
        """
        return self._flush_phrase(p, str)

    @_("padding", "equals_sign", "padding equals_sign")
    def param_seperator(self, p):
        """
        The separation between a key and value for a parameter.

        :returns: a str ValueNode
        :rtype: ValueNode
        """
        padding = p[0]
        if len(p) > 1:
            padding += p[1]
        return padding

    @_('"="', '"=" padding')
    def equals_sign(self, p):
        """
        The separation between a key and value for a parameter.

        :returns: a str ValueNode
        :rtype: ValueNode
        """
        padding = syntax_node.PaddingNode(p[0])
        if hasattr(p, "padding"):
            padding += p.padding
        return padding

    @_('":" part', 'particle_type "," part')
    def particle_type(self, p):
        if hasattr(p, "particle_type"):
            token = p.particle_type.token + "".join(list(p)[1:])
            particle_node = syntax_node.ParticleNode("data particles", token)
        else:
            particle_node = syntax_node.ParticleNode("data particles", "".join(list(p)))

        return particle_node

    @_("PARTICLE", "PARTICLE_SPECIAL")
    def part(self, p):
        return p[0]

    @_(
        "TEXT",
        "KEYWORD",
        "PARTICLE",
        "SOURCE_COMMENT",
        "TALLY_COMMENT",
    )
    def data_prefix(self, p):
        return syntax_node.ValueNode(p[0], str)

    @_(
        "modifier data_prefix",
        "data_prefix",
        "classifier NUMBER",
        "classifier NULL",
        "classifier particle_type",
    )
    def classifier(self, p):
        """
        The classifier of a data input.

        This represents the first word of the data input.
        E.g.: ``M4``, ``IMP:N``, ``F104:p``

        :rtype: ClassifierNode
        """
        if hasattr(p, "classifier"):
            classifier = p.classifier
        else:
            classifier = syntax_node.ClassifierNode()

        if hasattr(p, "modifier"):
            classifier.modifier = syntax_node.ValueNode(p.modifier, str)
        if hasattr(p, "data_prefix"):
            classifier.prefix = p.data_prefix
        if hasattr(p, "NUMBER") or hasattr(p, "NULL"):
            if hasattr(p, "NUMBER"):
                num = p.NUMBER
            else:
                num = p.NULL
            classifier.number = syntax_node.ValueNode(num, int)
        if hasattr(p, "particle_type"):
            classifier.particles = p.particle_type
        return classifier

    @_("classifier padding", "classifier")
    def classifier_phrase(self, p):
        """
        A classifier with its padding.

        :rtype: ClassifierNode
        """
        classifier = p.classifier
        if len(p) > 1:
            classifier.padding = p.padding
        return classifier

    @_('"*"', "PARTICLE_SPECIAL")
    def modifier(self, p):
        """
        A character that modifies a classifier, e.g., ``*TR``.

        :returns: the modifier
        :rtype: str
        """
        if hasattr(p, "PARTICLE_SPECIAL"):
            if p.PARTICLE_SPECIAL == "*":
                return "*"
        return p[0]

    def error(self, token):
        """
        Default error handling.

        Puts the error data into a queue that can be pulled out later for one final, clear debug report.

        :param token: the token that broke the parsing rules.
        :type token: Token
        """
        if token:
            lineno = getattr(token, "lineno", 0)
            if self._input and self._input.lexer:
                lexer = self._input.lexer
                index = lexer.find_column(lexer.text, token)
            else:
                index = 0
            if lineno:
                self.log.parse_error(
                    f"sly: Syntax error at line {lineno}, token={token.type}\n",
                    token,
                    lineno,
                    index,
                )
            else:
                self.log.parse_error(
                    f"sly: Syntax error, token={token.type}", token, lineno
                )
        else:
            self.log.parse_error("sly: Parse error in input. EOF\n")

    def _debug_parsing_error(self, token):  # pragma: no cover
        """
        A function that should be called from ``error`` when debugging a parsing error.

        Call this from the method ``error``. You will also need ``debugfile`` to be set so
        that the parser tables are saved to a file, e.g.,

        debugfile = 'parser.out'
        """
        print(f"********* New Parsing Error from: {type(self)} ************ ")
        print(f"Token: {token}")
        print(f"State: {self.state}, statestack: {self.statestack}")
        print(f"Symstack: {self.symstack}")
        print(f"Log length: {len(self.log)}")
        print()
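
For orientation, here is a minimal usage sketch (not part of the coverage report, and not an example from MontePy itself) of how the pieces above fit together: the lexer produces a token stream, parse() consumes it, and any syntax errors accumulate in the SLY_Supressor queue instead of being logged. Instantiating the base MCNP_Parser directly and the input string are illustrative assumptions; in practice one of MontePy's concrete parser subclasses, built through MetaBuilder, is used.

from montepy.input_parser.tokens import MCNP_Lexer
from montepy.input_parser.parser_base import MCNP_Parser

lexer = MCNP_Lexer()
parser = MCNP_Parser()  # illustrative only; a concrete subclass is normally used

parser.restart()  # clear any errors queued by a previous parse
tree = parser.parse(lexer.tokenize("1.5 2 3"))  # hypothetical input text

if tree is None:
    # parse() returns None whenever errors were queued, even if SLY recovered,
    # so the queued errors are the place to look for what went wrong.
    for err in parser.log.clear_queue():
        print(err["message"], err["line"], err["index"])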