• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

pypest / pyemu / 5887625428

17 Aug 2023 06:23AM UTC coverage: 79.857% (+1.5%) from 78.319%
5887625428

push

github

briochh
Merge branch 'develop'

11386 of 14258 relevant lines covered (79.86%)

6.77 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

82.76
/pyemu/pst/pst_utils.py
1
"""Various PEST(++) control file peripheral operations"""
9✔
2
from __future__ import print_function, division
9✔
3
import os
9✔
4
import warnings
9✔
5
import multiprocessing as mp
9✔
6
import re
9✔
7
import numpy as np
9✔
8
import pandas as pd
9✔
9

10
pd.options.display.max_colwidth = 100
9✔
11

12
import pyemu
9✔
13
from ..pyemu_warnings import PyemuWarning
9✔
14

15
# formatters
16
# SFMT = lambda x: "{0:>20s}".format(str(x.decode()))
17
def SFMT(item):
    """format `item` as a left-justified, 20-character string field.

    `bytes` values are decoded first; anything else is passed through
    `str()`.  Used as a pandas column formatter when writing control files.
    """
    try:
        s = "{0:<20s} ".format(item.decode())
    # only the failures the decode path can actually produce -- a bare
    # except here would mask unrelated programming errors
    except (AttributeError, UnicodeDecodeError):
        s = "{0:<20s} ".format(str(item))
    return s
23

24

25
# PEP 8: named formatters as defs (not lambdas bound to names) so they
# have useful reprs/tracebacks; behavior is identical
def SFMT_LONG(x):
    """format `x` as a left-justified, 50-character string field."""
    return "{0:<50s} ".format(str(x))


def IFMT(x):
    """format `x` as a left-justified, 10-character integer field."""
    return "{0:<10d} ".format(int(x))


def FFMT(x):
    """format `x` as a left-justified, 20-character scientific-notation field."""
    return "{0:<20.10E} ".format(float(x))
28

29

30
def str_con(item):
    """string converter used when parsing control-file tables: lowercase
    and strip the token; empty strings become NaN so pandas treats them
    as missing values.
    """
    if len(item) == 0:
        # np.nan, not np.NaN -- the NaN alias was removed in numpy 2.0
        return np.nan
    return item.lower().strip()
34

35
# pst_config maps each control-file section to the numpy dtype, field
# names, text formatters, read-converters and default values used when
# reading/writing that section of a PEST(++) control file
pst_config = {}

# parameter stuff
# tied-parameter section: maps a tied parameter name to its host parameter
pst_config["tied_dtype"] = np.dtype([("parnme", "U20"), ("partied", "U20")])
pst_config["tied_fieldnames"] = ["parnme", "partied"]
pst_config["tied_format"] = {"parnme": SFMT, "partied": SFMT}
pst_config["tied_converters"] = {"parnme": str_con, "partied": str_con}
pst_config["tied_defaults"] = {"parnme": "dum", "partied": "dum"}

# "* parameter data" section
pst_config["par_dtype"] = np.dtype(
    [
        ("parnme", "U20"),
        ("partrans", "U20"),
        ("parchglim", "U20"),
        ("parval1", np.float64),
        ("parlbnd", np.float64),
        ("parubnd", np.float64),
        ("pargp", "U20"),
        ("scale", np.float64),
        ("offset", np.float64),
        ("dercom", int),
    ]
)
pst_config["par_fieldnames"] = (
    "PARNME PARTRANS PARCHGLIM PARVAL1 PARLBND PARUBND " + "PARGP SCALE OFFSET DERCOM"
)
# normalize to a lowercase list of column names
pst_config["par_fieldnames"] = pst_config["par_fieldnames"].lower().strip().split()
pst_config["par_format"] = {
    "parnme": SFMT,
    "partrans": SFMT,
    "parchglim": SFMT,
    "parval1": FFMT,
    "parlbnd": FFMT,
    "parubnd": FFMT,
    "pargp": SFMT,
    "scale": FFMT,
    "offset": FFMT,
    "dercom": IFMT,
}
# human-friendly aliases accepted in place of the native PEST column names
pst_config["par_alias_map"] = {
    "name": "parnme",
    "transform": "partrans",
    "value": "parval1",
    "upper_bound": "parubnd",
    "lower_bound": "parlbnd",
    "group": "pargp",
}
pst_config["par_converters"] = {
    "parnme": str_con,
    "pargp": str_con,
    "parval1": np.float64,
    "parubnd": np.float64,
    "parlbnd": np.float64,
    "scale": np.float64,
    "offset": np.float64,
}
pst_config["par_defaults"] = {
    "parnme": "dum",
    "partrans": "log",
    "parchglim": "factor",
    "parval1": 1.0,
    "parlbnd": 1.1e-10,
    "parubnd": 1.1e10,
    "pargp": "pargp",
    "scale": 1.0,
    "offset": 0.0,
    "dercom": 1,
}
103

104

105
# parameter group stuff -- "* parameter groups" section
pst_config["pargp_dtype"] = np.dtype(
    [
        ("pargpnme", "U20"),
        ("inctyp", "U20"),
        ("derinc", np.float64),
        ("derinclb", np.float64),
        ("forcen", "U20"),
        ("derincmul", np.float64),
        ("dermthd", "U20"),
        ("splitthresh", np.float64),
        ("splitreldiff", np.float64),
        ("splitaction", "U20"),
    ]
)
pst_config["pargp_fieldnames"] = (
    "PARGPNME INCTYP DERINC DERINCLB FORCEN DERINCMUL "
    + "DERMTHD SPLITTHRESH SPLITRELDIFF SPLITACTION"
)
# normalize to a lowercase list of column names
pst_config["pargp_fieldnames"] = pst_config["pargp_fieldnames"].lower().strip().split()

pst_config["pargp_format"] = {
    "pargpnme": SFMT,
    "inctyp": SFMT,
    "derinc": FFMT,
    "forcen": SFMT,
    "derincmul": FFMT,
    "dermthd": SFMT,
    "splitthresh": FFMT,
    "splitreldiff": FFMT,
    "splitaction": SFMT,
}

pst_config["pargp_converters"] = {
    "pargpnme": str_con,
    "inctyp": str_con,
    # fixed: key was misspelled "dermethd", which never matched the
    # "dermthd" field name, so the converter was silently skipped
    "dermthd": str_con,
    "derinc": np.float64,
    "derinclb": np.float64,
    "splitaction": str_con,
    "forcen": str_con,
    "derincmul": np.float64,
}
pst_config["pargp_defaults"] = {
    "pargpnme": "pargp",
    "inctyp": "relative",
    "derinc": 0.01,
    "derinclb": 0.0,
    "forcen": "switch",
    "derincmul": 2.0,
    "dermthd": "parabolic",
    "splitthresh": 1.0e-5,
    "splitreldiff": 0.5,
    "splitaction": "smaller",
}
160

161

162
# observation stuff -- "* observation data" section
pst_config["obs_fieldnames"] = "OBSNME OBSVAL WEIGHT OBGNME".lower().split()
pst_config["obs_dtype"] = np.dtype(
    [
        ("obsnme", "U20"),
        ("obsval", np.float64),
        ("weight", np.float64),
        ("obgnme", "U20"),
    ]
)
pst_config["obs_format"] = {
    "obsnme": SFMT,
    "obsval": FFMT,
    "weight": FFMT,
    "obgnme": SFMT,
}
pst_config["obs_converters"] = {
    "obsnme": str_con,
    "obgnme": str_con,
    "weight": np.float64,
    "obsval": np.float64,
}
pst_config["obs_defaults"] = {
    "obsnme": "dum",
    "obsval": 1.0e10,
    "weight": 1.0,
    "obgnme": "obgnme",
}
# human-friendly aliases accepted in place of the native PEST column names
pst_config["obs_alias_map"] = {"name": "obsnme", "value": "obsval", "group": "obgnme"}
191

192
# prior info stuff -- "* prior information" section
# empty placeholder frame used when a control file has no prior information
pst_config["null_prior"] = pd.DataFrame({"pilbl": None, "obgnme": None}, index=[])
pst_config["prior_format"] = {
    "pilbl": SFMT,
    "equation": SFMT_LONG,
    "weight": FFMT,
    "obgnme": SFMT,
}
pst_config["prior_fieldnames"] = ["pilbl", "equation", "weight", "obgnme"]

# model input/output section: template->input and instruction->output pairs
pst_config["model_io_fieldnames"] = ["pest_file", "model_file"]
pst_config["model_io_format"] = {"pest_file": SFMT_LONG, "model_file": SFMT_LONG}
# empty placeholder frame used when no io pairs are defined
pst_config["null_model_io"] = pd.DataFrame(
    {"pest_file": None, "model_file": None}, index=[]
)
pst_config["model_io_defaults"] = {"pest_file": "pest_file", "model_file": "model_file"}
9✔
208

209
# other containers -- default (empty) values for the remaining Pst attributes
pst_config["model_command"] = []
# pst_config["template_files"] = []
# pst_config["input_files"] = []
# pst_config["instruction_files"] = []
# pst_config["output_files"] = []
# verbatim control-file lines that pyemu carries through without parsing
pst_config["other_lines"] = []
pst_config["tied_lines"] = []
pst_config["regul_lines"] = []
# "++" options passed through to the PEST++ suite
pst_config["pestpp_options"] = {}
219

220

221
def read_resfile(resfile):
    """load a PEST-style residual file into a pandas.DataFrame

    Args:
         resfile (`str`): path and name of an existing residual file

     Returns:
         `pandas.DataFrame`: a dataframe of info from the residuals file.
         Column names are the names from the residuals file: "name", "group",
         "measured", "modelled" (with two "L"s), "residual", "weight".

     Raises:
         Exception: if `resfile` does not exist or no header line
         containing "name" is found.

     Example::

         df = pyemu.pst_utils.read_resfile("my.res")
         df.residual.plot(kind="hist")

    """
    # raise (not assert) so the check survives python -O
    if not os.path.exists(resfile):
        raise Exception(
            "read_resfile() error: resfile " + "{0} not found".format(resfile)
        )
    converters = {"name": str_con, "group": str_con}
    # context manager guarantees the handle is closed even if the header
    # scan or the parse below raises
    with open(resfile, "r") as f:
        # skip any preamble until the header line (contains "name")
        while True:
            line = f.readline()
            if line == "":
                raise Exception(
                    "Pst.get_residuals: EOF before finding "
                    + "header in resfile: "
                    + resfile
                )
            if "name" in line.lower():
                header = line.lower().strip().split()
                break
        res_df = pd.read_csv(
            f, header=None, names=header, sep=r"\s+", converters=converters,
            usecols=header #on_bad_lines='skip'
        )
    # strip the "Cov.", "Mat." and "na" strings that PEST records in the *.res file; make float
    float_cols = [x for x in res_df.columns if x not in ['name','group']]
    res_df[float_cols] = res_df[float_cols].replace(['Cov.', 'Mat.', 'na'], np.nan).astype(float)
    res_df.index = res_df.name
    return res_df
264

265

266
def res_from_en(pst, enfile):
    """load ensemble results from PESTPP-IES into a PEST-style
    residuals `pandas.DataFrame`

    Args:
        pst (`pyemu.Pst`): control file instance supplying the observation
            data (names, values, weights, groups)
        enfile (`str` or `pandas.DataFrame`): CSV-format ensemble file name,
            or an already-loaded ensemble dataframe

    Returns:
        `pandas.DataFrame`: a dataframe with the same columns as a
        residual dataframe (a la `pst_utils.read_resfile()`) plus a
        "std" column holding the ensemble standard deviation

    Note:
        If a "base" realization is found in the ensemble, it is used
        as the "modelled" column in the residuals dataframe.  Otherwise,
        the mean of the ensemble is used as "modelled"

    Example::

        df = pyemu.pst_utils.res_from_en(pst, "my.0.obs.csv")
        df.residual.plot(kind="hist")

    """
    obs = pst.observation_data
    if isinstance(enfile, str):
        # converters are only needed when parsing a CSV from disk
        converters = {"name": str_con, "group": str_con}
        df = pd.read_csv(enfile, converters=converters)
        df.columns = df.columns.str.lower()
        # transpose so rows are observations and columns are realizations.
        # use the axis keyword: the positional form rename_axis(None, 1)
        # was removed in modern pandas
        df = df.set_index("real_name").T.rename_axis("name").rename_axis(None, axis=1)
    else:
        df = enfile.T
    if "base" in df.columns:
        # use the "base" realization directly as the modelled values
        modelled = df["base"]
        std = df.std(axis=1)
    else:
        # no "base" realization - fall back to the ensemble mean
        modelled = df.mean(axis=1)
        std = df.std(axis=1)
    # probably a more pandastic way to do this
    res_df = pd.DataFrame({"modelled": modelled, "std": std}, index=obs.obsnme.values)
    res_df["group"] = obs["obgnme"].copy()
    res_df["measured"] = obs["obsval"].copy()
    res_df["weight"] = obs["weight"].copy()
    res_df["residual"] = res_df["measured"] - res_df["modelled"]
    return res_df
309

310

311
def read_parfile(parfile):
    """load a PEST-style parameter value file into a pandas.DataFrame

    Args:
        parfile (`str`): path and name of existing parameter file

    Returns:
        `pandas.DataFrame`: a dataframe with columns of "parnme", "parval1",
        "scale" and "offset", indexed by "parnme"

    Raises:
        Exception: if `parfile` does not exist

    Example::

        df = pyemu.pst_utils.read_parfile("my.par1")

    """
    if not os.path.exists(parfile):
        raise Exception(
            "pst_utils.read_parfile: parfile not found: {0}".format(parfile)
        )
    # context manager guarantees the handle is closed even if parsing fails
    with open(parfile, "r") as f:
        f.readline()  # skip the "single point" header line
        par_df = pd.read_csv(
            f, header=None, names=["parnme", "parval1", "scale", "offset"], sep=r"\s+"
        )
    par_df.index = par_df.parnme
    return par_df
337

338

339
def write_parfile(df, parfile):
    """write a PEST-style parameter file from a dataframe

    Args:
        df (`pandas.DataFrame`): a dataframe with column names
            that correspond to the entries in the parameter data
            section of the pest control file
        parfile (`str`): name of the parameter file to write

    Raises:
        Exception: if any required column is missing from `df`

    Example::

        pyemu.pst_utils.write_parfile(pst.parameter_data,"my.par")

    """
    columns = ["parnme", "parval1", "scale", "offset"]
    formatters = {
        "parnme": lambda x: "{0:20s}".format(x),
        "parval1": lambda x: "{0:20.7E}".format(x),
        "scale": lambda x: "{0:20.7E}".format(x),
        "offset": lambda x: "{0:20.7E}".format(x),
    }

    # raise (not assert) so validation survives python -O
    for col in columns:
        if col not in df.columns:
            raise Exception(
                "write_parfile() error: " + "{0} not found in df".format(col)
            )
    with open(parfile, "w") as f:
        f.write("single point\n")
        f.write(
            df.to_string(
                col_space=0,
                columns=columns,
                formatters=formatters,
                justify="right",
                header=False,
                index=False,
                index_names=False,
            )
            + "\n"
        )
379

380

381
def parse_tpl_file(tpl_file):
    """parse a PEST-style template file to get the parameter names

    Args:
    tpl_file (`str`): path and name of a template file

    Returns:
        [`str`] : list of unique parameter names found in `tpl_file`
        (order is not guaranteed)

    Raises:
        Exception: if the header is not "ptf"/"jtf", the marker is not a
        single character, or any other parse failure occurs

    Example::

        par_names = pyemu.pst_utils.parse_tpl_file("my.tpl")

    """
    par_names = set()
    with open(tpl_file, "r") as f:
        try:
            header = f.readline().strip().split()
            assert header[0].lower() in [
                "ptf",
                "jtf",
            ], "template file error: must start with [ptf,jtf], not:" + str(header[0])
            assert (
                len(header) == 2
            ), "template file error: header line must have two entries: " + str(header)

            marker = header[1]
            assert (
                len(marker) == 1
            ), "template file error: marker must be a single character, not:" + str(
                marker
            )
            for line in f:
                # parameter names are the tokens between marker pairs
                par_line = set(line.lower().strip().split(marker)[1::2])
                par_names.update(par_line)
        except Exception as e:
            # chain the original exception so the traceback is preserved
            raise Exception(
                "error processing template file " + tpl_file + " :\n" + str(e)
            ) from e
    return [p.strip() for p in list(par_names)]
429

430

431
def write_input_files(pst, pst_path="."):
    """write parameter values to model input files

    Args:
        pst (`pyemu.Pst`): a Pst instance
        pst_path (`str`): the path to where the control file and template
            files reside.  Default is '.'.

    Note:

        This function uses template files with the current parameter \
        values (stored in `pst.parameter_data.parval1`).

        This function uses multiprocessing - one process per template file

        This is a simple implementation of what PEST does.  It does not
        handle all the special cases, just a basic function...user beware


    """
    # work on a copy so the caller's parameter_data is not mutated
    par = pst.parameter_data.copy()
    par.index = par.index.str.lower()
    # apply the scale/offset transform to get the values actually written
    par.loc[:, "parval1_trans"] = (par.parval1 * par.scale) + par.offset
    # pair each template file with its corresponding model input file
    pairs = np.array(list(zip(pst.template_files, pst.input_files)))
    num_tpl = len(pairs)
    # batch the file pairs into chunks of 50 so each worker gets a
    # meaningful amount of work
    chunk_len = 50
    num_chunk_floor = num_tpl // chunk_len
    main_chunks = (
        pairs[: num_chunk_floor * chunk_len].reshape([-1, chunk_len, 2]).tolist()
    )  # the list of files broken down into chunks
    remainder = pairs[num_chunk_floor * chunk_len :].tolist()  # remaining files
    chunks = main_chunks + [remainder]
    #    procs = []
    #   for chunk in chunks:
    #        # write_to_template(pst.parameter_data.parval1_trans,os.path.join(pst_path,tpl_file),
    #        #                  os.path.join(pst_path,in_file))
    #        p = mp.Process(
    #            target=_write_chunk_to_template,
    #            args=[chunk, pst.parameter_data.parval1_trans, pst_path],
    #        )
    #        p.start()
    #        procs.append(p)
    #    for p in procs:
    #        p.join()
    # worker count capped at 60 -- presumably the Windows wait-handle
    # limit; TODO confirm
    pool = mp.Pool(processes=min(mp.cpu_count(), len(chunks), 60))
    x = [
        pool.apply_async(
            _write_chunk_to_template,
            args=(chunk, par.parval1_trans, pst_path),
        )
        for i, chunk in enumerate(chunks)
    ]
    # block until all chunks are written; get() re-raises any worker error
    [xx.get() for xx in x]
    pool.close()
    pool.join()
486

487

488
def _write_chunk_to_template(chunk, parvals, pst_path):
    """worker for write_input_files(): render every (template, input)
    file pair in `chunk` using the parameter values in `parvals`."""
    for tpl_name, in_name in chunk:
        write_to_template(
            parvals,
            os.path.join(pst_path, tpl_name),
            os.path.join(pst_path, in_name),
        )
493

494

495
def write_to_template(parvals, tpl_file, in_file):
    """write parameter values to a model input file using
    the corresponding template file

    Args:
        parvals (`dict`): a container of parameter names and values.  Can
            also be a `pandas.Series`
        tpl_file (`str`): path and name of a template file
        in_file (`str`): path and name of model input file to write

    Raises:
        Exception: on a malformed template header/marker or a
        name/marker count mismatch

    Examples::

        pyemu.pst_utils.write_to_template(par.parameter_data.parval1,
                                          "my.tpl","my.input")

    """
    # context managers guarantee both handles are closed even if a
    # malformed template raises part way through (the originals leaked)
    with open(in_file, "w") as f_in, open(tpl_file, "r") as f_tpl:
        header = f_tpl.readline().strip().split()
        if header[0].lower() not in ["ptf", "jtf"]:
            raise Exception(
                "template file error: must start with [ptf,jtf], not:" + str(header[0])
            )
        if len(header) != 2:
            raise Exception(
                "template file error: header line must have two entries: " + str(header)
            )

        marker = header[1]
        if len(marker) != 1:
            raise Exception(
                "template file error: marker must be a single character, not:" + str(marker)
            )
        for line in f_tpl:
            if marker not in line:
                # no parameters on this line - copy through verbatim
                f_in.write(line)
            else:
                line = line.rstrip()
                # parameter names are the tokens between marker pairs
                par_names = line.lower().split(marker)[1::2]
                par_names = [name.strip() for name in par_names]
                start, end = _get_marker_indices(marker, line)
                if len(par_names) != len(start):
                    raise Exception("par_names != start")
                new_line = line[: start[0]]
                # literal text between successive marker pairs
                between = [line[e:s] for s, e in zip(start[1:], end[:-1])]
                for i, name in enumerate(par_names):
                    s, e = start[i], end[i]
                    w = e - s
                    # choose precision to fit the marker-delimited width
                    if w > 15:
                        d = 6
                    else:
                        d = 3
                    fmt = "{0:" + str(w) + "." + str(d) + "E}"
                    val_str = fmt.format(parvals[name])
                    new_line += val_str
                    if i != len(par_names) - 1:
                        new_line += between[i]
                new_line += line[end[-1] :]
                f_in.write(new_line + "\n")
556

557

558
def _get_marker_indices(marker, line):
9✔
559
    """method to find the start and end parameter markers
560
    on a template file line.  Used by write_to_template()
561

562
    """
563
    indices = [i for i, ltr in enumerate(line) if ltr == marker]
8✔
564
    start = indices[0:-1:2]
8✔
565
    end = [i + 1 for i in indices[1::2]]
8✔
566
    assert len(start) == len(end)
8✔
567
    return start, end
8✔
568

569

570
def parse_ins_file(ins_file):
    """parse a PEST-style instruction file to get observation names

    Args:
        ins_file (`str`): path and name of an existing instruction file

    Returns:
        [`str`]: a list of observation names found in `ins_file`

    Raises:
        Exception: if the header is not "pif"/"jif" or the marker is not
        a single character

    Note:
        This is a basic function for parsing instruction files to
        look for observation names.

    Example::

        obs_names = pyemu.pst_utils.parse_ins_file("my.ins")

    """

    obs_names = []
    with open(ins_file, "r") as f:
        header = f.readline().strip().split()
        # raise (not assert) so validation survives python -O; matches the
        # error-handling style of write_to_template()
        if header[0].lower() not in ["pif", "jif"]:
            raise Exception(
                "instruction file error: must start with [pif,jif], not:"
                + str(header[0])
            )
        marker = header[1]
        if len(marker) != 1:
            raise Exception(
                "instruction file error: marker must be a single character, not:"
                + str(marker)
            )
        for line in f:
            line = line.lower()
            if marker in line:
                # this still only returns and obs if "[": "]", "(": ")", "!": "!" in items
                # text between marker pairs is free-form; only the chunks
                # outside the markers can hold observation instructions
                raw = line.strip().split(marker)
                for item in raw[::2]:
                    if len(item) > 1:
                        # possible speedup, only attempting to parse if item
                        # is more than 1 char
                        obs_names.extend(_parse_ins_string(item))
            else:
                obs_names.extend(_parse_ins_string(line.strip()))
    return obs_names
616

617

618
def _parse_ins_string(string):
9✔
619
    """split up an instruction file line to get the observation names"""
620
    istart_markers = set(["[", "(", "!"])
9✔
621
    marker_dict = {"[": "]", "(": ")", "!": "!"}
9✔
622
    # iend_markers = set(["]",")","!"])
623
    setdum = {"dum", "DUM"}
9✔
624
    obs_names = []
9✔
625
    slen = len(string)
9✔
626
    idx = 0
9✔
627
    while True:
4✔
628
        if idx >= slen - 1:
9✔
629
            break
9✔
630
        char = string[idx]
9✔
631
        if char in istart_markers:
9✔
632
            # em = iend_markers[istart_markers.index(char)]
633
            em = marker_dict[char]
9✔
634
            # print("\n",idx)
635
            # print(string)
636
            # print(string[idx+1:])
637
            # print(string[idx+1:].index(em))
638
            # print(string[idx+1:].index(em)+idx+1)
639
            eidx = min(slen, string.find(em, idx + 1))
9✔
640
            obs_name = string[idx + 1 : eidx]
9✔
641
            if obs_name not in setdum:
9✔
642
                obs_names.append(obs_name)
9✔
643
            idx = eidx + 1
9✔
644
        else:
645
            idx += 1
9✔
646
    return obs_names
9✔
647

648

649
def _populate_dataframe(index, columns, default_dict, dtype):
9✔
650
    """helper function to populate a generic Pst dataframe attribute.
651

652
    Note:
653
        This function is called as part of constructing a generic Pst instance
654

655
    """
656
    new_df = pd.concat(
9✔
657
        [pd.Series(default_dict[fieldname],
658
                   index=index,
659
                   name=fieldname).astype(dt[1])
660
         for fieldname, dt in zip(columns, dtype.descr)],
661
        axis=1
662
    )
663
    return new_df
9✔
664

665

666
def generic_pst(par_names=None, obs_names=None, addreg=False):
    """generate a generic pst instance.

    Args:
        par_names ([`str`], optional): parameter names to include in the new
            `pyemu.Pst`.  Default is ["par1"].
        obs_names ([`str`], optional): observation names to include in the new
            `pyemu.Pst`.  Default is ["obs1"].
        addreg (`bool`): flag to add zero-order Tikhonov prior information
            equations to the new control file

    Returns:
        `pyemu.Pst`: a new control file instance. This instance does not have
        all the info needed to run, but is a placeholder that can then be
        filled in later.

    Example::

        par_names = ["par1","par2"]
        obs_names = ["obs1","obs2"]
        pst = pyemu.pst_utils.generic_pst(par_names,obs_names)

    """
    # None sentinels instead of mutable list defaults
    if par_names is None:
        par_names = ["par1"]
    elif not isinstance(par_names, list):
        par_names = list(par_names)
    if obs_names is None:
        obs_names = ["obs1"]
    elif not isinstance(obs_names, list):
        obs_names = list(obs_names)
    new_pst = pyemu.Pst("pest.pst", load=False)
    # single default parameter group
    pargp_data = _populate_dataframe(
        ["pargp"], new_pst.pargp_fieldnames, new_pst.pargp_defaults, new_pst.pargp_dtype
    )
    new_pst.parameter_groups = pargp_data

    # parameter data with all-default values, indexed/sorted by name
    par_data = _populate_dataframe(
        par_names, new_pst.par_fieldnames, new_pst.par_defaults, new_pst.par_dtype
    )
    par_data.loc[:, "parnme"] = par_names
    par_data.index = par_names
    par_data.sort_index(inplace=True)
    new_pst.parameter_data = par_data
    # observation data with all-default values, indexed/sorted by name
    obs_data = _populate_dataframe(
        obs_names, new_pst.obs_fieldnames, new_pst.obs_defaults, new_pst.obs_dtype
    )
    obs_data.loc[:, "obsnme"] = obs_names
    obs_data.index = obs_names
    obs_data.sort_index(inplace=True)
    new_pst.observation_data = obs_data

    # placeholder forward-run command
    new_pst.model_command = ["model.bat"]

    new_pst.prior_information = new_pst.null_prior

    if addreg:
        new_pst.zero_order_tikhonov()

    return new_pst
729

730

731
def try_read_input_file_with_tpl(tpl_file, input_file=None):
    """attempt to read parameter values from an input file using a template file
    Args:
        tpl_file (`str`): path and name of a template file
        input_file (`str`,optional): path and name of existing model
            input file to process.  If `None`, `tpl_file.replace(".tpl","")`
            is used.  Default is None.

    Returns:
        `pandas.DataFrame`: a dataframe of parameter name and values
        extracted from `input_file`, or `None` if the input file does
        not exist or could not be parsed.

    Note:
        If an exception is raised when reading the input file, the exception
        is echoed to the screen and `None` is returned.

    Example::

        df = pyemu.pst_utils.try_read_input_file_with_tpl("my.tpl","my.input")

    """

    if input_file is None:
        input_file = tpl_file.replace(".tpl", "")
    if not os.path.exists(input_file):
        return None
    # parse the template first purely as validation - this raises on a
    # malformed template before we try to use it (result not needed)
    parse_tpl_file(tpl_file)
    try:
        df = _read_infile_with_tplfile(tpl_file, input_file)
    except Exception as e:
        print("error trying to read input file with tpl file:{0}".format(str(e)))
        return None
    return df
766

767

768
def _read_infile_with_tplfile(tpl_file, input_file):
9✔
769
    """attempt to read parameter values from an input file using a template file,
770
    raising heaps of exceptions.
771
        Args:
772
            tpl_file (`str`): path and name of a template file
773
            input_file (`str`): path and name of existing model
774

775
        Returns:
776
            `pandas.DataFrame`: a dataframe of parameter name and values
777
            extracted from `input_file`.
778

779
        Note:
780
            use try_read_inputfile_with_tpl instead of this one.
781

782
    """
783

784
    if not os.path.exists(input_file):
8✔
785
        raise Exception("input file '{0}' not found".format(input_file))
×
786

787
    f_tpl = open(tpl_file, "r")
8✔
788
    f_in = open(input_file, "r")
8✔
789

790
    # read the tpl header
791
    _, marker = f_tpl.readline().split()
8✔
792
    itpl, iin = 1, 0
8✔
793
    pnames, pvals = [], []
8✔
794
    pdict = {}
8✔
795
    while True:
4✔
796
        tpl_line = f_tpl.readline()
8✔
797
        if tpl_line == "":
8✔
798
            break
8✔
799

800
        in_line = f_in.readline()
8✔
801
        if in_line == "":
8✔
802
            raise Exception(
×
803
                "input file EOF, tpl file line {0}, in file line {1}".format(itpl, iin)
804
            )
805

806
        if marker in tpl_line:
8✔
807
            idxs = [i for i, ltr in enumerate(tpl_line) if ltr == marker]
8✔
808
            if len(idxs) % 2 != 0:
8✔
809
                raise Exception("unbalanced markers on tpl line {0}".format(itpl))
×
810

811
            for s, e in zip(idxs[0:-1:2], idxs[1::2]):
8✔
812
                tpl_str = tpl_line[s : e + 1]
8✔
813
                pname = tpl_str.replace(marker, "").strip().lower()
8✔
814
                if s > len(in_line):
8✔
815
                    raise Exception(
×
816
                        "input file EOL line {0}, tpl line {1}, looking for {2}".format(
817
                            iin, itpl, tpl_str
818
                        )
819
                    )
820
                junk_val = "Jennyigotunumber8675309"
8✔
821
                tmp = tpl_line[:s] + " {} ".format(junk_val) + tpl_line[e + 1 :]
8✔
822
                if len(tmp.split()) == len(in_line.split()):
8✔
823
                    # treat this as whitespace delimited
824
                    in_str = in_line.split()[tmp.split().index(junk_val)]
8✔
825
                else:
826
                    # or we must assume the params are written using the same spacing as template file
827
                    in_str = in_line[s : e + 1]
8✔
828
                try:
8✔
829
                    v = float(in_str)
8✔
830
                except Exception as e:
8✔
831
                    raise Exception(
8✔
832
                        "error casting '{0}' to float on in line {1}, tpl line {2} for {3}: {4}".format(
833
                            in_str, iin, itpl, tpl_str, str(e)
834
                        )
835
                    )
836

837
                if pname in pdict:
8✔
838
                    eval = pdict[pname]
8✔
839
                    if not np.isclose(eval, v, 1.0e-6):
8✔
840
                        raise Exception(
×
841
                            "different values {0}:{1} for par {2} on in line {3}".format(
842
                                v, eval, pname, iin
843
                            )
844
                        )
845
                else:
846
                    pnames.append(pname)
8✔
847
                    pvals.append(v)
8✔
848
                pdict[pname] = v
8✔
849
        itpl += 1
8✔
850
        iin += 1
8✔
851
    df = pd.DataFrame({"parnme": pnames, "parval1": pvals}, index=pnames)
8✔
852
    return df
8✔
853

854

855
def try_process_output_file(ins_file, output_file=None):
    """attempt to process a model output file using a PEST-style instruction file

    Args:
        ins_file (`str`): path and name of an instruction file
        output_file (`str`, optional): path and name of the existing model
            output file to process.  If `None`, `ins_file.replace(".ins","")`
            is used.  Default is None.

    Returns:
        `pandas.DataFrame`: a dataframe of observation names and simulated outputs
        extracted from `output_file`, or `None` if processing failed.

    Note:
        If an exception is raised when processing the output file, the exception
        is echoed to the screen and `None` is returned.

    Example::

        df = pyemu.pst_utils.try_process_output_file("my.ins","my.output")

    """
    # default the output file name to the ins file name minus its extension
    out_fname = ins_file.replace(".ins", "") if output_file is None else output_file
    ins = InstructionFile(ins_file)
    try:
        return ins.read_output_file(out_fname)
    except Exception as e:
        print("error processing instruction/output file pair: {0}".format(str(e)))
        return None
886

887

888
def try_process_output_pst(pst):
    """attempt to process each instruction file, model output
    file pair in a `pyemu.Pst`.

    Args:
        pst (`pyemu.Pst`): a control file instance

    Returns:
        None.  `pst.observation_data.obsval` is updated in place with any
        simulated values successfully extracted from the model output files.

    Note:
        For each instruction/output file pair, this function first tries the
        `InstructionFile` class; if that fails it falls back to running
        INSCHEK.  Pairs that cannot be processed either way are left with
        their existing `obsval` entries.

    """
    for ins_file, out_file in zip(pst.instruction_files, pst.output_files):
        try:
            df = InstructionFile(ins_file, pst=pst).read_output_file(out_file)
        except Exception as e:
            warnings.warn(
                "error processing instruction file {0}, trying inschek: {1}".format(
                    ins_file, str(e)
                )
            )
            # fall back to the external INSCHEK utility
            df = _try_run_inschek(ins_file, out_file)
        if df is not None:
            pst.observation_data.loc[df.index, "obsval"] = df.obsval
922

923

924
def _try_run_inschek(ins_file, out_file, cwd="."):
    """try to run INSCHEK and load the resulting OBF file.

    Args:
        ins_file (`str`): instruction file name
        out_file (`str`): model output file name
        cwd (`str`, optional): directory to run INSCHEK in.  Default is "."

    Returns:
        `pandas.DataFrame`: observation names (index, lower-cased) and values
        ("obsval") read from the OBF file written by INSCHEK, or `None` if
        INSCHEK could not be run or the OBF file could not be parsed.
    """
    try:
        pyemu.os_utils.run("inschek {0} {1}".format(ins_file, out_file), cwd=cwd)
        # INSCHEK writes an .obf file next to the instruction file
        obf_file = os.path.join(cwd, ins_file.replace(".ins", ".obf"))
        df = pd.read_csv(
            obf_file, delim_whitespace=True, skiprows=0, index_col=0, names=["obsval"]
        )
        df.index = df.index.map(str.lower)
        return df
    except Exception as e:
        print(
            "error using inschek for instruction file {0}:{1}".format(ins_file, str(e))
        )
        # bug fix: the original concatenated two literals without a space,
        # printing "...will havegeneric values."
        print("observations in this instruction file will have generic values.")
        return None
940

941

942
def get_phi_comps_from_recfile(recfile):
    """read the phi components from a record file by iteration

    Args:
        recfile (`str`): pest record file name

    Returns:
        `dict`:  nested dictionary of iteration number, {group,contribution}

    Note:
        It is really poor form to use the record file in this way.  Please only
        use this as a last resort!

    """
    iiter = 1
    iters = {}
    # bug fix: use a context manager so the record file handle is always
    # closed (the original opened the file and never closed it)
    with open(recfile, "r") as f:
        while True:
            line = f.readline()
            if line == "":
                # EOF
                break
            if (
                "starting phi for this iteration" in line.lower()
                or "final phi" in line.lower()
            ):
                # found the start of a phi block - collect its contributions
                contributions = {}
                while True:
                    line = f.readline()
                    if line == "":
                        # EOF mid-block: contributions are discarded,
                        # matching the historical behavior
                        break
                    if "contribution to phi" not in line.lower():
                        # first non-contribution line ends the block
                        iters[iiter] = contributions
                        iiter += 1
                        break
                    # e.g.: contribution to phi from observation group "head" = 1.23
                    raw = line.strip().split()
                    val = float(raw[-1])
                    group = raw[-3].lower().replace('"', "")
                    contributions[group] = val
    return iters
981

982

983
def res_from_obseravtion_data(observation_data):
    """create a PEST-style residual dataframe filled with np.nan for
    missing information

    Args:
        observation_data (`pandas.DataFrame`): the "* observation data"
            `pandas.DataFrame` from `pyemu.Pst.observation_data`

    Returns:
        `pandas.DataFrame`: a dataframe with the same columns as the
        residual dataframe ("name","group","measured","modelled",
        "residual","weight").

    """
    res_df = observation_data.copy()
    # rename the control-file columns to their residual-file counterparts
    res_df.loc[:, "name"] = res_df.pop("obsnme")
    res_df.loc[:, "measured"] = res_df.pop("obsval")
    res_df.loc[:, "group"] = res_df.pop("obgnme")
    # modelled/residual are unknown until model outputs are processed
    # bug fix: the np.NaN alias was removed in numpy 2.0 - use np.nan
    res_df.loc[:, "modelled"] = np.nan
    res_df.loc[:, "residual"] = np.nan
    return res_df
1006

1007

1008
def clean_missing_exponent(pst_filename, clean_filename="clean.pst"):
    """fixes the issue where some terrible fortran program may have
    written a floating point format without the 'e' - like 1.0+3, really?!

    Args:
        pst_filename (`str`): the pest control file
        clean_filename (`str`, optional):  the new pest control file to write.
            Default is "clean.pst"

    Note:
        All lines are lower-cased and stripped on the way through.

    """
    lines = []
    with open(pst_filename, "r") as f:
        for line in f:
            line = line.lower().strip()
            if "+" in line:
                raw = line.split("+")
                # insert the missing 'e' before each '+' that doesn't have one
                for i, r in enumerate(raw[:-1]):
                    # bug fix: guard against an empty segment (a line that
                    # starts with '+') which used to raise an IndexError
                    if r and r[-1] != "e":
                        r = r + "e"
                    raw[i] = r
                lines.append("+".join(raw))
            else:
                lines.append(line)
    with open(clean_filename, "w") as f:
        for line in lines:
            f.write(line + "\n")
1034

1035

1036
def csv_to_ins_file(
    csv_filename,
    ins_filename=None,
    only_cols=None,
    only_rows=None,
    marker="~",
    includes_header=True,
    includes_index=True,
    prefix="",
    head_lines_len=0,
    sep=",",
    gpname=False,
):
    """write a PEST-style instruction file from an existing CSV file

    Args:
        csv_filename (`str` or `pandas.DataFrame`): path and name of existing
            CSV file, or a dataframe that has already been loaded
        ins_filename (`str`, optional): path and name of the instruction
            file to create.  If `None`, then `csv_filename`+".ins" is used.
            Default is `None`.
        only_cols ([`str`]): list of columns to add observations for in the
            resulting instruction file. If `None`, all columns are used.
        only_rows ([`str`]): list of rows to add observations for in the
            resulting instruction file. If `None`, all rows are used.
        marker (`str`): the PEST instruction marker to use.  Default is "~"
        includes_header (`bool`): flag to indicate `csv_filename` includes a
            header row as the first row.  Default is True.
        includes_index (`bool`): flag to indicate `csv_filename` includes a
            index column as the first column.  Default is True.
        prefix (`str` or [`str`], optional): a prefix to prepend to observation
            names.  Default is ""
        head_lines_len (`int`, optional): number of extra leading lines in the
            output file to skip before the header.  Default is 0.
        sep (`str`, optional): column separator in the output file.  Default
            is ",".  Anything else is treated as whitespace-delimited.
        gpname (`str` or [`str`]): Optional PEST group name for columns

    Returns:
        `pandas.DataFrame`: a dataframe of observation names and values found in
        `csv_filename`

    Note:
        resulting observation names in `ins_filename` are a combiation of index and
        header values.


    """
    # process the csv_filename in case it is a dataframe
    if isinstance(csv_filename, str):
        df = pd.read_csv(csv_filename, index_col=0)
        df.columns = df.columns.map(str.lower)
        df.index = df.index.map(lambda x: str(x).lower())
    else:
        df = csv_filename

    # process only_cols - normalize to a lower-cased set
    if only_cols is None:
        only_cols = set(df.columns.map(lambda x: x.lower().strip()).tolist())
    else:
        if isinstance(only_cols, str):  # in case it is a single name
            only_cols = [only_cols]
        only_cols = set(only_cols)
    only_cols = {c.lower() if isinstance(c, str) else c for c in only_cols}

    # process only_rows - normalize to a lower-cased set
    if only_rows is None:
        only_rows = set(df.index.map(lambda x: x.lower().strip()).tolist())
    else:
        if isinstance(only_rows, str):  # in case it is a single name
            only_rows = [only_rows]
        only_rows = set(only_rows)
    only_rows = {r.lower() if isinstance(r, str) else r for r in only_rows}

    # process the row labels, handling duplicates by appending "_<count>"
    rlabels = []
    row_visit = {}
    only_rlabels = []
    for rname_org in df.index:
        rname = str(rname_org).strip().lower()
        if rname in row_visit:
            rsuffix = "_" + str(int(row_visit[rname] + 1))
            row_visit[rname] += 1
        else:
            row_visit[rname] = 1
            rsuffix = ""
        rlabel = rname + rsuffix
        rlabels.append(rlabel)
        # match on either the normalized or original row name
        if rname in only_rows or rname_org in only_rows:
            only_rlabels.append(rlabel)
    only_rlabels = set(only_rlabels)

    # process the col labels, handling duplicates by appending "_<count>"
    clabels = []
    col_visit = {}
    only_clabels = []
    for cname_org in df.columns:
        cname = str(cname_org).strip().lower()
        if cname in col_visit:
            csuffix = "_" + str(int(col_visit[cname] + 1))
            col_visit[cname] += 1
        else:
            col_visit[cname] = 1
            csuffix = ""
        clabel = cname + csuffix
        clabels.append(clabel)
        # match on either the normalized or original column name
        if cname in only_cols or cname_org in only_cols:
            only_clabels.append(clabel)
    only_clabels = set(only_clabels)
    if len(only_clabels) == 0:
        print("only_cols:", only_cols)
        raise Exception("csv_to_ins_file(): only_clabels is empty")

    if ins_filename is None:
        if not isinstance(csv_filename, str):
            raise Exception("ins_filename is None but csv_filename is not string")
        ins_filename = csv_filename + ".ins"
    # NOTE: the visit dicts are reset here but not re-used below
    row_visit, col_visit = {}, {}
    onames = []  # observation names, in file order
    ovals = []  # observation values, in file order
    ognames = []  # observation group names (may contain None)
    only_clabels_len = len(only_clabels)
    clabels_len = len(clabels)
    prefix_is_str = isinstance(prefix, str)
    vals = df.values.copy()  # wasteful but way faster
    with open(ins_filename, "w") as f:
        # standard PEST instruction file header: "pif <marker>"
        f.write(f"pif {marker}\n")
        # skip any extra leading lines before the header
        [f.write("l1\n") for _ in range(head_lines_len)]
        if includes_header:
            f.write("l1\n")  # skip the row (index) label
        for i, rlabel in enumerate(rlabels):  # loop over rows
            f.write("l1")
            if rlabel not in only_rlabels:
                # this row carries no observations - just advance a line
                f.write("\n")
                continue
            c_count = 0  # number of observation columns handled so far
            line = ""
            for j, clabel in enumerate(clabels):  # loop over columns

                if j == 0:
                    # if first col and input file has an index need additional spacer
                    if includes_index:
                        if sep == ",":
                            # f.write(f" {marker},{marker}")
                            line += f" {marker},{marker}"
                        else:
                            # f.write(" !dum!")
                            line += " !dum! "

                if c_count < only_clabels_len:
                    if clabel in only_clabels:  # and rlabel in only_rlabels:
                        oname = ""
                        # define obs names
                        if not prefix_is_str:
                            # per-column prefix sequence
                            nprefix = prefix[c_count]
                        else:
                            nprefix = prefix
                        if len(nprefix) > 0:
                            nname = f"{nprefix}_usecol:{clabel}"
                        else:
                            nname = f"usecol:{clabel}"
                        oname = f"{nname}_{rlabel}"
                        onames.append(oname)  # append list of obs
                        ovals.append(vals[i, j])  # store current obs val
                        # define group name
                        if gpname is False or gpname[c_count] is False:
                            # keeping consistent behaviour
                            ngpname = None  # nname
                        elif gpname is True or gpname[c_count] is True:
                            ngpname = nname  #  set to base of obs name
                        else:  # a group name has been specified
                            if not isinstance(gpname, str):
                                ngpname = gpname[c_count]
                            else:
                                ngpname = gpname
                        ognames.append(ngpname)  # add to list of group names
                        # start defining string to write in ins
                        oname = f" !{oname}!"
                        line += f" {oname} "
                        if j < len(clabels) - 1:
                            if sep == ",":
                                line += f" {marker},{marker} "
                            # else:
                            #    line += " !dum! "
                        c_count += 1
                    elif (
                        j < len(clabels) - 1
                    ):  # this isnt a row-col to observationalize (nice word!)
                        if sep == ",":
                            # skip over this column with a secondary marker
                            line += f" {marker},{marker} "
                        else:
                            # whitespace-delimited: consume with a dummy obs
                            line += " !dum! "
            f.write(line + "\n")
    odf = pd.DataFrame(
        {"obsnme": onames, "obsval": ovals, "obgnme": ognames}, index=onames
    ).dropna(
        axis=1
    )  # dropna to keep consistent after adding obgnme
    return odf
1229

1230

1231
class InstructionFile(object):
9✔
1232
    """class for handling instruction files.
9✔
1233

1234
    Args:
1235
        ins_filename (`str`): path and name of an existing instruction file
1236
        pst (`pyemu.Pst`, optional): Pst instance - used for checking that instruction file is
1237
            compatible with the control file (e.g. no duplicates)
1238

1239
    Example::
1240

1241
        i = InstructionFile("my.ins")
1242
        df = i.read_output_file("my.output")
1243

1244
    """
1245

1246
    def __init__(self, ins_filename, pst=None):
        """constructor - parses the instruction file immediately.

        Args:
            ins_filename (`str`): path and name of an existing instruction file
            pst (`pyemu.Pst`, optional): control file instance used to check
                that observation names in the instruction file exist in the
                control file
        """
        # running line counters in the instruction and output files
        # (used for verbose error/warning messages)
        self._ins_linecount = 0
        self._out_linecount = 0
        self._ins_filename = ins_filename
        # self._pst = pst
        # the primary marker character - set from the first line of the ins file
        self._marker = None
        self._ins_filehandle = None
        self._out_filehandle = None
        self._last_line = ""
        # all obs names in the control file (stays None if no pst was passed)
        self._full_oname_set = None
        if pst is not None:
            self._full_oname_set = set(pst.obs_names)
        # obs names actually encountered while parsing the instruction file
        self._found_oname_set = set()

        # parsed instruction lines and their ins-file line numbers
        self._instruction_lines = []
        self._instruction_lcount = []

        self.read_ins_file()
1264

1265
    @property
9✔
1266
    def obs_name_set(self):
9✔
1267
        return self._found_oname_set
8✔
1268

1269
    def read_ins_file(self):
        """read the instruction and do some minimal error checking.

        Raises:
            Exception: if the first line is malformed, or an unsupported or
                malformed instruction is encountered.

        Note:

            This is called by the constructor

        """
        self._instruction_lines = []
        self._instruction_lcount = []
        # first line must be "pif <marker>"
        first_line = self._readline_ins()
        if len(first_line) < 2:
            raise Exception(
                "first line of ins file must have atleast two entries, not '{0}'".format(
                    ",".join(first_line)
                )
            )
        if first_line[0] != "pif":
            raise Exception(
                "first line of ins file '{0}' must start with 'pif', not '{1}'".format(
                    self._ins_filename, first_line[0]
                )
            )
        self._marker = first_line[1]
        while True:
            line = self._readline_ins()

            if line is None:
                # EOF
                break
            elif len(line) == 0:
                self.throw_ins_warning("empty line, breaking")
                break
            else:
                # the first token on every line must be a line advance,
                # a primary marker, or a (unsupported) continuation
                c1 = line[0][:1]
                if c1 == "l":
                    pass
                elif c1 == self._marker:
                    pass
                elif c1 == "&":
                    self.throw_ins_error("line continuation not supported")
                else:
                    self.throw_ins_error(
                        "first token must be line advance ('l'), primary marker, or continuation ('&'),"
                        + "not: {0}".format(line[0])
                    )

            # validate the remaining tokens on the line
            for token in line[1:]:
                t1 = token[:1]
                if t1 == "t":
                    self.throw_ins_error("tab instruction not supported")
                elif t1 == self._marker:
                    # secondary markers must be closed on the same token
                    tn = token[-1:]
                    if not tn == self._marker:
                        self.throw_ins_error(
                            "unbalanced secondary marker in token '{0}'".format(token)
                        )

                # check the three obs-instruction forms: !free!,
                # [fixed]s:e and (semi-fixed)s:e
                for somarker, eomarker in zip(["!", "[", "("], ["!", "]", ")"]):
                    #
                    if t1 == somarker:
                        ofound = True
                        if eomarker not in token[1:]:
                            self.throw_ins_error(
                                "unmatched observation marker '{0}', looking for '{1}' in token '{2}'".format(
                                    somarker, eomarker, token
                                )
                            )
                        # extract the obs name between the markers
                        raw = token[1:].split(eomarker)[0].replace(somarker, "")
                        if raw == "dum":
                            # "dum" is a placeholder, not a real observation
                            pass
                        else:
                            # if a pst was supplied, obs names must exist in it,
                            # and no obs name may appear twice
                            if (
                                self._full_oname_set is not None
                                and raw not in self._full_oname_set
                            ):
                                self.throw_ins_error(
                                    "obs name '{0}' not in pst".format(raw)
                                )
                            elif raw in self._found_oname_set:
                                self.throw_ins_error(
                                    "obs name '{0}' is listed more than once".format(
                                        raw
                                    )
                                )
                            self._found_oname_set.add(raw)
                        break
                        # print(raw)

            # line passed checks - keep it and its line number
            self._instruction_lines.append(line)
            self._instruction_lcount.append(self._ins_linecount)
1359

1360
    def throw_ins_warning(self, message, lcount=None):
        """emit a verbose `PyemuWarning` about the instruction file.

        Args:
            message (`str`): the warning message
            lcount (`int`, optional): warning line number.  If None,
                self._ins_linecount is used

        """
        line_no = self._ins_linecount if lcount is None else lcount
        full_message = (
            "InstructionFile error processing instruction file {0} on line number {1}: {2}".format(
                self._ins_filename, line_no, message
            )
        )
        warnings.warn(full_message, PyemuWarning)
1376

1377
    def throw_ins_error(self, message, lcount=None):
9✔
1378
        """throw a verbose instruction file error
1379

1380
        Args:
1381
            message (`str`): the error message
1382
            lcount (`int`, optional): error line number.  If None, self._ins_linecount is used
1383
        """
1384
        if lcount is None:
×
1385
            lcount = self._ins_linecount
×
1386
        raise Exception(
×
1387
            "InstructionFile error processing instruction file on line number {0}: {1}".format(
1388
                lcount, message
1389
            )
1390
        )
1391

1392
    def throw_out_error(self, message, lcount=None):
9✔
1393
        """throw a verbose output file error
1394

1395
        Args:
1396
            message (`str`): the error message
1397
            lcount (`int`, optional): error line number.  If None, self._ins_linecount is used
1398

1399
        """
1400
        if lcount is None:
8✔
1401
            lcount = self._out_linecount
8✔
1402
        raise Exception(
8✔
1403
            "InstructionFile error processing output file on line number {0}: {1}".format(
1404
                lcount, message
1405
            )
1406
        )
1407

1408
    def read_output_file(self, output_file):
        """process a model output file using `InstructionFile.instruction_set`

        Args:
            output_file (`str`): path and name of existing output file

        Returns:

            `pd.DataFrame`: a dataframe (sorted by observation name) with
            observation names as the index and simulated values in the
            "obsval" column, extracted from `output_file`


        """
        self._out_filename = output_file
        extracted = {}
        # apply each stored instruction line in order, accumulating
        # obsname -> value pairs
        for ins_line, ins_lcount in zip(
            self._instruction_lines, self._instruction_lcount
        ):
            extracted.update(self._execute_ins_line(ins_line, ins_lcount))
        result = pd.DataFrame.from_dict(extracted, orient="index", columns=["obsval"])
        return result.sort_index()
1435

1436
    def _execute_ins_line(self, ins_line, ins_lcount):
9✔
1437
        """private method to process output file lines with an instruction line"""
1438
        cursor_pos = 0  # starting cursor position
9✔
1439
        val_dict = {}  # storage dict for obsname: obsval pairs in line
9✔
1440
        # for ii,ins in enumerate(ins_line):
1441
        ii = 0  # counter over instruction entries
9✔
1442
        all_markers = True
9✔
1443
        line_seps = set([",", " ", "\t"])
9✔
1444
        n_ins = len(ins_line)  # number of instructions on line
9✔
1445
        maxsearch = 500  # maximum number of characters to search when slicing line
9✔
1446
        while True:
4✔
1447
            if ii >= n_ins:
9✔
1448
                break
9✔
1449
            ins = ins_line[ii]  # extract instruction
9✔
1450
            i1 = ins[:1]  # first char in instruction
9✔
1451
            # primary marker
1452
            if ii == 0 and i1 == self._marker:
9✔
1453
                # if first and instruction starts with primary marker
1454
                # search for presence of primary marker e.g. ~start~
1455
                mstr = ins.replace(self._marker, "")
8✔
1456
                while True:
4✔
1457
                    # loop over lines until primary marker is found
1458
                    line = self._readline_output()  # read line from output
8✔
1459
                    if line is None:
8✔
1460
                        self.throw_out_error(
×
1461
                            "EOF when trying to find primary marker '{0}' from "
1462
                            "instruction file line {1}".format(mstr, ins_lcount)
1463
                        )
1464
                    if mstr in line:  # when marker is found break and update
8✔
1465
                        # cursor position in current line
1466
                        break
8✔
1467
                # copy a version of line commas replaced
1468
                # (to support comma sep strings)
1469
                rline = line.replace(",", " ").replace("\t","")
8✔
1470

1471
                cursor_pos = line.index(mstr) + len(mstr)
8✔
1472

1473
            # line advance
1474
            elif i1 == "l":  # if start of instruction is line advance
9✔
1475
                try:
9✔
1476
                    nlines = int(ins[1:])  # try and get advance number
9✔
1477
                except Exception as e:
×
1478
                    self.throw_ins_error(
×
1479
                        "casting line advance to int for "
1480
                        "instruction '{0}'".format(ins),
1481
                        ins_lcount,
1482
                    )
1483
                for i in range(nlines):
9✔
1484
                    line = self._readline_output()
9✔
1485
                    if line is None:
9✔
1486
                        self.throw_out_error(
×
1487
                            "EOF when trying to read {0} lines for line "
1488
                            "advance instruction '{1}', from instruction "
1489
                            "file line number {2}".format(nlines, ins, ins_lcount)
1490
                        )
1491
                # copy a version of line commas replaced
1492
                # (to support comma sep strings)
1493
                rline = line.replace(",", " ")
9✔
1494
            elif ins == "w":  # whole string comparison
9✔
1495
                raw = rline[cursor_pos : cursor_pos + maxsearch].split(
9✔
1496
                    None, 2
1497
                )  # TODO: maybe slow for long strings -- hopefuly maxsearch helps
1498
                if line[cursor_pos] in line_seps:
9✔
1499
                    raw.insert(0, "")
9✔
1500
                if len(raw) == 1:
9✔
1501
                    self.throw_out_error(
8✔
1502
                        "no whitespaces found on output line {0} past {1}".format(
1503
                            line, cursor_pos
1504
                        )
1505
                    )
1506
                # step over current value
1507
                cursor_pos = rline.replace("\t"," ").find(" ", cursor_pos)
9✔
1508
                # now find position of next entry
1509
                cursor_pos = rline.find(raw[1], cursor_pos)
9✔
1510
                # raw[1]
1511
            # )
1512

1513
            elif i1 == "!":  # indicates obs instruction folows
9✔
1514
                oname = ins.replace("!", "")
9✔
1515
                # look a head for a second/closing marker
1516
                if ii < n_ins - 1 and ins_line[ii + 1] == self._marker:
9✔
1517
                    # if penultimate instruction and last instruction is
1518
                    # primary marker, look for that marker in line
1519
                    m = ins_line[ii + 1].replace(self._marker, "")
×
1520
                    es = line.find(m, cursor_pos)
×
1521
                    if es == -1:  # m not in rest of line
×
1522
                        self.throw_out_error(
×
1523
                            "secondary marker '{0}' not found from cursor_pos {1}".format(
1524
                                m, cursor_pos
1525
                            )
1526
                        )
1527
                    # read to closing marker
1528
                    val_str = line[cursor_pos:es]
×
1529
                else:
1530
                    # find next space in (r)line -- signifies end of entry
1531
                    es = rline.find(" ", cursor_pos)
9✔
1532
                    if es == -1 or es == cursor_pos:
9✔
1533
                        # if no space or current position is space
1534
                        # use old fashioned split to get value
1535
                        # -- this will happen if there are leading blanks before
1536
                        # vals in output file (e.g. formatted)
1537
                        val_str = rline[cursor_pos : cursor_pos + maxsearch].split(
9✔
1538
                            None, 1
1539
                        )[0]
1540
                    else:
1541
                        # read val (constrained slice is faster for big strings)
1542
                        val_str = rline[cursor_pos:es]
9✔
1543
                try:
9✔
1544
                    val = float(val_str)
9✔
1545
                except Exception as e:
8✔
1546
                    if oname != "dum":
8✔
1547
                        self.throw_out_error(
8✔
1548
                            "casting string '{0}' to float for instruction '{1}'".format(
1549
                                val_str, ins
1550
                            )
1551
                        )
1552

1553
                if oname != "dum":
9✔
1554
                    val_dict[oname] = val
9✔
1555
                ipos = line.find(val_str.strip(), cursor_pos)
9✔
1556
                # val_len = len(val_str)
1557
                cursor_pos = ipos + len(val_str)  # update cursor
9✔
1558
                all_markers = False
9✔
1559

1560
            elif i1 == self._marker:
9✔
1561
                m = ins.replace(self._marker, "")  # extract just primary marker
9✔
1562
                # find position of primary marker in line
1563
                es = line.find(m, cursor_pos)
9✔
1564
                if es == -1:  # m not in rest of line
9✔
1565
                    if all_markers:
8✔
1566
                        ii = 0
8✔
1567
                        continue
8✔
1568
                    else:
1569
                        self.throw_out_error(
×
1570
                            "secondary marker '{0}' not found from "
1571
                            "cursor_pos {1}".format(m, cursor_pos)
1572
                        )
1573
                cursor_pos = es + len(m)
9✔
1574

1575
            elif i1 == "(":
8✔
1576
                if ")" not in ins:
8✔
1577
                    self.throw_ins_error("unmatched ')'", self._instruction_lcount)
×
1578
                oname = ins[1:].split(")", 1)[0].lower()
8✔
1579
                raw = ins.split(")")[1]
8✔
1580
                if ":" not in raw:
8✔
1581
                    self.throw_ins_error(
×
1582
                        "couldnt find ':' in semi-fixed instruction: '{0}'".format(ins),
1583
                        lcount=self._instruction_lcount,
1584
                    )
1585
                raw = raw.split(":")
8✔
1586
                try:
8✔
1587
                    s_idx = int(raw[0]) - 1
8✔
1588
                except Exception as e:
×
1589
                    self.throw_ins_error(
×
1590
                        "error converting '{0}' to integer in semi-fixed instruction: '{1}'".format(
1591
                            raw[0], ins
1592
                        ),
1593
                        lcount=self._instruction_lcount,
1594
                    )
1595
                try:
8✔
1596
                    e_idx = int(raw[1])
8✔
1597
                except Exception as e:
×
1598
                    self.throw_ins_error(
×
1599
                        "error converting '{0}' to integer in semi-fixed instruction: '{1}'".format(
1600
                            raw[1], ins
1601
                        ),
1602
                        lcount=self._instruction_lcount,
1603
                    )
1604

1605
                if len(line) < e_idx:
8✔
1606
                    self.throw_out_error(
×
1607
                        "output line only {0} chars long, semi-fixed ending col {1}".format(
1608
                            len(line), e_idx
1609
                        )
1610
                    )
1611

1612
                if cursor_pos > e_idx:
8✔
1613
                    self.throw_out_error(
×
1614
                        "cursor at {0} has already read past semi-fixed ending col {1}".format(
1615
                            cursor_pos, e_idx
1616
                        )
1617
                    )
1618

1619
                ss_idx = max(cursor_pos, s_idx)
8✔
1620
                raw = line[ss_idx : ss_idx + maxsearch].split(
8✔
1621
                    None, 1
1622
                )  # slpitting only 1 might be margin faster
1623
                rs_idx = line.index(raw[0])
8✔
1624
                if rs_idx > e_idx:
8✔
1625
                    self.throw_out_error(
×
1626
                        "no non-whitespace chars found in semi-fixed observation {0}".format(
1627
                            ins
1628
                        )
1629
                    )
1630
                re_idx = rs_idx + len(raw[0])
8✔
1631
                val_str = line[rs_idx:re_idx]
8✔
1632
                try:
8✔
1633
                    val = float(val_str)
8✔
1634
                except Exception as e:
×
1635
                    if oname != "dum":
×
1636
                        self.throw_out_error(
×
1637
                            "casting string '{0}' to float for instruction '{1}'".format(
1638
                                val_str, ins
1639
                            )
1640
                        )
1641

1642
                if oname != "dum":
8✔
1643
                    val_dict[oname] = val
8✔
1644
                cursor_pos = re_idx
8✔
1645

1646
            elif i1 == "[":
8✔
1647
                if "]" not in ins:
8✔
1648
                    self.throw_ins_error("unmatched ']'", self._instruction_lcount)
×
1649
                oname = ins[1:].split("]", 1)[0].lower()
8✔
1650
                raw = ins.split("]")[1]
8✔
1651
                if ":" not in raw:
8✔
1652
                    self.throw_ins_error(
×
1653
                        "couldnt find ':' in fixed instruction: '{0}'".format(ins),
1654
                        lcount=self._instruction_lcount,
1655
                    )
1656
                raw = raw.split(":")
8✔
1657
                try:
8✔
1658
                    s_idx = int(raw[0]) - 1
8✔
1659
                except Exception as e:
×
1660
                    self.throw_ins_error(
×
1661
                        "error converting '{0}' to integer in fixed instruction: '{1}'".format(
1662
                            raw[0], ins
1663
                        ),
1664
                        lcount=self._instruction_lcount,
1665
                    )
1666
                try:
8✔
1667
                    e_idx = int(raw[1])
8✔
1668
                except Exception as e:
×
1669
                    self.throw_ins_error(
×
1670
                        "error converting '{0}' to integer in fixed instruction: '{1}'".format(
1671
                            raw[1], ins
1672
                        ),
1673
                        lcount=self._instruction_lcount,
1674
                    )
1675

1676
                if len(line) < e_idx:
8✔
1677
                    self.throw_out_error(
×
1678
                        "output line only {0} chars long, fixed ending col {1}".format(
1679
                            len(line), e_idx
1680
                        )
1681
                    )
1682

1683
                if cursor_pos > s_idx:
8✔
1684
                    self.throw_out_error(
×
1685
                        "cursor at {0} has already read past fixed starting col {1}".format(
1686
                            cursor_pos, e_idx
1687
                        )
1688
                    )
1689

1690
                val_str = line[s_idx:e_idx]
8✔
1691
                try:
8✔
1692
                    val = float(val_str)
8✔
1693
                except Exception as e:
×
1694
                    if oname != "dum":
×
1695
                        self.throw_out_error(
×
1696
                            "casting string '{0}' to float for instruction '{1}'".format(
1697
                                val_str, ins
1698
                            )
1699
                        )
1700

1701
                if oname != "dum":
8✔
1702
                    val_dict[oname] = val
8✔
1703
                cursor_pos = e_idx
8✔
1704

1705
            else:
1706
                self.throw_out_error(
×
1707
                    "unrecognized instruction '{0}' on ins file line {1}".format(
1708
                        ins, ins_lcount
1709
                    )
1710
                )
1711
            ii += 1
9✔
1712
        return val_dict
9✔
1713

1714
    def _readline_ins(self):
9✔
1715
        """consolidate private method to read the next instruction file line.  Casts to lower and splits
1716
        on whitespace
1717
        """
1718
        if self._ins_filehandle is None:
9✔
1719
            if not os.path.exists(self._ins_filename):
9✔
1720
                raise Exception(
×
1721
                    "instruction file '{0}' not found".format(self._ins_filename)
1722
                )
1723
            self._ins_filehandle = open(self._ins_filename, "r")
9✔
1724
        line = self._ins_filehandle.readline()
9✔
1725
        self._ins_linecount += 1
9✔
1726
        if line == "":
9✔
1727
            return None
9✔
1728
        self._last_line = line
9✔
1729
        # check for spaces in between the markers - this gets ugly
1730
        line = line.lower()
9✔
1731
        if self._marker is not None and self._marker in line:
9✔
1732

1733
            # def find_all(a_str, sub):
1734
            #     start = 0
1735
            #     while True:
1736
            #         start = a_str.find(sub, start)
1737
            #         if start == -1:
1738
            #             return
1739
            #         yield start
1740
            #         start += len(sub)
1741
            # poss speedup using regex
1742
            midx = [m.start() for m in re.finditer(re.escape(self._marker), line)]
9✔
1743
            # midx = list(find_all(line, self._marker))
1744
            midx.append(len(line))
9✔
1745
            first = line[: midx[0]].strip()
9✔
1746
            tokens = []
9✔
1747
            if len(first) > 0:
9✔
1748
                # tokens.append(first)
1749
                tokens.extend([f.strip() for f in first.split()])
9✔
1750
            for idx in range(1, len(midx) - 1, 2):
9✔
1751
                mstr = line[midx[idx - 1] : midx[idx] + 1]
9✔
1752
                ostr = line[midx[idx] + 1 : midx[idx + 1]]
9✔
1753
                tokens.append(mstr)
9✔
1754
                tokens.extend(ostr.split())
9✔
1755
        else:
1756
            tokens = line.strip().split()
9✔
1757
        return tokens
9✔
1758

1759
    def _readline_output(self):
9✔
1760
        """consolidate private method to read the next output file line.  Casts to lower"""
1761
        if self._out_filehandle is None:
9✔
1762
            if not os.path.exists(self._out_filename):
9✔
1763
                raise Exception(
9✔
1764
                    "output file '{0}' not found".format(self._out_filename)
1765
                )
1766
            self._out_filehandle = open(self._out_filename, "r")
9✔
1767
        line = self._out_filehandle.readline()
9✔
1768
        self._out_linecount += 1
9✔
1769
        if line == "":
9✔
1770
            return None
×
1771
        self._last_line = line
9✔
1772
        return line.lower()
9✔
1773

1774

1775
def process_output_files(pst, pst_path="."):
    """helper function to process output files using the
      InstructionFile class

    Args:
         pst (`pyemu.Pst`): control file instance

         pst_path (`str`): path to instruction and output files to append to the front
             of the names in the Pst instance

     Returns:
         `pd.DataFrame`: dataframe of observation names and simulated values
         extracted from the model output files listed in `pst`, or `None`
         if no output file could be processed

     Raises:
         Exception: if `pst` is not a `pyemu.Pst` instance

     Example::

         pst = pyemu.Pst("my.pst")
         df = pyemu.pst_utils.process_output_files(pst)


    """
    if not isinstance(pst, pyemu.Pst):
        raise Exception(
            "process_output_files error: 'pst' arg must be pyemu.Pst instance"
        )
    series = []
    for ins, out in zip(pst.instruction_files, pst.output_files):
        ins = os.path.join(pst_path, ins)
        out = os.path.join(pst_path, out)
        if not os.path.exists(out):
            warnings.warn("out file '{0}' not found".format(out), PyemuWarning)
        i = InstructionFile(ins, pst=pst)
        # best-effort: a failure on one output file should not stop
        # processing of the remaining files
        try:
            s = i.read_output_file(out)
            series.append(s)
        except Exception as e:
            warnings.warn(
                "error processing output file '{0}': {1}".format(out, str(e)),
                PyemuWarning,
            )
    if len(series) == 0:
        return None
    # stack the per-file results into a single object
    return pd.concat(series)
1818

1819

1820
def check_interface(pst, pst_path=".", warn=False):
    """check that the tpl and ins file entries are in
    sync with the control file entries

    Args:
        pst (`pyemu.Pst`): control file instance
        pst_path (`str`): the path from where python is running to the control file
        warn (`bool`): flag to treat errors as warnings.  If `False` (the
            default), any mismatch raises an Exception; if `True`, a
            `PyemuWarning` is issued instead.

    """

    # parameter names found across all template files
    tpl_pnames = set()
    for tpl_file in pst.model_input_data.pest_file:
        names = parse_tpl_file(os.path.join(pst_path, tpl_file))
        tpl_pnames.update(set(names))
    pst_pnames = set(pst.par_names)
    mess = ""
    diff = tpl_pnames - pst_pnames
    if len(diff) > 0:
        mess += (
            "\nthe following par names are not in the ctrl file but are in the tpl files: "
            + ",".join(diff)
            + "\n\n"
        )
    diff = pst_pnames - tpl_pnames
    if len(diff) > 0:
        mess += (
            "\nthe following par names are not in the tpl files but are in the ctrl file: "
            + ",".join(diff)
            + "\n\n"
        )
    # observation names found across all instruction files
    ins_onames = set()
    for ins_file in pst.model_output_data.pest_file:
        i = InstructionFile(os.path.join(pst_path, ins_file))
        ins_onames.update(i.obs_name_set)
    pst_onames = set(pst.obs_names)
    diff = ins_onames - pst_onames
    if len(diff) > 0:
        mess += (
            "\nthe following obs names are not in the ctrl file but are in the ins files: "
            + ",".join(diff)
            + "\n\n"
        )
    diff = pst_onames - ins_onames
    if len(diff) > 0:
        # fixed typo: "ons names" -> "obs names"
        mess += (
            "\nthe following obs names are not in the ins files but are in the ctrl file: "
            + ",".join(diff)
            + "\n\n"
        )
    if len(mess) > 0:
        if warn:
            warnings.warn(mess, PyemuWarning)
        else:
            raise Exception(mess)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc