
pypest / pyemu / build 11660552677 (push, via github / web-flow)

24 Oct 2024 07:04PM UTC coverage: 79.565% (+0.1%) from 79.466%
Fix for change with quotechar and delim flex in built in csvwriter (#547)

* Update CI to test py3.13 (expecting fails)
* fix for py3.13 csv writer change (quotchar != delim): for now writing df rows as space delim strings
* same treatment for hfb tpl write
* tpl no longer written with random extra space
6 of 6 new or added lines in 3 files covered. (100.0%)

6 existing lines in 1 file now uncovered.

12078 of 15180 relevant lines covered (79.57%)

8.26 hits per line

Source File: /pyemu/utils/pp_utils.py (84.88% covered)
"""Pilot point support utilities
"""
import os
import copy
import numpy as np
import pandas as pd
import warnings



pd.options.display.max_colwidth = 100
from pyemu.pst.pst_utils import SFMT, IFMT, FFMT, pst_config
from ..pyemu_warnings import PyemuWarning

PP_FMT = {
    "name": SFMT,
    "x": FFMT,
    "y": FFMT,
    "zone": IFMT,
    "tpl": SFMT,
    "parval1": FFMT,
}
PP_NAMES = ["name", "x", "y", "zone", "parval1"]

def setup_pilotpoints_grid(
    ml=None,
    sr=None,
    ibound=None,
    prefix_dict=None,
    every_n_cell=4,
    ninst=1,
    use_ibound_zones=False,
    pp_dir=".",
    tpl_dir=".",
    shapename="pp.shp",
    pp_filename_dict={},
):
    """setup a regularly-spaced (gridded) pilot point parameterization

    Args:
        ml (`flopy.mbase`, optional): a flopy mbase derived type.  If None, `sr` must not be None.
        sr (`flopy.utils.reference.SpatialReference`, optional):  a spatial reference used to
            locate the model grid in space.  If None, `ml` must not be None.  Default is None
        ibound (`numpy.ndarray`, optional): the modflow ibound integer array.  This is used to
            set pilot points only in active areas. If None and ml is None, then pilot points
            are set in all rows and columns according to `every_n_cell`.  Default is None.
        prefix_dict (`dict`): a dictionary of layer index, pilot point parameter prefix(es) pairs.
            For example : `{0:["hk","vk"]}` would set up pilot points with the prefixes "hk" and "vk" for
            model layer 1. If None, a generic set of pilot points with
            the "pp" prefix are set up for a generic nrow by ncol grid. Default is None
        ninst (`int`): Number of instances of pilot_points to set up.
            e.g. number of layers. If ml is None and prefix_dict is None,
            this is used to set up default prefix_dict.
        use_ibound_zones (`bool`): a flag to use the greater-than-zero values in the
            ibound as pilot point zones.  If False, ibound values greater than zero are
            treated as a single zone.  Default is False.
        pp_dir (`str`, optional): directory to write pilot point files to.  Default is '.'
        tpl_dir (`str`, optional): directory to write pilot point template file to.  Default is '.'
        shapename (`str`, optional): name of shapefile to write that contains pilot
            point information. Default is "pp.shp"
        pp_filename_dict (`dict`): optional dict of prefix-pp filename pairs.  prefix values must
            match the values in `prefix_dict`.  If None, then pp filenames are based on the
            key values in `prefix_dict`.  Default is None

    Returns:
        `pandas.DataFrame`: a dataframe summarizing pilot point information (same information
        written to `shapename`)

    Example::

        m = flopy.modflow.Modflow.load("my.nam")
        df = pyemu.pp_utils.setup_pilotpoints_grid(ml=m)

    """

    if ml is not None:
        try:
            import flopy
        except Exception as e:
            raise ImportError("error importing flopy: {0}".format(str(e)))
        assert isinstance(ml, flopy.modflow.Modflow)
        try:
            sr = ml.sr
        except AttributeError:
            from pyemu.utils.helpers import SpatialReference

            sr = SpatialReference.from_namfile(
                os.path.join(ml.model_ws, ml.namefile),
                delr=ml.modelgrid.delr,
                delc=ml.modelgrid.delc,
            )
        if ibound is None:
            ibound = ml.bas6.ibound.array
            # build a generic prefix_dict
        if prefix_dict is None:
            prefix_dict = {k: ["pp_{0:02d}_".format(k)] for k in range(ml.nlay)}
    else:
        assert sr is not None, "if 'ml' is not passed, 'sr' must be passed"
        if prefix_dict is None:
            prefix_dict = {k: ["pp_{0:02d}_".format(k)] for k in range(ninst)}
        if ibound is None:
            print("ibound not passed, using array of ones")
            ibound = {k: np.ones((sr.nrow, sr.ncol)) for k in prefix_dict.keys()}
        # assert ibound is not None,"if 'ml' is not pass, 'ibound' must be passed"

    if isinstance(ibound, np.ndarray):
        assert np.ndim(ibound) == 2 or np.ndim(ibound) == 3, (
            "ibound needs to be either 3d np.ndarray or k_dict of 2d arrays. "
            "Array of {0} dimensions passed".format(np.ndim(ibound))
        )
        if np.ndim(ibound) == 2:
            ibound = {0: ibound}
        else:
            ibound = {k: arr for k, arr in enumerate(ibound)}

    try:
        xcentergrid = sr.xcentergrid
        ycentergrid = sr.ycentergrid
    except AttributeError as e0:
        warnings.warn(
            "xcentergrid and/or ycentergrid not in 'sr':{0}".format(str(e0)),
            PyemuWarning,
        )
        try:
            xcentergrid = sr.xcellcenters
            ycentergrid = sr.ycellcenters
        except AttributeError as e1:
            raise Exception(
                "error getting xcentergrid and/or ycentergrid "
                "from 'sr':{0}:{1}".format(str(e0), str(e1))
            )
    start = int(float(every_n_cell) / 2.0)

    if sr.grid_type == 'vertex':
        if len(xcentergrid.shape) == 1:
            xcentergrid = np.reshape(xcentergrid, (xcentergrid.shape[0], 1))
            ycentergrid = np.reshape(ycentergrid, (ycentergrid.shape[0], 1))


    # fix for x-section models
    if xcentergrid.shape[0] == 1:
        start_row = 0
    else:
        start_row = start

    if xcentergrid.shape[1] == 1:
        start_col = 0
    else:
        start_col = start

    # check prefix_dict
    keys = list(prefix_dict.keys())
    keys.sort()

    # for k, prefix in prefix_dict.items():
    for k in keys:
        prefix = prefix_dict[k]
        if not isinstance(prefix, list):
            prefix_dict[k] = [prefix]
        if np.all([isinstance(v, dict) for v in ibound.values()]):
            for p in prefix_dict[k]:
                if np.any([p.startswith(key) for key in ibound.keys()]):
                    ib_sel = next(key for key in ibound.keys() if p.startswith(key))
                else:
                    ib_sel = "general_zn"
                assert k < len(ibound[ib_sel]), "layer index {0} > nlay {1}".format(
                    k, len(ibound[ib_sel])
                )
        else:
            assert k < len(ibound), "layer index {0} > nlay {1}".format(k, len(ibound))

    # try:
    # ibound = ml.bas6.ibound.array
    # except Exception as e:
    #    raise Exception("error getting model.bas6.ibound:{0}".format(str(e)))
    par_info = []
    pp_files, tpl_files = [], []
    pp_names = copy.copy(PP_NAMES)
    if sr.grid_type == "vertex":
        pp_names.extend(["k"])
    else:
        pp_names.extend(["k", "i", "j"])

    if not np.all([isinstance(v, dict) for v in ibound.values()]):
        ibound = {"general_zn": ibound}
    par_keys = list(ibound.keys())
    par_keys.sort()
    for par in par_keys:
        for k in range(len(ibound[par])):
            # skip this layer if not in prefix_dict
            if k not in prefix_dict.keys():
                continue

            pp_df = None
            ib = ibound[par][k]
            assert (
                ib.shape == xcentergrid.shape
            ), "ib.shape != xcentergrid.shape for k {0}".format(k)

            pp_count = 0
            if sr.grid_type == "vertex":
                spacing = every_n_cell

                for zone in np.unique(ib):
                    # escape <zero idomain values
                    if zone <= 0:
                        continue
                    ppoint_xys = get_zoned_ppoints_for_vertexgrid(spacing, ib, sr, zone_number=zone)

                    parval1 = 1.0

                    for pp in ppoint_xys:
                        name = "pp_{0:04d}".format(pp_count)
                        x, y = pp[0], pp[-1]
                        if pp_df is None:
                            data = {
                                "name": name,
                                "x": x,
                                "y": y,
                                "zone": zone,  # if use_ibound_zones is False this will always be 1
                                "parval1": parval1,
                                "k": k,
                            }
                            pp_df = pd.DataFrame(data=data, index=[0], columns=pp_names)
                        else:
                            data = [name, x, y, zone, parval1, k]
                            pp_df.loc[pp_count, :] = data
                            pp_count += 1

            else:
                # cycle through rows and cols
                for i in range(start_row, ib.shape[0] - start_row, every_n_cell):
                    for j in range(start_col, ib.shape[1] - start_col, every_n_cell):
                        # skip if this is an inactive cell
                        if ib[i, j] <= 0:  # this will account for MF6 style ibound as well
                            continue
                        # get the attributes we need
                        x = xcentergrid[i, j]
                        y = ycentergrid[i, j]
                        name = "pp_{0:04d}".format(pp_count)
                        parval1 = 1.0

                        # decide what to use as the zone
                        zone = 1
                        if use_ibound_zones:
                            zone = ib[i, j]
                        # stick this pilot point into a dataframe container

                        if pp_df is None:
                            data = {
                                "name": name,
                                "x": x,
                                "y": y,
                                "zone": zone,  # if use_ibound_zones is False this will always be 1
                                "parval1": parval1,
                                "k": k,
                                "i": i,
                                "j": j,
                            }
                            pp_df = pd.DataFrame(data=data, index=[0], columns=pp_names)
                        else:
                            data = [name, x, y, zone, parval1, k, i, j]
                            pp_df.loc[pp_count, :] = data
                        pp_count += 1
            # if we found some acceptable locs...
            if pp_df is not None:
                for prefix in prefix_dict[k]:
                    # if parameter prefix relates to current zone definition
                    if prefix.startswith(par) or (
                        ~np.any([prefix.startswith(p) for p in ibound.keys()])
                        and par == "general_zn"
                    ):
                        if prefix in pp_filename_dict.keys():
                            base_filename = pp_filename_dict[prefix].replace(":", "")
                        else:
                            base_filename = "{0}pp.dat".format(prefix.replace(":", ""))
                        pp_filename = os.path.join(pp_dir, base_filename)
                        # write the base pilot point file
                        write_pp_file(pp_filename, pp_df)

                        tpl_filename = os.path.join(tpl_dir, base_filename + ".tpl")
                        # write the tpl file
                        pp_df = pilot_points_to_tpl(
                            pp_df, tpl_filename, name_prefix=prefix,
                        )
                        pp_df.loc[:, "tpl_filename"] = tpl_filename
                        pp_df.loc[:, "pp_filename"] = pp_filename
                        pp_df.loc[:, "pargp"] = prefix
                        # save the parameter names and parval1s for later
                        par_info.append(pp_df.copy())
                        # save the pp_filename and tpl_filename for later
                        pp_files.append(pp_filename)
                        tpl_files.append(tpl_filename)

    par_info = pd.concat(par_info)
    if sr.grid_type == "vertex":
        fields = ["k", "zone"]
    else:
        fields = ["k", "i", "j", "zone"]
    par_info = par_info.astype({f: int for f in fields}, errors='ignore')
    defaults = pd.DataFrame(pst_config["par_defaults"], index=par_info.index)
    missingcols = defaults.columns.difference(par_info.columns)
    par_info.loc[:, missingcols] = defaults

    if shapename is not None:
        try:
            import shapefile
        except Exception as e:
            print(
                "error importing shapefile, try pip install pyshp...{0}".format(str(e))
            )
            return par_info
        try:
            shp = shapefile.Writer(target=shapename, shapeType=shapefile.POINT)
        except:
            shp = shapefile.Writer(shapeType=shapefile.POINT)
        for name, dtype in par_info.dtypes.items():
            if dtype == object:
                shp.field(name=name, fieldType="C", size=50)
            elif dtype in [int]:  # , np.int64, np.int32]:
                shp.field(name=name, fieldType="N", size=50, decimal=0)
            elif dtype in [float, np.float32, np.float64]:
                shp.field(name=name, fieldType="N", size=50, decimal=10)
            else:
                try:
                    if dtype in [np.int64, np.int32]:
                        shp.field(name=name, fieldType="N", size=50, decimal=0)
                    else:
                        raise Exception(
                            "unrecognized field type in par_info:{0}:{1}".format(name, dtype)
                        )

                except Exception as e:
                    raise Exception(
                        "unrecognized field type in par_info:{0}:{1}".format(name, dtype)
                    )

        # some pandas awesomeness..
        par_info.apply(lambda x: shp.point(x.x, x.y), axis=1)
        par_info.apply(lambda x: shp.record(*x), axis=1)
        try:
            shp.save(shapename)
        except:
            shp.close()
        shp.close()
        shp = shapefile.Reader(shapename)
        assert shp.numRecords == par_info.shape[0]
        shp.close()
    return par_info

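# A minimal usage sketch for the sr-based path above (no flopy model object);
# the SpatialReference constructor arguments are assumptions to adapt to a real grid.
def _example_setup_pilotpoints_from_sr():
    """Illustrative only: gridded pilot points from a bare SpatialReference."""
    from pyemu.utils.helpers import SpatialReference

    # assumed 30 row by 20 column structured grid with uniform 250. spacing
    sr = SpatialReference(delr=np.full(20, 250.0), delc=np.full(30, 250.0))
    # one "hk_" pilot point every 5 cells in layer 0; shapename=None skips the
    # shapefile (and the pyshp dependency)
    return setup_pilotpoints_grid(
        sr=sr, prefix_dict={0: ["hk_"]}, every_n_cell=5, shapename=None
    )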

def pp_file_to_dataframe(pp_filename):
    """read a pilot point file to a pandas DataFrame

    Args:
        pp_filename (`str`): path and name of an existing pilot point file

    Returns:
        `pandas.DataFrame`: a dataframe with `pp_utils.PP_NAMES` for columns

    Example::

        df = pyemu.pp_utils.pp_file_to_dataframe("my_pp.dat")

    """

    df = pd.read_csv(
        pp_filename,
        sep=r'\s+',
        header=None,
        names=PP_NAMES,
        usecols=[0, 1, 2, 3, 4],
    )
    df.loc[:, "name"] = df.name.apply(str).apply(str.lower)
    return df

def pp_tpl_to_dataframe(tpl_filename):
    """read a pilot points template file to a pandas dataframe

    Args:
        tpl_filename (`str`): path and name of an existing pilot points
            template file

    Returns:
        `pandas.DataFrame`: a dataframe of pilot point info with "parnme" included

    Notes:
        Use for processing pilot points since the pilot point file itself may
        have generic "names".

    Example::

        df = pyemu.pp_utils.pp_tpl_to_dataframe("my_pp.dat.tpl")

    """
    inlines = open(tpl_filename, "r").readlines()
    header = inlines.pop(0)
    marker = header.strip().split()[1]
    assert len(marker) == 1
    usecols = [0, 1, 2, 3]
    df = pd.read_csv(
        tpl_filename,
        sep=r"\s+",
        skiprows=1,
        header=None,
        names=PP_NAMES[:-1],
        usecols=usecols,
    )
    df.loc[:, "name"] = df.name.apply(str).apply(str.lower)
    df["parnme"] = [i.split(marker)[1].strip() for i in inlines]

    return df

def pilot_points_from_shapefile(shapename):
    """read pilot points from shapefile into a dataframe

    Args:
        shapename (`str`): the shapefile name to read.

    Notes:
        requires pyshp

    """
    try:
        import shapefile
    except Exception as e:
        raise Exception(
            "error importing shapefile: {0}, \ntry pip install pyshp...".format(str(e))
        )
    shp = shapefile.Reader(shapename)
    if shp.shapeType != shapefile.POINT:
        raise Exception("shapefile '{0}' is not POINT type".format(shapename))
    names = [n[0].lower() for n in shp.fields[1:]]
    if "name" not in names:
        raise Exception("pilot point shapefile missing 'name' attr")

    data = {name: [] for name in names}
    xvals = []
    yvals = []

    for shape, rec in zip(shp.shapes(), shp.records()):
        pt = shape.points[0]
        for name, val in zip(names, rec):
            data[name].append(val)
        xvals.append(pt[0])
        yvals.append(pt[1])

    df = pd.DataFrame(data)
    df.loc[:, "x"] = xvals
    df.loc[:, "y"] = yvals
    if "parval1" not in df.columns:
        print("adding generic parval1 to pp shapefile dataframe")
        df.loc[:, "parval1"] = 1.0

    return df

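# A minimal usage sketch; "pp.shp" is the default shapename written by
# setup_pilotpoints_grid() above and pyshp must be installed.
def _example_pilot_points_from_shapefile():
    """Illustrative only: read pilot point locations back from a shapefile."""
    df = pilot_points_from_shapefile("pp.shp")
    # point coordinates are re-attached as "x" and "y" columns
    return df[["name", "x", "y", "parval1"]]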

def write_pp_shapfile(pp_df, shapename=None):
    """write pilot points dataframe to a shapefile

    Args:
        pp_df (`pandas.DataFrame`): pilot point dataframe (must include "x" and "y"
            columns).  If `pp_df` is a string, it is assumed to be a pilot points file
            and is loaded with `pp_utils.pp_file_to_dataframe`. Can also be a list of
            `pandas.DataFrames` and/or filenames.
        shapename (`str`): the shapefile name to write.  If `None`, the shapefile
            is saved as "pp_locs.shp".

    Notes:
        requires pyshp

    """
    try:
        import shapefile
    except Exception as e:
        raise Exception(
            "error importing shapefile: {0}, \ntry pip install pyshp...".format(str(e))
        )

    if not isinstance(pp_df, list):
        pp_df = [pp_df]
    dfs = []
    for pp in pp_df:
        if isinstance(pp, pd.DataFrame):
            dfs.append(pp)
        elif isinstance(pp, str):
            dfs.append(pp_file_to_dataframe(pp))
        else:
            raise Exception("unsupported arg type:{0}".format(type(pp)))

    if shapename is None:
        shapename = "pp_locs.shp"
    try:
        shp = shapefile.Writer(shapeType=shapefile.POINT)
    except:
        shp = shapefile.Writer(target=shapename, shapeType=shapefile.POINT)
    for name, dtype in dfs[0].dtypes.items():
        if dtype == object:
            shp.field(name=name, fieldType="C", size=50)
        elif dtype in [int]:  # , np.int, np.int64, np.int32]:
            shp.field(name=name, fieldType="N", size=50, decimal=0)
        elif dtype in [float, np.float32, np.float64]:
            shp.field(name=name, fieldType="N", size=50, decimal=8)
        else:
            try:
                if dtype in [np.int64, np.int32]:
                    shp.field(name=name, fieldType="N", size=50, decimal=0)
                else:
                    raise Exception(
                        "unrecognized field type in par_info:{0}:{1}".format(name, dtype)
                    )

            except Exception as e:
                raise Exception(
                    "unrecognized field type in par_info:{0}:{1}".format(name, dtype)
                )

    # some pandas awesomeness..
    for df in dfs:
        # df.apply(lambda x: shp.poly([[[x.x, x.y]]]), axis=1)
        df.apply(lambda x: shp.point(x.x, x.y), axis=1)
        df.apply(lambda x: shp.record(*x), axis=1)

    try:
        shp.save(shapename)
    except:
        shp.close()

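# A minimal usage sketch; "hkpp.dat" is a placeholder pilot point file and
# pyshp must be installed.
def _example_write_pp_shapefile():
    """Illustrative only: export an existing pilot point file to a POINT shapefile."""
    write_pp_shapfile(["hkpp.dat"], shapename="hk_pp_locs.shp")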

def write_pp_file(filename, pp_df):
    """write a pilot points dataframe to a pilot points file

    Args:
        filename (`str`): pilot points file to write
        pp_df (`pandas.DataFrame`):  a dataframe that has
            at least columns "name", "x", "y", "zone", and "parval1"

    """
    with open(filename, "w") as f:
        f.write(
            pp_df.to_string(
                col_space=0,
                columns=PP_NAMES,
                formatters=PP_FMT,
                justify="right",
                header=False,
                index=False,
            )
            + "\n"
        )

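# A minimal usage sketch: write a small pilot point dataframe with
# write_pp_file() and read it back with pp_file_to_dataframe();
# "tmp_pp.dat" is a placeholder filename.
def _example_pp_file_round_trip():
    """Illustrative only: pilot point file write/read round trip."""
    pp_df = pd.DataFrame(
        {
            "name": ["pp_0000", "pp_0001"],
            "x": [125.0, 375.0],
            "y": [125.0, 125.0],
            "zone": [1, 1],
            "parval1": [1.0, 1.0],
        }
    )
    write_pp_file("tmp_pp.dat", pp_df)
    return pp_file_to_dataframe("tmp_pp.dat")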

def pilot_points_to_tpl(pp_file, tpl_file=None, name_prefix=None):
    """write a template file for a pilot points file

    Args:
        pp_file (`str`): existing pilot points file
        tpl_file (`str`): template file name to write.  If None,
            `pp_file`+".tpl" is used.  Default is `None`.
        name_prefix (`str`): name to prepend to parameter names for each
            pilot point.  For example, if `name_prefix = "hk_"`, then each
            pilot point parameter will be named starting with "hk_" followed by
            its location information (row/column, x/y, or point index) and zone.
            If None, parameter names from `pp_df.name` are used.
            Default is None.

    Returns:
        `pandas.DataFrame`: a dataframe with pilot point information
        (name,x,y,zone,parval1) with the parameter information
        (parnme,tpl_str)

    Example::

        pyemu.pp_utils.pilot_points_to_tpl("my_pps.dat",name_prefix="my_pps")


    """

    if isinstance(pp_file, pd.DataFrame):
        pp_df = pp_file
        assert tpl_file is not None
    else:
        assert os.path.exists(pp_file)
        pp_df = pd.read_csv(pp_file, sep=r"\s+",
                            header=None, names=PP_NAMES)
    pp_df = pp_df.astype({'zone': int}, errors='ignore')
    if tpl_file is None:
        tpl_file = pp_file + ".tpl"

    if name_prefix is not None:
        if "i" in pp_df.columns and "j" in pp_df.columns:
            pp_df.loc[:, "parnme"] = pp_df.apply(
                lambda x: "{0}_i:{1}_j:{2}".format(name_prefix, int(x.i), int(x.j)),
                axis=1,
            )
        elif "x" in pp_df.columns and "y" in pp_df.columns:
            pp_df.loc[:, "parnme"] = pp_df.apply(
                lambda x: "{0}_x:{1:0.2f}_y:{2:0.2f}".format(name_prefix, x.x, x.y),
                axis=1,
            )
        else:
            pp_df.loc[:, "idx"] = np.arange(pp_df.shape[0])
            pp_df.loc[:, "parnme"] = pp_df.apply(
                lambda x: "{0}_ppidx:{1}".format(name_prefix, x.idx),
                axis=1,
            )
        if "zone" in pp_df.columns:
            pp_df.loc[:, "parnme"] = pp_df.apply(
                lambda x: x.parnme + "_zone:{0}".format(x.zone), axis=1
            )
        pp_df.loc[:, "tpl"] = pp_df.parnme.apply(
            lambda x: "~    {0}    ~".format(x)
        )
    else:
        pp_df.loc[:, "parnme"] = pp_df.name
        pp_df.loc[:, "tpl"] = pp_df.parnme.apply(
            lambda x: "~    {0}    ~".format(x)
        )
    with open(tpl_file, "w") as f:
        f.write("ptf ~\n")
        pp_df.loc[:, ["name", "x", "y", "zone", "tpl"]].apply(
            lambda x: f.write(' '.join(x.astype(str)) + '\n'), axis=1)
    # _write_df_tpl(
    #     tpl_file,
    #     pp_df.loc[:, ["name", "x", "y", "zone", "tpl"]],
    #     sep=" ",
    #     index_label="index",
    #     header=False,
    #     index=False,
    #     quotechar=" ",
    #     quoting=2,
    # )

    return pp_df
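
# A minimal usage sketch; "hkpp.dat" is a placeholder pilot point file.  The
# parameter names written to the template can be parsed back out with
# pp_tpl_to_dataframe() defined earlier in this module.
def _example_pilot_points_to_tpl():
    """Illustrative only: build a PEST template file for a pilot point file."""
    pp_df = pilot_points_to_tpl("hkpp.dat", tpl_file="hkpp.dat.tpl", name_prefix="hk")
    tpl_df = pp_tpl_to_dataframe("hkpp.dat.tpl")
    assert (tpl_df.parnme.values == pp_df.parnme.values).all()
    return pp_df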

def get_zoned_ppoints_for_vertexgrid(spacing, zone_array, mg, zone_number=None, add_buffer=True):
    """Generate equally spaced pilot points for the active area of a DISV type MODFLOW6 grid.

    Args:
        spacing (`float`): spacing in model length units between pilot points.
        zone_array (`numpy.ndarray`): the modflow 6 idomain integer array.  This is used to
            set pilot points only in active areas and to assign zone numbers.
        mg  (`flopy.discretization.vertexgrid.VertexGrid`): a VertexGrid flopy discretization derived type.
        zone_number (`int`): zone number
        add_buffer (`boolean`): specifies whether pilot points are placed within a buffer zone
            of size `spacing` around the zone/active domain

    Returns:
        `list`: a list of tuples with pilot point x and y coordinates

    Example::

        get_zoned_ppoints_for_vertexgrid(spacing=100, zone_array=idomain, mg=mg, zone_number=1, add_buffer=False)

    """

    try:
        from shapely.ops import unary_union
        from shapely.geometry import Polygon, Point
    except ImportError:
        raise ImportError('The `shapely` library was not found. Please make sure it is installed.')

    if mg.grid_type == 'vertex' and zone_array is not None and len(zone_array.shape) == 1:
        zone_array = np.reshape(zone_array, (zone_array.shape[0], ))

    assert zone_array.shape[0] == mg.xcellcenters.shape[0], "The ib idomain array should be of shape (ncpl,). i.e. For a single layer."

    if zone_number:
        assert zone_array[zone_array==zone_number].shape[0]>0, f"The zone_number: {zone_number} is not in the ib array."

    # get zone cell x,y
    xc, yc = mg.xcellcenters, mg.ycellcenters
    if zone_number != None:
        xc_zone = xc[np.where(zone_array==zone_number)[0]]
        yc_zone = yc[np.where(zone_array==zone_number)[0]]
    else:
        xc_zone = xc[np.where(zone_array>0)]
        yc_zone = yc[np.where(zone_array>0)]

    # get outer bounds
    xmin, xmax = min(xc_zone), max(xc_zone)
    ymin, ymax = min(yc_zone), max(yc_zone)
    # n-ppoints
    nx = int(np.ceil((xmax - xmin) / spacing))
    ny = int(np.ceil((ymax - ymin) / spacing))
    # make even spaced points
    x = np.linspace(xmin, xmax, nx)
    y = np.linspace(ymin, ymax, ny)
    xv, yv = np.meshgrid(x, y)
    # make grid
    grid_points = [Point(x, y) for x, y in list(zip(xv.flatten(), yv.flatten()))]

    # get vertices for model grid/zone polygon
    verts = [mg.get_cell_vertices(cellid) for cellid in range(mg.ncpl)]
    if zone_number != None:
        # select zone area
        verts = [verts[i] for i in np.where(zone_array==zone_number)[0]]
    # dissolve
    polygon = unary_union([Polygon(v) for v in verts])
    # add buffer
    if add_buffer==True:
        polygon = polygon.buffer(spacing)
    # select ppoint coords within area
    ppoints = [(p.x, p.y) for p in grid_points if polygon.covers(p)]
    assert len(ppoints)>0
    return ppoints
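
# A minimal usage sketch, assuming an existing MODFLOW 6 DISV simulation in the
# current directory with a model named "mymodel"; requires flopy and shapely.
def _example_zoned_ppoints_for_vertexgrid():
    """Illustrative only: equally spaced pilot points for zone 1 of the top layer."""
    import flopy

    sim = flopy.mf6.MFSimulation.load(sim_ws=".")
    gwf = sim.get_model("mymodel")
    mg = gwf.modelgrid              # flopy VertexGrid for a DISV model
    idomain = mg.idomain[0]         # (ncpl,) idomain array for the top layer
    return get_zoned_ppoints_for_vertexgrid(
        spacing=500.0, zone_array=idomain, mg=mg, zone_number=1, add_buffer=False
    )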