• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

SEED-platform / seed / #6426

pending completion
#6426

push

coveralls-python

web-flow
Merge pull request #3682 from SEED-platform/Fix-legend

Fix legend

15655 of 22544 relevant lines covered (69.44%)

0.69 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

11.19
/seed/views/tax_lot_properties.py
1
# !/usr/bin/env python
2
# encoding: utf-8
3
"""
1✔
4
:copyright (c) 2014 - 2022, The Regents of the University of California,
5
through Lawrence Berkeley National Laboratory (subject to receipt of any
6
required approvals from the U.S. Department of Energy) and contributors.
7
All rights reserved.
8
:author
9
"""
10
import csv
1✔
11
import datetime
1✔
12
import io
1✔
13
from collections import OrderedDict
1✔
14

15
import xlsxwriter
1✔
16
from django.http import HttpResponse, JsonResponse
17
from quantityfield.units import ureg
1✔
18
from rest_framework.decorators import action
1✔
19
from rest_framework.renderers import JSONRenderer
1✔
20
from rest_framework.viewsets import GenericViewSet
1✔
21

22
from seed.decorators import ajax_request_class
1✔
23
from seed.lib.superperms.orgs.decorators import has_perm_class
1✔
24
from seed.models import (
1✔
25
    ColumnListProfile,
26
    PropertyView,
27
    TaxLotProperty,
28
    TaxLotView
29
)
30
from seed.models.meters import Meter, MeterReading
1✔
31
from seed.models.property_measures import PropertyMeasure
1✔
32
from seed.models.scenarios import Scenario
1✔
33
from seed.serializers.tax_lot_properties import TaxLotPropertySerializer
1✔
34
from seed.utils.api import api_endpoint_class
1✔
35

36
# Maps the `inventory_type` query parameter value to the view model to export.
INVENTORY_MODELS = {'properties': PropertyView, 'taxlots': TaxLotView}
37

38

39
class TaxLotPropertyViewSet(GenericViewSet):
    """
    The TaxLotProperty field is used to return the properties and tax lots from the join table.
    This method presently only works with the CSV, but should eventually be extended to be the
    viewset for any tax lot / property join API call.
    """
    # JSON is the only renderer; the CSV/XLSX/GeoJSON exports below build
    # their HttpResponse objects directly instead of going through DRF.
    renderer_classes = (JSONRenderer,)
    serializer_class = TaxLotPropertySerializer
    @api_endpoint_class
    @ajax_request_class
    @has_perm_class('requires_member')
    @action(detail=False, methods=['POST'])
    def export(self, request):
        """
        Download a csv of the TaxLot and Properties

        .. code-block::

            {
                "ids": [1,2,3],
                "columns": ["tax_jurisdiction_tax_lot_id", "address_line_1", "property_view_id"]
            }

        ---
        parameter_strategy: replace
        parameters:
            - name: cycle
              description: cycle
              required: true
              paramType: query
            - name: inventory_type
              description: properties or taxlots (as defined by the inventory list page)
              required: true
              paramType: query
            - name: ids
              description: list of property/taxlot ids to export (not property/taxlot views)
              required: true
              paramType: body
            - name: columns
              description: list of columns to export
              required: true
              paramType: body
            - name: filename
              description: name of the file to create
              required: false
              paramType: body
            - name: profile_id
              description: Either an id of a list settings profile, or undefined
              paramType: body
        """
        cycle_pk = request.query_params.get('cycle_id', None)
        if not cycle_pk:
            return JsonResponse(
                {'status': 'error', 'message': 'Must pass in cycle_id as query parameter'})
        org_id = request.query_params['organization_id']

        # Normalize profile_id: treat missing, 'None' (string), and '' as "no profile".
        profile_id = request.data.get('profile_id')
        if profile_id in ('None', ''):
            profile_id = None

        # get the class to operate on and the relationships
        view_klass_str = request.query_params.get('inventory_type', 'properties')
        view_klass = INVENTORY_MODELS[view_klass_str]

        # Set the first column to be the ID
        column_name_mappings = OrderedDict([('id', 'ID')])
        column_ids, add_column_name_mappings, columns_from_database = ColumnListProfile.return_columns(
            org_id,
            profile_id,
            view_klass_str)
        column_name_mappings.update(add_column_name_mappings)
        select_related = ['state', 'cycle']
        ids = request.data.get('ids', [])

        # Always restrict the export to the requested cycle. The org/id filters
        # below are layered on top of it; previously both branches rebuilt the
        # dict from scratch and silently dropped the (required) cycle filter.
        filter_str = {'cycle': cycle_pk}
        prefetch_related = []  # defensive default in case neither branch matches
        if hasattr(view_klass, 'property'):
            select_related.append('property')
            prefetch_related = ['labels']
            filter_str['property__organization_id'] = org_id
            if ids:
                filter_str['property__id__in'] = ids
            # always export the labels and notes
            column_name_mappings['property_notes'] = 'Property Notes'
            column_name_mappings['property_labels'] = 'Property Labels'

        elif hasattr(view_klass, 'taxlot'):
            select_related.append('taxlot')
            prefetch_related = ['labels']
            filter_str['taxlot__organization_id'] = org_id
            if ids:
                filter_str['taxlot__id__in'] = ids
            # always export the labels and notes
            column_name_mappings['taxlot_notes'] = 'Tax Lot Notes'
            column_name_mappings['taxlot_labels'] = 'Tax Lot Labels'

        model_views = view_klass.objects.select_related(*select_related).prefetch_related(
            *prefetch_related).filter(**filter_str).order_by('id')

        # get the data in a dict which includes the related data
        data = TaxLotProperty.serialize(model_views, column_ids, columns_from_database)

        # add labels and notes for each row; model_views and data are in the
        # same (id) order, so index i lines up between the two.
        for i, record in enumerate(model_views):
            label_string = [label.name for label in record.labels.all().order_by('name')]
            note_string = [
                note.created.astimezone().strftime("%Y-%m-%d %I:%M:%S %p") + "\n" + note.text
                for note in record.notes.all().order_by('created')
            ]

            if hasattr(record, 'property'):
                data[i]['property_labels'] = ','.join(label_string)
                data[i]['property_notes'] = '\n----------\n'.join(note_string)
            elif hasattr(record, 'taxlot'):
                data[i]['taxlot_labels'] = ','.join(label_string)
                data[i]['taxlot_notes'] = '\n----------\n'.join(note_string)

        # force the data into the same order as the IDs
        if ids:
            order_dict = {obj_id: index for index, obj_id in enumerate(ids)}
            data.sort(key=lambda x: order_dict[x['id']])  # x is the property/taxlot object

        export_type = request.data.get('export_type', 'csv')
        filename = request.data.get('filename', f"ExportedData.{export_type}")

        if export_type == "csv":
            return self._csv_response(filename, data, column_name_mappings)
        elif export_type == "geojson":
            return self._json_response(filename, data, column_name_mappings)
        elif export_type == "xlsx":
            return self._spreadsheet_response(filename, data, column_name_mappings)

        # Previously an unrecognized export_type fell off the end of the method
        # and returned None (an empty 500); report the problem explicitly.
        return JsonResponse(
            {'status': 'error', 'message': f'Unsupported export_type "{export_type}"'})
    def _csv_response(self, filename, data, column_name_mappings):
        """Write *data* out as a CSV attachment named *filename*.

        Columns are emitted in the order of *column_name_mappings*
        (internal column name -> display header).
        """

        def _cell_value(datum, column):
            # Look the column up on the record, falling back to the first
            # related record when the record itself has no value.
            value = datum.get(column, None)
            if value is None and datum.get('related'):
                value = datum['related'][0].get(column, None)

            # Quantities and dates are normally converted by the JSON encoder,
            # which is not involved in this code path.
            if isinstance(value, ureg.Quantity):
                return value.magnitude
            if isinstance(value, datetime.datetime):
                return value.strftime("%Y-%m-%d %H:%M:%S")
            if isinstance(value, datetime.date):
                return value.strftime("%Y-%m-%d")
            return value

        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)

        writer = csv.writer(response)

        # Excel mis-detects a CSV whose very first header cell is exactly
        # 'ID' (it can be id or iD), so downgrade it to lowercase.
        header = list(column_name_mappings.values())
        if header[0] == 'ID':
            header[0] = 'id'
        writer.writerow(header)

        # Iterate the mapping (not the record) so column order is preserved.
        for datum in data:
            writer.writerow([_cell_value(datum, column) for column in column_name_mappings])

        return response
    def _spreadsheet_response(self, filename, data, column_name_mappings):
        """Export *data* as a multi-tab XLSX attachment named *filename*.

        Builds five worksheets: the flat property rows ('Properties'), the
        related measures ('Measures'), scenarios ('Scenarios'), a
        scenario/measure join table, and scenario meter readings.
        """
        response = HttpResponse(
            content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)

        # Scenario attributes exported to the 'Scenarios' tab, in column order.
        scenario_keys = (
            'id', 'name', 'description', 'annual_site_energy_savings', 'annual_source_energy_savings',
            'annual_cost_savings', 'annual_electricity_savings',
            'annual_natural_gas_savings', 'annual_site_energy', 'annual_source_energy', 'annual_natural_gas_energy',
            'annual_electricity_energy', 'annual_peak_demand', 'annual_site_energy_use_intensity',
            'annual_source_energy_use_intensity'
        )
        # Attribute name -> header override; used only to add units to the
        # header text, values are still read via the attribute name.
        scenario_key_mappings = {
            'annual_site_energy_savings': 'annual_site_energy_savings_mmbtu',
            'annual_source_energy_savings': 'annual_source_energy_savings_mmbtu',
            'annual_cost_savings': 'annual_cost_savings_dollars',
            'annual_site_energy': 'annual_site_energy_kbtu',
            'annual_site_energy_use_intensity': 'annual_site_energy_use_intensity_kbtu_ft2',
            'annual_source_energy': 'annual_source_energy_kbtu',
            'annual_source_energy_use_intensity': 'annual_source_energy_use_intensity_kbtu_ft2',
            'annual_natural_gas_energy': 'annual_natural_gas_energy_mmbtu',
            'annual_electricity_energy': 'annual_electricity_energy_mmbtu',
            'annual_peak_demand': 'annual_peak_demand_kw',
            'annual_electricity_savings': 'annual_electricity_savings_kbtu',
            'annual_natural_gas_savings': 'annual_natural_gas_savings_kbtu'
        }

        # PropertyMeasure attributes for the 'Measures' tab, followed by the
        # attributes read off the related Measure object.
        property_measure_keys = (
            'id', 'property_measure_name', 'measure_id', 'cost_mv', 'cost_total_first',
            'cost_installation', 'cost_material', 'cost_capital_replacement', 'cost_residual_value'
        )
        measure_keys = ('name', 'display_name', 'category', 'category_display_name')
        # find measures and scenarios (attached to each record, one query pair
        # per record)
        for i, record in enumerate(data):
            measures = PropertyMeasure.objects.filter(property_state_id=record['property_state_id'])
            record['measures'] = measures

            scenarios = Scenario.objects.filter(property_state_id=record['property_state_id'])
            record['scenarios'] = scenarios

        # Build the workbook fully in memory before writing it to the response.
        output = io.BytesIO()
        wb = xlsxwriter.Workbook(output, {'remove_timezone': True})

        # add tabs
        ws1 = wb.add_worksheet('Properties')
        ws2 = wb.add_worksheet('Measures')
        ws3 = wb.add_worksheet('Scenarios')
        ws4 = wb.add_worksheet('Scenario Measure Join Table')
        ws5 = wb.add_worksheet('Meter Readings')
        bold = wb.add_format({'bold': True})

        # Row/column cursors shared across the datum loop, one pair per tab
        # (ws1 uses row/index, ws2 uses row2/col2, etc.).
        row = 0
        row2 = 0
        col2 = 0
        row3 = 0
        col3 = 0
        row4 = 0
        row5 = 0

        for index, val in enumerate(list(column_name_mappings.values())):
            # Do not write the first element as ID, this causes weird issues with Excel.
            if index == 0 and val == 'ID':
                ws1.write(row, index, 'id', bold)
            else:
                ws1.write(row, index, val, bold)

        # iterate over the results to preserve column order and write row.
        # Measure/scenario headers are written lazily on the first record that
        # has any; the flags ensure they are only written once.
        add_m_headers = True
        add_s_headers = True
        for datum in data:
            row += 1
            # NOTE(review): `id` shadows the builtin; it holds this record's
            # 'id' value for the join table below.
            id = None
            for index, column in enumerate(column_name_mappings):
                if column == 'id':
                    id = datum.get(column, None)

                row_result = datum.get(column, None)

                # Try grabbing the value out of the related field if not found yet.
                if row_result is None and datum.get('related'):
                    row_result = datum['related'][0].get(column, None)

                # Convert quantities (this is typically handled in the JSON Encoder, but that isn't here).
                if isinstance(row_result, ureg.Quantity):
                    row_result = row_result.magnitude
                elif isinstance(row_result, datetime.datetime):
                    row_result = row_result.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(row_result, datetime.date):
                    row_result = row_result.strftime("%Y-%m-%d")
                ws1.write(row, index, row_result)

            # measures
            for index, m in enumerate(datum['measures']):
                if add_m_headers:
                    # grab headers
                    for key in property_measure_keys:
                        ws2.write(row2, col2, key, bold)
                        col2 += 1
                    for key in measure_keys:
                        ws2.write(row2, col2, 'measure ' + key, bold)
                        col2 += 1
                    add_m_headers = False

                row2 += 1
                col2 = 0
                for key in property_measure_keys:
                    ws2.write(row2, col2, getattr(m, key))
                    col2 += 1
                for key in measure_keys:
                    ws2.write(row2, col2, getattr(m.measure, key))
                    col2 += 1

            # scenarios (and join table)
            # join table headers
            # NOTE(review): these header cells (and ws5's below) are rewritten
            # on every datum iteration; harmless since the values are constant.
            ws4.write('A1', 'property_id', bold)
            ws4.write('B1', 'scenario_id', bold)
            ws4.write('C1', 'measure_id', bold)
            for index, s in enumerate(datum['scenarios']):
                scenario_id = s.id
                if add_s_headers:
                    # grab headers
                    for key in scenario_keys:
                        # double check scenario_key_mappings in case a different header is desired
                        if key in scenario_key_mappings.keys():
                            key = scenario_key_mappings[key]
                        ws3.write(row3, col3, key, bold)
                        col3 += 1
                    add_s_headers = False
                row3 += 1
                col3 = 0
                for key in scenario_keys:
                    ws3.write(row3, col3, getattr(s, key))
                    col3 += 1

                # one join-table row per measure attached to the scenario
                for sm in s.measures.all():
                    row4 += 1
                    ws4.write(row4, 0, id)
                    ws4.write(row4, 1, scenario_id)
                    ws4.write(row4, 2, sm.id)

            # scenario meter readings headers
            ws5.write('A1', 'scenario_id', bold)
            ws5.write('B1', 'meter_id', bold)
            ws5.write('C1', 'type', bold)
            ws5.write('D1', 'start_time', bold)
            ws5.write('E1', 'end_time', bold)
            ws5.write('F1', 'reading', bold)
            ws5.write('G1', 'units', bold)
            ws5.write('H1', 'is_virtual', bold)
            # datetime formatting for the reading start/end cells
            date_format = wb.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'})

            for index, s in enumerate(datum['scenarios']):
                scenario_id = s.id
                # retrieve meters
                meters = Meter.objects.filter(scenario_id=scenario_id)
                for m in meters:
                    # retrieve readings
                    readings = MeterReading.objects.filter(meter_id=m.id).order_by('start_time')
                    for r in readings:
                        row5 += 1
                        ws5.write(row5, 0, scenario_id)
                        ws5.write(row5, 1, m.id)
                        the_type = next((item for item in Meter.ENERGY_TYPES if item[0] == m.type), None)
                        the_type = the_type[1] if the_type is not None else None
                        ws5.write(row5, 2, the_type)  # use energy type enum to determine reading type
                        ws5.write_datetime(row5, 3, r.start_time, date_format)
                        ws5.write_datetime(row5, 4, r.end_time, date_format)
                        ws5.write(row5, 5, r.reading)  # this is now a float field
                        ws5.write(row5, 6, r.source_unit)
                        ws5.write(row5, 7, m.is_virtual)

        wb.close()

        # xlsx_data contains the Excel file
        xlsx_data = output.getvalue()

        response.write(xlsx_data)
        return response
    def _json_response(self, filename, data, column_name_mappings):
        """Export *data* (plus its related records) as a GeoJSON attachment.

        WKT polygon/point columns become a per-feature GeometryCollection;
        every other column is stored in the feature's properties under its
        display name from *column_name_mappings*.
        """
        polygon_fields = ["bounding_box", "centroid", "property_footprint", "taxlot_footprint", "long_lat"]
        features = []

        # extract related records and export them alongside the primary ones
        related_records = self._extract_related(data)
        complete_data = data + related_records

        for datum in complete_data:
            feature = {
                "type": "Feature",
                "properties": {}
            }

            for key, value in datum.items():
                if value is None:
                    continue

                # Convert quantities/dates (normally the JSON encoder's job,
                # but it is not involved here).
                if isinstance(value, ureg.Quantity):
                    value = value.magnitude
                elif isinstance(value, datetime.datetime):
                    value = value.strftime("%Y-%m-%d %H:%M:%S")
                elif isinstance(value, datetime.date):
                    value = value.strftime("%Y-%m-%d")

                if value and any(k in key for k in polygon_fields):
                    # Geometry column: append it to the feature's
                    # GeometryCollection, creating the collection on the
                    # first geometry encountered.
                    if key == 'long_lat':
                        individual_geometry = {
                            "coordinates": self._serialized_point(value),
                            "type": "Point"
                        }
                    else:
                        individual_geometry = {
                            "coordinates": [self._serialized_coordinates(value)],
                            "type": "Polygon"
                        }

                    if feature.get("geometry", None) is None:
                        feature["geometry"] = {
                            "type": "GeometryCollection",
                            "geometries": [individual_geometry]
                        }
                    else:
                        feature["geometry"]["geometries"].append(individual_geometry)
                else:
                    # Non-polygon data: store under the display name.
                    display_key = column_name_mappings.get(key, key)
                    feature["properties"][display_key] = value

            # NOTE: features without any geometry are still appended, but the
            # GeoJSON will not render them without a lat/lng.

            # add style information
            if feature["properties"].get("property_state_id") is not None:
                feature["properties"]["stroke"] = "#185189"  # buildings color
            elif feature["properties"].get("taxlot_state_id") is not None:
                feature["properties"]["stroke"] = "#10A0A0"  # taxlots color
            feature["properties"]["marker-color"] = "#E74C3C"
            feature["properties"]["fill-opacity"] = 0

            # append feature
            features.append(feature)

        # Build the FeatureCollection once, after the loop. Previously it was
        # rebuilt on every iteration and raised NameError for empty data; the
        # debug print() calls that dumped each geometry value are also gone.
        response_dict = {
            "type": "FeatureCollection",
            "crs": {
                "type": "EPSG",
                "properties": {"code": 4326}
            },
            "features": features
        }

        response = JsonResponse(response_dict)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)

        return response
    def _serialized_coordinates(self, polygon_wkt):
        """Convert a WKT polygon, e.g. ``POLYGON ((x1 y1, x2 y2, ...))``,
        into a list of ``[x, y]`` float pairs for GeoJSON.

        Note: polygons with interior rings (holes) are not supported; the
        body is assumed to be a single comma-separated coordinate list.
        """
        # str.lstrip('POLYGON (')/rstrip(')') — as used previously — treat the
        # argument as a *character set*, not a prefix/suffix; it only worked
        # because coordinates never start or end with those characters.
        # Slice off the parenthesized body explicitly instead.
        paren = polygon_wkt.find('(')
        body = polygon_wkt[paren:].strip('() ') if paren != -1 else polygon_wkt

        coordinates = []
        for coord_pair in body.split(', '):
            coordinates.append([float(coord) for coord in coord_pair.split(' ')])

        return coordinates
    def _serialized_point(self, point_wkt):
        """Convert a WKT point, e.g. ``POINT (x y)``, into an ``[x, y]``
        float pair for GeoJSON."""
        # str.lstrip('POINT (')/rstrip(')') — as used previously — treat the
        # argument as a *character set*, not a prefix/suffix; slice off the
        # parenthesized body explicitly instead.
        paren = point_wkt.find('(')
        body = point_wkt[paren:].strip('() ') if paren != -1 else point_wkt

        # Only the first coordinate pair is used, matching the old behavior.
        first_pair = body.split(', ')[0]
        return [float(coord) for coord in first_pair.split(' ')]
    def _extract_related(self, data):
        """Collect every record from the ``related`` arrays in *data* and
        return them deduplicated.

        Returns an empty list for empty *data*. Record values must be
        hashable (they are flattened to tuples for deduplication); result
        order is unspecified.
        """
        # The previous version probed data[0] for property_state_id /
        # taxlot_state_id to set an is_property flag, then ran *identical*
        # dedup code in both branches — and could raise IndexError on empty
        # data or UnboundLocalError when neither key was present. The dedup
        # is type-agnostic, so do it once, unconditionally.
        related = []
        for datum in data:
            for record in datum.get("related") or []:
                related.append(record)

        # make array unique
        return [dict(p) for p in set(tuple(i.items()) for i in related)]
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc