
SEED-platform / seed — build #6556 (push, pending completion)
Coverage reported by coveralls-python · Committed via web-flow

Bump future from 0.18.2 to 0.18.3 in /requirements (#3792)

Bumps [future](https://github.com/PythonCharmers/python-future) from 0.18.2 to 0.18.3.
- [Release notes](https://github.com/PythonCharmers/python-future/releases)
- [Changelog](https://github.com/PythonCharmers/python-future/blob/master/docs/changelog.rst)
- [Commits](https://github.com/PythonCharmers/python-future/compare/v0.18.2...v0.18.3)

---
updated-dependencies:
- dependency-name: future
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

15695 of 22613 relevant lines covered (69.41%)

0.69 hits per line

Source File

/seed/analysis_pipelines/better/pipeline.py (17.1% covered)
#!/usr/bin/env python
# encoding: utf-8
"""
:copyright (c) 2014 - 2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Department of Energy) and contributors. All rights reserved.
:author
"""
import copy
import logging
from datetime import timedelta

import dateutil.parser
from celery import chain, shared_task
from django.core.files.base import ContentFile
from django.db.models import Count

from seed.analysis_pipelines.better.buildingsync import (
    SEED_TO_BSYNC_RESOURCE_TYPE,
    _build_better_input
)
from seed.analysis_pipelines.better.client import BETTERClient
from seed.analysis_pipelines.better.helpers import (
    BETTERPipelineContext,
    ExtraDataColumnPath,
    _check_errors,
    _create_better_buildings,
    _run_better_building_analyses,
    _run_better_portfolio_analysis,
    _store_better_building_analysis_results,
    _store_better_portfolio_analysis_results
)
from seed.analysis_pipelines.pipeline import (
    AnalysisPipeline,
    AnalysisPipelineException,
    StopAnalysisTaskChain,
    analysis_pipeline_task,
    task_create_analysis_property_views
)
from seed.analysis_pipelines.utils import (
    calendarize_and_extrapolate_meter_readings,
    get_json_path
)
from seed.models import (
    Analysis,
    AnalysisInputFile,
    AnalysisMessage,
    AnalysisPropertyView,
    Column,
    Meter
)

logger = logging.getLogger(__name__)


def _validate_better_config(analysis):
    """Performs basic validation of the analysis for running a BETTER analysis. Returns any
    validation errors.

    :param analysis: Analysis
    :returns: list[str], list of validation error messages
    """
    config = analysis.configuration
    if not isinstance(config, dict):
        return ['Analysis configuration must be a dictionary/JSON']

    # print(f"ANALYSIS CONFIG IN PIPELINE: {config}")

    REQUIRED_CONFIG_PROPERTIES = [
        'min_model_r_squared',
        'savings_target',
        'benchmark_data',
        'portfolio_analysis',
        'preprocess_meters',
    ]

    return [
        f'Analysis configuration missing required property "{required_prop}"'
        for required_prop in REQUIRED_CONFIG_PROPERTIES if required_prop not in config
    ]


class BETTERPipeline(AnalysisPipeline):
    """
    BETTERPipeline is a class for preparing, running, and post-processing a
    BETTER analysis by implementing the AnalysisPipeline's abstract methods.
    """

    def _prepare_analysis(self, property_view_ids, start_analysis=False):
        """Internal implementation for preparing the BETTER analysis"""
        analysis = Analysis.objects.get(id=self._analysis_id)
        organization = analysis.organization
        if not organization.better_analysis_api_key:
            message = (f'Organization "{organization.name}" is missing the required BETTER Analysis API Key. '
                       'Please update your organization\'s settings or contact your organization administrator.')
            self.fail(message, logger)
            raise AnalysisPipelineException(message)

        # ping BETTER to verify the token is valid
        client = BETTERClient(organization.better_analysis_api_key)
        if not client.token_is_valid():
            message = 'Failed to communicate with BETTER. Please verify organization token is valid and try again.'
            self.fail(message, logger)
            raise AnalysisPipelineException(message)

        # validate the configuration
        validation_errors = _validate_better_config(analysis)
        if validation_errors:
            raise AnalysisPipelineException(
                f'Analysis configuration is invalid: {"; ".join(validation_errors)}')

        progress_data = self.get_progress_data(analysis)

        # Steps:
        # 1) ...starting
        # 2) create AnalysisPropertyViews
        # 3) create input files for each property
        progress_data.total = 3
        progress_data.save()

        chain(
            task_create_analysis_property_views.si(self._analysis_id, property_view_ids),
            _prepare_all_properties.s(self._analysis_id),
            _finish_preparation.si(self._analysis_id, start_analysis)
        ).apply_async()

    def _start_analysis(self):
        """Internal implementation for starting the BETTER analysis"""

        progress_data = self.get_progress_data()

        # Steps:
        # 1) ...starting
        # 2) make requests to BETTER
        # 3) process the results files
        progress_data.total = 3
        progress_data.save()

        chain(
            _start_analysis.si(self._analysis_id),
            _process_results.si(self._analysis_id),
            _finish_analysis.si(self._analysis_id),
        ).apply_async()


def get_meter_readings(property_id, preprocess_meters, config):
    """Returns meters and readings which should meet BETTER's requirements

    :param property_id: int
    :param preprocess_meters: bool, if true aggregate and interpolate readings
        into monthly readings. If false, don't do any preprocessing of the property's
        meters and readings.
    :param config: dict, analysis configuration; may limit readings to a selected date range
    :return: List[dict], list of dictionaries of the form:
        { 'meter_type': <Meter.type>, 'readings': List[SimpleMeterReading | MeterReading] }
    """
    selected_meters_and_readings = []
    meters = (
        Meter.objects
        .filter(
            property_id=property_id,
            type__in=list(SEED_TO_BSYNC_RESOURCE_TYPE.keys()),
        )
    )

    # validate the selected meter reading date range, if one was configured
    if 'select_meters' in config and config['select_meters'] == 'select':
        try:
            value1 = dateutil.parser.parse(config['meter']['start_date'])
            value2 = dateutil.parser.parse(config['meter']['end_date'])
            # add a day so the range includes the last day, otherwise the end timestamp is 00:00:00
            value2 = value2 + timedelta(days=1)

        except Exception as err:
            raise AnalysisPipelineException(
                f'Analysis configuration error: invalid dates selected for meter readings: {err}')

    if preprocess_meters:
        for meter in meters:
            if 'select_meters' in config and config['select_meters'] == 'select':
                try:
                    meter_readings = meter.meter_readings.filter(start_time__range=[value1, value2])
                except Exception as err:
                    logger.error(f"Error retrieving meter readings: {err}")
                    # skip this meter; the analysis will fail later if no meters remain
                    continue
            else:
                meter_readings = meter.meter_readings
            if meter_readings.count() == 0:
                continue
            monthly_readings = calendarize_and_extrapolate_meter_readings(meter_readings.all())
            # filter on readings >= 1.0 because BETTER currently cannot handle readings less than 1
            monthly_readings = [reading for reading in monthly_readings if reading.reading >= 1.0]
            if len(monthly_readings) >= 12:
                selected_meters_and_readings.append({
                    'meter_type': meter.type,
                    'readings': monthly_readings
                })
    else:
        meters = (
            meters
            .annotate(readings_count=Count('meter_readings'))
            .filter(
                readings_count__gte=12,
            )
        )
        for meter in meters:
            # filter on readings >= 1.0 because BETTER currently cannot handle readings less than 1
            readings = []
            if 'select_meters' in config and config['select_meters'] == 'select':
                try:
                    readings = meter.meter_readings.filter(start_time__range=[value1, value2], reading__gte=1.0).order_by('start_time')
                except Exception as err:
                    logger.error(f"Error retrieving meter readings: {err}")
                    # skip this meter; the analysis will fail later if no meters remain
                    continue
            else:
                readings = meter.meter_readings.filter(reading__gte=1.0).order_by('start_time')

            if readings.count() >= 12:
                selected_meters_and_readings.append({
                    'meter_type': meter.type,
                    'readings': readings,
                })

    return selected_meters_and_readings


@shared_task(bind=True)
@analysis_pipeline_task(Analysis.CREATING)
def _prepare_all_properties(self, analysis_view_ids_by_property_view_id, analysis_id):
    """A Celery task which attempts to make BuildingSync files for all AnalysisPropertyViews.

    :param analysis_view_ids_by_property_view_id: dictionary[int:int]
    :param analysis_id: int
    :returns: void
    """
    analysis = Analysis.objects.get(id=analysis_id)
    pipeline = BETTERPipeline(analysis.id)

    progress_data = pipeline.get_progress_data(analysis)
    progress_data.step('Creating files for analysis')

    analysis_property_views = AnalysisPropertyView.objects.filter(id__in=analysis_view_ids_by_property_view_id.values())
    input_file_paths = []
    for analysis_property_view in analysis_property_views:
        selected_meters_and_readings = get_meter_readings(
            analysis_property_view.property_id,
            analysis.configuration.get('preprocess_meters', False),
            analysis.configuration
        )

        if len(selected_meters_and_readings) == 0:
            AnalysisMessage.log_and_create(
                logger=logger,
                type_=AnalysisMessage.INFO,
                analysis_id=analysis.id,
                analysis_property_view_id=analysis_property_view.id,
                user_message='Property not included in analysis: Property has no meters '
                             'meeting BETTER\'s requirements. See the analysis documentation for more info.',
                debug_message=''
            )
            continue

        better_doc, errors = _build_better_input(analysis_property_view, selected_meters_and_readings)
        if errors:
            for error in errors:
                AnalysisMessage.log_and_create(
                    logger=logger,
                    type_=AnalysisMessage.ERROR,
                    analysis_id=analysis.id,
                    analysis_property_view_id=analysis_property_view.id,
                    user_message=f'Error preparing BETTER input: {error}',
                    debug_message='',
                )
            continue

        analysis_input_file = AnalysisInputFile(
            content_type=AnalysisInputFile.BUILDINGSYNC,
            analysis=analysis
        )
        analysis_input_file.file.save(f'{analysis_property_view.id}.xml', ContentFile(better_doc))
        analysis_input_file.clean()
        analysis_input_file.save()
        input_file_paths.append(analysis_input_file.file.path)

    if len(input_file_paths) == 0:
        message = 'No files were able to be prepared for the analysis'
        pipeline.fail(message, logger)
        # stop the task chain
        raise StopAnalysisTaskChain(message)


@shared_task(bind=True)
@analysis_pipeline_task(Analysis.CREATING)
def _finish_preparation(self, analysis_id, start_analysis):
    """A Celery task which finishes the preparation for the BETTER analysis

    :param analysis_id: int
    :param start_analysis: bool
    """
    pipeline = BETTERPipeline(analysis_id)
    pipeline.set_analysis_status_to_ready('Analysis is ready to be started')

    if start_analysis:
        pipeline = BETTERPipeline(analysis_id)
        pipeline.start_analysis()


@shared_task(bind=True)
@analysis_pipeline_task(Analysis.QUEUED)
def _start_analysis(self, analysis_id):
    """Start the BETTER analysis by making requests to the service"""
    pipeline = BETTERPipeline(analysis_id)
    progress_data = pipeline.set_analysis_status_to_running()
    progress_data.step('Sending requests to BETTER service')

    analysis = Analysis.objects.get(id=analysis_id)
    client = BETTERClient(analysis.organization.better_analysis_api_key)
    context = BETTERPipelineContext(analysis, progress_data, client)

    better_portfolio_id = None
    if analysis.configuration.get('portfolio_analysis', False):
        better_portfolio_id, errors = client.create_portfolio(f'SEED Analysis {analysis.name} ({analysis.id})')
        if errors:
            _check_errors(
                errors,
                'Failed to create BETTER portfolio',
                context,
                fail_on_error=True,
            )

    better_building_analyses = _create_better_buildings(better_portfolio_id, context)

    if better_portfolio_id is not None:
        better_analysis_id = _run_better_portfolio_analysis(
            better_portfolio_id,
            better_building_analyses,
            analysis.configuration,
            context,
        )

        _store_better_portfolio_analysis_results(
            better_analysis_id,
            better_building_analyses,
            context,
        )

    else:
        _run_better_building_analyses(
            better_building_analyses,
            analysis.configuration,
            context,
        )

    _store_better_building_analysis_results(
        better_building_analyses,
        context,
    )


@shared_task(bind=True)
@analysis_pipeline_task(Analysis.RUNNING)
def _process_results(self, analysis_id):
    """Store results from the analysis in the original PropertyState"""
    pipeline = BETTERPipeline(analysis_id)
    analysis = Analysis.objects.get(id=analysis_id)

    progress_data = pipeline.get_progress_data(analysis)
    progress_data.step('Processing results')

    # store all measure recommendations
    ee_measure_names = [
        'Upgrade Windows',
        'Reduce Plug Loads',
        'Add/Fix Economizers',
        'Decrease Ventilation',
        'Reduce Lighting Load',
        'Check Fossil Baseload',
        'Decrease Infiltration',
        'Decrease Heating Setpoints',
        'Eliminate Electric Heating',
        'Increase Cooling Setpoints',
        'Reduce Equipment Schedules',
        'Add Wall/Ceiling Insulation',
        'Increase Cooling System Efficiency',
        'Increase Heating System Efficiency'
    ]
    ee_measure_column_data_paths = [
        ExtraDataColumnPath(
            f'better_recommendation_{ee_measure_name.lower().replace(" ", "_")}',
            f'BETTER Recommendation: {ee_measure_name}',
            1,
            f'assessment.ee_measures.{ee_measure_name}'
        ) for ee_measure_name in ee_measure_names
    ]

    # gather all columns to store
    BETTER_VALID_MODEL_E_COL = 'better_valid_model_electricity'
    BETTER_VALID_MODEL_F_COL = 'better_valid_model_fuel'
    column_data_paths = [
        # Combined Savings
        ExtraDataColumnPath(
            'better_cost_savings_combined',
            'BETTER Potential Cost Savings (USD)',
            1,
            'assessment.assessment_energy_use.cost_savings_combined'
        ),
        ExtraDataColumnPath(
            'better_energy_savings_combined',
            'BETTER Potential Energy Savings (kWh)',
            1,
            'assessment.assessment_energy_use.energy_savings_combined'
        ),
        ExtraDataColumnPath(
            'better_ghg_reductions_combined',
            'BETTER Potential GHG Emissions Reduction (MtCO2e)',
            .001,
            'assessment.assessment_energy_use.ghg_reductions_combined'
        ),
        # Energy-specific Savings
        ExtraDataColumnPath(
            BETTER_VALID_MODEL_E_COL,
            'BETTER Valid Electricity Model',
            1,
            'assessment.assessment_energy_use.valid_model_e'
        ),
        ExtraDataColumnPath(
            BETTER_VALID_MODEL_F_COL,
            'BETTER Valid Fuel Model',
            1,
            'assessment.assessment_energy_use.valid_model_f'
        ),
        ExtraDataColumnPath(
            'better_cost_savings_electricity',
            'BETTER Potential Electricity Cost Savings (USD)',
            1,
            'assessment.assessment_energy_use.cost_savings_e'
        ),
        ExtraDataColumnPath(
            'better_cost_savings_fuel',
            'BETTER Potential Fuel Cost Savings (USD)',
            1,
            'assessment.assessment_energy_use.cost_savings_f'
        ),
        ExtraDataColumnPath(
            'better_energy_savings_electricity',
            'BETTER Potential Electricity Energy Savings (kWh)',
            1,
            'assessment.assessment_energy_use.energy_savings_e'
        ),
        ExtraDataColumnPath(
            'better_energy_savings_fuel',
            'BETTER Potential Fuel Energy Savings (kWh)',
            1,
            'assessment.assessment_energy_use.energy_savings_f'
        ),
        ExtraDataColumnPath(
            'better_ghg_reductions_electricity',
            'BETTER Potential Electricity GHG Emissions Reduction (MtCO2e)',
            .001,
            'assessment.assessment_energy_use.ghg_reductions_e'
        ),
        ExtraDataColumnPath(
            'better_ghg_reductions_fuel',
            'BETTER Potential Fuel GHG Emissions Reduction (MtCO2e)',
            .001,
            'assessment.assessment_energy_use.ghg_reductions_f'
        ),
        ExtraDataColumnPath(
            # we will manually add this to the data later (it's not part of BETTER's results)
            # Provides info so user knows which SEED analysis last updated these stored values
            'better_seed_analysis_id',
            'BETTER Analysis Id',
            1,
            'better_seed_analysis_id'
        ),
        ExtraDataColumnPath(
            # we will manually add this to the data later (it's not part of BETTER's results)
            # Provides info so user knows which SEED analysis last updated these stored values
            'better_seed_run_id',
            'BETTER Run Id',
            1,
            'better_seed_run_id'
        ),
        ExtraDataColumnPath(
            'better_min_model_r_squared',
            'BETTER Min Model R^2',
            1,
            'min_model_r_squared'
        ),
        ExtraDataColumnPath(
            'better_inverse_r_squared_electricity',
            'BETTER Inverse Model R^2 (Electricity)',
            1,
            'inverse_model.Electricity.r2'
        ),
        ExtraDataColumnPath(
            'better_inverse_r_squared_fossil_fuel',
            'BETTER Inverse Model R^2 (Fossil Fuel)',
            1,
            'inverse_model.Fossil Fuel.r2'
        ),
    ] + ee_measure_column_data_paths

    for column_data_path in column_data_paths:
        # check if the column exists with the bare minimum required pieces of data. For example,
        # don't check column_description and display_name because they may be changed by
        # the user at a later time.
        column, created = Column.objects.get_or_create(
            is_extra_data=True,
            column_name=column_data_path.column_name,
            organization=analysis.organization,
            table_name='PropertyState',
        )

        # add in the other fields of the columns only if it is a new column.
        if created:
            column.display_name = column_data_path.column_display_name
            column.column_description = column_data_path.column_display_name

        column.save()

    # Update the original PropertyView's PropertyState with analysis results of interest
    analysis_property_views = analysis.analysispropertyview_set.prefetch_related('property', 'cycle').all()
    property_view_by_apv_id = AnalysisPropertyView.get_property_views(analysis_property_views)

    for analysis_property_view in analysis_property_views:
        raw_better_results = copy.deepcopy(analysis_property_view.parsed_results)
        raw_better_results.update({'better_seed_analysis_id': analysis_id})
        raw_better_results.update({'better_seed_run_id': analysis_property_view.id})
        simplified_results = {}
        for data_path in column_data_paths:
            value = get_json_path(data_path.json_path, raw_better_results)
            if value is not None:
                value = float(value) * data_path.unit_multiplier
            simplified_results[data_path.column_name] = value

        electricity_model_is_valid = bool(simplified_results[BETTER_VALID_MODEL_E_COL])
        fuel_model_is_valid = bool(simplified_results[BETTER_VALID_MODEL_F_COL])

        # create a message for the failed models
        warning_messages = []
        if not electricity_model_is_valid:
            warning_messages.append('No reasonable change-point model could be found for this building\'s electricity consumption. Model R^2 was {}'.format(round(simplified_results['better_inverse_r_squared_electricity'], 4)))
        if not fuel_model_is_valid:
            warning_messages.append('No reasonable change-point model could be found for this building\'s fossil fuel consumption. Model R^2 was {}'.format(round(simplified_results['better_inverse_r_squared_fossil_fuel'], 4)))
        for warning_message in warning_messages:
            AnalysisMessage.log_and_create(
                logger,
                AnalysisMessage.WARNING,
                warning_message,
                '',
                analysis_id,
                analysis_property_view.id,
            )

        cleaned_results = {}
        # do some extra cleanup of the results:
        #  - round decimal places of floats
        #  - for fuel-type specific fields, set values to null if the model for
        #    that fuel type wasn't valid (e.g., if electricity model is invalid,
        #    set "potential electricity savings" to null)
        for col_name, value in simplified_results.items():
            value = value if not isinstance(value, float) else round(value, 2)
            if col_name.endswith('_electricity') and col_name != BETTER_VALID_MODEL_E_COL:
                cleaned_results[col_name] = value if electricity_model_is_valid else None
            elif col_name.endswith('_fuel') and col_name != BETTER_VALID_MODEL_F_COL:
                cleaned_results[col_name] = value if fuel_model_is_valid else None
            else:
                cleaned_results[col_name] = value

        original_property_state = property_view_by_apv_id[analysis_property_view.id].state
        original_property_state.extra_data.update(cleaned_results)
        original_property_state.save()


@shared_task(bind=True)
@analysis_pipeline_task(Analysis.RUNNING)
def _finish_analysis(self, analysis_id):
    """A Celery task which finishes the analysis run

    :param analysis_id: int
    """
    pipeline = BETTERPipeline(analysis_id)
    pipeline.set_analysis_status_to_completed()