• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

localstack / localstack / a57ee786-03e7-4a8f-ac70-1374334153cd

12 May 2025 06:25PM UTC coverage: 86.642% (+0.02%) from 86.624%
a57ee786-03e7-4a8f-ac70-1374334153cd

push

circleci

web-flow
CFn v2: better handle deploy errors (#12601)

10 of 17 new or added lines in 1 file covered. (58.82%)

9 existing lines in 7 files now uncovered.

64355 of 74277 relevant lines covered (86.64%)

0.87 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

89.77
/localstack-core/localstack/services/cloudwatch/provider_v2.py
1
import datetime
1✔
2
import json
1✔
3
import logging
1✔
4
import re
1✔
5
import threading
1✔
6
import uuid
1✔
7
from datetime import timezone
1✔
8
from typing import List
1✔
9

10
from localstack.aws.api import CommonServiceException, RequestContext, handler
1✔
11
from localstack.aws.api.cloudwatch import (
1✔
12
    AccountId,
13
    ActionPrefix,
14
    AlarmName,
15
    AlarmNamePrefix,
16
    AlarmNames,
17
    AlarmTypes,
18
    AmazonResourceName,
19
    CloudwatchApi,
20
    DashboardBody,
21
    DashboardName,
22
    DashboardNamePrefix,
23
    DashboardNames,
24
    Datapoint,
25
    DeleteDashboardsOutput,
26
    DescribeAlarmHistoryOutput,
27
    DescribeAlarmsForMetricOutput,
28
    DescribeAlarmsOutput,
29
    DimensionFilters,
30
    Dimensions,
31
    EntityMetricDataList,
32
    ExtendedStatistic,
33
    ExtendedStatistics,
34
    GetDashboardOutput,
35
    GetMetricDataMaxDatapoints,
36
    GetMetricDataOutput,
37
    GetMetricStatisticsOutput,
38
    HistoryItemType,
39
    IncludeLinkedAccounts,
40
    InvalidParameterCombinationException,
41
    InvalidParameterValueException,
42
    LabelOptions,
43
    ListDashboardsOutput,
44
    ListMetricsOutput,
45
    ListTagsForResourceOutput,
46
    MaxRecords,
47
    MetricData,
48
    MetricDataQueries,
49
    MetricDataQuery,
50
    MetricDataResult,
51
    MetricDataResultMessages,
52
    MetricName,
53
    MetricStat,
54
    Namespace,
55
    NextToken,
56
    Period,
57
    PutCompositeAlarmInput,
58
    PutDashboardOutput,
59
    PutMetricAlarmInput,
60
    RecentlyActive,
61
    ResourceNotFound,
62
    ScanBy,
63
    StandardUnit,
64
    StateReason,
65
    StateReasonData,
66
    StateValue,
67
    Statistic,
68
    Statistics,
69
    StrictEntityValidation,
70
    TagKeyList,
71
    TagList,
72
    TagResourceOutput,
73
    Timestamp,
74
    UntagResourceOutput,
75
)
76
from localstack.aws.connect import connect_to
1✔
77
from localstack.http import Request
1✔
78
from localstack.services.cloudwatch.alarm_scheduler import AlarmScheduler
1✔
79
from localstack.services.cloudwatch.cloudwatch_database_helper import CloudwatchDatabase
1✔
80
from localstack.services.cloudwatch.models import (
1✔
81
    CloudWatchStore,
82
    LocalStackAlarm,
83
    LocalStackCompositeAlarm,
84
    LocalStackDashboard,
85
    LocalStackMetricAlarm,
86
    cloudwatch_stores,
87
)
88
from localstack.services.edge import ROUTER
1✔
89
from localstack.services.plugins import SERVICE_PLUGINS, ServiceLifecycleHook
1✔
90
from localstack.state import AssetDirectory, StateVisitor
1✔
91
from localstack.utils.aws import arns
1✔
92
from localstack.utils.aws.arns import extract_account_id_from_arn, lambda_function_name
1✔
93
from localstack.utils.collections import PaginatedList
1✔
94
from localstack.utils.json import CustomEncoder as JSONEncoder
1✔
95
from localstack.utils.strings import camel_to_snake_case
1✔
96
from localstack.utils.sync import poll_condition
1✔
97
from localstack.utils.threads import start_worker_thread
1✔
98
from localstack.utils.time import timestamp_millis
1✔
99

100
# internal (non-AWS) endpoint serving all raw metric datapoints, see get_raw_metrics()
PATH_GET_RAW_METRICS = "/_aws/cloudwatch/metrics/raw"
# state reason used by moto/AWS for alarms that were created but never evaluated yet
MOTO_INITIAL_UNCHECKED_REASON = "Unchecked: Initial alarm creation"
# page size for ListMetrics results
LIST_METRICS_MAX_RESULTS = 500
# If the values in these fields are not the same, their values are added when generating labels
LABEL_DIFFERENTIATORS = ["Stat", "Period"]
# version string recorded in alarm history items
HISTORY_VERSION = "1.0"

LOG = logging.getLogger(__name__)
# guards all mutations of the cloudwatch stores (alarms, dashboards)
_STORE_LOCK = threading.RLock()
# maximum number of datapoints a single GetMetricStatistics call may request
AWS_MAX_DATAPOINTS_ACCEPTED: int = 1440
110

111

112
class ValidationError(CommonServiceException):
    """Generic 400 validation error raised for invalid request parameters."""

    # TODO: check this error against AWS (doesn't exist in the API)
    def __init__(self, message: str):
        # sender fault (True) with HTTP status 400 and code "ValidationError"
        super().__init__("ValidationError", message, 400, True)
116

117

118
class InvalidParameterCombination(CommonServiceException):
    """400 error raised when mutually incompatible request parameters are combined."""

    def __init__(self, message: str):
        # sender fault (True) with HTTP status 400 and code "InvalidParameterCombination"
        super().__init__("InvalidParameterCombination", message, 400, True)
121

122

123
def _validate_parameters_for_put_metric_data(metric_data: MetricData) -> None:
    """Validate the MetricData entries of a PutMetricData request.

    :param metric_data: list of MetricDatum dicts as received from the client
    :raises InvalidParameterCombinationException: if mutually exclusive fields
        (Value/Values or Value/StatisticValues) are both present in one entry
    :raises InvalidParameterValueException: if Values and Counts have
        different lengths
    """
    for index, metric_item in enumerate(metric_data):
        # AWS error messages reference members 1-indexed ("MetricData.member.1")
        member = index + 1
        # use explicit None checks instead of truthiness: a Value of 0 (or 0.0)
        # and an empty Values list are still "specified" and must be validated
        value = metric_item.get("Value")
        values = metric_item.get("Values")
        counts = metric_item.get("Counts")
        statistic_values = metric_item.get("StatisticValues")

        if value is not None and values is not None:
            raise InvalidParameterCombinationException(
                f"The parameters MetricData.member.{member}.Value and MetricData.member.{member}.Values are mutually exclusive and you have specified both."
            )

        if statistic_values is not None and value is not None:
            raise InvalidParameterCombinationException(
                f"The parameters MetricData.member.{member}.Value and MetricData.member.{member}.StatisticValues are mutually exclusive and you have specified both."
            )

        if values is not None and counts is not None and len(values) != len(counts):
            raise InvalidParameterValueException(
                f"The parameters MetricData.member.{member}.Values and MetricData.member.{member}.Counts must be of the same size."
            )
143

144

145
class CloudwatchProvider(CloudwatchApi, ServiceLifecycleHook):
1✔
146
    """
147
    Cloudwatch provider.
148

149
    LIMITATIONS:
150
        - simplified composite alarm rule evaluation:
151
            - only OR operator is supported
152
            - only ALARM expression is supported
153
            - only metric alarms can be included in the rule and they should be referenced by ARN only
154
    """
155

156
    def __init__(self):
        """Initialize the provider; the scheduler is created lazily in start_alarm_scheduler()."""
        # set when the service starts (see start_alarm_scheduler / shutdown_alarm_scheduler)
        self.alarm_scheduler: AlarmScheduler = None
        # NOTE(review): appears unused in this chunk of the file - confirm before removing
        self.store = None
        # SQLite-backed storage for metric datapoints
        self.cloudwatch_database = CloudwatchDatabase()
160

161
    @staticmethod
    def get_store(account_id: str, region: str) -> CloudWatchStore:
        """Return the CloudWatch store for the given account/region pair."""
        account_stores = cloudwatch_stores[account_id]
        return account_stores[region]
164

165
    def accept_state_visitor(self, visitor: StateVisitor):
        """Expose persistable state: the in-memory stores and the on-disk metric database."""
        visitor.visit(cloudwatch_stores)
        visitor.visit(AssetDirectory(self.service, CloudwatchDatabase.CLOUDWATCH_DATA_ROOT))
168

169
    def on_after_init(self):
        """Register the internal raw-metrics endpoint and start alarm scheduling."""
        ROUTER.add(PATH_GET_RAW_METRICS, self.get_raw_metrics)
        self.start_alarm_scheduler()
172

173
    def on_before_state_reset(self):
        """Stop alarm evaluation and wipe all persisted metric data before a reset."""
        self.shutdown_alarm_scheduler()
        self.cloudwatch_database.clear_tables()
176

177
    def on_after_state_reset(self):
        # re-create the scheduler that was shut down in on_before_state_reset
        self.start_alarm_scheduler()
179

180
    def on_before_state_load(self):
        # pause alarm evaluation while a state snapshot is being loaded
        self.shutdown_alarm_scheduler()
182

183
    def on_after_state_load(self):
        """Restart the scheduler and re-schedule all alarms from the loaded state."""
        self.start_alarm_scheduler()

        def restart_alarms(*args):
            # wait until the cloudwatch service is fully up before touching alarms
            poll_condition(lambda: SERVICE_PLUGINS.is_running("cloudwatch"))
            self.alarm_scheduler.restart_existing_alarms()

        # run asynchronously so state loading is not blocked by the polling above
        start_worker_thread(restart_alarms)
191

192
    def on_before_stop(self):
        # stop all alarm evaluation threads on service shutdown
        self.shutdown_alarm_scheduler()
194

195
    def start_alarm_scheduler(self):
        """Create the alarm scheduler unless one is already running (idempotent)."""
        if self.alarm_scheduler:
            return
        LOG.debug("starting cloudwatch scheduler")
        self.alarm_scheduler = AlarmScheduler()
199

200
    def shutdown_alarm_scheduler(self):
        """Stop the alarm scheduler and release it.

        Safe to call when no scheduler is running (e.g. when several lifecycle
        hooks shut down in sequence): a missing scheduler is simply ignored
        instead of raising AttributeError on None.
        """
        LOG.debug("stopping cloudwatch scheduler")
        if self.alarm_scheduler:
            self.alarm_scheduler.shutdown_scheduler()
            # reset so start_alarm_scheduler() will create a fresh one
            self.alarm_scheduler = None
204

205
    def delete_alarms(self, context: RequestContext, alarm_names: AlarmNames, **kwargs) -> None:
        """
        Delete alarms.

        Each alarm's evaluation scheduler is stopped and the alarm is removed
        from the store; names that do not resolve to an existing alarm are
        ignored (pop with default).
        """
        with _STORE_LOCK:
            # store lookup is loop-invariant: account/region come from the context
            store = self.get_store(context.account_id, context.region)
            for alarm_name in alarm_names:
                # obtain alarm ARN from alarm name
                alarm_arn = arns.cloudwatch_alarm_arn(
                    alarm_name, account_id=context.account_id, region_name=context.region
                )
                self.alarm_scheduler.delete_scheduler_for_alarm(alarm_arn)
                store.alarms.pop(alarm_arn, None)
217

218
    def put_metric_data(
        self,
        context: RequestContext,
        namespace: Namespace,
        metric_data: MetricData = None,
        entity_metric_data: EntityMetricDataList = None,
        strict_entity_validation: StrictEntityValidation = None,
        **kwargs,
    ) -> None:
        """Validate and store the given metric datapoints under *namespace*.

        :raises InvalidParameterCombinationException: for mutually exclusive
            Value/Values/StatisticValues combinations
        :raises InvalidParameterValueException: for mismatched Values/Counts
        """
        # TODO add support for entity_metric_data and strict_entity_validation
        # metric_data defaults to None (e.g. a request carrying only
        # EntityMetricData); treat that as an empty batch instead of
        # crashing with a TypeError while iterating None
        metric_data = metric_data or []
        _validate_parameters_for_put_metric_data(metric_data)

        self.cloudwatch_database.add_metric_data(
            context.account_id, context.region, namespace, metric_data
        )
233

234
    def get_metric_data(
        self,
        context: RequestContext,
        metric_data_queries: MetricDataQueries,
        start_time: Timestamp,
        end_time: Timestamp,
        next_token: NextToken = None,
        scan_by: ScanBy = None,
        max_datapoints: GetMetricDataMaxDatapoints = None,
        label_options: LabelOptions = None,
        **kwargs,
    ) -> GetMetricDataOutput:
        """Evaluate the given metric-data queries against the metric database.

        Each query is resolved independently and its datapoints are paginated.
        NOTE(review): `nxt` is overwritten on every loop iteration, so the
        returned NextToken belongs to the *last* query only - confirm this is
        acceptable for multi-query requests.
        """
        results: List[MetricDataResult] = []
        # fall back to 100,800 datapoints when the caller sets no limit
        limit = max_datapoints or 100_800
        messages: MetricDataResultMessages = []
        nxt = None
        label_additions = []

        # if Stat or Period differ across queries, append them to the labels
        # so otherwise-identical result sets can be told apart
        for diff in LABEL_DIFFERENTIATORS:
            non_unique = []
            for query in metric_data_queries:
                non_unique.append(query["MetricStat"][diff])
            if len(set(non_unique)) > 1:
                label_additions.append(diff)

        for query in metric_data_queries:
            query_result = self.cloudwatch_database.get_metric_data_stat(
                account_id=context.account_id,
                region=context.region,
                query=query,
                start_time=start_time,
                end_time=end_time,
                scan_by=scan_by,
            )
            # propagate any informational messages from the database layer
            if query_result.get("messages"):
                messages.extend(query_result.get("messages"))

            # an explicit Label wins; otherwise default to the metric name
            label = query.get("Label") or f"{query['MetricStat']['Metric']['MetricName']}"
            # TODO: does this happen even if a label is set in the query?
            for label_addition in label_additions:
                label = f"{label} {query['MetricStat'][label_addition]}"

            timestamps = query_result.get("timestamps", {})
            values = query_result.get("values", {})

            # Paginate: pair timestamps with values so pages keep them aligned
            timestamp_value_dicts = [
                {
                    "Timestamp": timestamp,
                    "Value": value,
                }
                for timestamp, value in zip(timestamps, values, strict=False)
            ]

            pagination = PaginatedList(timestamp_value_dicts)
            timestamp_page, nxt = pagination.get_page(
                lambda item: item.get("Timestamp"),
                next_token=next_token,
                page_size=limit,
            )

            # unzip the page back into the parallel lists the API shape expects
            timestamps = [item.get("Timestamp") for item in timestamp_page]
            values = [item.get("Value") for item in timestamp_page]

            metric_data_result = {
                "Id": query.get("Id"),
                "Label": label,
                "StatusCode": "Complete",
                "Timestamps": timestamps,
                "Values": values,
            }
            results.append(MetricDataResult(**metric_data_result))

        return GetMetricDataOutput(MetricDataResults=results, NextToken=nxt, Messages=messages)
308

309
    def set_alarm_state(
        self,
        context: RequestContext,
        alarm_name: AlarmName,
        state_value: StateValue,
        state_reason: StateReason,
        state_reason_data: StateReasonData = None,
        **kwargs,
    ) -> None:
        """Manually set the state of an alarm and trigger its configured actions.

        Updates the stored alarm state, re-evaluates composite alarms, and -
        when actions are enabled - invokes the SNS/Lambda actions registered
        for the new state.

        :raises InvalidParameterValueException: if state_reason_data is not valid JSON
        :raises ResourceNotFound: if no alarm with the given name exists
        :raises ValidationError: if state_value is not OK/ALARM/INSUFFICIENT_DATA
        """
        try:
            if state_reason_data:
                state_reason_data = json.loads(state_reason_data)
        except ValueError:
            raise InvalidParameterValueException(
                "TODO: check right error message: Json was not correctly formatted"
            )
        with _STORE_LOCK:
            store = self.get_store(context.account_id, context.region)
            alarm = store.alarms.get(
                arns.cloudwatch_alarm_arn(
                    alarm_name, account_id=context.account_id, region_name=context.region
                )
            )
            if not alarm:
                raise ResourceNotFound()

            old_state = alarm.alarm["StateValue"]
            if state_value not in ("OK", "ALARM", "INSUFFICIENT_DATA"):
                raise ValidationError(
                    f"1 validation error detected: Value '{state_value}' at 'stateValue' failed to satisfy constraint: Member must satisfy enum value set: [INSUFFICIENT_DATA, ALARM, OK]"
                )

            # remember the previous state for the action payloads built below
            old_state_reason = alarm.alarm["StateReason"]
            old_state_update_timestamp = alarm.alarm["StateUpdatedTimestamp"]

            # setting the same state again is a no-op: no update, no actions
            if old_state == state_value:
                return

            alarm.alarm["StateTransitionedTimestamp"] = datetime.datetime.now(timezone.utc)
            # update startDate (=last ALARM date) - should only update when a new alarm is triggered
            # the date is only updated if we have a reason-data, which is set by an alarm
            if state_reason_data:
                state_reason_data["startDate"] = state_reason_data.get("queryDate")

            self._update_state(
                context,
                alarm,
                state_value,
                state_reason,
                state_reason_data,
            )

            # composite alarms referencing this alarm may need to change state too
            self._evaluate_composite_alarms(context, alarm)

            if not alarm.alarm["ActionsEnabled"]:
                return
            # pick the action list matching the new state
            if state_value == "OK":
                actions = alarm.alarm["OKActions"]
            elif state_value == "ALARM":
                actions = alarm.alarm["AlarmActions"]
            else:
                actions = alarm.alarm["InsufficientDataActions"]
            for action in actions:
                data = arns.parse_arn(action)
                # test for sns - can this be done in a more generic way?
                if data["service"] == "sns":
                    service = connect_to(
                        region_name=data["region"], aws_access_key_id=data["account"]
                    ).sns
                    subject = f"""{state_value}: "{alarm_name}" in {context.region}"""
                    message = create_message_response_update_state_sns(alarm, old_state)
                    service.publish(TopicArn=action, Subject=subject, Message=message)
                elif data["service"] == "lambda":
                    service = connect_to(
                        region_name=data["region"], aws_access_key_id=data["account"]
                    ).lambda_
                    message = create_message_response_update_state_lambda(
                        alarm, old_state, old_state_reason, old_state_update_timestamp
                    )
                    service.invoke(FunctionName=lambda_function_name(action), Payload=message)
                else:
                    # TODO: support other actions
                    LOG.warning(
                        "Action for service %s not implemented, action '%s' will not be triggered.",
                        data["service"],
                        action,
                    )
396

397
    def get_raw_metrics(self, request: Request):
        """Internal endpoint (PATH_GET_RAW_METRICS) returning all raw metric datapoints.

        This feature was introduced with https://github.com/localstack/localstack/pull/3535;
        in the meantime, it required a valid aws-header so that the account-id/region could
        be extracted. With the new implementation, we want to return all data, but add the
        account-id/region as additional attributes.

        # TODO endpoint should be refactored or deprecated at some point
        #   - result should be paginated
        #   - include aggregated metrics (but we would also need to change/adapt the shape of "metrics" that we return)
        :returns: json {"metrics": [{"ns": "namespace", "n": "metric_name", "v": value, "t": timestamp,
        "d": [<dimensions-key-pair-values>],"account": account, "region": region}]}
        """
        return {"metrics": self.cloudwatch_database.get_all_metric_data() or []}
409

410
    @handler("PutMetricAlarm", expand=False)
    def put_metric_alarm(self, context: RequestContext, request: PutMetricAlarmInput) -> None:
        """Create or update a metric alarm after validating the request,
        then schedule its periodic evaluation.

        :raises ValidationError: for invalid TreatMissingData, Period,
            Statistic, or EvaluateLowSampleCountPercentile values
        :raises InvalidParameterValueException: for unsupported ExtendedStatistic
        """
        # missing will be the default, when not set (but it will not explicitly be set)
        if request.get("TreatMissingData", "missing") not in [
            "breaching",
            "notBreaching",
            "ignore",
            "missing",
        ]:
            raise ValidationError(
                f"The value {request['TreatMissingData']} is not supported for TreatMissingData parameter. Supported values are [breaching, notBreaching, ignore, missing]."
            )
        # do some sanity checks:
        if request.get("Period"):
            # Valid values are 10, 30, and any multiple of 60.
            value = request.get("Period")
            if value not in (10, 30):
                if value % 60 != 0:
                    raise ValidationError("Period must be 10, 30 or a multiple of 60")
        if request.get("Statistic"):
            if request.get("Statistic") not in [
                "SampleCount",
                "Average",
                "Sum",
                "Minimum",
                "Maximum",
            ]:
                raise ValidationError(
                    f"Value '{request.get('Statistic')}' at 'statistic' failed to satisfy constraint: Member must satisfy enum value set: [Maximum, SampleCount, Sum, Minimum, Average]"
                )

        extended_statistic = request.get("ExtendedStatistic")
        # only percentile statistics ("p...") are supported
        if extended_statistic and not extended_statistic.startswith("p"):
            raise InvalidParameterValueException(
                f"The value {extended_statistic} for parameter ExtendedStatistic is not supported."
            )
        evaluate_low_sample_count_percentile = request.get("EvaluateLowSampleCountPercentile")
        if evaluate_low_sample_count_percentile and evaluate_low_sample_count_percentile not in (
            "evaluate",
            "ignore",
        ):
            raise ValidationError(
                f"Option {evaluate_low_sample_count_percentile} is not supported. "
                "Supported options for parameter EvaluateLowSampleCountPercentile are evaluate and ignore."
            )
        with _STORE_LOCK:
            store = self.get_store(context.account_id, context.region)
            metric_alarm = LocalStackMetricAlarm(context.account_id, context.region, {**request})
            alarm_arn = metric_alarm.alarm["AlarmArn"]
            store.alarms[alarm_arn] = metric_alarm
            # start (or replace) the scheduler that periodically evaluates this alarm
            self.alarm_scheduler.schedule_metric_alarm(alarm_arn)
461

462
    @handler("PutCompositeAlarm", expand=False)
    def put_composite_alarm(self, context: RequestContext, request: PutCompositeAlarmInput) -> None:
        """Create or update a composite alarm.

        Rule-expression limitations (see class docstring) are logged as
        warnings instead of failing the request.
        """
        with _STORE_LOCK:
            store = self.get_store(context.account_id, context.region)
            composite_alarm = LocalStackCompositeAlarm(
                context.account_id, context.region, {**request}
            )

            alarm_rule = composite_alarm.alarm["AlarmRule"]
            rule_expression_validation_result = self._validate_alarm_rule_expression(alarm_rule)
            # plain loop instead of a side-effect-only list comprehension
            for warning in rule_expression_validation_result:
                LOG.warning(warning)

            alarm_arn = composite_alarm.alarm["AlarmArn"]
            store.alarms[alarm_arn] = composite_alarm
476

477
    def describe_alarms(
        self,
        context: RequestContext,
        alarm_names: AlarmNames = None,
        alarm_name_prefix: AlarmNamePrefix = None,
        alarm_types: AlarmTypes = None,
        children_of_alarm_name: AlarmName = None,
        parents_of_alarm_name: AlarmName = None,
        state_value: StateValue = None,
        action_prefix: ActionPrefix = None,
        max_records: MaxRecords = None,
        next_token: NextToken = None,
        **kwargs,
    ) -> DescribeAlarmsOutput:
        """Describe metric and composite alarms, optionally filtered.

        NOTE(review): only one filter is applied (first match in the order
        action_prefix > alarm_name_prefix > alarm_names > state_value) -
        confirm against AWS whether filters should combine.
        """
        store = self.get_store(context.account_id, context.region)
        alarms = list(store.alarms.values())
        if action_prefix:
            # an alarm matches if any of its configured actions starts with the
            # prefix (fixed: the stored key is "AlarmActions", a list - the
            # previous lookup of "AlarmAction" raised KeyError)
            alarms = [
                a.alarm
                for a in alarms
                if any(
                    action.startswith(action_prefix)
                    for action in a.alarm.get("AlarmActions") or []
                )
            ]
        elif alarm_name_prefix:
            alarms = [a.alarm for a in alarms if a.alarm["AlarmName"].startswith(alarm_name_prefix)]
        elif alarm_names:
            alarms = [a.alarm for a in alarms if a.alarm["AlarmName"] in alarm_names]
        elif state_value:
            alarms = [a.alarm for a in alarms if a.alarm["StateValue"] == state_value]
        else:
            # no filter: return all alarms (no need to re-read the store)
            alarms = [a.alarm for a in alarms]

        # TODO: Pagination
        # composite alarms are distinguished by carrying an AlarmRule
        metric_alarms = [a for a in alarms if a.get("AlarmRule") is None]
        composite_alarms = [a for a in alarms if a.get("AlarmRule") is not None]
        return DescribeAlarmsOutput(CompositeAlarms=composite_alarms, MetricAlarms=metric_alarms)
508

509
    def describe_alarms_for_metric(
        self,
        context: RequestContext,
        metric_name: MetricName,
        namespace: Namespace,
        statistic: Statistic = None,
        extended_statistic: ExtendedStatistic = None,
        dimensions: Dimensions = None,
        period: Period = None,
        unit: StandardUnit = None,
        **kwargs,
    ) -> DescribeAlarmsForMetricOutput:
        """Return all metric alarms watching the given metric in the given
        namespace, narrowed by whichever optional filters were supplied."""
        store = self.get_store(context.account_id, context.region)

        def _watches_metric(candidate) -> bool:
            # only metric alarms (not composite ones) on the requested metric
            return (
                isinstance(candidate, LocalStackMetricAlarm)
                and candidate.alarm.get("MetricName") == metric_name
                and candidate.alarm.get("Namespace") == namespace
            )

        alarms = [a.alarm for a in store.alarms.values() if _watches_metric(a)]

        # apply each optional filter only when the caller provided a value
        optional_filters = (
            ("Statistic", statistic),
            ("Dimensions", dimensions),
            ("Period", period),
            ("Unit", unit),
        )
        for field, expected in optional_filters:
            if expected:
                alarms = [a for a in alarms if a.get(field) == expected]
        return DescribeAlarmsForMetricOutput(MetricAlarms=alarms)
539

540
    def list_tags_for_resource(
        self, context: RequestContext, resource_arn: AmazonResourceName, **kwargs
    ) -> ListTagsForResourceOutput:
        """Return the tags attached to the given resource ARN."""
        store = self.get_store(context.account_id, context.region)
        tagging_result = store.TAGS.list_tags_for_resource(resource_arn)
        resource_tags = tagging_result.get("Tags", [])
        return ListTagsForResourceOutput(Tags=resource_tags)
546

547
    def untag_resource(
        self,
        context: RequestContext,
        resource_arn: AmazonResourceName,
        tag_keys: TagKeyList,
        **kwargs,
    ) -> UntagResourceOutput:
        """Remove the given tag keys from the resource."""
        tagging_store = self.get_store(context.account_id, context.region).TAGS
        tagging_store.untag_resource(resource_arn, tag_keys)
        return UntagResourceOutput()
557

558
    def tag_resource(
        self, context: RequestContext, resource_arn: AmazonResourceName, tags: TagList, **kwargs
    ) -> TagResourceOutput:
        """Attach the given tags to the resource."""
        tagging_store = self.get_store(context.account_id, context.region).TAGS
        tagging_store.tag_resource(resource_arn, tags)
        return TagResourceOutput()
564

565
    def put_dashboard(
        self,
        context: RequestContext,
        dashboard_name: DashboardName,
        dashboard_body: DashboardBody,
        **kwargs,
    ) -> PutDashboardOutput:
        """Create or replace the dashboard stored under *dashboard_name*."""
        # dashboard names are restricted to alphanumerics, dash and underscore
        name_pattern = r"^[a-zA-Z0-9_-]+$"
        if re.match(name_pattern, dashboard_name) is None:
            raise InvalidParameterValueException(
                "The value for field DashboardName contains invalid characters. "
                "It can only contain alphanumerics, dash (-) and underscore (_).\n"
            )

        store = self.get_store(context.account_id, context.region)
        dashboard = LocalStackDashboard(
            context.account_id, context.region, dashboard_name, dashboard_body
        )
        store.dashboards[dashboard_name] = dashboard
        return PutDashboardOutput()
584

585
    def get_dashboard(
        self, context: RequestContext, dashboard_name: DashboardName, **kwargs
    ) -> GetDashboardOutput:
        """Return name, body and ARN of an existing dashboard."""
        store = self.get_store(context.account_id, context.region)
        dashboard = store.dashboards.get(dashboard_name)
        if dashboard is None:
            raise InvalidParameterValueException(f"Dashboard {dashboard_name} does not exist.")

        return GetDashboardOutput(
            DashboardName=dashboard_name,
            DashboardBody=dashboard.dashboard_body,
            DashboardArn=dashboard.dashboard_arn,
        )
598

599
    def delete_dashboards(
        self, context: RequestContext, dashboard_names: DashboardNames, **kwargs
    ) -> DeleteDashboardsOutput:
        """Remove the given dashboards; unknown names are ignored."""
        store = self.get_store(context.account_id, context.region)
        for name in dashboard_names:
            # pop with default: deleting a non-existing dashboard is a no-op
            store.dashboards.pop(name, None)
        return DeleteDashboardsOutput()
606

607
    def list_dashboards(
        self,
        context: RequestContext,
        dashboard_name_prefix: DashboardNamePrefix = None,
        next_token: NextToken = None,
        **kwargs,
    ) -> ListDashboardsOutput:
        """List dashboards, optionally restricted to a name prefix."""
        store = self.get_store(context.account_id, context.region)
        prefix = dashboard_name_prefix or ""

        # build one entry per dashboard whose name matches the prefix
        entries = [
            {
                "DashboardName": name,
                "DashboardArn": dashboard.dashboard_arn,
                "LastModified": dashboard.last_modified,
                "Size": dashboard.size,
            }
            for name, dashboard in store.dashboards.items()
            if name.startswith(prefix)
        ]
        return ListDashboardsOutput(
            DashboardEntries=entries,
        )
632

633
    def list_metrics(
        self,
        context: RequestContext,
        namespace: Namespace = None,
        metric_name: MetricName = None,
        dimensions: DimensionFilters = None,
        next_token: NextToken = None,
        recently_active: RecentlyActive = None,
        include_linked_accounts: IncludeLinkedAccounts = None,
        owning_account: AccountId = None,
        **kwargs,
    ) -> ListMetricsOutput:
        """List metrics stored in the database, filtered by namespace, metric
        name, and dimension filters.

        NOTE(review): recently_active, include_linked_accounts and
        owning_account are currently ignored - confirm whether they should be
        supported.
        """
        result = self.cloudwatch_database.list_metrics(
            context.account_id,
            context.region,
            namespace,
            metric_name,
            dimensions or [],
        )

        # map database rows (snake_case keys) to the CloudWatch API shape
        metrics = [
            {
                "Namespace": metric.get("namespace"),
                "MetricName": metric.get("metric_name"),
                "Dimensions": metric.get("dimensions"),
            }
            for metric in result.get("metrics", [])
        ]
        # paginate with at most LIST_METRICS_MAX_RESULTS entries per page
        aliases_list = PaginatedList(metrics)
        page, nxt = aliases_list.get_page(
            lambda metric: f"{metric.get('Namespace')}-{metric.get('MetricName')}-{metric.get('Dimensions')}",
            next_token=next_token,
            page_size=LIST_METRICS_MAX_RESULTS,
        )
        return ListMetricsOutput(Metrics=page, NextToken=nxt)
668

669
    def get_metric_statistics(
        self,
        context: RequestContext,
        namespace: Namespace,
        metric_name: MetricName,
        start_time: Timestamp,
        end_time: Timestamp,
        period: Period,
        dimensions: Dimensions = None,
        statistics: Statistics = None,
        extended_statistics: ExtendedStatistics = None,
        unit: StandardUnit = None,
        **kwargs,
    ) -> GetMetricStatisticsOutput:
        """Return aggregated datapoints for a single metric.

        Validates the requested time range and datapoint count, then queries the
        metric database once per (statistic, unit) combination and merges the
        per-statistic results into one ``Datapoint`` per (unit, timestamp).

        :raises InvalidParameterValueException: if StartTime is not strictly
            before EndTime
        :raises InvalidParameterCombination: if the range/period combination
            would yield more datapoints than AWS accepts
        """
        start_time_unix = int(start_time.timestamp())
        end_time_unix = int(end_time.timestamp())

        if not start_time_unix < end_time_unix:
            raise InvalidParameterValueException(
                "The parameter StartTime must be less than the parameter EndTime."
            )

        expected_datapoints = (end_time_unix - start_time_unix) / period

        if expected_datapoints > AWS_MAX_DATAPOINTS_ACCEPTED:
            raise InvalidParameterCombination(
                f"You have requested up to {int(expected_datapoints)} datapoints, which exceeds the limit of {AWS_MAX_DATAPOINTS_ACCEPTED}. "
                f"You may reduce the datapoints requested by increasing Period, or decreasing the time range."
            )

        # nested mapping: unit -> timestamp -> {stat name -> value, "Unit": unit}
        stat_datapoints = {}

        # when no unit is given, aggregate separately for every unit recorded
        # for this metric within the requested window
        units = (
            [unit]
            if unit
            else self.cloudwatch_database.get_units_for_metric_data_stat(
                account_id=context.account_id,
                region=context.region,
                start_time=start_time,
                end_time=end_time,
                metric_name=metric_name,
                namespace=namespace,
            )
        )

        # `statistics` is optional; guard against None so a request carrying only
        # ExtendedStatistics does not crash with a TypeError.
        # NOTE(review): extended_statistics is currently not evaluated at all —
        # TODO confirm whether it should be supported here.
        for stat in statistics or []:
            for selected_unit in units:
                query_result = self.cloudwatch_database.get_metric_data_stat(
                    account_id=context.account_id,
                    region=context.region,
                    start_time=start_time,
                    end_time=end_time,
                    scan_by="TimestampDescending",
                    query=MetricDataQuery(
                        MetricStat=MetricStat(
                            Metric={
                                "MetricName": metric_name,
                                "Namespace": namespace,
                                "Dimensions": dimensions or [],
                            },
                            Period=period,
                            Stat=stat,
                            Unit=selected_unit,
                        )
                    ),
                )

                timestamps = query_result.get("timestamps", [])
                values = query_result.get("values", [])
                # merge this statistic's values into the shared per-timestamp record
                for i, timestamp in enumerate(timestamps):
                    stat_datapoints.setdefault(selected_unit, {})
                    stat_datapoints[selected_unit].setdefault(timestamp, {})
                    stat_datapoints[selected_unit][timestamp][stat] = values[i]
                    stat_datapoints[selected_unit][timestamp]["Unit"] = selected_unit

        datapoints: list[Datapoint] = []
        for selected_unit, results in stat_datapoints.items():
            for timestamp, stats in results.items():
                datapoints.append(
                    Datapoint(
                        Timestamp=timestamp,
                        SampleCount=stats.get("SampleCount"),
                        Average=stats.get("Average"),
                        Sum=stats.get("Sum"),
                        Minimum=stats.get("Minimum"),
                        Maximum=stats.get("Maximum"),
                        # "NULL_VALUE" is the sentinel used for metrics stored
                        # without a unit; surface it as the literal "None"
                        Unit="None" if selected_unit == "NULL_VALUE" else selected_unit,
                    )
                )

        return GetMetricStatisticsOutput(Datapoints=datapoints, Label=metric_name)
760

761
    def _update_state(
        self,
        context: RequestContext,
        alarm: LocalStackAlarm,
        state_value: str,
        state_reason: str,
        state_reason_data: dict = None,
    ):
        """Transition *alarm* to a new state and record the change in history.

        Appends a StateUpdate item to the store's alarm history (capturing both
        the old and the new state) and then mutates the alarm's state fields in
        place. The history entry is written BEFORE the alarm is mutated, so it
        carries the timestamp at which the state being replaced was entered.

        :param alarm: the alarm object to mutate (metric or composite)
        :param state_value: new StateValue (e.g. OK / ALARM / INSUFFICIENT_DATA)
        :param state_reason: human-readable reason for the transition
        :param state_reason_data: optional structured reason payload; stored
            JSON-encoded on the alarm when provided
        """
        old_state = alarm.alarm["StateValue"]
        old_state_reason = alarm.alarm["StateReason"]
        store = self.get_store(context.account_id, context.region)
        # NOTE(review): naive local time — presumably should be timezone-aware
        # UTC (the module imports `timezone`); confirm against snapshot tests
        current_time = datetime.datetime.now()
        # version is not present in state reason data for composite alarm, hence the check
        if state_reason_data and isinstance(alarm, LocalStackMetricAlarm):
            state_reason_data["version"] = HISTORY_VERSION
        history_data = {
            "version": HISTORY_VERSION,
            "oldState": {"stateValue": old_state, "stateReason": old_state_reason},
            "newState": {
                "stateValue": state_value,
                "stateReason": state_reason,
                "stateReasonData": state_reason_data,
            },
        }
        # Timestamp intentionally reads the alarm's previous StateUpdatedTimestamp,
        # which is overwritten further below
        store.histories.append(
            {
                "Timestamp": timestamp_millis(alarm.alarm["StateUpdatedTimestamp"]),
                "HistoryItemType": HistoryItemType.StateUpdate,
                "AlarmName": alarm.alarm["AlarmName"],
                "HistoryData": json.dumps(history_data),
                "HistorySummary": f"Alarm updated from {old_state} to {state_value}",
                "AlarmType": "MetricAlarm"
                if isinstance(alarm, LocalStackMetricAlarm)
                else "CompositeAlarm",
            }
        )
        # only after the history item is recorded do we mutate the alarm itself
        alarm.alarm["StateValue"] = state_value
        alarm.alarm["StateReason"] = state_reason
        if state_reason_data:
            alarm.alarm["StateReasonData"] = json.dumps(state_reason_data)
        alarm.alarm["StateUpdatedTimestamp"] = current_time
802

803
    def disable_alarm_actions(
        self, context: RequestContext, alarm_names: AlarmNames, **kwargs
    ) -> None:
        """Turn action execution off for every alarm named in *alarm_names*."""
        self._set_alarm_actions(context, alarm_names, False)
807

808
    def enable_alarm_actions(
        self, context: RequestContext, alarm_names: AlarmNames, **kwargs
    ) -> None:
        """Turn action execution on for every alarm named in *alarm_names*."""
        self._set_alarm_actions(context, alarm_names, True)
812

813
    def _set_alarm_actions(self, context, alarm_names, enabled):
        """Set the ActionsEnabled flag on each named alarm that exists in the store.

        Names that do not resolve to a stored alarm are skipped silently.
        """
        account_id = context.account_id
        region = context.region
        store = self.get_store(account_id, region)
        for alarm_name in alarm_names:
            arn = arns.cloudwatch_alarm_arn(
                alarm_name, account_id=account_id, region_name=region
            )
            target = store.alarms.get(arn)
            if not target:
                continue
            target.alarm["ActionsEnabled"] = enabled
822

823
    def describe_alarm_history(
        self,
        context: RequestContext,
        alarm_name: AlarmName = None,
        alarm_types: AlarmTypes = None,
        history_item_type: HistoryItemType = None,
        start_date: Timestamp = None,
        end_date: Timestamp = None,
        max_records: MaxRecords = None,
        next_token: NextToken = None,
        scan_by: ScanBy = None,
        **kwargs,
    ) -> DescribeAlarmHistoryOutput:
        """Return recorded alarm history items, optionally filtered.

        Supports filtering by alarm name, history item type and start/end date,
        and truncation via max_records.
        TODO: alarm_types, next_token and scan_by are currently ignored.
        """
        store = self.get_store(context.account_id, context.region)
        history = store.histories
        if alarm_name:
            history = [h for h in history if h["AlarmName"] == alarm_name]
        if history_item_type:
            history = [h for h in history if h["HistoryItemType"] == history_item_type]

        def _get_timestamp(item: dict):
            # stored as an ISO-8601 string (see timestamp_millis usage in
            # _update_state); parse it back into a datetime for comparison.
            # NOTE(review): assumes the string format is accepted by
            # datetime.fromisoformat — confirm for "Z"-suffixed timestamps.
            if timestamp_string := item.get("Timestamp"):
                return datetime.datetime.fromisoformat(timestamp_string)
            return None

        if start_date:
            history = [h for h in history if (date := _get_timestamp(h)) and date >= start_date]
        if end_date:
            history = [h for h in history if (date := _get_timestamp(h)) and date <= end_date]
        if max_records:
            history = history[:max_records]
        return DescribeAlarmHistoryOutput(AlarmHistoryItems=history)
851

852
    def _evaluate_composite_alarms(self, context: RequestContext, triggering_alarm):
        """Re-evaluate every composite alarm in the store after a state change."""
        # TODO either pass store as a parameter or acquire RLock (with _STORE_LOCK:)
        # everything works ok now but better ensure protection of critical section in front of future changes
        store = self.get_store(context.account_id, context.region)
        for candidate in list(store.alarms.values()):
            if isinstance(candidate, LocalStackCompositeAlarm):
                self._evaluate_composite_alarm(context, candidate, triggering_alarm)
860

861
    def _evaluate_composite_alarm(self, context, composite_alarm, triggering_alarm):
        """Re-evaluate one composite alarm after a child alarm changed state.

        Only flat ``ALARM(...) OR ALARM(...)`` rules are supported: the composite
        transitions to ALARM if any referenced alarm is in ALARM, otherwise to
        OK. On an actual state change the alarm is updated (with history) and,
        if actions are enabled, its configured actions are executed.
        """
        store = self.get_store(context.account_id, context.region)
        alarm_rule = composite_alarm.alarm["AlarmRule"]
        rule_expression_validation = self._validate_alarm_rule_expression(alarm_rule)
        if rule_expression_validation:
            # unsupported rule syntax: log and bail out without changing state
            LOG.warning(
                "Alarm rule contains unsupported expressions and will not be evaluated: %s",
                rule_expression_validation,
            )
            return
        new_state_value = StateValue.OK
        # assuming that a rule consists only of ALARM evaluations of metric alarms, with OR logic applied
        for metric_alarm_arn in self._get_alarm_arns(alarm_rule):
            metric_alarm = store.alarms.get(metric_alarm_arn)
            if not metric_alarm:
                # a dangling reference invalidates the whole rule evaluation
                LOG.warning(
                    "Alarm rule won't be evaluated as there is no alarm with ARN %s",
                    metric_alarm_arn,
                )
                return
            if metric_alarm.alarm["StateValue"] == StateValue.ALARM:
                # first child in ALARM wins; it becomes the triggering alarm
                triggering_alarm = metric_alarm
                new_state_value = StateValue.ALARM
                break
        old_state_value = composite_alarm.alarm["StateValue"]
        if old_state_value == new_state_value:
            # no transition, nothing to record or trigger
            return
        triggering_alarm_arn = triggering_alarm.alarm.get("AlarmArn")
        triggering_alarm_state = triggering_alarm.alarm.get("StateValue")
        triggering_alarm_state_change_timestamp = triggering_alarm.alarm.get(
            "StateTransitionedTimestamp"
        )
        state_reason_formatted_timestamp = triggering_alarm_state_change_timestamp.strftime(
            "%A %d %B, %Y %H:%M:%S %Z"
        )
        state_reason = (
            f"{triggering_alarm_arn} "
            f"transitioned to {triggering_alarm_state} "
            f"at {state_reason_formatted_timestamp}"
        )
        state_reason_data = {
            "triggeringAlarms": [
                {
                    "arn": triggering_alarm_arn,
                    "state": {
                        "value": triggering_alarm_state,
                        "timestamp": timestamp_millis(triggering_alarm_state_change_timestamp),
                    },
                }
            ]
        }
        self._update_state(
            context, composite_alarm, new_state_value, state_reason, state_reason_data
        )
        if composite_alarm.alarm["ActionsEnabled"]:
            self._run_composite_alarm_actions(
                context, composite_alarm, old_state_value, triggering_alarm
            )
919

920
    def _validate_alarm_rule_expression(self, alarm_rule):
1✔
921
        validation_result = []
1✔
922
        alarms_conditions = [alarm.strip() for alarm in alarm_rule.split("OR")]
1✔
923
        for alarm_condition in alarms_conditions:
1✔
924
            if not alarm_condition.startswith("ALARM"):
1✔
925
                validation_result.append(
1✔
926
                    f"Unsupported expression in alarm rule condition {alarm_condition}: Only ALARM expression is supported by Localstack as of now"
927
                )
928
        return validation_result
1✔
929

930
    def _get_alarm_arns(self, composite_alarm_rule):
1✔
931
        # regexp for everything within (" ")
932
        return re.findall(r'\("([^"]*)"\)', composite_alarm_rule)
1✔
933

934
    def _run_composite_alarm_actions(
        self, context, composite_alarm, old_state_value, triggering_alarm
    ):
        """Execute the composite alarm's actions for its current (new) state.

        Selects the action list matching the alarm's new StateValue and fires
        each one; only SNS targets are implemented — other services are logged
        and skipped.
        """
        new_state_value = composite_alarm.alarm["StateValue"]
        # pick the action list that corresponds to the state just entered
        if new_state_value == StateValue.OK:
            actions = composite_alarm.alarm["OKActions"]
        elif new_state_value == StateValue.ALARM:
            actions = composite_alarm.alarm["AlarmActions"]
        else:
            actions = composite_alarm.alarm["InsufficientDataActions"]
        for action in actions:
            data = arns.parse_arn(action)
            if data["service"] == "sns":
                # publish via an SNS client scoped to the topic's account/region
                service = connect_to(
                    region_name=data["region"], aws_access_key_id=data["account"]
                ).sns
                subject = f"""{new_state_value}: "{composite_alarm.alarm["AlarmName"]}" in {context.region}"""
                message = create_message_response_update_composite_alarm_state_sns(
                    composite_alarm, triggering_alarm, old_state_value
                )
                service.publish(TopicArn=action, Subject=subject, Message=message)
            else:
                # TODO: support other actions
                LOG.warning(
                    "Action for service %s not implemented, action '%s' will not be triggered.",
                    data["service"],
                    action,
                )
962

963

964
def create_metric_data_query_from_alarm(alarm: LocalStackMetricAlarm):
    """Build the single-entry metric-data-query list describing *alarm*'s metric.

    Used as the fallback "metrics" configuration when the alarm has no explicit
    Metrics list.
    """
    # TODO may need to be adapted for other use cases
    #  verified return value with a snapshot test
    _alarm = alarm.alarm
    metric = {
        "namespace": _alarm["Namespace"],
        "name": _alarm["MetricName"],
        "dimensions": _alarm.get("Dimensions") or {},
    }
    query = {
        "id": str(uuid.uuid4()),
        "metricStat": {
            "metric": metric,
            "period": int(_alarm["Period"]),
            "stat": _alarm["Statistic"],
        },
        "returnData": True,
    }
    return [query]
982

983

984
def create_message_response_update_state_lambda(
    alarm: LocalStackMetricAlarm, old_state, old_state_reason, old_state_timestamp
):
    """Render the JSON event payload sent to a Lambda alarm action.

    Mirrors the CloudWatch alarm state-change event shape: current state,
    previous state and the alarm's metric configuration.
    """
    _alarm = alarm.alarm
    response = {
        "accountId": extract_account_id_from_arn(_alarm["AlarmArn"]),
        "alarmArn": _alarm["AlarmArn"],
        "alarmData": {
            "alarmName": _alarm["AlarmName"],
            "state": {
                "value": _alarm["StateValue"],
                "reason": _alarm["StateReason"],
                "timestamp": _alarm["StateUpdatedTimestamp"],
            },
            "previousState": {
                "value": old_state,
                "reason": old_state_reason,
                "timestamp": old_state_timestamp,
            },
            "configuration": {
                "description": _alarm.get("AlarmDescription", ""),
                # fall back to a synthesized single-metric query when the alarm
                # has no explicit Metrics configuration
                "metrics": _alarm.get(
                    "Metrics", create_metric_data_query_from_alarm(alarm)
                ),  # TODO: add test with metric_data_queries
            },
        },
        "time": _alarm["StateUpdatedTimestamp"],
        "region": alarm.region,
        "source": "aws.cloudwatch",
    }
    return json.dumps(response, cls=JSONEncoder)
1015

1016

1017
def create_message_response_update_state_sns(alarm: LocalStackMetricAlarm, old_state: StateValue):
    """Render the SNS notification payload for a metric-alarm state change.

    Builds the top-level state-change fields plus a "Trigger" section with the
    alarm's metric configuration, matching the shape AWS publishes (verified
    against snapshot tests).
    """
    _alarm = alarm.alarm
    response = {
        "AWSAccountId": alarm.account_id,
        "OldStateValue": old_state,
        "AlarmName": _alarm["AlarmName"],
        "AlarmDescription": _alarm.get("AlarmDescription"),
        "AlarmConfigurationUpdatedTimestamp": _alarm["AlarmConfigurationUpdatedTimestamp"],
        "NewStateValue": _alarm["StateValue"],
        "NewStateReason": _alarm["StateReason"],
        "StateChangeTime": _alarm["StateUpdatedTimestamp"],
        # the long-name for 'region' should be used - as we don't have it, we use the short name
        # which needs to be slightly changed to make snapshot tests work
        "Region": alarm.region.replace("-", " ").capitalize(),
        "AlarmArn": _alarm["AlarmArn"],
        "OKActions": _alarm.get("OKActions", []),
        "AlarmActions": _alarm.get("AlarmActions", []),
        "InsufficientDataActions": _alarm.get("InsufficientDataActions", []),
    }

    # collect trigger details
    details = {
        "MetricName": _alarm.get("MetricName", ""),
        "Namespace": _alarm.get("Namespace", ""),
        "Unit": _alarm.get("Unit", None),  # testing with AWS revealed this currently returns None
        "Period": int(_alarm.get("Period", 0)),
        "EvaluationPeriods": int(_alarm.get("EvaluationPeriods", 0)),
        "ComparisonOperator": _alarm.get("ComparisonOperator", ""),
        "Threshold": float(_alarm.get("Threshold", 0.0)),
        "TreatMissingData": _alarm.get("TreatMissingData", ""),
        "EvaluateLowSampleCountPercentile": _alarm.get("EvaluateLowSampleCountPercentile", ""),
    }

    # Dimensions not serializable
    dimensions = []
    alarm_dimensions = _alarm.get("Dimensions", [])
    if alarm_dimensions:
        for d in _alarm["Dimensions"]:
            # re-key each dimension to the lowercase names used in the message
            dimensions.append({"value": d["Value"], "name": d["Name"]})
    # empty string (not an empty list) when the alarm has no dimensions
    details["Dimensions"] = dimensions or ""

    alarm_statistic = _alarm.get("Statistic")
    alarm_extended_statistic = _alarm.get("ExtendedStatistic")

    # exactly one of Statistic / ExtendedStatistic is reflected in the trigger
    if alarm_statistic:
        details["StatisticType"] = "Statistic"
        details["Statistic"] = camel_to_snake_case(alarm_statistic).upper()  # AWS returns uppercase
    elif alarm_extended_statistic:
        details["StatisticType"] = "ExtendedStatistic"
        details["ExtendedStatistic"] = alarm_extended_statistic

    response["Trigger"] = details

    return json.dumps(response, cls=JSONEncoder)
1071

1072

1073
def create_message_response_update_composite_alarm_state_sns(
    composite_alarm: LocalStackCompositeAlarm,
    triggering_alarm: LocalStackMetricAlarm,
    old_state: StateValue,
):
    """Render the SNS notification payload for a composite-alarm state change.

    Includes the composite alarm's old/new state plus a TriggeringChildren
    entry describing the metric alarm that caused the transition.
    """
    _alarm = composite_alarm.alarm
    # the long-name for 'region' should be used - as we don't have it, we use the short name
    # which needs to be slightly changed to make snapshot tests work
    pretty_region = composite_alarm.region.replace("-", " ").capitalize()
    response = {
        "AWSAccountId": composite_alarm.account_id,
        "AlarmName": _alarm["AlarmName"],
        "AlarmDescription": _alarm.get("AlarmDescription"),
        "AlarmRule": _alarm.get("AlarmRule"),
        "OldStateValue": old_state,
        "NewStateValue": _alarm["StateValue"],
        "NewStateReason": _alarm["StateReason"],
        "StateChangeTime": _alarm["StateUpdatedTimestamp"],
        "Region": pretty_region,
        "AlarmArn": _alarm["AlarmArn"],
        "OKActions": _alarm.get("OKActions", []),
        "AlarmActions": _alarm.get("AlarmActions", []),
        "InsufficientDataActions": _alarm.get("InsufficientDataActions", []),
    }

    child = triggering_alarm.alarm
    response["TriggeringChildren"] = [
        {
            "Arn": child.get("AlarmArn"),
            "State": {
                "Value": child["StateValue"],
                "Timestamp": child["StateUpdatedTimestamp"],
            },
        }
    ]

    return json.dumps(response, cls=JSONEncoder)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc