/localstack-core/localstack/services/lambda_/event_source_mapping/pollers/stream_poller.py
import json
import logging
import threading
from abc import abstractmethod
from collections import defaultdict
from datetime import datetime
from typing import Iterator

from botocore.client import BaseClient
from botocore.exceptions import ClientError

from localstack.aws.api.pipes import (
    OnPartialBatchItemFailureStreams,
)
from localstack.services.lambda_.event_source_mapping.event_processor import (
    BatchFailureError,
    CustomerInvocationError,
    EventProcessor,
    PartialBatchFailureError,
    PipeInternalError,
)
from localstack.services.lambda_.event_source_mapping.pipe_utils import (
    get_current_time,
    get_datetime_from_timestamp,
    get_internal_client,
)
from localstack.services.lambda_.event_source_mapping.pollers.poller import (
    EmptyPollResultsException,
    Poller,
    get_batch_item_failures,
)
from localstack.services.lambda_.event_source_mapping.pollers.sqs_poller import get_queue_url
from localstack.services.lambda_.event_source_mapping.senders.sender_utils import (
    batched,
)
from localstack.utils.aws.arns import parse_arn, s3_bucket_name
from localstack.utils.backoff import ExponentialBackoff
from localstack.utils.batch_policy import Batcher
from localstack.utils.strings import long_uid

LOG = logging.getLogger(__name__)


# TODO: fix this poller to support resharding
#   https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-resharding.html
class StreamPoller(Poller):
    # Mapping of shard id => shard iterator
    shards: dict[str, str]
    # Iterator for round-robin polling across shards because a batch cannot contain events from different shards.
    # This is a workaround for not handling shards in parallel.
    iterator_over_shards: Iterator[tuple[str, str]] | None
    # The ESM UUID is needed in failure processing to form the S3 failure destination object key
    esm_uuid: str | None

    # The ARN of the processor (e.g., Pipe ARN)
    partner_resource_arn: str | None

    # Used for backing off between retries and breaking the retry loop
    _is_shutdown: threading.Event

    # Collects and flushes a batch of records based on a batching policy
    shard_batcher: dict[str, Batcher[dict]]

    def __init__(
        self,
        source_arn: str,
        source_parameters: dict | None = None,
        source_client: BaseClient | None = None,
        processor: EventProcessor | None = None,
        partner_resource_arn: str | None = None,
        esm_uuid: str | None = None,
    ):
        super().__init__(source_arn, source_parameters, source_client, processor)
        self.partner_resource_arn = partner_resource_arn
        self.esm_uuid = esm_uuid
        self.shards = {}
        self.iterator_over_shards = None

        self._is_shutdown = threading.Event()

        self.shard_batcher = defaultdict(
            lambda: Batcher(
                max_count=self.stream_parameters.get("BatchSize", 100),
                max_window=self.stream_parameters.get("MaximumBatchingWindowInSeconds", 0),
            )
        )
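
    # Illustrative sketch (not part of the upstream source): based on how the batcher is
    # used in poll_events_from_shard below, each per-shard Batcher is assumed to report
    # whether a flush is due once either threshold is crossed, e.g. with max_count=2:
    #
    #   batcher.add([record_1])  # -> False: below both the count and window thresholds
    #   batcher.add([record_2])  # -> True: max_count reached, caller should flush
    #   batcher.flush()          # -> [record_1, record_2], and the batcher is drained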

    @abstractmethod
    def transform_into_events(self, records: list[dict], shard_id) -> list[dict]:
        pass

    @property
    @abstractmethod
    def stream_parameters(self) -> dict:
        pass

    @abstractmethod
    def initialize_shards(self) -> dict[str, str]:
        """Returns a shard dict mapping from shard id -> shard iterator
        The implementations for Kinesis and DynamoDB are similar but differ in various ways:
        * Kinesis uses "StreamARN" and DynamoDB uses "StreamArn" as source parameter
        * Kinesis uses "StreamStatus.ACTIVE" and DynamoDB uses "StreamStatus.ENABLED"
        * Only Kinesis supports the additional StartingPosition called "AT_TIMESTAMP" using "StartingPositionTimestamp"
        """
        pass
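
    # A minimal sketch of what a Kinesis-flavored implementation of initialize_shards
    # might look like (hypothetical and simplified, for illustration only; the real
    # subclasses also honor StartingPosition and wait for the stream to become ACTIVE):
    #
    #   def initialize_shards(self) -> dict[str, str]:
    #       stream = self.source_client.describe_stream(StreamARN=self.source_arn)
    #       shards = {}
    #       for shard in stream["StreamDescription"]["Shards"]:
    #           response = self.source_client.get_shard_iterator(
    #               StreamARN=self.source_arn,
    #               ShardId=shard["ShardId"],
    #               ShardIteratorType="TRIM_HORIZON",
    #           )
    #           shards[shard["ShardId"]] = response["ShardIterator"]
    #       return shards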

    @abstractmethod
    def stream_arn_param(self) -> dict:
        """Returns a dict of the correct key/value pair for the stream arn used in GetRecords.
        Either StreamARN for Kinesis or {} for DynamoDB (unsupported)"""
        pass

    @abstractmethod
    def failure_payload_details_field_name(self) -> str:
        pass

    @abstractmethod
    def get_approximate_arrival_time(self, record: dict) -> float:
        pass

    @abstractmethod
    def format_datetime(self, time: datetime) -> str:
        """Formats a datetime in the correct format for DynamoDB (with ms) or Kinesis (without ms)"""
        pass

    @abstractmethod
    def get_sequence_number(self, record: dict) -> str:
        pass

    def close(self):
        self._is_shutdown.set()

    def pre_filter(self, events: list[dict]) -> list[dict]:
        return events

    def post_filter(self, events: list[dict]) -> list[dict]:
        return events

    def poll_events(self):
        """Generalized poller for streams such as Kinesis or DynamoDB
        Examples of Kinesis consumers:
        * StackOverflow: https://stackoverflow.com/a/22403036/6875981
        * AWS Sample: https://github.com/aws-samples/kinesis-poster-worker/blob/master/worker.py
        Examples of DynamoDB consumers:
        * Blogpost: https://www.tecracer.com/blog/2022/05/getting-a-near-real-time-view-of-a-dynamodb-stream-with-python.html
        """
        # TODO: consider potential shard iterator timeout after 300 seconds (likely not relevant with short-polling):
        #   https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html#shard-iterator-expires-unexpectedly
        #  Does this happen if no records are received for 300 seconds?
        if not self.shards:
            self.shards = self.initialize_shards()

        if not self.shards:
            LOG.debug("No shards found for %s.", self.source_arn)
            raise EmptyPollResultsException(service=self.event_source(), source_arn=self.source_arn)
        else:
            # Remove all shard batchers without corresponding shards
            for shard_id in self.shard_batcher.keys() - self.shards.keys():
                self.shard_batcher.pop(shard_id, None)

        # TODO: improve efficiency because this currently limits the throughput to at most batch size per poll interval
        # Handle shards round-robin. Re-initialize the current shard iterator once all shards are handled.
        if self.iterator_over_shards is None:
            self.iterator_over_shards = iter(self.shards.items())

        current_shard_tuple = next(self.iterator_over_shards, None)
        if not current_shard_tuple:
            self.iterator_over_shards = iter(self.shards.items())
            current_shard_tuple = next(self.iterator_over_shards, None)

        # TODO: better handling for when shards are initialized but the iterator returns nothing
        if not current_shard_tuple:
            raise PipeInternalError(
                "Failed to retrieve any shards for stream polling despite initialization."
            )

        try:
            self.poll_events_from_shard(*current_shard_tuple)
        except PipeInternalError:
            # TODO: standardize logging
            # Ignore and wait for the next polling interval, which will retry
            pass
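
    # Illustrative note (not part of the upstream source): because each poll_events call
    # advances the round-robin iterator by exactly one shard, a stream with two shards is
    # drained on alternating polls, e.g.:
    #
    #   shards = {"shardId-000000000000": iter_a, "shardId-000000000001": iter_b}
    #   poll_events()  # handles shardId-000000000000
    #   poll_events()  # handles shardId-000000000001
    #   poll_events()  # iterator exhausted -> re-created; handles shardId-000000000000 again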

    def poll_events_from_shard(self, shard_id: str, shard_iterator: str):
        get_records_response = self.get_records(shard_iterator)
        records: list[dict] = get_records_response.get("Records", [])
        if not (next_shard_iterator := get_records_response.get("NextShardIterator")):
            # If the next shard iterator is None, we can assume the shard is closed or
            # has expired on the DynamoDB Local server, hence we should re-initialize.
            self.shards = self.initialize_shards()

        # We cannot reliably back off when no records are found since an iterator
        # may have to move multiple times until records are returned.
        # See https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html#getrecords-returns-empty
        # However, we still need to check whether the batcher should be triggered due to time-based batching.
        should_flush = self.shard_batcher[shard_id].add(records)
        if not should_flush:
            self.shards[shard_id] = next_shard_iterator
            return

        # Retrieve and drain all events in the batcher
        collected_records = self.shard_batcher[shard_id].flush()
        # If there is overflow (i.e., a BatchSize of 1k but 1.2k records returned by flush), further split up the batch.
        for batch in batched(collected_records, self.stream_parameters.get("BatchSize")):
            # This could potentially lead to data loss if forward_events_to_target raises an exception after a flush,
            # which would otherwise be solved with checkpointing.
            # TODO: Implement checkpointing, leasing, etc. from https://docs.aws.amazon.com/streams/latest/dev/kcl-concepts.html
            self.forward_events_to_target(shard_id, next_shard_iterator, batch)

    def forward_events_to_target(self, shard_id, next_shard_iterator, records):
        polled_events = self.transform_into_events(records, shard_id)

        abort_condition = None
        # Check MaximumRecordAgeInSeconds
        if maximum_record_age_in_seconds := self.stream_parameters.get("MaximumRecordAgeInSeconds"):
            arrival_timestamp_of_last_event = polled_events[-1]["approximateArrivalTimestamp"]
            now = get_current_time().timestamp()
            record_age_in_seconds = now - arrival_timestamp_of_last_event
            if record_age_in_seconds > maximum_record_age_in_seconds:
                abort_condition = "RecordAgeExpired"

        # TODO: implement format detection behavior (e.g., for JSON body):
        #  https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html
        #  Check whether we need poller-specific filter-preprocessing here without modifying the actual event!
        # Convert to JSON for filtering (HACK for fixing parity with v1 and getting regression tests passing)
        # localstack.services.lambda_.event_source_listeners.kinesis_event_source_listener.KinesisEventSourceListener._filter_records
        # TODO: explore a better abstraction for the entire filtering, including the set_data and get_data remapping
        #  We need to better clarify which transformations happen before and after filtering -> fix missing test coverage
        parsed_events = self.pre_filter(polled_events)
        # TODO: advance the iterator past matching events!
        #  We need to checkpoint the sequence number for each shard and then advance the shard iterator using
        #  GetShardIterator with a given sequence number
        #  https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html
        #  Failing to do so effectively blocks the stream, resulting in very high latency.
        matching_events = self.filter_events(parsed_events)
        matching_events_post_filter = self.post_filter(matching_events)

        # TODO: implement MaximumBatchingWindowInSeconds flush condition (before or after filter?)
        # Don't trigger upon empty events
        if len(matching_events_post_filter) == 0:
            # Update the shard iterator if no records match the filter
            self.shards[shard_id] = next_shard_iterator
            return
        events = self.add_source_metadata(matching_events_post_filter)
        LOG.debug("Polled %d events from %s in shard %s", len(events), self.source_arn, shard_id)
        # TODO: A retry should probably re-trigger fetching the record from the stream again?!
        #  -> This could be tested by setting a high retry number, using a long pipe execution, and a relatively
        #  short record expiration age at the source. Check what happens if the record expires at the source.
        #  A potential implementation could use checkpointing based on the iterator position (within shard scope)
        # TODO: handle partial batch failure (see poller.py:parse_batch_item_failures)
        # TODO: think about how to avoid starvation of other shards if one shard runs into infinite retries
        attempts = 0
        error_payload = {}

        max_retries = self.stream_parameters.get("MaximumRetryAttempts", -1)
        # NOTE: max_retries == 0 means exponential backoff is disabled
        boff = ExponentialBackoff(max_retries=max_retries)
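        # Illustrative note (not part of the upstream source): judging from its usage
        # below, next_backoff() is assumed to return a growing wait duration on each
        # call (the exact base and jitter are defined in localstack.utils.backoff), and
        # reset() is assumed to restart the sequence after a successful invocation.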
        while (
            not abort_condition
            and not self.max_retries_exceeded(attempts)
            and not self._is_shutdown.is_set()
        ):
            try:
                if attempts > 0:
                    # TODO: Should we always back off (with jitter) before processing, since we may not want multiple pollers
                    # all starting up and polling simultaneously?
                    # For example: 500 persisted ESMs starting up and requesting concurrently could flood the gateway
                    self._is_shutdown.wait(boff.next_backoff())

                self.processor.process_events_batch(events)
                boff.reset()

                # Update the shard iterator if the execution is successful
                self.shards[shard_id] = next_shard_iterator
                return
            except PartialBatchFailureError as ex:
                # TODO: add tests for partial batch failure scenarios
                if (
                    self.stream_parameters.get("OnPartialBatchItemFailure")
                    == OnPartialBatchItemFailureStreams.AUTOMATIC_BISECT
                ):
                    # TODO: implement and test splitting batches in half until batch size 1
                    #  https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_PipeSourceKinesisStreamParameters.html
                    LOG.warning(
                        "AUTOMATIC_BISECT upon partial batch item failure is not yet implemented. Retrying the entire batch."
                    )
                error_payload = ex.error

                # Extract all sequence numbers from events in the batch. This allows us to fail the whole batch if
                # an unknown itemIdentifier is returned.
                batch_sequence_numbers = {
                    self.get_sequence_number(event) for event in matching_events
                }

                # If the batchItemFailures array contains multiple items, Lambda uses the record with the lowest sequence number as the checkpoint.
                # Lambda then retries all records starting from that checkpoint.
                failed_sequence_ids: list[int] | None = get_batch_item_failures(
                    ex.partial_failure_payload, batch_sequence_numbers
                )

                # If None is returned, consider the entire batch a failure.
                if failed_sequence_ids is None:
                    continue

                # This shouldn't be possible since a PartialBatchFailureError was raised
                if len(failed_sequence_ids) == 0:
                    assert failed_sequence_ids, (
                        "Invalid state encountered: PartialBatchFailureError raised but no batch item failures found."
                    )

                lowest_sequence_id: str = min(failed_sequence_ids, key=int)

                # Discard all successful events and re-process from the sequence number of the failed event
                _, events = self.bisect_events(lowest_sequence_id, events)
            except (BatchFailureError, Exception) as ex:
                if isinstance(ex, BatchFailureError):
                    error_payload = ex.error

                # FIXME: partner_resource_arn is not defined in ESM
                LOG.debug(
                    "Attempt %d failed while processing %s with events: %s",
                    attempts,
                    self.partner_resource_arn or self.source_arn,
                    events,
                )
            finally:
                # Retry polling until the record expires at the source
                attempts += 1

        # Send failed events to the potential DLQ
        abort_condition = abort_condition or "RetryAttemptsExhausted"
        failure_context = self.processor.generate_event_failure_context(
            abort_condition=abort_condition,
            error=error_payload,
            attempts_count=attempts,
            partner_resource_arn=self.partner_resource_arn,
        )
        self.send_events_to_dlq(shard_id, events, context=failure_context)
        # Update the shard iterator if the execution failed but the events were sent to a DLQ
        self.shards[shard_id] = next_shard_iterator

    def get_records(self, shard_iterator: str) -> dict:
        """Returns a GetRecordsOutput from the GetRecords endpoint of streaming services such as Kinesis or DynamoDB"""
        try:
            get_records_response = self.source_client.get_records(
                # TODO: add a test for the cross-account scenario
                # Differs for Kinesis and DynamoDB but required for the cross-account scenario
                **self.stream_arn_param(),
                ShardIterator=shard_iterator,
                Limit=self.stream_parameters["BatchSize"],
            )
            return get_records_response
        # TODO: test iterator expired with conditional error scenario (requires failure destinations)
        except self.source_client.exceptions.ExpiredIteratorException as e:
            LOG.debug(
                "Shard iterator %s expired for stream %s, re-initializing shards",
                shard_iterator,
                self.source_arn,
            )
            # TODO: test TRIM_HORIZON and AT_TIMESTAMP scenarios for this case. We don't want to start from scratch and
            #  might need to think about checkpointing here.
            self.shards = self.initialize_shards()
            raise PipeInternalError from e
        except ClientError as e:
            if "AccessDeniedException" in str(e):
                LOG.warning(
                    "Insufficient permissions to get records from stream %s: %s",
                    self.source_arn,
                    e,
                )
                raise CustomerInvocationError from e
            elif "ResourceNotFoundException" in str(e):
                # FIXME: The 'Invalid ShardId in ShardIterator' error is returned by DynamoDB-local. Unsure when/why this is returned.
                if "Invalid ShardId in ShardIterator" in str(e):
                    LOG.warning(
                        "Invalid ShardId in ShardIterator for %s. Re-initializing shards.",
                        self.source_arn,
                    )
                    self.shards = self.initialize_shards()
                else:
                    LOG.warning(
                        "Source stream %s does not exist: %s",
                        self.source_arn,
                        e,
                    )
                    raise CustomerInvocationError from e
            elif "TrimmedDataAccessException" in str(e):
                LOG.debug(
                    "Attempted to iterate over a trimmed record or an expired shard iterator %s for stream %s, re-initializing shards",
                    shard_iterator,
                    self.source_arn,
                )
                self.shards = self.initialize_shards()
            else:
                LOG.debug("ClientError during get_records for stream %s: %s", self.source_arn, e)
            raise PipeInternalError from e

    def send_events_to_dlq(self, shard_id, events, context) -> None:
        dlq_arn = self.stream_parameters.get("DeadLetterConfig", {}).get("Arn")
        if dlq_arn:
            failure_timestamp = get_current_time()
            dlq_event = self.create_dlq_event(shard_id, events, context, failure_timestamp)
            # Send the DLQ event to the DLQ target
            parsed_arn = parse_arn(dlq_arn)
            service = parsed_arn["service"]
            # TODO: use a sender instance here, likely injected via DI into the poller (what if it updates?)
            if service == "sqs":
                # TODO: inject and cache the SQS client using a proper IAM role (supports cross-account operations)
                sqs_client = get_internal_client(dlq_arn)
                # TODO: check if the DLQ exists
                dlq_url = get_queue_url(dlq_arn)
                # TODO: validate that the queue is not a FIFO queue because they are unsupported
                sqs_client.send_message(QueueUrl=dlq_url, MessageBody=json.dumps(dlq_event))
            elif service == "sns":
                sns_client = get_internal_client(dlq_arn)
                sns_client.publish(TopicArn=dlq_arn, Message=json.dumps(dlq_event))
            elif service == "s3":
                s3_client = get_internal_client(dlq_arn)
                dlq_event_with_payload = {
                    **dlq_event,
                    "payload": {
                        "Records": events,
                    },
                }
                s3_client.put_object(
                    Bucket=s3_bucket_name(dlq_arn),
                    Key=get_failure_s3_object_key(self.esm_uuid, shard_id, failure_timestamp),
                    Body=json.dumps(dlq_event_with_payload),
                )
            else:
                LOG.warning("Unsupported DLQ service %s", service)

    def create_dlq_event(
        self, shard_id: str, events: list[dict], context: dict, failure_timestamp: datetime
    ) -> dict:
        first_record = events[0]
        first_record_arrival = get_datetime_from_timestamp(
            self.get_approximate_arrival_time(first_record)
        )

        last_record = events[-1]
        last_record_arrival = get_datetime_from_timestamp(
            self.get_approximate_arrival_time(last_record)
        )
        return {
            **context,
            self.failure_payload_details_field_name(): {
                "approximateArrivalOfFirstRecord": self.format_datetime(first_record_arrival),
                "approximateArrivalOfLastRecord": self.format_datetime(last_record_arrival),
                "batchSize": len(events),
                "endSequenceNumber": self.get_sequence_number(last_record),
                "shardId": shard_id,
                "startSequenceNumber": self.get_sequence_number(first_record),
                "streamArn": self.source_arn,
            },
            "timestamp": failure_timestamp.isoformat(timespec="milliseconds").replace(
                "+00:00", "Z"
            ),
            "version": "1.0",
        }
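
    # Illustrative note (not part of the upstream source): for a Kinesis source, where
    # failure_payload_details_field_name() presumably returns "KinesisBatchInfo", the
    # resulting DLQ event would look roughly like:
    #
    #   {
    #       ...fields from the failure context...,
    #       "KinesisBatchInfo": {
    #           "approximateArrivalOfFirstRecord": "2025-04-07T07:41:00Z",
    #           "approximateArrivalOfLastRecord": "2025-04-07T07:41:05Z",
    #           "batchSize": 10,
    #           "endSequenceNumber": "49640...",
    #           "shardId": "shardId-000000000000",
    #           "startSequenceNumber": "49630...",
    #           "streamArn": "arn:aws:kinesis:us-east-1:111111111111:stream/my-stream",
    #       },
    #       "timestamp": "2025-04-07T07:41:06.123Z",
    #       "version": "1.0",
    #   }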

    def max_retries_exceeded(self, attempts: int) -> bool:
        maximum_retry_attempts = self.stream_parameters.get("MaximumRetryAttempts", -1)
        # Infinite retries until the source expires
        if maximum_retry_attempts == -1:
            return False
        return attempts > maximum_retry_attempts

    def bisect_events(
        self, sequence_number: str, events: list[dict]
    ) -> tuple[list[dict], list[dict]]:
        """Splits a list of events in two at the event whose sequence number equals the passed `sequence_number`.
        This is used for:
          - `ReportBatchItemFailures`: Discarding events in a batch following a failure when it is set.
          - `BisectBatchOnFunctionError`: Used to split a failed batch in two when doing a retry (not implemented)."""
        for i, event in enumerate(events):
            if self.get_sequence_number(event) == sequence_number:
                return events[:i], events[i:]

        return events, []
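
    # Illustrative note (not part of the upstream source): given events with sequence
    # numbers ["1", "2", "3", "4"] and sequence_number="3", bisect_events returns
    # (["1", "2"], ["3", "4"]); if no event matches, it returns (events, []).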

def get_failure_s3_object_key(esm_uuid: str, shard_id: str, failure_datetime: datetime) -> str:
    """
    From https://docs.aws.amazon.com/lambda/latest/dg/kinesis-on-failure-destination.html:

    The S3 object containing the invocation record uses the following naming convention:
    aws/lambda/<ESM-UUID>/<shardID>/YYYY/MM/DD/YYYY-MM-DDTHH.MM.SS-<Random UUID>

    :return: Key for the S3 object that the invocation failure record will be written to
    """
    timestamp = failure_datetime.strftime("%Y-%m-%dT%H.%M.%S")
    year_month_day = failure_datetime.strftime("%Y/%m/%d")
    random_uuid = long_uid()
    return f"aws/lambda/{esm_uuid}/{shard_id}/{year_month_day}/{timestamp}-{random_uuid}"
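

# Illustrative note (not part of the upstream source): for example, an ESM UUID of
# "96c05c56-d484-41e1-aa95-8bc070bb9477" with shard "shardId-000000000000" failing on
# 2025-04-07 at 07:41:00 UTC would yield a key like:
#   aws/lambda/96c05c56-d484-41e1-aa95-8bc070bb9477/shardId-000000000000/2025/04/07/2025-04-07T07.41.00-<random-uuid>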