
Unleash / unleash-edge, build 17914520369
22 Sep 2025 11:58AM UTC. Coverage: 68.901% (-0.02%) from 68.92%

Pull Request #1175: fix: readded hosting to EdgeInstanceData
Merge 0b359f9c3 into fa44c3fee (via github / web-flow)

24 of 37 new or added lines in 3 files covered (64.86%).
75 existing lines in 3 files now uncovered.
7351 of 10669 relevant lines covered (68.9%).
6987.87 hits per line.

Source File: /crates/unleash-edge-types/src/metrics/instance_data.rs (58.88% of lines covered)
use crate::BuildInfo;
use crate::metrics::{
    BUCKET_SIZE_FEATURES, BUCKET_SIZE_METRICS, ConnectionConsumptionData, ConnectionMetricsType,
    DEFAULT_FEATURES_INTERVAL, DEFAULT_METRICS_INTERVAL, DESIRED_URLS, DataPoint, ENDPOINT_LABEL,
    InstanceTraffic, LatencyMetrics, MAX_BUCKET_INTERVAL, METHOD_LABEL, ProcessMetrics,
    RequestConsumptionData, RequestStats, STATUS_LABEL, UpstreamLatency,
};
use ahash::HashMap;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use prometheus::gather;
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use ulid::Ulid;

pub const CONNECTED_STREAMING_CLIENTS: &str = "connected_streaming_clients";

#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct EdgeInstanceData {
    pub identifier: String,
    pub app_name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hosting: Option<Hosting>,
    pub region: Option<String>,
    pub edge_version: String,
    pub process_metrics: Option<ProcessMetrics>,
    pub started: DateTime<Utc>,
    pub traffic: InstanceTraffic,
    pub latency_upstream: UpstreamLatency,
    pub requests_since_last_report: DashMap<String, RequestStats>,
    pub connected_streaming_clients: u64,
    pub connected_edges: Vec<EdgeInstanceData>,
    pub connection_consumption_since_last_report: ConnectionConsumptionData,
    pub request_consumption_since_last_report: RequestConsumptionData,
}
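
// Editor's sketch (not part of the original file): with `rename_all = "camelCase"` and
// `skip_serializing_if = "Option::is_none"`, an instance serializes roughly as
//
//     { "identifier": "...", "appName": "...", "hosting": "self-hosted", ... }
//
// and the `hosting` key is omitted entirely when the field is `None`, which is what the
// `serializes_hosting_if_and_only_if_present` test below asserts.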

#[derive(Debug, Copy, Clone, Deserialize, Serialize, Eq, PartialEq)]
pub enum Hosting {
    #[serde(rename = "self-hosted")]
    SelfHosted,
    #[serde(rename = "hosted")]
    Hosted,
}

// Coverage note: the lines of this `From<String>` impl are marked as new and uncovered in this report.
impl From<String> for Hosting {
    fn from(value: String) -> Self {
        match value.to_lowercase().as_str() {
            "hosted" => Hosting::Hosted,
            _ => Hosting::SelfHosted,
        }
    }
}
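
// Editor's sketch (not part of the original file): the conversion above lower-cases its
// input and falls back to `SelfHosted` for anything other than "hosted", so for example:
//
//     assert_eq!(Hosting::from("HOSTED".to_string()), Hosting::Hosted);
//     assert_eq!(Hosting::from("on-prem".to_string()), Hosting::SelfHosted);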

impl EdgeInstanceData {
    pub fn new(app_name: &str, identifier: &Ulid, hosting: Option<Hosting>) -> Self {
        let build_info = BuildInfo::default();
        Self {
            identifier: identifier.to_string(),
            hosting,
            app_name: app_name.to_string(),
            region: std::env::var("AWS_REGION").ok(),
            edge_version: build_info.package_version.clone(),
            process_metrics: None,
            started: Utc::now(),
            traffic: InstanceTraffic::default(),
            latency_upstream: UpstreamLatency::default(),
            connected_edges: vec![],
            connected_streaming_clients: 0,
            requests_since_last_report: DashMap::default(),
            connection_consumption_since_last_report: ConnectionConsumptionData::default(),
            request_consumption_since_last_report: RequestConsumptionData {
                metered_groups: DashMap::new(),
            },
        }
    }

    pub fn clear_time_windowed_metrics(&self) {
        self.requests_since_last_report.clear();
        self.connection_consumption_since_last_report.reset();
        self.request_consumption_since_last_report.reset();
    }

    pub fn observe_request_consumption(&self) {
        self.request_consumption_since_last_report
            .increment_requests("default");
    }

    // Coverage note: this method is reported as uncovered.
    pub fn observe_request(&self, http_target: &str, status_code: u16) {
        match status_code {
            200 | 202 | 204 => {
                self.requests_since_last_report
                    .entry(http_target.to_string())
                    .or_default()
                    .requests_200
                    .fetch_add(1, Ordering::SeqCst);
            }
            304 => {
                self.requests_since_last_report
                    .entry(http_target.to_string())
                    .or_default()
                    .requests_304
                    .fetch_add(1, Ordering::SeqCst);
            }
            _ => {}
        }
    }

    pub fn get_interval_bucket(endpoint: &str, interval_ms: Option<u64>) -> std::ops::Range<u64> {
        if endpoint.is_empty() {
            return 0..DEFAULT_FEATURES_INTERVAL;
        }

        let interval = interval_ms.unwrap_or(if endpoint.ends_with("/metrics") {
            DEFAULT_METRICS_INTERVAL
        } else {
            DEFAULT_FEATURES_INTERVAL
        });

        // For intervals greater than 1 hour, use [1h, 1h] range
        if interval > MAX_BUCKET_INTERVAL {
            return MAX_BUCKET_INTERVAL..MAX_BUCKET_INTERVAL;
        }

        if endpoint.ends_with("/metrics") {
            Self::get_metrics_bucket(interval)
        } else {
            Self::get_features_bucket(interval)
        }
    }

    fn get_metrics_bucket(interval: u64) -> std::ops::Range<u64> {
        if interval <= DEFAULT_METRICS_INTERVAL {
            0..DEFAULT_METRICS_INTERVAL
        } else {
            let bucket_start = (interval / BUCKET_SIZE_METRICS) * BUCKET_SIZE_METRICS;
            bucket_start..(bucket_start + BUCKET_SIZE_METRICS)
        }
    }

    fn get_features_bucket(interval: u64) -> std::ops::Range<u64> {
        if interval <= DEFAULT_FEATURES_INTERVAL {
            0..DEFAULT_FEATURES_INTERVAL
        } else {
            let bucket_start = ((interval - DEFAULT_FEATURES_INTERVAL) / BUCKET_SIZE_FEATURES)
                * BUCKET_SIZE_FEATURES
                + DEFAULT_FEATURES_INTERVAL;
            bucket_start..(bucket_start + BUCKET_SIZE_FEATURES)
        }
    }
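
    // Worked example (editor's note; the concrete constants are inferred from the tests
    // below, which imply a 15000 ms features default with 5000 ms buckets and a 60000 ms
    // metrics default with 60000 ms buckets):
    //
    //   features, interval = 17500 ms: 17500 > 15000, so
    //     bucket_start = ((17500 - 15000) / 5000) * 5000 + 15000 = 15000  ->  15000..20000
    //   metrics, interval = 90000 ms: 90000 > 60000, so
    //     bucket_start = (90000 / 60000) * 60000 = 60000                  ->  60000..120000
    //
    // which matches the boundaries asserted in `test_bucket_boundaries`.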

    pub fn observe_connection_consumption(&self, endpoint: &str, interval: Option<u64>) {
        let bucket = Self::get_interval_bucket(endpoint, interval);
        if let Some(metrics_type) = ConnectionMetricsType::from_endpoint(endpoint) {
            match metrics_type {
                ConnectionMetricsType::Features => {
                    self.connection_consumption_since_last_report
                        .features_map
                        .entry([bucket.start, bucket.end])
                        .or_insert_with(|| DataPoint {
                            interval: [bucket.start, bucket.end],
                            requests: AtomicU64::new(0),
                        })
                        .requests
                        .fetch_add(1, Ordering::SeqCst);
                }
                ConnectionMetricsType::Metrics => {
                    self.connection_consumption_since_last_report
                        .metrics_map
                        .entry([bucket.start, bucket.end])
                        .or_insert_with(|| DataPoint {
                            interval: [bucket.start, bucket.end],
                            requests: AtomicU64::new(0),
                        })
                        .requests
                        .fetch_add(1, Ordering::SeqCst);
                }
            }
        }
    }

    // Coverage note: this method is reported as uncovered.
    pub fn observe(&self, connected_instances: Vec<EdgeInstanceData>, base_path: &str) -> Self {
        let mut observed = self.clone();
        let mut cpu_seconds = 0;
        let mut resident_memory = 0;
        let mut get_requests = HashMap::default();
        let mut post_requests = HashMap::default();
        let mut access_denied = HashMap::default();
        let mut no_change = HashMap::default();

        for family in gather().iter() {
            match family.name() {
                crate::metrics::HTTP_REQUESTS_DURATION => {
                    family
                        .get_metric()
                        .iter()
                        .filter(|m| {
                            m.get_label().iter().any(|l| {
                                l.name() == ENDPOINT_LABEL
                                    && DESIRED_URLS
                                        .iter()
                                        .any(|desired| l.value().ends_with(desired))
                            }) && m.get_label().iter().any(|l| {
                                l.name() == STATUS_LABEL
                                    && (l.value() == "200"
                                        || l.value() == "202"
                                        || l.value() == "304"
                                        || l.value() == "403")
                            })
                        })
                        .for_each(|m| {
                            let labels = m.get_label();
                            let path = labels
                                .iter()
                                .find(|l| l.name() == ENDPOINT_LABEL)
                                .unwrap()
                                .value()
                                .strip_prefix(base_path)
                                .unwrap();
                            let method = labels
                                .iter()
                                .find(|l| l.name() == METHOD_LABEL)
                                .unwrap()
                                .value();
                            let status = labels
                                .iter()
                                .find(|l| l.name() == STATUS_LABEL)
                                .unwrap()
                                .value();
                            let latency = match status {
                                "200" | "202" => {
                                    if method == "GET" {
                                        get_requests
                                            .entry(path.to_string())
                                            .or_insert(LatencyMetrics::default())
                                    } else {
                                        post_requests
                                            .entry(path.to_string())
                                            .or_insert(LatencyMetrics::default())
                                    }
                                }
                                "304" => no_change
                                    .entry(path.to_string())
                                    .or_insert(LatencyMetrics::default()),
                                _ => access_denied
                                    .entry(path.to_string())
                                    .or_insert(LatencyMetrics::default()),
                            };
                            let total = m.get_histogram().get_sample_sum(); // already in ms
                            let count = m.get_histogram().get_sample_count() as f64;
                            let p99 = get_percentile(
                                99,
                                m.get_histogram().get_sample_count(),
                                m.get_histogram().get_bucket(),
                            );
                            *latency = LatencyMetrics {
                                avg: if count == 0.0 {
                                    0.0
                                } else {
                                    round_to_3_decimals(total / count)
                                },
                                count,
                                p99,
                            };
                        });
                }
                "process_cpu_seconds_total" => {
                    if let Some(cpu_second_metric) = family.get_metric().last() {
                        cpu_seconds = cpu_second_metric.get_counter().value() as u64;
                    }
                }
                "process_resident_memory_bytes" => {
                    if let Some(resident_memory_metric) = family.get_metric().last() {
                        resident_memory = resident_memory_metric.get_gauge().value() as u64;
                    }
                }
                "client_metrics_upload" => {
                    if let Some(metrics_upload_metric) = family.get_metric().last() {
                        let count = metrics_upload_metric.get_histogram().get_sample_count();
                        let p99 = get_percentile(
                            99,
                            count,
                            metrics_upload_metric.get_histogram().get_bucket(),
                        );
                        observed.latency_upstream.metrics = LatencyMetrics {
                            avg: round_to_3_decimals(
                                metrics_upload_metric.get_histogram().get_sample_sum()
                                    / count as f64,
                            ),
                            count: count as f64,
                            p99,
                        }
                    }
                }
                "instance_data_upload" => {
                    if let Some(instance_data_upload_metric) = family.get_metric().last() {
                        let count = instance_data_upload_metric
                            .get_histogram()
                            .get_sample_count();
                        let p99 = get_percentile(
                            99,
                            count,
                            instance_data_upload_metric.get_histogram().get_bucket(),
                        );
                        observed.latency_upstream.edge = LatencyMetrics {
                            avg: round_to_3_decimals(
                                instance_data_upload_metric.get_histogram().get_sample_sum()
                                    / count as f64,
                            ),
                            count: count as f64,
                            p99,
                        }
                    }
                }
                "client_feature_fetch" => {
                    if let Some(feature_fetch_metric) = family.get_metric().last() {
                        let count = feature_fetch_metric.get_histogram().get_sample_count();
                        let p99 = get_percentile(
                            99,
                            count,
                            feature_fetch_metric.get_histogram().get_bucket(),
                        );
                        observed.latency_upstream.features = LatencyMetrics {
                            avg: round_to_3_decimals(
                                feature_fetch_metric.get_histogram().get_sample_sum()
                                    / count as f64,
                            ),
                            count: count as f64,
                            p99,
                        }
                    }
                }
                CONNECTED_STREAMING_CLIENTS => {
                    if let Some(connected_streaming_clients) = family.get_metric().last() {
                        observed.connected_streaming_clients =
                            connected_streaming_clients.get_gauge().value() as u64;
                    }
                }
                _ => {}
            }
        }
        observed.traffic = InstanceTraffic {
            get: get_requests,
            post: post_requests,
            access_denied,
            cached_responses: no_change,
        };
        observed.process_metrics = Some(ProcessMetrics {
            cpu_usage: cpu_seconds as f64,
            memory_usage: resident_memory as f64,
        });
        for connected_instance in connected_instances {
            observed.connected_edges.push(connected_instance.clone());
        }
        observed
    }
}
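
// Usage sketch (editor's note, not from the original file): `observe` clones the current
// instance data and folds in values from the global Prometheus registry via
// `prometheus::gather()`, so a periodic reporting task might call something like
//
//     let snapshot = instance_data.observe(downstream_edges, "");
//
// where `downstream_edges: Vec<EdgeInstanceData>` and the empty base path are
// illustrative placeholders, not names used by this crate.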

fn get_percentile(percentile: u64, count: u64, buckets: &[prometheus::proto::Bucket]) -> f64 {
    let target = (percentile as f64 / 100.0) * count as f64;
    let mut previous_upper_bound = 0.0;
    let mut previous_count = 0;
    for bucket in buckets {
        if bucket.cumulative_count() as f64 >= target {
            let nth_count = bucket.cumulative_count() - previous_count;
            let observation_in_range = target - previous_count as f64;
            return round_to_3_decimals(
                previous_upper_bound
                    + ((observation_in_range / nth_count as f64)
                        * (bucket.upper_bound() - previous_upper_bound)),
            );
        }
        previous_upper_bound = bucket.upper_bound();
        previous_count = bucket.cumulative_count();
    }
    0.0
}
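
// Worked example (editor's note), matching `can_find_p99_of_a_range` below:
// with count = 5000 and percentile = 99, target = 0.99 * 5000 = 4950. The first bucket
// whose cumulative count reaches 4950 is the 50 ms bucket (cumulative 5000), so
// nth_count = 5000 - 4000 = 1000 and observation_in_range = 4950 - 4000 = 950,
// giving 20.0 + (950 / 1000) * (50.0 - 20.0) = 48.5.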

fn round_to_3_decimals(number: f64) -> f64 {
    (number * 1000.0).round() / 1000.0
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    pub fn can_find_p99_of_a_range() {
        let mut one_ms = prometheus::proto::Bucket::new();
        one_ms.set_cumulative_count(1000);
        one_ms.set_upper_bound(1.0);
        let mut five_ms = prometheus::proto::Bucket::new();
        five_ms.set_cumulative_count(2000);
        five_ms.set_upper_bound(5.0);
        let mut ten_ms = prometheus::proto::Bucket::new();
        ten_ms.set_cumulative_count(3000);
        ten_ms.set_upper_bound(10.0);
        let mut twenty_ms = prometheus::proto::Bucket::new();
        twenty_ms.set_cumulative_count(4000);
        twenty_ms.set_upper_bound(20.0);
        let mut fifty_ms = prometheus::proto::Bucket::new();
        fifty_ms.set_cumulative_count(5000);
        fifty_ms.set_upper_bound(50.0);
        let buckets = vec![one_ms, five_ms, ten_ms, twenty_ms, fifty_ms];
        let result = get_percentile(99, 5000, &buckets);
        assert_eq!(result, 48.5);
    }

    #[test]
    pub fn can_find_p50_of_a_range() {
        let mut one_ms = prometheus::proto::Bucket::new();
        one_ms.set_cumulative_count(1000);
        one_ms.set_upper_bound(1.0);
        let mut five_ms = prometheus::proto::Bucket::new();
        five_ms.set_cumulative_count(2000);
        five_ms.set_upper_bound(5.0);
        let mut ten_ms = prometheus::proto::Bucket::new();
        ten_ms.set_cumulative_count(3000);
        ten_ms.set_upper_bound(10.0);
        let mut twenty_ms = prometheus::proto::Bucket::new();
        twenty_ms.set_cumulative_count(4000);
        twenty_ms.set_upper_bound(20.0);
        let mut fifty_ms = prometheus::proto::Bucket::new();
        fifty_ms.set_cumulative_count(5000);
        fifty_ms.set_upper_bound(50.0);
        let buckets = vec![one_ms, five_ms, ten_ms, twenty_ms, fifty_ms];
        let result = get_percentile(50, 5000, &buckets);
        assert_eq!(result, 7.5);
    }

    #[test]
    fn can_observe_request_consumption_and_clear_consumption_metrics() {
        let instance_data = EdgeInstanceData::new("test", &Ulid::new(), None);

        instance_data.observe_request_consumption();
        instance_data.observe_request_consumption();
        instance_data.observe_request_consumption();
        instance_data.observe_request_consumption();

        let serialized = serde_json::to_value(&instance_data).unwrap();
        assert_eq!(
            serialized["requestConsumptionSinceLastReport"],
            serde_json::json!([
                {
                    "meteredGroup": "default",
                    "requests": 4
                }
            ])
        );

        instance_data.clear_time_windowed_metrics();

        let serialized_cleared = serde_json::to_value(&instance_data).unwrap();
        assert_eq!(
            serialized_cleared["requestConsumptionSinceLastReport"],
            serde_json::json!([
                {
                    "meteredGroup": "default",
                    "requests": 0
                }
            ])
        );
    }

    #[test]
    fn can_observe_connection_consumption_with_data_points() {
        let instance_data = EdgeInstanceData::new("test", &Ulid::new(), None);

        instance_data.observe_connection_consumption("/api/client/features", Some(0));
        instance_data.observe_connection_consumption("/api/client/features", Some(0));
        instance_data.observe_connection_consumption("/api/client/features", Some(15001));

        instance_data.observe_connection_consumption("/api/client/metrics", Some(0));
        instance_data.observe_connection_consumption("/api/client/metrics", Some(0));
        instance_data.observe_connection_consumption("/api/client/metrics", Some(60001));

        let serialized = serde_json::to_value(&instance_data).unwrap();
        let connection_data = &serialized["connectionConsumptionSinceLastReport"];

        let actual_features = connection_data["features"][0].clone();
        let actual_metrics = connection_data["metrics"][0].clone();

        let features_data_points = actual_features["dataPoints"].as_array().unwrap();
        assert_eq!(features_data_points.len(), 2);
        assert!(features_data_points.iter().any(|data_point| {
            data_point["interval"] == serde_json::json!([0, 15000]) && data_point["requests"] == 2
        }));
        assert!(features_data_points.iter().any(|data_point| {
            data_point["interval"] == serde_json::json!([15000, 20000])
                && data_point["requests"] == 1
        }));

        let metrics_data_points = actual_metrics["dataPoints"].as_array().unwrap();
        assert_eq!(metrics_data_points.len(), 2);
        assert!(metrics_data_points.iter().any(|data_point| {
            data_point["interval"] == serde_json::json!([0, 60000]) && data_point["requests"] == 2
        }));
        assert!(metrics_data_points.iter().any(|data_point| {
            data_point["interval"] == serde_json::json!([60000, 120000])
                && data_point["requests"] == 1
        }));
    }

    #[test]
    fn test_bucket_boundaries() {
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", None),
            0..15000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(0)),
            0..15000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(14999)),
            0..15000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(15000)),
            0..15000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(15001)),
            15000..20000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(19999)),
            15000..20000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(20000)),
            20000..25000
        );

        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", None),
            0..60000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(0)),
            0..60000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(59999)),
            0..60000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(60000)),
            0..60000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(60001)),
            60000..120000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(119999)),
            60000..120000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(120000)),
            120000..180000
        );

        // Test intervals greater than 1 hour (3600000 ms)
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(3600001)),
            3600000..3600000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(3600001)),
            3600000..3600000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/features", Some(7200000)),
            3600000..3600000
        );
        assert_eq!(
            EdgeInstanceData::get_interval_bucket("/api/client/metrics", Some(7200000)),
            3600000..3600000
        );
    }

    #[test]
    fn test_endpoint_matching() {
        assert_eq!(
            ConnectionMetricsType::from_endpoint("/api/client/features"),
            Some(ConnectionMetricsType::Features)
        );
        assert_eq!(
            ConnectionMetricsType::from_endpoint("/api/client/delta"),
            Some(ConnectionMetricsType::Features)
        );
        assert_eq!(
            ConnectionMetricsType::from_endpoint("/api/client/metrics"),
            Some(ConnectionMetricsType::Metrics)
        );
        assert_eq!(
            ConnectionMetricsType::from_endpoint("/api/client/metrics/bulk"),
            Some(ConnectionMetricsType::Metrics)
        );
        assert_eq!(
            ConnectionMetricsType::from_endpoint("/api/client/metrics/edge"),
            Some(ConnectionMetricsType::Metrics)
        );
        assert_eq!(
            ConnectionMetricsType::from_endpoint("/api/client/other"),
            None
        );
    }

    #[test]
    fn serializes_hosting_if_and_only_if_present() {
        let self_hosted = EdgeInstanceData::new("test", &Ulid::new(), Some(Hosting::SelfHosted));
        let hosted = EdgeInstanceData::new("test", &Ulid::new(), Some(Hosting::Hosted));
        let no_data = EdgeInstanceData::new("test", &Ulid::new(), None);

        let serialized_self_hosted = serde_json::to_value(&self_hosted).unwrap();
        assert_eq!(serialized_self_hosted["hosting"], "self-hosted");

        let serialized_hosted = serde_json::to_value(&hosted).unwrap();
        assert_eq!(serialized_hosted["hosting"], "hosted");

        let serialized_no_data = serde_json::to_value(&no_data).unwrap();

        if let Some(map) = serialized_no_data.as_object() {
            assert!(!map.contains_key("hosting"));
        } else {
            panic!("Expected JSON value to be an object");
        }
    }
}