• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

sapcc / limes / 14831633718

05 May 2025 07:48AM UTC coverage: 78.206% (+1.0%) from 77.223%
14831633718

Pull #708

github

Varsius
replace resource data types by liquid reports
Pull Request #708: replace resource data types by liquid reports

130 of 163 new or added lines in 6 files covered. (79.75%)

3 existing lines in 2 files now uncovered.

6355 of 8126 relevant lines covered (78.21%)

46.78 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

83.59
/internal/collector/metrics.go
1
/*******************************************************************************
2
*
3
* Copyright 2017-2020 SAP SE
4
*
5
* Licensed under the Apache License, Version 2.0 (the "License");
6
* you may not use this file except in compliance with the License.
7
* You should have received a copy of the License along with this
8
* program. If not, you may obtain a copy of the License at
9
*
10
*     http://www.apache.org/licenses/LICENSE-2.0
11
*
12
* Unless required by applicable law or agreed to in writing, software
13
* distributed under the License is distributed on an "AS IS" BASIS,
14
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
* See the License for the specific language governing permissions and
16
* limitations under the License.
17
*
18
*******************************************************************************/
19

20
package collector
21

22
import (
23
        "bufio"
24
        "database/sql"
25
        "encoding/json"
26
        "fmt"
27
        "io"
28
        "math/big"
29
        "net/http"
30
        "slices"
31
        "strings"
32
        "time"
33

34
        "github.com/go-gorp/gorp/v3"
35
        "github.com/prometheus/client_golang/prometheus"
36
        "github.com/sapcc/go-api-declarations/liquid"
37
        "github.com/sapcc/go-bits/logg"
38
        "github.com/sapcc/go-bits/respondwith"
39
        "github.com/sapcc/go-bits/sqlext"
40

41
        "github.com/sapcc/limes/internal/core"
42
        "github.com/sapcc/limes/internal/db"
43
)
44

45
////////////////////////////////////////////////////////////////////////////////
46
// scraped_at aggregate metrics
47

48
// TODO: When merging these metrics together while merging the resource
49
// scraping and rate scraping loops, please also get rid of the duplicate label
50
// `service_name` that is currently retained for backwards compatibility (which
51
// is to say, I didn't care to assess the impact of removing it yet).
52
var minScrapedAtGauge = prometheus.NewGaugeVec(
53
        prometheus.GaugeOpts{
54
                Name: "limes_oldest_scraped_at",
55
                Help: "Oldest (i.e. smallest) scraped_at timestamp for any project given a certain service in a certain OpenStack cluster.",
56
        },
57
        []string{"service", "service_name"},
58
)
59

60
var maxScrapedAtGauge = prometheus.NewGaugeVec(
61
        prometheus.GaugeOpts{
62
                Name: "limes_newest_scraped_at",
63
                Help: "Newest (i.e. largest) scraped_at timestamp for any project given a certain service in a certain OpenStack cluster.",
64
        },
65
        []string{"service", "service_name"},
66
)
67

68
var minRatesScrapedAtGauge = prometheus.NewGaugeVec(
69
        prometheus.GaugeOpts{
70
                Name: "limes_oldest_rates_scraped_at",
71
                Help: "Oldest (i.e. smallest) rates_scraped_at timestamp for any project given a certain service in a certain OpenStack cluster.",
72
        },
73
        []string{"service", "service_name"},
74
)
75

76
var maxRatesScrapedAtGauge = prometheus.NewGaugeVec(
77
        prometheus.GaugeOpts{
78
                Name: "limes_newest_rates_scraped_at",
79
                Help: "Newest (i.e. largest) rates_scraped_at timestamp for any project given a certain service in a certain OpenStack cluster.",
80
        },
81
        []string{"service", "service_name"},
82
)
83

84
// AggregateMetricsCollector is a prometheus.Collector that submits
// dynamically-calculated aggregate metrics about scraping progress.
type AggregateMetricsCollector struct {
	Cluster *core.Cluster // used to look up the quota plugin for each service type
	DB      *gorp.DbMap   // used to run the scraped_at aggregation query
}
90

91
// Describe implements the prometheus.Collector interface.
92
func (c *AggregateMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
1✔
93
        minScrapedAtGauge.Describe(ch)
1✔
94
        maxScrapedAtGauge.Describe(ch)
1✔
95
        minRatesScrapedAtGauge.Describe(ch)
1✔
96
        maxRatesScrapedAtGauge.Describe(ch)
1✔
97
}
1✔
98

99
// scrapedAtAggregateQuery computes, per service type, the oldest and newest
// (rates_)scraped_at timestamps over all project services that have been
// scraped at least once.
var scrapedAtAggregateQuery = sqlext.SimplifyWhitespace(`
	SELECT type, MIN(scraped_at), MAX(scraped_at), MIN(rates_scraped_at), MAX(rates_scraped_at)
	  FROM project_services
	 WHERE scraped_at IS NOT NULL
	 GROUP BY type
`)
105

106
// Collect implements the prometheus.Collector interface.
//
// For each service type that has scraped project services in the DB, it emits
// the min/max scraped_at timestamps (for services with resources) and the
// min/max rates_scraped_at timestamps (for services with rates).
func (c *AggregateMetricsCollector) Collect(ch chan<- prometheus.Metric) {
	//NOTE: I use NewConstMetric() instead of storing the values in the GaugeVec
	// instances because it is faster.

	// The GaugeVec instances are only used to obtain their descriptors; the
	// buffered channel of size 1 lets us read back each single Desc synchronously.
	descCh := make(chan *prometheus.Desc, 1)
	minScrapedAtGauge.Describe(descCh)
	minScrapedAtDesc := <-descCh
	maxScrapedAtGauge.Describe(descCh)
	maxScrapedAtDesc := <-descCh
	minRatesScrapedAtGauge.Describe(descCh)
	minRatesScrapedAtDesc := <-descCh
	maxRatesScrapedAtGauge.Describe(descCh)
	maxRatesScrapedAtDesc := <-descCh

	err := sqlext.ForeachRow(c.DB, scrapedAtAggregateQuery, nil, func(rows *sql.Rows) error {
		var (
			serviceType       db.ServiceType
			minScrapedAt      *time.Time // pointers because the rates_scraped_at aggregates can be NULL
			maxScrapedAt      *time.Time
			minRatesScrapedAt *time.Time
			maxRatesScrapedAt *time.Time
		)
		err := rows.Scan(&serviceType, &minScrapedAt, &maxScrapedAt, &minRatesScrapedAt, &maxRatesScrapedAt)
		if err != nil {
			return err
		}

		// skip service types that are not configured in this cluster
		plugin := c.Cluster.QuotaPlugins[serviceType]
		if plugin == nil {
			return nil
		}

		// resource-scraping timestamps are only meaningful for services that have resources
		if len(plugin.ServiceInfo().Resources) > 0 {
			// serviceType fills both the "service" and "service_name" labels
			// (see TODO comment on the gauge declarations above)
			ch <- prometheus.MustNewConstMetric(
				minScrapedAtDesc,
				prometheus.GaugeValue, timeAsUnixOrZero(minScrapedAt),
				string(serviceType), string(serviceType),
			)
			ch <- prometheus.MustNewConstMetric(
				maxScrapedAtDesc,
				prometheus.GaugeValue, timeAsUnixOrZero(maxScrapedAt),
				string(serviceType), string(serviceType),
			)
		}
		// rate-scraping timestamps are only meaningful for services that have rates
		if len(plugin.ServiceInfo().Rates) > 0 {
			ch <- prometheus.MustNewConstMetric(
				minRatesScrapedAtDesc,
				prometheus.GaugeValue, timeAsUnixOrZero(minRatesScrapedAt),
				string(serviceType), string(serviceType),
			)
			ch <- prometheus.MustNewConstMetric(
				maxRatesScrapedAtDesc,
				prometheus.GaugeValue, timeAsUnixOrZero(maxRatesScrapedAt),
				string(serviceType), string(serviceType),
			)
		}
		return nil
	})
	if err != nil {
		logg.Error("collect cluster aggregate metrics failed: " + err.Error())
	}
}
169

170
func timeAsUnixOrZero(t *time.Time) float64 {
8✔
171
        if t == nil {
10✔
172
                return 0
2✔
173
        }
2✔
174
        return float64(t.Unix())
6✔
175
}
176

177
////////////////////////////////////////////////////////////////////////////////
178
// capacity plugin metrics
179

180
// capacityPluginMetricsOkGauge is never set directly; it only serves as a
// holder for the descriptor that CapacityPluginMetricsCollector uses when
// emitting const metrics.
var capacityPluginMetricsOkGauge = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "limes_capacity_plugin_metrics_ok",
		Help: "Whether capacity plugin metrics were rendered successfully for a particular capacitor. Only present when the capacitor emits metrics.",
	},
	[]string{"capacitor"},
)
187

188
// CapacityPluginMetricsCollector is a prometheus.Collector that submits metrics
// which are specific to the selected capacity plugins.
type CapacityPluginMetricsCollector struct {
	Cluster *core.Cluster // used to look up capacity plugins by capacitor ID
	DB      *gorp.DbMap   // used to read serialized metrics from the cluster_capacitors table
	// When .Override is set, the DB is bypassed and only the given
	// CapacityPluginMetricsInstances are considered. This is used for testing only.
	Override []CapacityPluginMetricsInstance
}
197

198
// CapacityPluginMetricsInstance describes a single project service for which plugin
// metrics are submitted. It appears in type CapacityPluginMetricsCollector.
type CapacityPluginMetricsInstance struct {
	CapacitorID       string // key into Cluster.CapacityPlugins
	SerializedMetrics string // JSON payload as stored in cluster_capacitors.serialized_metrics
}
204

205
// Describe implements the prometheus.Collector interface.
206
func (c *CapacityPluginMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
1✔
207
        capacityPluginMetricsOkGauge.Describe(ch)
1✔
208
        for _, plugin := range c.Cluster.CapacityPlugins {
2✔
209
                liquidDescribeMetrics(ch, plugin.ServiceInfo().CapacityMetricFamilies, nil)
1✔
210
        }
1✔
211
}
212

213
// capacitySerializedMetricsGetQuery fetches the serialized metrics of every
// capacitor that actually emitted metrics (empty and "{}" payloads are skipped).
var capacitySerializedMetricsGetQuery = sqlext.SimplifyWhitespace(`
	SELECT capacitor_id, serialized_metrics
	  FROM cluster_capacitors
	 WHERE serialized_metrics != '' AND serialized_metrics != '{}'
`)
218

219
// Collect implements the prometheus.Collector interface.
220
func (c *CapacityPluginMetricsCollector) Collect(ch chan<- prometheus.Metric) {
1✔
221
        descCh := make(chan *prometheus.Desc, 1)
1✔
222
        capacityPluginMetricsOkGauge.Describe(descCh)
1✔
223
        pluginMetricsOkDesc := <-descCh
1✔
224

1✔
225
        if c.Override != nil {
1✔
226
                for _, instance := range c.Override {
×
227
                        c.collectOneCapacitor(ch, pluginMetricsOkDesc, instance)
×
228
                }
×
229
                return
×
230
        }
231

232
        err := sqlext.ForeachRow(c.DB, capacitySerializedMetricsGetQuery, nil, func(rows *sql.Rows) error {
2✔
233
                var i CapacityPluginMetricsInstance
1✔
234
                err := rows.Scan(&i.CapacitorID, &i.SerializedMetrics)
1✔
235
                if err == nil {
2✔
236
                        c.collectOneCapacitor(ch, pluginMetricsOkDesc, i)
1✔
237
                }
1✔
238
                return err
1✔
239
        })
240
        if err != nil {
1✔
241
                logg.Error("collect capacity plugin metrics failed: " + err.Error())
×
242
        }
×
243
}
244

245
func (c *CapacityPluginMetricsCollector) collectOneCapacitor(ch chan<- prometheus.Metric, pluginMetricsOkDesc *prometheus.Desc, instance CapacityPluginMetricsInstance) {
1✔
246
        plugin := c.Cluster.CapacityPlugins[instance.CapacitorID]
1✔
247
        if plugin == nil {
1✔
248
                return
×
249
        }
×
250
        err := liquidCollectMetrics(ch, []byte(instance.SerializedMetrics), plugin.ServiceInfo().CapacityMetricFamilies, nil, nil)
1✔
251
        successAsFloat := 1.0
1✔
252
        if err != nil {
1✔
253
                successAsFloat = 0.0
×
NEW
254
                // errors in plugin.LiquidCollectMetrics() are not fatal: we record a failure in
×
255
                // the metrics and keep going with the other project services
×
256
                logg.Error("while collecting capacity metrics for capacitor %s: %s",
×
257
                        instance.CapacitorID, err.Error())
×
258
        }
×
259
        ch <- prometheus.MustNewConstMetric(
1✔
260
                pluginMetricsOkDesc,
1✔
261
                prometheus.GaugeValue, successAsFloat,
1✔
262
                instance.CapacitorID,
1✔
263
        )
1✔
264
}
265

266
////////////////////////////////////////////////////////////////////////////////
267
// quota plugin metrics
268

269
// quotaPluginMetricsOkGauge is never set directly; it only serves as a holder
// for the descriptor that QuotaPluginMetricsCollector uses when emitting
// const metrics.
var quotaPluginMetricsOkGauge = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "limes_plugin_metrics_ok",
		Help: "Whether quota plugin metrics were rendered successfully for a particular project service. Only present when the project service emits metrics.",
	},
	[]string{"domain", "domain_id", "project", "project_id", "service", "service_name"},
)
276

277
// QuotaPluginMetricsCollector is a prometheus.Collector that submits metrics
// which are specific to the selected quota plugins.
type QuotaPluginMetricsCollector struct {
	Cluster *core.Cluster // used to look up quota plugins by service type
	DB      *gorp.DbMap   // used to read serialized metrics from the project_services table
	// When .Override is set, the DB is bypassed and only the given
	// QuotaPluginMetricsInstances are considered. This is used for testing only.
	Override []QuotaPluginMetricsInstance
}
286

287
// QuotaPluginMetricsInstance describes a single project service for which plugin
// metrics are submitted. It appears in type QuotaPluginMetricsCollector.
type QuotaPluginMetricsInstance struct {
	Project           core.KeystoneProject // identifies the project (and its domain)
	ServiceType       db.ServiceType       // key into Cluster.QuotaPlugins
	SerializedMetrics string               // JSON payload as stored in project_services.serialized_metrics
}
294

295
// Describe implements the prometheus.Collector interface.
296
func (c *QuotaPluginMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
1✔
297
        quotaPluginMetricsOkGauge.Describe(ch)
1✔
298
        for _, plugin := range c.Cluster.QuotaPlugins {
2✔
299
                liquidDescribeMetrics(ch, plugin.ServiceInfo().UsageMetricFamilies, []string{"domain_id", "project_id"})
1✔
300
        }
1✔
301
}
302

303
// quotaSerializedMetricsGetQuery fetches the serialized metrics of every
// project service that actually emitted metrics (empty and "{}" payloads are
// skipped), together with the identifying attributes of its project and domain.
var quotaSerializedMetricsGetQuery = sqlext.SimplifyWhitespace(`
	SELECT d.name, d.uuid, p.name, p.uuid, p.parent_uuid, ps.type, ps.serialized_metrics
	  FROM domains d
	  JOIN projects p ON p.domain_id = d.id
	  JOIN project_services ps ON ps.project_id = p.id
	 WHERE ps.serialized_metrics != '' AND ps.serialized_metrics != '{}'
`)
310

311
// Collect implements the prometheus.Collector interface.
312
func (c *QuotaPluginMetricsCollector) Collect(ch chan<- prometheus.Metric) {
1✔
313
        descCh := make(chan *prometheus.Desc, 1)
1✔
314
        quotaPluginMetricsOkGauge.Describe(descCh)
1✔
315
        pluginMetricsOkDesc := <-descCh
1✔
316

1✔
317
        if c.Override != nil {
1✔
318
                for _, instance := range c.Override {
×
319
                        c.collectOneProjectService(ch, pluginMetricsOkDesc, instance)
×
320
                }
×
321
                return
×
322
        }
323

324
        err := sqlext.ForeachRow(c.DB, quotaSerializedMetricsGetQuery, nil, func(rows *sql.Rows) error {
3✔
325
                var i QuotaPluginMetricsInstance
2✔
326
                err := rows.Scan(
2✔
327
                        &i.Project.Domain.Name, &i.Project.Domain.UUID,
2✔
328
                        &i.Project.Name, &i.Project.UUID, &i.Project.ParentUUID,
2✔
329
                        &i.ServiceType, &i.SerializedMetrics)
2✔
330
                if err == nil {
4✔
331
                        c.collectOneProjectService(ch, pluginMetricsOkDesc, i)
2✔
332
                }
2✔
333
                return err
2✔
334
        })
335
        if err != nil {
1✔
336
                logg.Error("collect quota plugin metrics failed: " + err.Error())
×
337
        }
×
338
}
339

340
// collectOneProjectService renders the serialized metrics of a single project
// service and reports through the "ok" gauge whether rendering succeeded.
func (c *QuotaPluginMetricsCollector) collectOneProjectService(ch chan<- prometheus.Metric, pluginMetricsOkDesc *prometheus.Desc, instance QuotaPluginMetricsInstance) {
	// skip service types that are not configured in this cluster
	plugin := c.Cluster.QuotaPlugins[instance.ServiceType]
	if plugin == nil {
		return
	}

	// the domain_id/project_id labels are added to every metric rendered from
	// this instance's serialized payload
	err := liquidCollectMetrics(ch, []byte(instance.SerializedMetrics), plugin.ServiceInfo().UsageMetricFamilies,
		[]string{"domain_id", "project_id"},
		[]string{instance.Project.Domain.UUID, instance.Project.UUID},
	)
	successAsFloat := 1.0
	if err != nil {
		successAsFloat = 0.0
		// errors in plugin.LiquidCollectMetrics() are not fatal: we record a failure in
		// the metrics and keep going with the other project services
		logg.Error("while collecting plugin metrics for service %s in project %s: %s",
			instance.ServiceType, instance.Project.UUID, err.Error())
	}
	// serviceType fills both the "service" and "service_name" labels
	ch <- prometheus.MustNewConstMetric(
		pluginMetricsOkDesc,
		prometheus.GaugeValue, successAsFloat,
		instance.Project.Domain.Name, instance.Project.Domain.UUID, instance.Project.Name, instance.Project.UUID,
		string(instance.ServiceType), string(instance.ServiceType),
	)
}
365

366
////////////////////////////////////////////////////////////////////////////////
367
// data metrics
368

369
// DataMetricsReporter renders Prometheus metrics for data attributes (quota,
// usage, etc.) for all projects known to Limes.
//
// It is an http.Handler, instead of implementing the prometheus.Collector
// interface (like all the other Collector types in this package) and going
// through the normal promhttp facility.
//
// We are not going through promhttp here because promhttp insists on holding
// all metrics in memory before rendering them out (in order to sort them).
// Given the extremely high cardinality of these metrics, this results in
// unreasonably high memory usage spikes.
//
// This implementation also holds all the metrics in memory (because ORDER BY
// on database level turned out to be prohibitively expensive), but we hold
// their rendered forms (i.e. something like `{bar="bar",foo="foo"} 42` instead
// of a dozen allocations for each label name, label value, label pair, a map
// of label pairs, and so on) in order to save memory.
type DataMetricsReporter struct {
	Cluster *core.Cluster // cluster configuration (resource/rate behaviors are read via a cache)
	DB      *gorp.DbMap   // database handle that the metric queries run against
	// NOTE(review): ReportZeroes is not referenced in this excerpt; presumably
	// it controls whether zero-valued samples are emitted — confirm in
	// collectMetricsBySeries().
	ReportZeroes bool
}
391

392
// This is the same Content-Type that promhttp's GET /metrics implementation reports.
// If this changes because of a prometheus/client-go upgrade, we will know because our
// test verifies that promhttp yields this Content-Type. In the case of a change,
// the output format of promhttp should be carefully reviewed for changes, and then
// our implementation should match those changes (including to the Content-Type).
//
// It is sent by DataMetricsReporter.ServeHTTP below.
const contentTypeForPrometheusMetrics = "text/plain; version=0.0.4; charset=utf-8; escaping=underscores"
398

399
// ServeHTTP implements the http.Handler interface.
//
// It collects all pre-rendered metric series from the DB, then streams them
// out in Prometheus text exposition format through a buffered writer, one
// metric family at a time.
func (d *DataMetricsReporter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	metricsBySeries, err := d.collectMetricsBySeries()
	// on error, respondwith.ErrorText writes the error response for us
	if respondwith.ErrorText(w, err) {
		return
	}

	w.Header().Set("Content-Type", contentTypeForPrometheusMetrics)
	w.WriteHeader(http.StatusOK)

	// NOTE: Keep metrics ordered by name!
	bw := bufio.NewWriter(w)
	printDataMetrics(bw, metricsBySeries, "limes_autogrow_growth_multiplier", `For resources with quota distribution model "autogrow", reports the configured growth multiplier.`)
	printDataMetrics(bw, metricsBySeries, "limes_autogrow_quota_overcommit_threshold_percent", `For resources with quota distribution model "autogrow", reports the allocation percentage above which quota overcommit is disabled.`)
	printDataMetrics(bw, metricsBySeries, "limes_available_commitment_duration", `Reports which commitment durations are available for new commitments on a Limes resource.`)
	printDataMetrics(bw, metricsBySeries, "limes_cluster_capacity", `Reported capacity of a Limes resource for an OpenStack cluster.`)
	printDataMetrics(bw, metricsBySeries, "limes_cluster_capacity_per_az", "Reported capacity of a Limes resource for an OpenStack cluster in a specific availability zone.")
	printDataMetrics(bw, metricsBySeries, "limes_cluster_usage_per_az", "Actual usage of a Limes resource for an OpenStack cluster in a specific availability zone.")
	printDataMetrics(bw, metricsBySeries, "limes_domain_quota", `Assigned quota of a Limes resource for an OpenStack domain.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_backendquota", `Actual quota of a Limes resource for an OpenStack project.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_commitment_min_expires_at", `Minimum expiredAt timestamp of all commitments for an Openstack project, grouped by resource and service.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_committed_per_az", `Sum of all active commitments of a Limes resource for an OpenStack project, grouped by availability zone and state.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_override_quota_from_config", `Quota override for a Limes resource for an OpenStack project, if any. (Value comes from cluster configuration.)`)
	printDataMetrics(bw, metricsBySeries, "limes_project_physical_usage", `Actual (physical) usage of a Limes resource for an OpenStack project.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_quota", `Assigned quota of a Limes resource for an OpenStack project.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_rate_usage", `Usage of a Limes rate for an OpenStack project. These are counters that never reset.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_usage", `Actual (logical) usage of a Limes resource for an OpenStack project.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_usage_per_az", `Actual (logical) usage of a Limes resource for an OpenStack project in a specific availability zone.`)
	printDataMetrics(bw, metricsBySeries, "limes_project_used_and_or_committed_per_az", `The maximum of limes_project_usage_per_az and limes_project_committed_per_az{state="active"}.`)
	printDataMetrics(bw, metricsBySeries, "limes_unit_multiplier", `Conversion factor that a value of this resource must be multiplied with to obtain the base unit (e.g. bytes). For use with Grafana when only the base unit can be configured because of templating.`)

	// headers are already sent, so a flush error can only be logged
	err = bw.Flush()
	if err != nil {
		logg.Error("in DataMetricsReporter.ServeHTTP: " + err.Error())
	}
}
435

436
// dataMetric is a single pre-rendered sample: its label set is already
// serialized into Prometheus text form to keep memory usage low (see comment
// on type DataMetricsReporter).
type dataMetric struct {
	Labels string // e.g. `bar="bar",foo="foo"`
	Value  float64
}

// printDataMetrics writes one complete metric family in Prometheus text
// exposition format: a HELP line, a TYPE line (always gauge), then one sample
// line per metric, sorted by label set. Nothing is written for empty series.
func printDataMetrics(w io.Writer, metricsBySeries map[string][]dataMetric, seriesName, seriesHelp string) {
	series := metricsBySeries[seriesName]
	if len(series) == 0 {
		// suppress HELP/TYPE lines when there are no samples to show
		return
	}

	fmt.Fprintf(w, "# HELP %s %s\n# TYPE %s gauge\n", seriesName, seriesHelp, seriesName)

	// sort samples by label set for deterministic, promhttp-like output
	// (this sorts the slice inside the map in place, which is harmless here)
	slices.SortFunc(series, func(a, b dataMetric) int {
		return strings.Compare(a.Labels, b.Labels)
	})
	for idx := range series {
		fmt.Fprintf(w, "%s{%s} %g\n", seriesName, series[idx].Labels, series[idx].Value)
	}
}
455

456
// clusterMetricsQuery computes, per cluster resource, JSON objects that map
// each availability zone to its raw capacity and to its usage respectively.
var clusterMetricsQuery = sqlext.SimplifyWhitespace(`
	SELECT cs.type, cr.name, JSON_OBJECT_AGG(car.az, car.raw_capacity), JSON_OBJECT_AGG(car.az, car.usage)
	  FROM cluster_services cs
	  JOIN cluster_resources cr ON cr.service_id = cs.id
	  JOIN cluster_az_resources car ON car.resource_id = cr.id
	 GROUP BY cs.type, cr.name
`)

// domainMetricsQuery sums up project quotas to the domain level, per resource.
var domainMetricsQuery = sqlext.SimplifyWhitespace(`
	SELECT d.name, d.uuid, ps.type, pr.name, SUM(pr.quota)
	  FROM domains d
	  JOIN projects p ON p.domain_id = d.id
	  JOIN project_services ps ON ps.project_id = p.id
	  JOIN project_resources pr ON pr.service_id = ps.id
	 GROUP BY d.name, d.uuid, ps.type, pr.name
`)

// projectMetricsQuery reports, per project resource: quota, backend quota,
// quota override (if any), summed usage and physical usage over all AZs, and
// the earliest expires_at among the resource's active commitments (NULL when
// there are none, hence the LEFT JOIN).
var projectMetricsQuery = sqlext.SimplifyWhitespace(`
	WITH project_sums AS (
	  SELECT resource_id,
	         SUM(usage) AS usage,
	         SUM(COALESCE(physical_usage, usage)) AS physical_usage,
	         COUNT(physical_usage) > 0 AS has_physical_usage
	    FROM project_az_resources
	   GROUP BY resource_id
	),
	project_commitment_minExpiresAt AS (
		SELECT p.domain_id, p.id AS project_id, ps.type, pr.name, MIN(expires_at) AS project_commitment_min_expires_at
		FROM projects p
		JOIN project_services ps ON ps.project_id = p.id
		JOIN project_resources pr ON pr.service_id = ps.id
		JOIN project_az_resources par ON par.resource_id = pr.id
		JOIN project_commitments pc ON pc.az_resource_id = par.id AND pc.state = 'active'
		GROUP BY p.domain_id, p.id, ps.type, pr.name 
	)
	SELECT d.name, d.uuid, p.name, p.uuid, ps.type, pr.name,
	       pr.quota, pr.backend_quota, pr.override_quota_from_config,
	       psums.usage, psums.physical_usage, psums.has_physical_usage,
	       pcmea.project_commitment_min_expires_at
	  FROM domains d
	  JOIN projects p ON p.domain_id = d.id
	  JOIN project_services ps ON ps.project_id = p.id
	  JOIN project_resources pr ON pr.service_id = ps.id
	  JOIN project_sums psums ON psums.resource_id = pr.id
	  LEFT JOIN project_commitment_minExpiresAt pcmea ON d.id = pcmea.domain_id AND p.id = pcmea.project_id AND ps.type= pcmea.TYPE AND pr.name = pcmea.name
`)

// projectAZMetricsQuery reports, per project AZ resource: the usage, and a
// JSON object mapping commitment state to the summed amount of unexpired
// commitments (NULL when the AZ resource has no such commitments).
var projectAZMetricsQuery = sqlext.SimplifyWhitespace(`
	WITH project_commitment_sums_by_state AS (
	  SELECT az_resource_id, state, SUM(amount) AS amount
	    FROM project_commitments
	   WHERE state NOT IN ('superseded', 'expired')
	   GROUP BY az_resource_id, state
	), project_commitment_sums AS (
	  SELECT az_resource_id, JSON_OBJECT_AGG(state, amount) AS amount_by_state
	    FROM project_commitment_sums_by_state
	   GROUP BY az_resource_id
	)
	SELECT d.name, d.uuid, p.name, p.uuid, ps.type, pr.name, par.az, par.usage, pcs.amount_by_state
	  FROM domains d
	  JOIN projects p ON p.domain_id = d.id
	  JOIN project_services ps ON ps.project_id = p.id
	  JOIN project_resources pr ON pr.service_id = ps.id
	  JOIN project_az_resources par ON par.resource_id = pr.id
	  LEFT OUTER JOIN project_commitment_sums pcs ON pcs.az_resource_id = par.id
`)

// projectRateMetricsQuery reports the usage counters of all project rates
// that have a non-empty usage value.
var projectRateMetricsQuery = sqlext.SimplifyWhitespace(`
	SELECT d.name, d.uuid, p.name, p.uuid, ps.type, pra.name, pra.usage_as_bigint
	  FROM domains d
	  JOIN projects p ON p.domain_id = d.id
	  JOIN project_services ps ON ps.project_id = p.id
	  JOIN project_rates pra ON pra.service_id = ps.id
	 WHERE pra.usage_as_bigint != ''
`)
531

532
// collectMetricsBySeries gathers all data metrics emitted by this reporter,
// grouped by time series name (e.g. "limes_cluster_capacity").
//
// It runs one DB query per reporting level (cluster, domain, project,
// project-AZ, project rates) and additionally emits configuration-derived
// metadata metrics (unit multipliers, autogrow parameters, available
// commitment durations) from the cluster's quota plugins.
// Any query or decoding error aborts collection and is returned wrapped
// with the name of the failing query.
func (d *DataMetricsReporter) collectMetricsBySeries() (map[string][]dataMetric, error) {
	behaviorCache := newResourceAndRateBehaviorCache(d.Cluster)
	result := make(map[string][]dataMetric)

	// fetch values for cluster level
	//
	// capacityReported tracks which service/resource pairs got a
	// "limes_cluster_capacity" sample, so that missing ones can be filled
	// with zero values below.
	capacityReported := make(map[db.ServiceType]map[liquid.ResourceName]bool)
	err := sqlext.ForeachRow(d.DB, clusterMetricsQuery, nil, func(rows *sql.Rows) error {
		var (
			dbServiceType     db.ServiceType
			dbResourceName    liquid.ResourceName
			capacityPerAZJSON string
			usagePerAZJSON    string
		)
		err := rows.Scan(&dbServiceType, &dbResourceName, &capacityPerAZJSON, &usagePerAZJSON)
		if err != nil {
			return err
		}

		var (
			capacityPerAZ map[liquid.AvailabilityZone]uint64
			// usagePerAZ uses *uint64 because individual AZ usage values
			// can be null in the JSON payload (checked for nil below).
			usagePerAZ map[liquid.AvailabilityZone]*uint64
		)
		err = json.Unmarshal([]byte(capacityPerAZJSON), &capacityPerAZ)
		if err != nil {
			return err
		}
		err = json.Unmarshal([]byte(usagePerAZJSON), &usagePerAZ)
		if err != nil {
			return err
		}
		// Only emit per-AZ series if there is real AZ-awareness, i.e. at
		// least one AZ key other than "any". The total is summed either way.
		reportAZBreakdown := false
		totalCapacity := uint64(0)
		for az, azCapacity := range capacityPerAZ {
			totalCapacity += azCapacity
			if az != liquid.AvailabilityZoneAny {
				reportAZBreakdown = true
			}
		}

		behavior := behaviorCache.Get(dbServiceType, dbResourceName)
		apiIdentity := behavior.IdentityInV1API
		if reportAZBreakdown {
			for az, azCapacity := range capacityPerAZ {
				// Labels carry both the API-facing identity ("service",
				// "resource") and the internal DB service type ("service_name").
				azLabels := fmt.Sprintf(`availability_zone=%q,resource=%q,service=%q,service_name=%q`,
					az, apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
				)
				// Capacity is scaled by the configured overcommit factor.
				metric := dataMetric{Labels: azLabels, Value: float64(behavior.OvercommitFactor.ApplyTo(azCapacity))}
				result["limes_cluster_capacity_per_az"] = append(result["limes_cluster_capacity_per_az"], metric)

				azUsage := usagePerAZ[az]
				if azUsage != nil && *azUsage != 0 {
					metric := dataMetric{Labels: azLabels, Value: float64(*azUsage)}
					result["limes_cluster_usage_per_az"] = append(result["limes_cluster_usage_per_az"], metric)
				}
			}
		}

		labels := fmt.Sprintf(`resource=%q,service=%q,service_name=%q`,
			apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
		)
		metric := dataMetric{Labels: labels, Value: float64(behavior.OvercommitFactor.ApplyTo(totalCapacity))}
		result["limes_cluster_capacity"] = append(result["limes_cluster_capacity"], metric)

		_, exists := capacityReported[dbServiceType]
		if !exists {
			capacityReported[dbServiceType] = make(map[liquid.ResourceName]bool)
		}
		capacityReported[dbServiceType][dbResourceName] = true

		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("in clusterMetricsQuery: %w", err)
	}

	// make sure that a cluster capacity value is reported for each resource (the
	// corresponding time series might otherwise be missing if capacity scraping
	// fails)
	for serviceType, quotaPlugin := range d.Cluster.QuotaPlugins {
		for resName := range quotaPlugin.ServiceInfo().Resources {
			if capacityReported[serviceType][resName] {
				continue
			}
			apiIdentity := behaviorCache.Get(serviceType, resName).IdentityInV1API

			labels := fmt.Sprintf(`resource=%q,service=%q,service_name=%q`,
				apiIdentity.Name, apiIdentity.ServiceType, serviceType,
			)
			// explicit zero sample as a placeholder for missing capacity data
			metric := dataMetric{Labels: labels, Value: 0}
			result["limes_cluster_capacity"] = append(result["limes_cluster_capacity"], metric)
		}
	}

	// fetch values for domain level
	err = sqlext.ForeachRow(d.DB, domainMetricsQuery, nil, func(rows *sql.Rows) error {
		var (
			domainName     string
			domainUUID     string
			dbServiceType  db.ServiceType
			dbResourceName liquid.ResourceName
			// quota is a pointer because the DB value may be NULL;
			// NULL quotas produce no sample.
			quota *uint64
		)
		err := rows.Scan(&domainName, &domainUUID, &dbServiceType, &dbResourceName, &quota)
		if err != nil {
			return err
		}
		apiIdentity := behaviorCache.Get(dbServiceType, dbResourceName).IdentityInV1API

		if quota != nil {
			labels := fmt.Sprintf(
				`domain=%q,domain_id=%q,resource=%q,service=%q,service_name=%q`,
				domainName, domainUUID,
				apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
			)
			metric := dataMetric{Labels: labels, Value: float64(*quota)}
			result["limes_domain_quota"] = append(result["limes_domain_quota"], metric)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("during domainMetricsQuery: %w", err)
	}

	// fetch values for project level (quota/usage)
	err = sqlext.ForeachRow(d.DB, projectMetricsQuery, nil, func(rows *sql.Rows) error {
		var (
			domainName              string
			domainUUID              string
			projectName             string
			projectUUID             string
			dbServiceType           db.ServiceType
			dbResourceName          liquid.ResourceName
			quota                   *uint64 // NULL-able in DB
			backendQuota            *int64  // NULL-able in DB; signed (backends may report e.g. -1)
			overrideQuotaFromConfig *uint64 // NULL-able in DB
			usage                   uint64
			physicalUsage           uint64
			hasPhysicalUsage        bool
			minExpiresAt            *time.Time // NULL when no matching commitments exist
		)
		err := rows.Scan(&domainName, &domainUUID, &projectName, &projectUUID, &dbServiceType, &dbResourceName,
			&quota, &backendQuota, &overrideQuotaFromConfig, &usage, &physicalUsage, &hasPhysicalUsage, &minExpiresAt)
		if err != nil {
			return err
		}
		apiIdentity := behaviorCache.Get(dbServiceType, dbResourceName).IdentityInV1API

		labels := fmt.Sprintf(
			`domain=%q,domain_id=%q,project=%q,project_id=%q,resource=%q,service=%q,service_name=%q`,
			domainName, domainUUID, projectName, projectUUID,
			apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
		)

		// Zero-valued samples are suppressed unless d.ReportZeroes is set,
		// to keep the number of emitted time series down.
		if quota != nil {
			if d.ReportZeroes || *quota != 0 {
				metric := dataMetric{Labels: labels, Value: float64(*quota)}
				result["limes_project_quota"] = append(result["limes_project_quota"], metric)
			}
		}
		if backendQuota != nil {
			if d.ReportZeroes || *backendQuota != 0 {
				metric := dataMetric{Labels: labels, Value: float64(*backendQuota)}
				result["limes_project_backendquota"] = append(result["limes_project_backendquota"], metric)
			}
		}
		if overrideQuotaFromConfig != nil {
			metric := dataMetric{Labels: labels, Value: float64(*overrideQuotaFromConfig)}
			result["limes_project_override_quota_from_config"] = append(result["limes_project_override_quota_from_config"], metric)
		}
		if d.ReportZeroes || usage != 0 {
			metric := dataMetric{Labels: labels, Value: float64(usage)}
			result["limes_project_usage"] = append(result["limes_project_usage"], metric)
		}
		if hasPhysicalUsage {
			if d.ReportZeroes || physicalUsage != 0 {
				metric := dataMetric{Labels: labels, Value: float64(physicalUsage)}
				result["limes_project_physical_usage"] = append(result["limes_project_physical_usage"], metric)
			}
		}
		if minExpiresAt != nil || d.ReportZeroes {
			// timeAsUnixOrZero maps a nil timestamp to 0 (helper defined elsewhere in this package)
			metric := dataMetric{Labels: labels, Value: timeAsUnixOrZero(minExpiresAt)}
			result["limes_project_commitment_min_expires_at"] = append(result["limes_project_commitment_min_expires_at"], metric)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("during projectMetricsQuery: %w", err)
	}

	// fetch values for project AZ level (usage/commitments)
	err = sqlext.ForeachRow(d.DB, projectAZMetricsQuery, nil, func(rows *sql.Rows) error {
		var (
			domainName     string
			domainUUID     string
			projectName    string
			projectUUID    string
			dbServiceType  db.ServiceType
			dbResourceName liquid.ResourceName
			az             liquid.AvailabilityZone
			usage          uint64
			// amountByStateJSON is NULL (nil) when the AZ resource has no
			// non-superseded, non-expired commitments.
			amountByStateJSON *string
		)
		err := rows.Scan(&domainName, &domainUUID, &projectName, &projectUUID, &dbServiceType, &dbResourceName,
			&az, &usage, &amountByStateJSON)
		if err != nil {
			return err
		}
		apiIdentity := behaviorCache.Get(dbServiceType, dbResourceName).IdentityInV1API

		labels := fmt.Sprintf(
			`availability_zone=%q,domain=%q,domain_id=%q,project=%q,project_id=%q,resource=%q,service=%q,service_name=%q`,
			az, domainName, domainUUID, projectName, projectUUID,
			apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
		)

		if d.ReportZeroes || usage != 0 {
			metric := dataMetric{Labels: labels, Value: float64(usage)}
			result["limes_project_usage_per_az"] = append(result["limes_project_usage_per_az"], metric)
		}
		committed := uint64(0)
		if amountByStateJSON != nil {
			var amountByState map[db.CommitmentState]uint64
			err = json.Unmarshal([]byte(*amountByStateJSON), &amountByState)
			if err != nil {
				return fmt.Errorf("while unmarshalling amount_by_state: %w (input was %q)", err, *amountByStateJSON)
			}
			// only "active" commitments count towards used-and-or-committed below
			committed = amountByState[db.CommitmentStateActive]
			for state, amount := range amountByState {
				// one sample per commitment state, with an extra "state" label
				labelsWithState := fmt.Sprintf(`%s,state=%q`, labels, state)
				metric := dataMetric{Labels: labelsWithState, Value: float64(amount)}
				result["limes_project_committed_per_az"] = append(result["limes_project_committed_per_az"], metric)
			}
		}
		if d.ReportZeroes || max(usage, committed) != 0 {
			metric := dataMetric{Labels: labels, Value: float64(max(usage, committed))}
			result["limes_project_used_and_or_committed_per_az"] = append(result["limes_project_used_and_or_committed_per_az"], metric)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("during projectAZMetricsQuery: %w", err)
	}

	// fetch metadata for services/resources
	for dbServiceType, quotaPlugin := range d.Cluster.QuotaPlugins {
		for dbResourceName, resourceInfo := range quotaPlugin.ServiceInfo().Resources {
			behavior := behaviorCache.Get(dbServiceType, dbResourceName)
			apiIdentity := behavior.IdentityInV1API
			labels := fmt.Sprintf(`resource=%q,service=%q,service_name=%q`,
				apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
			)

			_, multiplier := resourceInfo.Unit.Base()
			metric := dataMetric{Labels: labels, Value: float64(multiplier)}
			result["limes_unit_multiplier"] = append(result["limes_unit_multiplier"], metric)

			// autogrow metrics are only emitted for resources with autogrow
			// quota distribution configured
			autogrowCfg, ok := d.Cluster.QuotaDistributionConfigForResource(dbServiceType, dbResourceName).Autogrow.Unpack()
			if ok {
				metric := dataMetric{Labels: labels, Value: autogrowCfg.GrowthMultiplier}
				result["limes_autogrow_growth_multiplier"] = append(result["limes_autogrow_growth_multiplier"], metric)

				metric = dataMetric{Labels: labels, Value: autogrowCfg.AllowQuotaOvercommitUntilAllocatedPercent}
				result["limes_autogrow_quota_overcommit_threshold_percent"] = append(result["limes_autogrow_quota_overcommit_threshold_percent"], metric)
			}

			// one constant 1.0 sample per configured commitment duration
			for _, duration := range behaviorCache.GetCommitmentBehavior(dbServiceType, dbResourceName).Durations {
				labels := fmt.Sprintf(`duration=%q,resource=%q,service=%q,service_name=%q`,
					duration.String(), apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
				)
				metric := dataMetric{Labels: labels, Value: 1.0}
				result["limes_available_commitment_duration"] = append(result["limes_available_commitment_duration"], metric)
			}
		}
	}

	// fetch values for project level (rate usage)
	err = sqlext.ForeachRow(d.DB, projectRateMetricsQuery, nil, func(rows *sql.Rows) error {
		var (
			domainName    string
			domainUUID    string
			projectName   string
			projectUUID   string
			dbServiceType db.ServiceType
			dbRateName    liquid.RateName
			usageAsBigint string
		)
		err := rows.Scan(&domainName, &domainUUID, &projectName, &projectUUID, &dbServiceType, &dbRateName, &usageAsBigint)
		if err != nil {
			return err
		}
		// usage is stored as a decimal string; parse via big.Float since the
		// value may not fit a fixed-width integer, then round to float64 for
		// the metric value
		usageAsBigFloat, _, err := big.NewFloat(0).Parse(usageAsBigint, 10)
		if err != nil {
			return err
		}
		usageAsFloat, _ := usageAsBigFloat.Float64()

		if d.ReportZeroes || usageAsFloat != 0 {
			behavior := behaviorCache.GetForRate(dbServiceType, dbRateName)
			apiIdentity := behavior.IdentityInV1API
			labels := fmt.Sprintf(
				`domain=%q,domain_id=%q,project=%q,project_id=%q,rate=%q,service=%q,service_name=%q`,
				domainName, domainUUID, projectName, projectUUID,
				apiIdentity.Name, apiIdentity.ServiceType, dbServiceType,
			)
			metric := dataMetric{Labels: labels, Value: usageAsFloat}
			result["limes_project_rate_usage"] = append(result["limes_project_rate_usage"], metric)
		}
		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("during projectRateMetricsQuery: %w", err)
	}

	return result, nil
}
847

848
///////////////////////////////////////////////////////////////////////////////////////////
849
// utilities
850

851
// Caches the result of repeated cluster.BehaviorForResource() calls.
//
// NOTE: This looks like something that should be baked into BehaviorForResource() itself.
// But then cache access would need to be protected by a mutex, which would likely negate the performance gain from caching.
// We could revisit the idea of more central caching once <https://github.com/golang/go/issues/71076> makes thread-safe maps more viable.
//
// Alternatively, once ServiceInfo and ResourceInfo gets refactored towards being stored in the DB,
// we could consider persisting behavior information there as well. But this might introduce additional
// complications to account for behaviors being updated without the underlying ResourceInfo changing.
type resourceAndRateBehaviorCache struct {
	// cluster is the source of truth that cache misses are resolved against.
	cluster *core.Cluster
	// cache memoizes cluster.BehaviorForResource() results (see Get).
	cache map[db.ServiceType]map[liquid.ResourceName]core.ResourceBehavior
	// rateCache memoizes cluster.BehaviorForRate() results (see GetForRate).
	rateCache map[db.ServiceType]map[liquid.RateName]core.RateBehavior
	// cbCache memoizes cluster.CommitmentBehaviorForResource().ForCluster()
	// results (see GetCommitmentBehavior).
	cbCache map[db.ServiceType]map[liquid.ResourceName]core.ScopedCommitmentBehavior
}
866

867
func newResourceAndRateBehaviorCache(cluster *core.Cluster) resourceAndRateBehaviorCache {
7✔
868
        cache := make(map[db.ServiceType]map[liquid.ResourceName]core.ResourceBehavior)
7✔
869
        rateCache := make(map[db.ServiceType]map[liquid.RateName]core.RateBehavior)
7✔
870
        cbCache := make(map[db.ServiceType]map[liquid.ResourceName]core.ScopedCommitmentBehavior)
7✔
871
        return resourceAndRateBehaviorCache{cluster, cache, rateCache, cbCache}
7✔
872
}
7✔
873

874
func (c resourceAndRateBehaviorCache) Get(srvType db.ServiceType, resName liquid.ResourceName) core.ResourceBehavior {
60✔
875
        if c.cache[srvType] == nil {
68✔
876
                c.cache[srvType] = make(map[liquid.ResourceName]core.ResourceBehavior)
8✔
877
        }
8✔
878
        behavior, exists := c.cache[srvType][resName]
60✔
879
        if !exists {
72✔
880
                behavior = c.cluster.BehaviorForResource(srvType, resName)
12✔
881
                c.cache[srvType][resName] = behavior
12✔
882
        }
12✔
883
        return behavior
60✔
884
}
885

886
func (c resourceAndRateBehaviorCache) GetForRate(srvType db.ServiceType, rateName liquid.RateName) core.RateBehavior {
8✔
887
        if c.rateCache[srvType] == nil {
10✔
888
                c.rateCache[srvType] = make(map[liquid.RateName]core.RateBehavior)
2✔
889
        }
2✔
890
        behavior, exists := c.rateCache[srvType][rateName]
8✔
891
        if !exists {
12✔
892
                behavior = c.cluster.BehaviorForRate(srvType, rateName)
4✔
893
                c.rateCache[srvType][rateName] = behavior
4✔
894
        }
4✔
895
        return behavior
8✔
896
}
897

898
func (c resourceAndRateBehaviorCache) GetCommitmentBehavior(srvType db.ServiceType, resName liquid.ResourceName) core.ScopedCommitmentBehavior {
12✔
899
        if c.cbCache[srvType] == nil {
20✔
900
                c.cbCache[srvType] = make(map[liquid.ResourceName]core.ScopedCommitmentBehavior)
8✔
901
        }
8✔
902
        behavior, exists := c.cbCache[srvType][resName]
12✔
903
        if !exists {
24✔
904
                behavior = c.cluster.CommitmentBehaviorForResource(srvType, resName).ForCluster()
12✔
905
                c.cbCache[srvType][resName] = behavior
12✔
906
        }
12✔
907
        return behavior
12✔
908
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc