• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

tfcollins / telemetry / 10495659905

21 Aug 2024 06:45PM UTC coverage: 34.158% (-24.1%) from 58.234%
10495659905

push

github

tfcollins
Put exceptions back for missing backends

Signed-off-by: Travis F. Collins <travis.collins@analog.com>

0 of 2 new or added lines in 1 file covered. (0.0%)

211 existing lines in 13 files now uncovered.

428 of 1253 relevant lines covered (34.16%)

0.34 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

78.72
/telemetry/ingest.py
UNCOV
1
import telemetry
×
UNCOV
2
import datetime
×
UNCOV
3
import os
×
UNCOV
4
import csv
×
5

6

UNCOV
7
class ingest:
    """Push telemetry results into the database backend selected in __init__."""

    # When True, every log_* method writes to the "dummy" index instead of
    # its production index — used to keep test runs out of real data.
    use_test_index = False
×
9

UNCOV
10
    def __init__(self, mode="elastic", server="alpine"):
×
11
        if mode == "elastic":
1✔
12
            self.db = telemetry.elastic(server=server)
1✔
13

UNCOV
14
    def _get_schema(self, name):
×
15
        loc = os.path.dirname(__file__)
1✔
16
        return os.path.join(loc, "resources", name)
1✔
17

UNCOV
18
    def _translate_hdl_resource_fields(self, fieldss):
×
19
        out = []
1✔
20
        for field in fieldss:
1✔
21
            fields = field.replace("(%)", "_percent")
1✔
22
            fields = fields.replace("(#)", "_count")
1✔
23
            fields = fields.replace("/", "_")
1✔
24
            fields = fields.replace(" ", "_")
1✔
25
            fields = fields.replace("(Avg)", "avg")
1✔
26
            fields = fields.replace(">", "gt")
1✔
27
            fields = fields.replace("-", "_")
1✔
28
            fields = fields.replace("+", "_")
1✔
29
            fields = fields.replace(".", "p")
1✔
30
            fields = fields.replace("*", "")
1✔
31
            fields = fields.replace("(Cell)", "_cell")
1✔
32
            fields = fields.replace("(Pblock)", "_pblock")
1✔
33
            fields = fields.replace("__", "_")
1✔
34
            # Dupe so if long spaces show up they get squashed
35
            fields = fields.replace("__", "_")
1✔
36
            fields = fields.replace("__", "_")
1✔
37
            fields = fields.replace("__", "_")
1✔
38
            out.append(fields)
1✔
39

40
        # Check
41
        s = self.db.import_schema(self._get_schema("hdl_resources.json"))
1✔
42
        for k in s["mappings"]["properties"]:
1✔
43
            if k not in out:
1✔
44
                raise Exception("Cannot find field {}".format(k))
×
45

UNCOV
46
    def log_boot_tests(
×
47
        self,
48
        boot_folder_name,
49
        hdl_hash,
50
        linux_hash,
51
        boot_partition_hash,
52
        hdl_branch,
53
        linux_branch,
54
        boot_partition_branch,
55
        is_hdl_release,
56
        is_linux_release,
57
        is_boot_partition_release,
58
        uboot_reached,
59
        linux_prompt_reached,
60
        drivers_enumerated,
61
        drivers_missing,
62
        dmesg_warnings_found,
63
        dmesg_errors_found,
64
        jenkins_job_date,
65
        jenkins_build_number,
66
        jenkins_project_name,
67
        jenkins_agent,
68
        jenkins_trigger,
69
        pytest_errors,
70
        pytest_failures,
71
        pytest_skipped,
72
        pytest_tests,
73
        matlab_errors,
74
        matlab_failures,
75
        matlab_skipped,
76
        matlab_tests,
77
        last_failing_stage,
78
        last_failing_stage_failure
79
    ):
80
        """ Upload boot test results to elasticsearch """
81
        # Build will produce the following:
82
        #   hdl commit hash
83
        #   linux commit hash
84
        #   hdl release flag
85
        #   hdl master flag
86
        #   linux release flag
87
        #   linux master flag
88
        #
89
        #   fully booted status
90
        #   uboot reached status
91
        #   drivers enumerated correctly
92
        #
93
        #   dmesg warnings found
94
        #   dmesg errors found
95
        args = {
1✔
96
            "hdl_branch" : hdl_branch,
97
            "linux_branch": linux_branch,
98
            "boot_partition_branch": boot_partition_branch
99
        }
100

101
        # Create query
102
        entry = {
1✔
103
            "boot_folder_name": boot_folder_name,
104
            "hdl_hash": hdl_hash,
105
            "linux_hash": linux_hash,
106
            "boot_partition_hash": boot_partition_hash,
107
            "hdl_branch": hdl_branch,
108
            "linux_branch": linux_branch,
109
            "boot_partition_branch": boot_partition_branch,
110
            "is_hdl_release": is_hdl_release,
111
            "is_linux_release": is_linux_release,
112
            "is_boot_partition_release": is_boot_partition_release,
113
            "uboot_reached": uboot_reached,
114
            "linux_prompt_reached": linux_prompt_reached,
115
            "drivers_enumerated": drivers_enumerated,
116
            "drivers_missing": drivers_missing,
117
            "dmesg_warnings_found": dmesg_warnings_found,
118
            "dmesg_errors_found": dmesg_errors_found,
119
            "jenkins_job_date": jenkins_job_date,
120
            "jenkins_build_number": jenkins_build_number,
121
            "jenkins_project_name": jenkins_project_name,
122
            "jenkins_agent": jenkins_agent,
123
            "jenkins_trigger": jenkins_trigger,
124
            "source_adjacency_matrix" : self.get_adjacency_matrix(**args),
125
            "pytest_errors": pytest_errors,
126
            "pytest_failures": pytest_failures,
127
            "pytest_skipped": pytest_skipped,
128
            "pytest_tests": pytest_tests,
129
            "matlab_errors": matlab_errors,
130
            "matlab_failures": matlab_failures,
131
            "matlab_skipped": matlab_skipped,
132
            "matlab_tests": matlab_tests,
133
            "last_failing_stage": last_failing_stage,
134
            "last_failing_stage_failure": last_failing_stage_failure
135
        }
136
        # Setup index if necessary
137
        self.db.index_name = "dummy" if self.use_test_index else "boot_tests"
1✔
138
        s = self.db.import_schema(self._get_schema("boot_tests.json"))
1✔
139
        self.db.create_db_from_schema(s)
1✔
140
        # Add entry
141
        self.db.add_entry(entry)
1✔
142

UNCOV
143
    def log_artifacts(
×
144
        self,
145
        url,
146
        server,
147
        job,
148
        job_no,
149
        job_date,
150
        job_build_parameters,
151
        file_name,
152
        target_board,
153
        artifact_info_type,
154
        payload_raw,
155
        payload_ts,
156
        payload,
157
        payload_param
158
    ):
159
        """ Upload artifacts data to elasticsearch """
160

161
        # Create query
162
        entry = {
1✔
163
            "archive_date": datetime.datetime.now(),
164
            "url": url,
165
            "server": server,
166
            "job": job,
167
            "job_no": job_no,
168
            "job_date": datetime.datetime.now() if not job_date else job_date,
169
            "job_build_parameters": job_build_parameters,
170
            "file_name": file_name,
171
            "target_board": target_board,
172
            "artifact_info_type": artifact_info_type,
173
            "payload_raw": payload_raw,
174
            "payload_ts": payload_ts,
175
            "payload": payload,
176
            "payload_param": payload_param
177
        }
178
        # Setup index if necessary
179
        self.db.index_name = "dummy" if self.use_test_index else "artifacts"
1✔
180
        s = self.db.import_schema(self._get_schema("artifacts.json"))
1✔
181
        self.db.create_db_from_schema(s)
1✔
182
        # Add entry
183
        self.db.add_entry(entry)
1✔
184

UNCOV
185
    def log_hdl_resources_from_csv(self, filename):
×
186

187
        if not os.path.exists(filename):
1✔
188
            raise Exception("File does not exist: " + str(filename))
×
189

190
        with open(filename, "r") as csvfile:
1✔
191
            csvreader = csv.reader(csvfile)
1✔
192
            fields = next(csvreader)
1✔
193
            values = next(csvreader)
1✔
194
        fields = fields[1:]
1✔
195
        self._translate_hdl_resource_fields(fields)
1✔
196
        values = values[1:]
1✔
197
        entry = dict(zip(fields, values))
1✔
198
        # Setup index if necessary
199
        self.db.index_name = "hdl_resources" if not self.use_test_index else "dummy"
1✔
200
        s = self.db.import_schema(self._get_schema("hdl_resources.json"))
1✔
201
        self.db.create_db_from_schema(s)
1✔
202
        # Add entry
203
        self.db.add_entry(entry)
1✔
204

UNCOV
205
    def log_ad9361_tx_quad_cal_test(
×
206
        self,
207
        test_name,
208
        device,
209
        failed,
210
        iterations,
211
        channel,
212
        date=datetime.datetime.now(),
213
    ):
214
        """ Upload AD9361 tx quad cal test data to elasticsearch """
215
        # Create query
216
        entry = {
1✔
217
            "test_name": test_name,
218
            "date": date,
219
            "failed": failed,
220
            "iterations": iterations,
221
            "device": device,
222
            "channel": channel,
223
        }
224
        # Setup index if necessary
225
        self.db.index_name = "dummy" if self.use_test_index else "ad936x_tx_quad_cal"
1✔
226
        s = self.db.import_schema(self._get_schema("ad936x_tx_quad_cal.json"))
1✔
227
        self.db.create_db_from_schema(s)
1✔
228
        # Add entry
229
        self.db.add_entry(entry)
1✔
230

UNCOV
231
    def log_lte_evm_test(
×
232
        self,
233
        device_name,
234
        tx_attn,
235
        rx_gain_control_mode,
236
        lo_freq,
237
        tmn,
238
        bw,
239
        evm_pbch,
240
        evm_pcfich,
241
        evm_phich,
242
        evm_pdcch,
243
        evm_rs,
244
        evm_sss,
245
        evm_pss,
246
        evm_pdsch,
247
        date=datetime.datetime.now(),
248
    ):
249
        """ Upload LTE EVM tests to elasticsearch """
250
        # Create query
251
        entry = {
1✔
252
            "device_name": device_name,
253
            "date": date,
254
            "tx_attn": tx_attn,
255
            "rx_gain_control_mode": rx_gain_control_mode,
256
            "lo_freq": lo_freq,
257
            "tmn": tmn,
258
            "bw": bw,
259
            "evm_pbch": evm_pbch,
260
            "evm_pcfich": evm_pcfich,
261
            "evm_phich": evm_phich,
262
            "evm_pdcch": evm_pdcch,
263
            "evm_rs": evm_rs,
264
            "evm_sss": evm_sss,
265
            "evm_pss": evm_pss,
266
            "evm_pdsch": evm_pdsch,
267
        }
268
        # Setup index if necessary
269
        self.db.index_name = "lte_evm" if not self.use_test_index else "dummy"
1✔
270
        s = self.db.import_schema(self._get_schema("evm_tests_el.json"))
1✔
271
        self.db.create_db_from_schema(s)
1✔
272
        # Add entry
273
        self.db.add_entry(entry)
1✔
274

UNCOV
275
    def log_github_stats(
×
276
        self,
277
        repo,
278
        views,
279
        clones,
280
        view_unique,
281
        clones_unique,
282
        date=datetime.datetime.now(),
283
    ):
284
        """ Upload github stats to elasticsearch """
285
        # Create query
286
        entry = {
1✔
287
            "repo": repo,
288
            "date": date,
289
            "views": views,
290
            "clones": clones,
291
            "view_unique": view_unique,
292
            "clones_unique": clones_unique,
293
        }
294
        # Setup index if necessary
295
        self.db.index_name = "github_stats" if not self.use_test_index else "dummy"
1✔
296
        s = self.db.import_schema(self._get_schema("github_stats.json"))
1✔
297
        self.db.create_db_from_schema(s)
1✔
298
        # Add entry
299
        self.db.add_entry(entry)
1✔
300

UNCOV
301
    def log_github_release_stats(
×
302
        self,
303
        repo,
304
        tag,
305
        downloads,
306
        release_date,
307
        date=datetime.datetime.now(),
308
    ):
309
        """ Upload github release stats to elasticsearch """
310
        # Create query
311
        entry = {
1✔
312
            "repo": repo,
313
            "date": date,
314
            "downloads": downloads,
315
            "tag": tag,
316
            "release_date": release_date,
317
        }
318
        # Setup index if necessary
319
        self.db.index_name = (
1✔
320
            "github_release_stats" if not self.use_test_index else "dummy"
321
        )
322
        s = self.db.import_schema(self._get_schema("github_release_stats.json"))
1✔
323
        self.db.create_db_from_schema(s)
1✔
324
        # Add entry
325
        self.db.add_entry(entry)
1✔
326

UNCOV
327
    def get_adjacency_matrix(
        self,
        hdl_branch,
        linux_branch,
        boot_partition_branch
    ):
        """ Returns Source combination matrix for elastic adjacency_matrix """
        # A concrete boot-partition branch takes precedence; "NA" means the
        # boot came from an hdl/linux branch pair instead.
        if boot_partition_branch != "NA":
            return "boot_partition_{}".format(boot_partition_branch)
        return "hdl_{}_linux_{}".format(hdl_branch, linux_branch)
1✔
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc