my8100 / scrapydweb / e216c1fa-3f0d-4af3-b5a9-93e3c04fcdc8

12 Jan 2025 10:24 AM UTC coverage: 85.873% (+0.06%) from 85.817%

Pull Request #250: Fix bug for scrapyd v1.5.0 (opened by my8100; built on circleci; latest commit: "Update config.yml")

2 of 3 new or added lines in 2 files covered (66.67%).
3 existing lines in 2 files now uncovered.
3471 of 4042 relevant lines covered (85.87%).
10.17 hits per line.

Source file: /scrapydweb/views/dashboard/jobs.py (92.16% covered)
# coding: utf-8
from collections import OrderedDict
from datetime import datetime
import re
import traceback

from flask import flash, get_flashed_messages, render_template, request, url_for
from six.moves.urllib.parse import urljoin

from ...common import handle_metadata
from ...models import create_jobs_table, db
from ...vars import STRICT_NAME_PATTERN, jobs_table_map
from ..baseview import BaseView


_metadata = handle_metadata()
metadata = dict(
    pageview=_metadata.get('pageview', 1),
    per_page=_metadata.get('jobs_per_page', 100),
    style=_metadata.get('jobs_style', 'database'),
    unique_key_strings={}
)

STATUS_PENDING = '0'
STATUS_RUNNING = '1'
STATUS_FINISHED = '2'
NOT_DELETED = '0'
DELETED = '1'
HREF_PATTERN = re.compile(r"""href=['"](.+?)['"]""")  # Temp support for Scrapyd v1.3.0 (not released)
# See also scrapydweb/utils/poll.py
JOB_PATTERN = re.compile(r"""
                            <tr>\s*
                                <td>(?P<Project>.*?)</td>\s*
                                <td>(?P<Spider>.*?)</td>\s*
                                <td>(?P<Job>.*?)</td>\s*
                                (?:<td>(?P<PID>.*?)</td>\s*)?
                                (?:<td>(?P<Start>.*?)</td>\s*)?
                                (?:<td>(?P<Runtime>.*?)</td>\s*)?
                                (?:<td>(?P<Finish>.*?)</td>\s*)?
                                (?:<td>(?P<Log>.*?)</td>\s*)?
                                (?:<td>(?P<Items>.*?)</td>\s*)?
                                [\w\W]*?  # Temp support for Scrapyd v1.3.0 (not released)
                            </tr>
                          """, re.X)
JOB_KEYS = ['project', 'spider', 'job', 'pid', 'start', 'runtime', 'finish', 'href_log', 'href_items']

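For orientation, here is a minimal, self-contained sketch of how JOB_PATTERN and JOB_KEYS turn one row of Scrapyd's /jobs HTML table into a dict, mirroring the list comprehension in dispatch_request() below; the sample <tr> markup is invented for illustration:

# Illustrative sketch (assumes the JOB_PATTERN and JOB_KEYS defined above);
# the sample <tr> markup below is invented, not captured Scrapyd output.
sample_row = """
<tr>
    <td>demo</td>
    <td>test</td>
    <td>2018-10-12_205507</td>
    <td>1234</td>
    <td>2018-10-12 20:55:07</td>
    <td>0:01:02</td>
    <td>2018-10-12 20:56:09</td>
    <td><a href='/logs/demo/test/2018-10-12_205507.log'>Log</a></td>
    <td><a href='/items/demo/test/2018-10-12_205507.jl'>Items</a></td>
</tr>
"""
jobs = [dict(zip(JOB_KEYS, job)) for job in re.findall(JOB_PATTERN, sample_row)]
# jobs[0]['project'] == 'demo'; jobs[0]['href_log'] still holds the whole <a> tag,
# which is why db_insert_jobs() later extracts the bare URL with HREF_PATTERN.
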
class JobsView(BaseView):
    # methods = ['GET']
    metadata = metadata

    def __init__(self):
        super(JobsView, self).__init__()

        style = request.args.get('style')
        self.style = style if style in ['database', 'classic'] else self.metadata['style']
        if self.style != self.metadata['style']:
            self.metadata['style'] = self.style
            handle_metadata('jobs_style', self.style)
            self.logger.debug("Change style to %s", self.metadata['style'])

        self.per_page = request.args.get('per_page', default=self.metadata['per_page'], type=int)
        if self.per_page != self.metadata['per_page']:
            self.metadata['per_page'] = self.per_page
            handle_metadata('jobs_per_page', self.per_page)
            self.logger.debug("Change per_page to %s", self.metadata['per_page'])
        self.page = request.args.get('page', default=1, type=int)

        self.url = 'http://%s/jobs' % self.SCRAPYD_SERVER
        if self.SCRAPYD_SERVER_PUBLIC_URL:
            self.public_url = '%s/jobs' % self.SCRAPYD_SERVER_PUBLIC_URL
        else:
            self.public_url = ''
        self.text = ''
        self.kwargs = {}
        if self.USE_MOBILEUI:
            self.style = 'classic'
            self.template = 'scrapydweb/jobs_mobileui.html'
        elif self.style == 'classic':
            self.template = 'scrapydweb/jobs_classic.html'
        else:  # 'database'
            self.template = 'scrapydweb/jobs.html'

        self.listjobs = request.args.get('listjobs', None)

        self.liststats_datas = {}
        self.jobs_dict = {}

        self.jobs = []
        self.jobs_backup = []
        self.pending_jobs = []
        self.running_jobs = []
        self.finished_jobs = []
        self.jobs_pagination = None

        self.Job = None  # database class Job

    def dispatch_request(self, **kwargs):
        status_code, self.text = self.make_request(self.url, auth=self.AUTH, as_json=False)
        if status_code != 200 or not re.search(r'<h1>Jobs</h1>', self.text):
            kwargs = dict(
                node=self.node,
                url=self.url,
                status_code=status_code,
                text=self.text,
                tip="Click the above link to make sure your Scrapyd server is accessible. "
            )
            return render_template(self.template_fail, **kwargs)
        # Temp support for Scrapyd v1.3.0 (not released)
        self.text = re.sub(r'<thead>.*?</thead>', '', self.text, flags=re.S)
        self.jobs = [dict(zip(JOB_KEYS, job)) for job in re.findall(JOB_PATTERN, self.text)]
        self.jobs_backup = list(self.jobs)

        if self.listjobs:
            return self.json_dumps(self.jobs, as_response=True)

        if self.POST:  # To update self.liststats_datas
            self.get_liststats_datas()
        else:
            self.metadata['pageview'] += 1
            self.logger.debug('metadata: %s', self.metadata)
            self.set_flash()
        if self.style == 'database' or self.POST:
            self.handle_jobs_with_db()
        if self.POST:
            try:
                self.set_jobs_dict()
            except:
                raise
            finally:
                get_flashed_messages()
            return self.json_dumps(self.jobs_dict, as_response=True)
        if self.style != 'database':
            self.jobs = self.jobs_backup
            self.handle_jobs_without_db()
        self.set_kwargs()
        return render_template(self.template, **self.kwargs)

    def set_flash(self):
        if self.metadata['pageview'] > 2 and self.metadata['pageview'] % 100:
            return
        if not self.ENABLE_AUTH and self.SCRAPYD_SERVERS_AMOUNT == 1:
            flash("Set 'ENABLE_AUTH = True' to enable basic auth for web UI", self.INFO)
        if self.IS_LOCAL_SCRAPYD_SERVER:
            if not self.LOCAL_SCRAPYD_LOGS_DIR:
                flash(("Set up the LOCAL_SCRAPYD_LOGS_DIR option to speed up the loading of scrapy logfiles "
                       "for the LOCAL_SCRAPYD_SERVER %s" % self.SCRAPYD_SERVER), self.WARN)
            if not self.ENABLE_LOGPARSER:
                flash("Set 'ENABLE_LOGPARSER = True' to run LogParser as a subprocess at startup", self.WARN)
        if not self.ENABLE_MONITOR and self.SCRAPYD_SERVERS_AMOUNT == 1:
            flash("Set 'ENABLE_MONITOR = True' to enable the monitor feature", self.INFO)

# stats.json by LogParser
# {
#     "status_code": 200,
#     "status": "ok",
#     "datas": {
#         "demo": {
#             "test": {
#                 "2019-01-01T0_00_01": {
#                     "pages": 3,
#                     "items": 2,
    def get_liststats_datas(self):
        # NOTE: get_response_from_view() would update g.url_jobs_list, unexpected for mobileui
        # request.url: http://localhost/1/api/liststats/
        # TODO: test https
        url_liststats = url_for('api', node=self.node, opt='liststats')
        js = self.get_response_from_view(url_liststats, as_json=True)
        if js['status'] == self.OK:
            self.liststats_datas = js.pop('datas', {})
            self.logger.debug("Got datas with %s entries from liststats: %s", len(self.liststats_datas), js)
        else:
            self.logger.warning("Failed to get datas from liststats: (%s) %s %s",
                                js['status_code'], js['status'], js.get('tip', ''))

    def create_table(self):
        self.Job = jobs_table_map.get(self.node, None)
        if self.Job is not None:
            self.logger.debug("Got table: %s", self.Job.__tablename__)
        else:
            self.Job = create_jobs_table(re.sub(STRICT_NAME_PATTERN, '_', self.SCRAPYD_SERVER))
            # sqlite3.OperationalError: table "127_0_0_1_6800" already exists
            db.create_all(bind='jobs')
            self.metadata[self.node] = self.Job
            jobs_table_map[self.node] = self.Job
            self.logger.debug("Created table: %s", self.Job.__tablename__)

    def handle_jobs_with_db(self):
        try:
            if request.args.get('raise_exception') == 'True':  # For test only
                assert False, "raise_exception: True"
            self.handle_unique_constraint()
            self.create_table()
            self.db_insert_jobs()
            self.db_clean_pending_jobs()
            self.query_jobs()
        except Exception as err:
            self.logger.error("Failed to persist jobs in database: %s", traceback.format_exc())
            db.session.rollback()
            flash("Failed to persist jobs in database: %s" % err, self.WARN)
            # sqlalchemy.exc.InvalidRequestError: Table '127_0_0_1_6800' is already defined for this MetaData instance.
            # Specify 'extend_existing=True' to redefine options and columns on an existing Table object.
            if "is already defined for this MetaData instance" in str(err):
                flash("Please restart ScrapydWeb to work around this occasional bug!", self.WARN)
            if self.style == 'database' and not self.POST:
                self.style = 'classic'
                self.template = 'scrapydweb/jobs_classic.html'
                self.metadata['style'] = self.style
                handle_metadata('jobs_style', self.style)
                msg = "Change style to %s" % self.style
                self.logger.info(msg)
                # flash(msg, self.WARN)

    # Note that there may be jobs with the same combination of (project, spider, job) in the fetched Jobs
    def handle_unique_constraint(self):
        seen_jobs = OrderedDict()
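        # Two passes: the forward pass walks the (Pending, Running) section and keeps
        # only the newest duplicate per (project, spider, job); the backward pass walks
        # the Finished section most-recent-first and keeps the first record it sees.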
        for job in self.jobs:  # (Pending, Running) ASC
            if job['finish']:
                break
            unique_key = (job['project'], job['spider'], job['job'])
            if unique_key in seen_jobs:  # ignore previous
                start = seen_jobs[unique_key]['start']
                finish = seen_jobs[unique_key]['finish']
                unique_key_string = '/'.join(list(unique_key) + [start, finish, str(self.node)])
                if start:
                    msg = "Ignore seen running job: %s, started at %s" % ('/'.join(unique_key), start)
                else:
                    msg = "Ignore seen pending job: %s" % ('/'.join(unique_key))
                self.logger.debug(msg)
                if unique_key_string not in self.metadata['unique_key_strings']:  # flash only once
                    self.metadata['unique_key_strings'][unique_key_string] = None
                    flash(msg, self.WARN if start else self.INFO)
                seen_jobs.pop(unique_key)
            seen_jobs[unique_key] = job
        for job in reversed(self.jobs):  # Finished DESC
            if not job['finish']:
                break
            unique_key = (job['project'], job['spider'], job['job'])
            if unique_key in seen_jobs:  # ignore current
                unique_key_string = '/'.join(list(unique_key) + [job['start'], job['finish'], str(self.node)])
                msg = "Ignore seen finished job: %s, started at %s" % ('/'.join(unique_key), job['start'])
                self.logger.debug(msg)
                if unique_key_string not in self.metadata['unique_key_strings']:
                    self.metadata['unique_key_strings'][unique_key_string] = None
                    flash(msg, self.INFO)
            else:
                seen_jobs[unique_key] = job
        self.jobs = list(seen_jobs.values())

    def db_insert_jobs(self):
        records = []
        for job in self.jobs:  # set(self.jobs): unhashable type: 'dict'
            record = self.Job.query.filter_by(project=job['project'], spider=job['spider'], job=job['job']).first()
            if record:
                self.logger.debug("Found job in database: %s", record)
                if record.deleted == DELETED:
                    if record.status == STATUS_FINISHED and str(record.start) == job['start']:
                        self.logger.info("Ignore deleted job: %s", record)
                        continue
                    else:
                        record.deleted = NOT_DELETED
                        record.pages = None
                        record.items = None
                        self.logger.info("Recover deleted job: %s", record)
                        flash("Recover deleted job: %s" % job, self.WARN)
            else:
                record = self.Job()
            records.append(record)
            for k, v in job.items():
                v = v or None  # Save NULL in database for empty string
                if k in ['start', 'finish']:
                    v = datetime.strptime(v, '%Y-%m-%d %H:%M:%S') if v else None  # Avoid empty string
                elif k in ['href_log', 'href_items']:  # <a href='/logs/demo/test/xxx.log'>Log</a>
                    m = re.search(HREF_PATTERN, v) if v else None
                    v = m.group(1) if m else v
                setattr(record, k, v)
            if not job['start']:
                record.status = STATUS_PENDING
            elif not job['finish']:
                record.status = STATUS_RUNNING
            else:
                record.status = STATUS_FINISHED
            if not job['start']:
                record.pages = None
                record.items = None
            elif self.liststats_datas:
                try:
                    data = self.liststats_datas[job['project']][job['spider']][job['job']]
                    record.pages = data['pages']  # Logparser: None or non-negative int
                    record.items = data['items']  # Logparser: None or non-negative int
                except KeyError:
                    pass
                except Exception as err:
                    self.logger.error(err)
            # SQLite DateTime type only accepts Python datetime and date objects as input
            record.update_time = datetime.now()  # datetime.now().replace(microsecond=0)
        # https://www.reddit.com/r/flask/comments/3tea4k/af_flasksqlalchemy_bulk_updateinsert/
        db.session.add_all(records)
        db.session.commit()

    def db_clean_pending_jobs(self):
        current_pending_jobs = [(job['project'], job['spider'], job['job'])
                                for job in self.jobs_backup if not job['start']]
        for record in self.Job.query.filter_by(start=None).all():
            if (record.project, record.spider, record.job) not in current_pending_jobs:
                db.session.delete(record)
                db.session.commit()
                self.logger.info("Deleted pending job %s", record)

    def query_jobs(self):
        current_running_job_pids = [int(job['pid']) for job in self.jobs_backup if job['pid']]
        self.logger.debug("current_running_job_pids: %s", current_running_job_pids)
        self.jobs_pagination = self.Job.query.filter_by(deleted=NOT_DELETED).order_by(
            self.Job.status.asc(), self.Job.finish.desc(), self.Job.start.asc(), self.Job.id.asc()).paginate(
            page=self.page, per_page=self.per_page, error_out=False)
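        # Ordering: status ASC lists Pending ('0'), then Running ('1'), then Finished ('2');
        # within the Finished group, finish DESC puts the most recently finished jobs first.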
        with db.session.no_autoflush:
            for index, job in enumerate(self.jobs_pagination.items,
                                        (self.jobs_pagination.page - 1) * self.jobs_pagination.per_page + 1):
                # print(vars(job))
                job.index = index
                job.pid = job.pid or ''
                job.start = job.start or ''  # None for Pending jobs
                job.runtime = job.runtime or ''
                job.finish = job.finish or ''  # None for Pending and Running jobs
                job.update_time = self.remove_microsecond(job.update_time)
                job.to_be_killed = True if job.pid and job.pid not in current_running_job_pids else False
                if job.finish:
                    job.url_multinode = url_for('servers', node=self.node, opt='schedule', project=job.project,
                                                version_job=self.DEFAULT_LATEST_VERSION, spider=job.spider)
                    job.url_action = url_for('schedule', node=self.node, project=job.project,
                                             version=self.DEFAULT_LATEST_VERSION, spider=job.spider)
                else:
                    job.url_multinode = url_for('servers', node=self.node, opt='stop', project=job.project,
                                                version_job=job.job)
                    job.url_action = url_for('api', node=self.node, opt='stop', project=job.project,
                                             version_spider_job=job.job)
                if job.start:
                    job.pages = self.NA if job.pages is None else job.pages  # May be 0
                    job.items = self.NA if job.items is None else job.items  # May be 0
                else:  # Pending
                    job.pages = None  # from Running/Finished to Pending
                    job.items = None
                    continue
                job_finished = 'True' if job.finish else None
                job.url_utf8 = url_for('log', node=self.node, opt='utf8', project=job.project, ui=self.UI,
                                       spider=job.spider, job=job.job, job_finished=job_finished)
                job.url_stats = url_for('log', node=self.node, opt='stats', project=job.project, ui=self.UI,
                                        spider=job.spider, job=job.job, job_finished=job_finished)
                job.url_clusterreports = url_for('clusterreports', node=self.node, project=job.project,
                                                 spider=job.spider, job=job.job)
                # '/items/demo/test/2018-10-12_205507.log'
                job.url_source = urljoin(self.public_url or self.url, job.href_log)
                if job.href_items:
                    job.url_items = urljoin(self.public_url or self.url, job.href_items)
                else:
                    job.url_items = ''
                job.url_delete = url_for('jobs.xhr', node=self.node, action='delete', id=job.id)

    def set_jobs_dict(self):
        for job in self.jobs_pagination.items:  # Pagination obj in handle_jobs_with_db() > query_jobs()
            key = '%s/%s/%s' % (job.project, job.spider, job.job)
            value = dict((k, v) for (k, v) in job.__dict__.items() if not k.startswith('_'))
            for k, v in value.items():
                if k in ['create_time', 'update_time', 'start', 'finish']:
                    value[k] = str(value[k])
            self.jobs_dict[key] = value

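    # For reference, a hypothetical (invented, abridged) example of the payload that
    # the POST branch of dispatch_request() returns after set_jobs_dict() runs:
    # keys are 'project/spider/job' strings; datetime columns are stringified above.
    # {
    #     "demo/test/2018-10-12_205507": {
    #         "id": 1, "status": "2", "deleted": "0", "pages": 3, "items": 2,
    #         "start": "2018-10-12 20:55:07", "finish": "2018-10-12 20:56:09",
    #         "update_time": "2019-01-01 00:00:01"
    #     }
    # }
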
    def handle_jobs_without_db(self):
        for job in self.jobs:
            job['start'] = job['start'][5:]
            job['finish'] = job['finish'][5:]
            if not job['start']:
                self.pending_jobs.append(job)
            else:
                if job['finish']:
                    self.finished_jobs.append(job)
                    job['url_multinode_run'] = url_for('servers', node=self.node, opt='schedule',
                                                       project=job['project'], version_job=self.DEFAULT_LATEST_VERSION,
                                                       spider=job['spider'])
                    job['url_schedule'] = url_for('schedule', node=self.node, project=job['project'],
                                                  version=self.DEFAULT_LATEST_VERSION, spider=job['spider'])
                    job['url_start'] = url_for('api', node=self.node, opt='start', project=job['project'],
                                               version_spider_job=job['spider'])
                else:
                    self.running_jobs.append(job)
                    job['url_forcestop'] = url_for('api', node=self.node, opt='forcestop', project=job['project'],
                                                   version_spider_job=job['job'])

                job_finished = 'True' if job['finish'] else None
                job['url_utf8'] = url_for('log', node=self.node, opt='utf8', project=job['project'], ui=self.UI,
                                          spider=job['spider'], job=job['job'], job_finished=job_finished)
                job['url_stats'] = url_for('log', node=self.node, opt='stats', project=job['project'], ui=self.UI,
                                           spider=job['spider'], job=job['job'], job_finished=job_finished)
                job['url_clusterreports'] = url_for('clusterreports', node=self.node, project=job['project'],
                                                    spider=job['spider'], job=job['job'])
                # <a href='/items/demo/test/2018-10-12_205507.jl'>Items</a>
                m = re.search(HREF_PATTERN, job['href_items'])
                if m:
                    job['url_items'] = urljoin(self.public_url or self.url, m.group(1))
                else:
                    job['url_items'] = ''

            if not job['finish']:
                job['url_multinode_stop'] = url_for('servers', node=self.node, opt='stop', project=job['project'],
                                                    version_job=job['job'])
                job['url_stop'] = url_for('api', node=self.node, opt='stop', project=job['project'],
                                          version_spider_job=job['job'])

    def set_kwargs(self):
        self.kwargs = dict(
            node=self.node,
            url=self.url,
            url_schedule=url_for('schedule', node=self.node),
            url_liststats=url_for('api', node=self.node, opt='liststats'),
            url_liststats_source='http://%s/logs/stats.json' % self.SCRAPYD_SERVER,
            SCRAPYD_SERVER=self.SCRAPYD_SERVER.split(':')[0],
            LOGPARSER_VERSION=self.LOGPARSER_VERSION,
            JOBS_RELOAD_INTERVAL=self.JOBS_RELOAD_INTERVAL,
            IS_IE_EDGE=self.IS_IE_EDGE,
            pageview=self.metadata['pageview'],
            FEATURES=self.FEATURES
        )
        if self.style == 'database':
            self.kwargs.update(dict(
                url_jobs_classic=url_for('jobs', node=self.node, style='classic'),
                jobs=self.jobs_pagination
            ))
            return

        self.finished_jobs.sort(key=lambda x: (x['finish'], x['start']), reverse=True)  # Finished DESC
        if self.JOBS_FINISHED_JOBS_LIMIT > 0:
            self.finished_jobs = self.finished_jobs[:self.JOBS_FINISHED_JOBS_LIMIT]
        self.kwargs.update(dict(
            colspan=14,
            url_jobs_database=url_for('jobs', node=self.node, style='database'),
            pending_jobs=self.pending_jobs,
            running_jobs=self.running_jobs,
            finished_jobs=self.finished_jobs,
            SHOW_JOBS_JOB_COLUMN=self.SHOW_JOBS_JOB_COLUMN
        ))

class JobsXhrView(BaseView):
    metadata = metadata

    def __init__(self):
        super(JobsXhrView, self).__init__()

        self.action = self.view_args['action']  # delete
        self.id = self.view_args['id']  # <int:id>

        self.js = {}
        self.Job = jobs_table_map[self.node]  # database class Job

    def dispatch_request(self, **kwargs):
        job = self.Job.query.get(self.id)
        if job:
            try:
                job.deleted = DELETED
                db.session.commit()
            except Exception as err:
                self.logger.error(traceback.format_exc())
                db.session.rollback()
                self.js['status'] = self.ERROR
                self.js['message'] = str(err)
            else:
                self.js['status'] = self.OK
                self.logger.info(self.js.setdefault('tip', "Deleted %s" % job))
        else:
            self.js['status'] = self.ERROR
            self.js['message'] = "job #%s not found in the database" % self.id

        return self.json_dumps(self.js, as_response=True)
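
The URL routing for these two views is defined elsewhere in scrapydweb. As a rough sketch only, class-based Flask views like these are typically registered along the following lines; the rule strings below are assumptions for illustration, not the project's actual routes, and only the endpoint names 'jobs' and 'jobs.xhr' are taken from the url_for() calls above:

# Hypothetical wiring sketch; scrapydweb's real routes live in its app setup.
app.add_url_rule('/<int:node>/jobs/',
                 view_func=JobsView.as_view('jobs'))
app.add_url_rule('/<int:node>/jobs/xhr/<action>/<int:id>/',
                 view_func=JobsXhrView.as_view('jobs.xhr'))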