
liqd / roots, build 22072536647

16 Feb 2026 05:41PM UTC coverage: 42.093%. First build.

Pull Request #59 (github): apps/summerization: Integrate Document Summary into Workflow

51 of 314 new or added lines in 7 files covered. (16.24%)

3564 of 8467 relevant lines covered (42.09%)

0.42 hits per line

Source File

/apps/projects/export_utils.py
import re

# import json
from adhocracy4.comments.models import Comment
from adhocracy4.polls.models import Poll
from apps.debate.models import Subject
from apps.documents.models import Chapter
from apps.documents.models import Paragraph
from apps.ideas.models import Idea
from apps.offlineevents.models import OfflineEvent
from apps.topicprio.models import Topic


def get_module_status(module):
    """
    Return the status of a module based on its phases.

    Returns:
        str: 'past', 'active', or 'future'
    """

    # Use the existing queryset methods
    try:
        if module.module_has_finished:
            return "past"
        elif module.active_phase:
            return "active"
        else:
            return "future"
    except (TypeError, ValueError):
        # Fallback if module_has_finished or active_phase fail due to None datetime values
        return "future"


def extract_attachments(text):
    """Extract upload links from HTML text"""
    if not text:
        return []

    # Find all links containing /uploads/ (both href and src attributes)
    pattern_href = r'href="([^"]*?/uploads/[^"]*?)"'
    pattern_src = r'src="([^"]*?/uploads/[^"]*?)"'

    attachments_href = re.findall(pattern_href, text)
    attachments_src = re.findall(pattern_src, text)

    # Combine and deduplicate
    attachments = list(dict.fromkeys(attachments_href + attachments_src))

    return attachments


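# Illustrative sketch, not part of the original export_utils.py: how the two
# /uploads/ patterns above behave on a small HTML fragment. The sample markup
# and the _example_* helper name are hypothetical.
def _example_extract_attachments():
    sample_html = (
        '<p><a href="/media/uploads/plan.pdf">Plan</a>'
        '<img src="/media/uploads/map.png"></p>'
    )
    # Expected: ['/media/uploads/plan.pdf', '/media/uploads/map.png']
    # (href matches come first, then src; duplicates would be dropped).
    return extract_attachments(sample_html)

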
def extract_comments(queryset, include_ratings=True, include_children=True):
    """
    Extract comments from any model with a 'comments' GenericRelation.
    Recursively includes child comments.

    Args:
        queryset: Comment queryset (e.g., obj.comments.all())
        include_ratings: Whether to include ratings on comments
        include_children: Whether to recursively include child comments

    Returns:
        List of comment dictionaries with nested 'replies' key
    """
    comments_list = []

    for comment in queryset:
        comment_data = {
            "id": comment.id,
            "text": comment.comment,
            "created": comment.created.isoformat(),
            "is_removed": comment.is_removed,
            "is_censored": comment.is_censored,
            "is_blocked": comment.is_blocked,
        }

        # Optional fields
        if hasattr(comment, "comment_categories") and comment.comment_categories:
            comment_data["comment_categories"] = comment.comment_categories
        if hasattr(comment, "is_moderator_marked"):
            comment_data["is_moderator_marked"] = comment.is_moderator_marked
        if hasattr(comment, "is_reviewed"):
            comment_data["is_reviewed"] = comment.is_reviewed

        if include_ratings and hasattr(comment, "ratings"):
            comment_data["ratings"] = [
                {
                    "id": rating.id,
                    "value": rating.value,
                }
                for rating in comment.ratings.all()
            ]

        # Recursively include child comments
        if include_children and hasattr(comment, "child_comments"):
            child_comments = comment.child_comments.all()
            if child_comments.exists():
                comment_data["replies"] = extract_comments(
                    child_comments,
                    include_ratings=include_ratings,
                    include_children=True,
                )
                comment_data["reply_count"] = child_comments.count()

        comments_list.append(comment_data)

    return comments_list


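# Illustrative sketch, not part of the original export_utils.py: the nested
# shape that extract_comments() produces for one top-level comment with a
# single reply, assuming the optional rating attributes exist. All values
# below are invented.
_EXAMPLE_COMMENT_SHAPE = {
    "id": 1,
    "text": "A top-level comment",
    "created": "2026-02-16T17:41:00+00:00",
    "is_removed": False,
    "is_censored": False,
    "is_blocked": False,
    "ratings": [{"id": 10, "value": 1}],
    "replies": [
        {
            "id": 2,
            "text": "A reply",
            "created": "2026-02-16T18:00:00+00:00",
            "is_removed": False,
            "is_censored": False,
            "is_blocked": False,
        }
    ],
    "reply_count": 1,
}

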
def extract_ratings(queryset):
    """
    Extract ratings from any model with a 'ratings' GenericRelation.

    Args:
        queryset: Rating queryset (e.g., obj.ratings.all())

    Returns:
        List of rating dictionaries
    """
    ratings_list = []
    for rating in queryset:
        ratings_list.append(
            {
                "id": rating.id,
                "value": rating.value,
                "created": rating.created.isoformat(),
            }
        )
    return ratings_list


def restructure_by_module_status(export):
    """
    Restructure export data by module status (past/active/future).
    """
    grouped = {
        "project": export["project"],
        "stats": export["stats"],
        "past": [],
        "active": [],
        "future": [],
    }

    # Filter each item type by its active_status
    for item in export.get("ideas", []):
        grouped[item["active_status"]].append(item)

    for item in export.get("polls", []):
        grouped[item["active_status"]].append(item)

    for item in export.get("topics", []):
        grouped[item["active_status"]].append(item)

    for item in export.get("debates", []):
        grouped[item["active_status"]].append(item)

    for chapter in export.get("documents", []):
        grouped[chapter["active_status"]].append(chapter)
        for paragraph in chapter.get("paragraphs", []):
            paragraph["module_start"] = chapter.get("module_start")
            paragraph["module_end"] = chapter.get("module_end")
            paragraph["active_status"] = chapter.get("active_status")
            grouped[paragraph["active_status"]].append(paragraph)

    for item in export.get("offline_events", []):
        from dateutil.parser import parse
        from django.utils import timezone

        now = timezone.now()
        event_date = parse(item["date"])
        if event_date < now:
            grouped["past"].append(item)
        elif event_date > now:
            grouped["future"].append(item)
        else:
            grouped["active"].append(item)

    return grouped


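# Illustrative sketch, not part of the original export_utils.py: a minimal
# input showing how restructure_by_module_status() regroups items by their
# "active_status" and how paragraphs inherit their chapter's status. All
# values are invented.
def _example_restructure():
    export = {
        "project": {"name": "Demo project"},
        "stats": {"total_ideas": 1},
        "ideas": [{"id": 1, "active_status": "active"}],
        "documents": [
            {
                "id": 5,
                "active_status": "past",
                "module_start": "2025-01-01 00:00:00+00:00",
                "module_end": "2025-02-01 00:00:00+00:00",
                "paragraphs": [{"id": 7, "comment_count": 0}],
            }
        ],
    }
    # Returns the project/stats keys plus "past" (the chapter, then its
    # paragraph, which gains module_start/module_end/active_status from the
    # chapter), "active" (the idea) and "future" (empty here).
    return restructure_by_module_status(export)

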
def generate_full_export(project):
    """Generate complete project export data"""
    description_attachments = extract_attachments(project.description)
    information_attachments = (
        extract_attachments(project.information)
        if hasattr(project, "information")
        else []
    )
    result_attachments = extract_attachments(project.result)

    export = {
        "project": {
            "name": project.name,
            "description": project.description,
            "description_attachments": description_attachments,
            "information": (
                project.information if hasattr(project, "information") else None
            ),
            "information_attachments": information_attachments,
            "slug": project.slug,
            "organisation": project.organisation.name,
            "result": project.result,
            "result_attachments": result_attachments,
            "url": project.get_absolute_url(),
        },
        "ideas": export_ideas_full(project),
        "polls": export_polls_full(project),
        "topics": export_topics_full(project),
        "debates": export_debates_full(project),
        "documents": export_documents_full(project),
        "offline_events": export_offline_events_full(project),
        "stats": calculate_stats(project),
    }
    structured = restructure_by_module_status(export)
    # print(json.dumps(structured))
    return structured


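# Hypothetical usage sketch, not part of the original export_utils.py: one way
# a Django view could serve the export. The view name and the JSON response
# are assumptions; this module itself only builds the dictionary.
def _example_export_view(request, project):
    from django.http import JsonResponse

    # generate_full_export() already regroups everything into the
    # past/active/future buckets via restructure_by_module_status().
    export = generate_full_export(project)
    return JsonResponse(export)

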
def export_ideas_full(project):
    """Export all ideas with full data"""
    ideas_data = []
    ideas = (
        Idea.objects.filter(module__project=project)
        .select_related("category")
        .prefetch_related("labels")
    )

    for idea in ideas:
        # Get comments for this idea
        comments_list = extract_comments(idea.comments.all())

        # Get ratings for this idea
        ratings_list = extract_ratings(idea.ratings.all())

        ideas_data.append(
            {
                "id": idea.id,
                "active_status": get_module_status(idea.module),
                "module_start": str(idea.module.module_start),
                "module_end": str(idea.module.module_end),
                "url": idea.get_absolute_url(),
                "name": idea.name,
                "description": str(idea.description),
                "attachments": extract_attachments(str(idea.description)),
                "created": idea.created.isoformat(),
                "reference_number": idea.reference_number,
                "category": idea.category.name if idea.category else None,
                "labels": [label.name for label in idea.labels.all()],
                "comment_count": idea.comments.count(),
                "comments": comments_list,
                "rating_count": idea.ratings.count(),
                "ratings": ratings_list,
                "module_id": idea.module.id,
                "module_name": idea.module.name,
                "images": [i.name for i in idea._a4images_current_images],
            }
        )

    return ideas_data


def export_polls_full(project):
    """Export all polls with full data"""

    polls_data = []
    polls = Poll.objects.filter(module__project=project).prefetch_related(
        "questions__choices__votes__other_vote",
    )

    for poll in polls:
        questions_list = []
        for question in poll.questions.all().order_by("weight"):
            choices_list = []
            for choice in question.choices.all().order_by("weight"):
                votes_list = []
                for vote in choice.votes.all():
                    vote_data = {
                        "created": vote.created.isoformat(),
                    }
                    if hasattr(vote, "other_vote"):
                        vote_data["other_answer"] = vote.other_vote.answer
                    votes_list.append(vote_data)

                choices_list.append(
                    {
                        "label": choice.label,
                        "is_other_choice": choice.is_other_choice,
                        "vote_count": choice.votes.count(),
                        "votes": votes_list,
                    }
                )

            answers_list = []
            for answer in question.answers.all():
                answers_list.append(
                    {
                        "answer": answer.answer,
                        "created": answer.created.isoformat(),
                    }
                )

            questions_list.append(
                {
                    "label": question.label,
                    "multiple_choice": question.multiple_choice,
                    "is_open": question.is_open,
                    "choices": choices_list,
                    "answers": answers_list,
                    "vote_count": sum(c["vote_count"] for c in choices_list),
                }
            )

        # Get comments for this poll
        comments_list = extract_comments(poll.comments.all())

        polls_data.append(
            {
                "id": poll.id,
                "active_status": get_module_status(poll.module),
                "module_start": str(poll.module.module_start),
                "module_end": str(poll.module.module_end),
                "description": poll.module.description,
                "url": poll.get_absolute_url(),
                "module_name": poll.module.name,
                "questions": questions_list,
                "comments": comments_list,
                "comment_count": poll.comments.count(),
                "total_votes": sum(q["vote_count"] for q in questions_list),
            }
        )

    return polls_data


def export_topics_full(project):
    """Export all topics with full data"""

    topics_data = []
    topics = (
        Topic.objects.filter(module__project=project)
        .select_related("category")
        .prefetch_related("labels")
    )

    for topic in topics:
        # Get comments for this topic
        comments_list = extract_comments(topic.comments.all())

        # Get ratings for this topic
        ratings_list = extract_ratings(topic.ratings.all())

        topics_data.append(
            {
                "id": topic.id,
                "active_status": get_module_status(topic.module),
                "module_start": str(topic.module.module_start),
                "module_end": str(topic.module.module_end),
                "url": topic.get_absolute_url(),
                "name": topic.name,
                "description": str(topic.description),
                "created": topic.created.isoformat(),
                "reference_number": topic.reference_number,
                "category": topic.category.name if topic.category else None,
                "labels": [label.name for label in topic.labels.all()],
                "comment_count": topic.comments.count(),
                "comments": comments_list,
                "rating_count": topic.ratings.count(),
                "ratings": ratings_list,
                "module_id": topic.module.id,
                "module_name": topic.module.name,
            }
        )

    return topics_data


def export_documents_full(project):
    """Export all document chapters and paragraphs with comments"""

    documents_data = []
    chapters = Chapter.objects.filter(module__project=project)

    for chapter in chapters:
        # Get chapter comments
        chapter_comments = extract_comments(chapter.comments.all())

        # Get paragraphs for this chapter
        paragraphs_list = []
        for paragraph in chapter.paragraphs.all().order_by("weight"):
            # Get paragraph comments
            paragraph_comments = extract_comments(paragraph.comments.all())

            paragraphs_list.append(
                {
                    "id": paragraph.id,
                    "name": paragraph.name,
                    "text": str(paragraph.text),
                    "attachments": extract_attachments(str(paragraph.text)),
                    "weight": paragraph.weight,
                    "created": paragraph.created.isoformat(),
                    "comment_count": paragraph.comments.count(),
                    "comments": paragraph_comments,
                }
            )

        documents_data.append(
            {
                "id": chapter.id,
                "name": chapter.name,
                "url": chapter.get_absolute_url(),
                "weight": chapter.weight,
                "created": chapter.created.isoformat(),
                "active_status": get_module_status(chapter.module),
                "module_start": str(chapter.module.module_start),
                "module_end": str(chapter.module.module_end),
                "module_id": chapter.module.id,
                "module_name": chapter.module.name,
                "prev_chapter_id": chapter.prev.id if chapter.prev else None,
                "next_chapter_id": chapter.next.id if chapter.next else None,
                "paragraph_count": chapter.paragraphs.count(),
                "paragraphs": paragraphs_list,
                "chapter_comment_count": chapter.comments.count(),
                "chapter_comments": chapter_comments,
                "total_paragraph_comments": sum(
                    p["comment_count"] for p in paragraphs_list
                ),
            }
        )

    return documents_data


def collect_document_attachments(export_data, request):
    """
    Collect all document attachments from project fields (information, result).

    Args:
        export_data: The full export dictionary (as returned by generate_full_export())
        request: Django Request object for build_absolute_uri()

    Returns:
        tuple: (documents_dict, handle_to_source)
            - documents_dict: {handle: absolute_url, ...}
            - handle_to_source: {handle: "project_information" | "project_result", ...}
    """
    documents_dict = {}
    handle_to_source = {}

    project_data = export_data.get("project", {})

    # Collect attachments from information field
    information_attachments = project_data.get("information_attachments", [])
    for attachment_index, attachment_url in enumerate(information_attachments):
        handle = f"project_information_attachment_{attachment_index}"
        absolute_url = request.build_absolute_uri(attachment_url)
        documents_dict[handle] = absolute_url
        handle_to_source[handle] = "project_information"

    # Collect attachments from result field
    result_attachments = project_data.get("result_attachments", [])
    for attachment_index, attachment_url in enumerate(result_attachments):
        handle = f"project_result_attachment_{attachment_index}"
        absolute_url = request.build_absolute_uri(attachment_url)
        documents_dict[handle] = absolute_url
        handle_to_source[handle] = "project_result"

    return documents_dict, handle_to_source


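# Illustrative sketch, not part of the original export_utils.py: the handle
# naming scheme produced above for a project with one attachment in each
# field. The URLs are invented; request.build_absolute_uri() supplies the
# scheme and host.
_EXAMPLE_ATTACHMENT_HANDLES = (
    # documents_dict
    {
        "project_information_attachment_0": "https://example.org/media/uploads/info.pdf",
        "project_result_attachment_0": "https://example.org/media/uploads/result.pdf",
    },
    # handle_to_source
    {
        "project_information_attachment_0": "project_information",
        "project_result_attachment_0": "project_result",
    },
)

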
def integrate_document_summaries(
    export_data: dict,
    document_summaries: list,
    handle_to_source: dict[str, str],
):
    """
    Integrate document summaries into export_data by project field source.

    Args:
        export_data: Export dictionary (modified in-place)
        document_summaries: List of DocumentSummaryItem objects
        handle_to_source: Mapping from handle to source field ("project_information", "project_result")
    """
    # Initialize document_summaries structure
    project_summaries = {
        "information": [],
        "result": [],
    }

    # Group summaries by source field
    for summary_item in document_summaries:
        handle = summary_item.handle
        source = handle_to_source.get(handle)

        if source == "project_information":
            project_summaries["information"].append(
                {
                    "handle": summary_item.handle,
                    "summary": summary_item.summary,
                }
            )
        elif source == "project_result":
            project_summaries["result"].append(
                {
                    "handle": summary_item.handle,
                    "summary": summary_item.summary,
                }
            )

    # Integrate summaries into export_data
    if "project" not in export_data:
        export_data["project"] = {}
    export_data["project"]["document_summaries"] = project_summaries


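# Illustrative sketch, not part of the original export_utils.py: after
# integrate_document_summaries() runs, export_data["project"] carries a
# "document_summaries" entry grouped by source field. Handles and summary
# text below are invented.
_EXAMPLE_DOCUMENT_SUMMARIES = {
    "information": [
        {
            "handle": "project_information_attachment_0",
            "summary": "Short summary of the information attachment.",
        }
    ],
    "result": [
        {
            "handle": "project_result_attachment_0",
            "summary": "Short summary of the result attachment.",
        }
    ],
}

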
def export_debates_full(project):
    """Export all debate subjects with comments"""

    debates_data = []
    subjects = Subject.objects.filter(module__project=project)

    for subject in subjects:
        # Get comments for this subject
        comments_list = extract_comments(subject.comments.all())

        debates_data.append(
            {
                "id": subject.id,
                "name": subject.name,
                "description": subject.description,
                "created": subject.created.isoformat(),
                "reference_number": subject.reference_number,
                "slug": subject.slug,
                "active_status": get_module_status(subject.module),
                "module_start": str(subject.module.module_start),
                "module_end": str(subject.module.module_end),
                "module_id": subject.module.id,
                "module_name": subject.module.name,
                "comment_count": subject.comments.count(),
                "comments": comments_list,
                "comment_creator_count": subject.comment_creator_count,
            }
        )

    return debates_data


def export_offline_events_full(project):
    """Export all offline events for a project"""

    events_data = []
    events = OfflineEvent.objects.filter(project=project)

    for event in events:
        events_data.append(
            {
                "id": event.id,
                "name": event.name,
                "event_type": event.event_type,
                "date": event.date.isoformat(),
                "description": str(event.description),
                "attachments": extract_attachments(str(event.description)),
                "slug": event.slug,
                "url": event.get_absolute_url(),
                "timeline_index": event.get_timeline_index,
                "created": event.created.isoformat(),
                "modified": event.modified.isoformat() if event.modified else None,
            }
        )

    return events_data


def calculate_stats(project):
    """Calculate statistics for the export"""
    # Get counts
    ideas_count = Idea.objects.filter(module__project=project).count()
    polls_count = Poll.objects.filter(module__project=project).count()
    topics_count = Topic.objects.filter(module__project=project).count()
    debates = Subject.objects.filter(module__project=project)
    chapters_count = Chapter.objects.filter(module__project=project).count()

    # Get paragraph count
    paragraphs_count = sum(
        chapter.paragraphs.count()
        for chapter in Chapter.objects.filter(module__project=project)
    )

    # Count comments on chapters
    chapter_comments_count = Comment.objects.filter(
        content_type__model="chapter",
        object_pk__in=Chapter.objects.filter(module__project=project).values_list(
            "id", flat=True
        ),
    ).count()

    # Count comments on paragraphs
    paragraph_comments_count = Comment.objects.filter(
        content_type__model="paragraph",
        object_pk__in=Paragraph.objects.filter(
            chapter__module__project=project
        ).values_list("id", flat=True),
    ).count()

    # Get comment counts
    ideas_comments = (
        sum(
            Idea.objects.get(pk=idea.id).comments.count()
            for idea in Idea.objects.filter(module__project=project)
        )
        if ideas_count > 0
        else 0
    )

    polls_comments = (
        sum(
            Poll.objects.get(pk=poll.id).comments.count()
            for poll in Poll.objects.filter(module__project=project)
        )
        if polls_count > 0
        else 0
    )

    topics_comments = (
        sum(
            Topic.objects.get(pk=topic.id).comments.count()
            for topic in Topic.objects.filter(module__project=project)
        )
        if topics_count > 0
        else 0
    )

    total_debate_comments = sum(debate.comments.count() for debate in debates)
    total_document_comments = chapter_comments_count + paragraph_comments_count
    total_comments = (
        ideas_comments
        + polls_comments
        + topics_comments
        + total_document_comments
        + total_debate_comments
    )

    return {
        "total_ideas": ideas_count,
        "total_polls": polls_count,
        "total_topics": topics_count,
        "total_debates": debates.count(),
        "total_comments": total_comments,
        "total_chapters": chapters_count,
        "total_paragraphs": paragraphs_count,
        "total_participants": project.participants.count(),
    }