• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

popstas / talks-reducer / 18643271357

20 Oct 2025 05:38AM UTC coverage: 69.746% (-1.4%) from 71.158%
18643271357

Pull #119

github

web-flow
Merge 123afec07 into 661879c59
Pull Request #119: Add Windows taskbar progress integration

83 of 196 new or added lines in 8 files covered. (42.35%)

1279 existing lines in 23 files now uncovered.

5542 of 7946 relevant lines covered (69.75%)

0.7 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

81.34
/talks_reducer/server.py
1
"""Gradio-powered simple server for running Talks Reducer in a browser."""
2

3
from __future__ import annotations
1✔
4

5
import argparse
1✔
6
import atexit
1✔
7
import shutil
1✔
8
import socket
1✔
9
import sys
1✔
10
import tempfile
1✔
11
from contextlib import AbstractContextManager, suppress
1✔
12
from dataclasses import dataclass
1✔
13
from pathlib import Path
1✔
14
from queue import SimpleQueue
1✔
15
from threading import Thread
1✔
16
from typing import Callable, Iterator, Optional, Sequence, cast
1✔
17

18
import gradio as gr
1✔
19

20
from talks_reducer.ffmpeg import FFmpegNotFoundError, is_global_ffmpeg_available
1✔
21
from talks_reducer.icons import find_icon_path
1✔
22
from talks_reducer.models import ProcessingOptions, ProcessingResult
1✔
23
from talks_reducer.pipeline import speed_up_video
1✔
24
from talks_reducer.progress import ProgressHandle, SignalProgressReporter
1✔
25
from talks_reducer.version_utils import resolve_version
1✔
26

27

28
class _GradioProgressHandle(AbstractContextManager[ProgressHandle]):
    """Translate pipeline progress updates into Gradio progress callbacks."""

    def __init__(
        self,
        reporter: "GradioProgressReporter",
        *,
        desc: str,
        total: Optional[int],
        unit: str,
    ) -> None:
        self._reporter = reporter
        cleaned = desc.strip()
        self._desc = cleaned if cleaned else "Processing"
        self._unit = unit
        self._total = total
        self._current = 0
        # Announce the task right away so the UI shows an empty bar immediately.
        self._reporter._start_task(self._desc, self._total)

    @property
    def current(self) -> int:
        """Return the number of processed units reported so far."""

        return self._current

    def ensure_total(self, total: int) -> None:
        """Update the total units when FFmpeg discovers a larger frame count."""

        if total <= 0:
            return
        if self._total is not None and total <= self._total:
            # Never shrink a known total; only grow it.
            return
        self._total = total
        self._reporter._update_progress(self._current, self._total, self._desc)

    def advance(self, amount: int) -> None:
        """Advance the current progress and notify the UI."""

        if amount > 0:
            self._current += amount
            self._reporter._update_progress(self._current, self._total, self._desc)

    def finish(self) -> None:
        """Fill the progress bar when FFmpeg completes."""

        if self._total is None:
            # Without a known total, treat the final frame count as the total so
            # the progress bar reaches 100%.
            inferred = self._current or 1
            self._reporter._update_progress(self._current, inferred, self._desc)
            return
        self._current = self._total
        self._reporter._update_progress(self._current, self._total, self._desc)

    def __enter__(self) -> "_GradioProgressHandle":
        return self

    def __exit__(self, exc_type, exc, tb) -> bool:
        # Only mark the task complete on a clean exit; never swallow exceptions.
        if exc_type is None:
            self.finish()
        return False
87

88

89
class GradioProgressReporter(SignalProgressReporter):
    """Progress reporter that forwards updates to Gradio's progress widget."""

    def __init__(
        self,
        progress_callback: Optional[Callable[[int, int, str], None]] = None,
        *,
        log_callback: Optional[Callable[[str], None]] = None,
        max_log_lines: int = 500,
    ) -> None:
        super().__init__()
        self._progress_callback = progress_callback
        self._log_callback = log_callback
        self._max_log_lines = max_log_lines
        self._active_desc = "Processing"
        # Rolling log history rendered in the web UI textbox.
        self.logs: list[str] = []

    def log(self, message: str) -> None:
        """Collect log messages for display in the web interface."""

        text = message.strip()
        if not text:
            return
        self.logs.append(text)
        # Keep only the newest entries so the history stays bounded.
        if len(self.logs) > self._max_log_lines:
            self.logs = self.logs[-self._max_log_lines :]
        if self._log_callback is not None:
            self._log_callback(text)

    def task(
        self,
        *,
        desc: str = "",
        total: Optional[int] = None,
        unit: str = "",
    ) -> AbstractContextManager[ProgressHandle]:
        """Create a context manager bridging pipeline progress to Gradio."""

        return _GradioProgressHandle(self, desc=desc, total=total, unit=unit)

    # Internal helpers -------------------------------------------------

    def _start_task(self, desc: str, total: Optional[int]) -> None:
        """Remember the task description and reset the bar to zero."""

        self._active_desc = desc if desc else "Processing"
        self._update_progress(0, total, self._active_desc)

    def _update_progress(
        self, current: int, total: Optional[int], desc: Optional[str]
    ) -> None:
        """Clamp the counters and forward them to the progress callback."""

        if self._progress_callback is None:
            return
        cur = int(current)
        if total is None or total <= 0:
            # Unknown total: fake one just ahead of the current count so the
            # bar keeps moving without ever reaching 100%.
            total_value = max(1, cur + 1 if cur >= 0 else 1)
            bounded_current = max(0, cur)
        else:
            total_value = max(int(total), 1, cur)
            bounded_current = min(max(0, cur), int(total_value))
        self._progress_callback(bounded_current, total_value, desc or self._active_desc)
148

149

150
# Prefer the .ico first on Windows and PNG variants elsewhere; the first
# filename that resolves wins.
_FAVICON_FILENAMES = (
    ("app.ico", "app-256.png", "app.png")
    if sys.platform.startswith("win")
    else ("app-256.png", "app.png", "app.ico")
)
# Resolved icon path (or None when no icon is found), passed to demo.launch().
_FAVICON_PATH = find_icon_path(filenames=_FAVICON_FILENAMES)
_FAVICON_PATH_STR = str(_FAVICON_PATH) if _FAVICON_PATH else None
# Per-request temporary directories; removed at interpreter exit by
# _cleanup_workspaces() (registered via atexit below).
_WORKSPACES: list[Path] = []
158

159

160
def _allocate_workspace() -> Path:
    """Create and remember a workspace directory for a single request."""

    # mkdtemp gives us a unique, already-created directory per request.
    workspace = Path(tempfile.mkdtemp(prefix="talks_reducer_web_"))
    # Track it so the atexit hook can delete whatever is left over.
    _WORKSPACES.append(workspace)
    return workspace
166

167

168
def _cleanup_workspaces() -> None:
    """Remove any workspaces that remain when the process exits."""

    for workspace in list(_WORKSPACES):
        if not workspace.exists():
            continue
        # Best-effort removal: never let cleanup errors surface at exit.
        with suppress(Exception):
            shutil.rmtree(workspace)
    _WORKSPACES.clear()
176

177

178
def _describe_server_host() -> str:
    """Return a human-readable description of the server hostname and IP."""

    hostname = socket.gethostname().strip()
    ip_address = ""

    # Resolution can fail on mis-configured hosts; treat that as "no IP".
    with suppress(OSError):
        ip_address = socket.gethostbyname(hostname or "localhost") or ""

    if hostname and ip_address and hostname != ip_address:
        return f"{hostname} ({ip_address})"
    # Prefer the IP, then the bare hostname, then an explicit placeholder.
    return ip_address or hostname or "unknown"
196

197

198
def _build_output_path(input_path: Path, workspace: Path, small: bool) -> Path:
1✔
199
    """Mirror the CLI output naming scheme inside the workspace directory."""
200

201
    suffix = input_path.suffix or ".mp4"
1✔
202
    stem = input_path.stem
1✔
203
    marker = "_speedup_small" if small else "_speedup"
1✔
204
    return workspace / f"{stem}{marker}{suffix}"
1✔
205

206

207
def _format_duration(seconds: float) -> str:
1✔
208
    """Return a compact human-readable duration string."""
209

210
    if seconds <= 0:
1✔
211
        return "0s"
1✔
212
    total_seconds = int(round(seconds))
1✔
213
    hours, remainder = divmod(total_seconds, 3600)
1✔
214
    minutes, secs = divmod(remainder, 60)
1✔
215
    parts: list[str] = []
1✔
216
    if hours:
1✔
217
        parts.append(f"{hours}h")
1✔
218
    if minutes or hours:
1✔
219
        parts.append(f"{minutes}m")
1✔
220
    parts.append(f"{secs}s")
1✔
221
    return " ".join(parts)
1✔
222

223

224
def _format_summary(result: ProcessingResult) -> str:
    """Produce a Markdown summary of the processing result."""

    output_duration = _format_duration(result.output_duration)
    original_duration = _format_duration(result.original_duration)
    duration_line = f"**Duration:** {output_duration} ({original_duration} original)"
    if result.time_ratio is not None:
        duration_line += f" — {result.time_ratio * 100:.1f}% of the original"

    lines = [
        f"**Input:** `{result.input_file.name}`",
        f"**Output:** `{result.output_file.name}`",
        duration_line,
    ]

    # Size comparison is only available when the pipeline computed a ratio.
    if result.size_ratio is not None:
        lines.append(f"**Size:** {result.size_ratio * 100:.1f}% of the original file")

    lines.append(f"**Chunks merged:** {result.chunk_count}")
    encoder = "CUDA" if result.used_cuda else "CPU"
    lines.append(f"**Encoder:** {encoder}")

    return "\n".join(lines)
248

249

250
# (kind, payload) tuple flowing through the pipeline event queue; kind is one
# of "log", "progress", "result", "error", or "done" (see run_pipeline_job).
PipelineEvent = tuple[str, object]
251

252

253
def _default_reporter_factory(
    progress_callback: Optional[Callable[[int, int, str], None]],
    log_callback: Callable[[str], None],
) -> SignalProgressReporter:
    """Construct a :class:`GradioProgressReporter` with the given callbacks."""

    reporter = GradioProgressReporter(
        progress_callback=progress_callback,
        log_callback=log_callback,
    )
    return reporter
263

264

265
def run_pipeline_job(
    options: ProcessingOptions,
    *,
    speed_up: Callable[[ProcessingOptions, SignalProgressReporter], ProcessingResult],
    reporter_factory: Callable[
        [Optional[Callable[[int, int, str], None]], Callable[[str], None]],
        SignalProgressReporter,
    ],
    events: SimpleQueue[PipelineEvent],
    enable_progress: bool = True,
    start_in_thread: bool = True,
) -> Iterator[PipelineEvent]:
    """Execute the processing pipeline and yield emitted events.

    Args:
        options: Fully-populated processing options for the pipeline run.
        speed_up: Pipeline entry point invoked with ``options`` and a reporter.
        reporter_factory: Builds the reporter from a progress callback (``None``
            when progress is disabled) and a log callback.
        events: Queue carrying ``(kind, payload)`` tuples from the worker.
        enable_progress: When ``False``, no ``"progress"`` events are emitted.
        start_in_thread: Run the worker in a daemon thread; when ``False`` the
            worker runs synchronously before events are drained.

    Yields:
        ``("log" | "progress" | "result" | "error", payload)`` tuples; the
        internal ``"done"`` sentinel terminates iteration and is not yielded.
    """

    def _emit(kind: str, payload: object) -> None:
        events.put((kind, payload))

    # Named function instead of an assigned lambda (PEP 8 E731).
    def _emit_progress(current: int, total: int, desc: str) -> None:
        _emit("progress", (current, total, desc))

    progress_callback: Optional[Callable[[int, int, str], None]] = (
        _emit_progress if enable_progress else None
    )

    reporter = reporter_factory(
        progress_callback, lambda message: _emit("log", message)
    )

    def _worker() -> None:
        # Translate pipeline outcomes into queue events; the "done" sentinel is
        # always emitted so the consumer loop below terminates.
        try:
            result = speed_up(options, reporter=reporter)
        except FFmpegNotFoundError as exc:  # pragma: no cover - depends on runtime env
            _emit("error", gr.Error(str(exc)))
        except FileNotFoundError as exc:
            _emit("error", gr.Error(str(exc)))
        except Exception as exc:  # pragma: no cover - defensive fallback
            reporter.log(f"Error: {exc}")
            _emit("error", gr.Error(f"Failed to process the video: {exc}"))
        else:
            reporter.log("Processing complete.")
            _emit("result", result)
        finally:
            _emit("done", None)

    thread: Optional[Thread] = None
    if start_in_thread:
        thread = Thread(target=_worker, daemon=True)
        thread.start()
    else:
        _worker()

    try:
        while True:
            kind, payload = events.get()
            if kind == "done":
                break
            yield (kind, payload)
    finally:
        # Make sure the worker has fully finished even if the consumer stops early.
        if thread is not None:
            thread.join()
324

325

326
@dataclass
class ProcessVideoDependencies:
    """Container for dependencies used by :func:`process_video`.

    Every field defaults to the real implementation; callers can substitute
    replacements (e.g. fakes for testing) without patching module globals.
    """

    # Pipeline entry point that performs the actual video processing.
    speed_up: Callable[
        [ProcessingOptions, SignalProgressReporter], ProcessingResult
    ] = speed_up_video
    # Builds the progress reporter from the progress and log callbacks.
    reporter_factory: Callable[
        [Optional[Callable[[int, int, str], None]], Callable[[str], None]],
        SignalProgressReporter,
    ] = _default_reporter_factory
    # Creates the event queue shared between the worker and the consumer.
    queue_factory: Callable[[], SimpleQueue[PipelineEvent]] = SimpleQueue
    # Orchestrates the pipeline run and yields its events.
    run_pipeline_job_func: Callable[..., Iterator[PipelineEvent]] = run_pipeline_job
    # When False the pipeline runs synchronously instead of in a daemon thread.
    start_in_thread: bool = True
340

341

342
def process_video(
    file_path: Optional[str],
    small_video: bool,
    small_480: bool = False,
    video_codec: str = "hevc",
    use_global_ffmpeg: bool = False,
    silent_threshold: Optional[float] = None,
    sounded_speed: Optional[float] = None,
    silent_speed: Optional[float] = None,
    # NOTE: a gr.Progress default argument is Gradio's injection pattern for the
    # progress tracker; it is evaluated once at import time by design.
    progress: Optional[gr.Progress] = gr.Progress(track_tqdm=False),
    *,
    dependencies: Optional[ProcessVideoDependencies] = None,
) -> Iterator[tuple[Optional[str], str, str, Optional[str]]]:
    """Run the Talks Reducer pipeline for a single uploaded file.

    Yields ``(video_path, log_text, summary_markdown, download_path)`` tuples:
    intermediate yields stream log updates to the UI (other slots carry
    ``gr.update()`` placeholders), and the final yield delivers the processed
    file. Raises :class:`gr.Error` for user-facing failures (missing upload,
    vanished file, or a pipeline error).
    """

    if not file_path:
        raise gr.Error("Please upload a video file to begin processing.")

    input_path = Path(file_path)
    if not input_path.exists():
        raise gr.Error("The uploaded file is no longer available on the server.")

    # Each request gets its own temp workspace; leftovers are removed at exit.
    workspace = _allocate_workspace()
    temp_folder = workspace / "temp"
    output_file = _build_output_path(input_path, workspace, small_video)

    deps = dependencies or ProcessVideoDependencies()
    events = deps.queue_factory()

    # Normalize the codec string and fall back to hevc for unknown values.
    codec_value = (video_codec or "hevc").strip().lower()
    if codec_value not in {"h264", "hevc", "av1"}:
        codec_value = "hevc"

    option_kwargs: dict[str, float | str | bool] = {
        "video_codec": codec_value,
        "prefer_global_ffmpeg": bool(use_global_ffmpeg),
    }
    # Only forward tuning values the caller supplied so ProcessingOptions
    # defaults remain in effect otherwise.
    if silent_threshold is not None:
        option_kwargs["silent_threshold"] = float(silent_threshold)
    if sounded_speed is not None:
        option_kwargs["sounded_speed"] = float(sounded_speed)
    if silent_speed is not None:
        option_kwargs["silent_speed"] = float(silent_speed)

    # The 480p downscale only applies when the small preset is active.
    if small_video and small_480:
        option_kwargs["small_target_height"] = 480

    options = ProcessingOptions(
        input_file=input_path,
        output_file=output_file,
        temp_folder=temp_folder,
        small=small_video,
        **option_kwargs,
    )

    event_stream = deps.run_pipeline_job_func(
        options,
        speed_up=deps.speed_up,
        reporter_factory=deps.reporter_factory,
        events=events,
        enable_progress=progress is not None,
        start_in_thread=deps.start_in_thread,
    )

    collected_logs: list[str] = []
    final_result: Optional[ProcessingResult] = None
    error: Optional[gr.Error] = None

    # Drain pipeline events: stream logs to the UI immediately, forward
    # progress to the widget, and remember the terminal result or error.
    for kind, payload in event_stream:
        if kind == "log":
            text = str(payload).strip()
            if text:
                collected_logs.append(text)
                yield (
                    gr.update(),
                    "\n".join(collected_logs),
                    gr.update(),
                    gr.update(),
                )
        elif kind == "progress":
            if progress is not None:
                current, total, desc = cast(tuple[int, int, str], payload)
                percent = current / total if total > 0 else 0
                progress(percent, total=total, desc=desc)
        elif kind == "result":
            final_result = payload  # type: ignore[assignment]
        elif kind == "error":
            error = payload  # type: ignore[assignment]

    # An error event takes priority; re-raise it for Gradio to display.
    if error is not None:
        raise error

    if final_result is None:
        raise gr.Error("Failed to process the video.")

    log_text = "\n".join(collected_logs)
    summary = _format_summary(final_result)

    # Final yield: the same output path feeds both the player and download box.
    yield (
        str(final_result.output_file),
        log_text,
        summary,
        str(final_result.output_file),
    )
446

447

448
def build_interface() -> gr.Blocks:
    """Construct the Gradio Blocks application for the simple web UI.

    Returns:
        A :class:`gr.Blocks` app with its queue configured to run one
        processing job at a time.
    """

    server_identity = _describe_server_host()
    global_ffmpeg_available = is_global_ffmpeg_available()

    # Show the package version in the page title when it can be resolved.
    app_version = resolve_version()
    version_suffix = (
        f" v{app_version}" if app_version and app_version != "unknown" else ""
    )

    with gr.Blocks(title=f"Talks Reducer Web UI{version_suffix}") as demo:
        gr.Markdown(
            f"""
            ## Talks Reducer Web UI{version_suffix}
            Drop a video into the zone below or click to browse. **Small video** is enabled
            by default to apply the 720p/128k preset before processing starts—clear it to
            keep the original resolution or pair it with **Target 480p** to downscale
            further. Choose **Video codec** to switch between h.265 (≈25% smaller),
            h.264 (≈10% faster), and av1 (no advantages) compression, and enable
            **Use global FFmpeg** when your system install offers hardware encoders that the
            bundled build lacks.

            Video will be rendered on server **{server_identity}**.
            """.strip()
        )

        # Upload widget; "filepath" hands process_video a path string.
        with gr.Column():
            file_input = gr.File(
                label="Video file",
                file_types=["video"],
                type="filepath",
            )

        with gr.Row():
            small_checkbox = gr.Checkbox(label="Small video", value=True)
            small_480_checkbox = gr.Checkbox(label="Target 480p", value=False)

        codec_dropdown = gr.Dropdown(
            choices=[
                ("hevc", "h.265 (25% smaller)"),
                ("h264", "h.264 (10% faster)"),
                ("av1", "av1 (no advantages)"),
            ],
            value="hevc",
            label="Video codec",
        )

        # Disable the toggle (but keep it visible) when no global FFmpeg exists.
        global_ffmpeg_info = (
            "Prefer the FFmpeg binary from PATH instead of the bundled build."
            if global_ffmpeg_available
            else "Global FFmpeg not detected; the bundled build will be used."
        )
        use_global_ffmpeg_checkbox = gr.Checkbox(
            label="Use global FFmpeg",
            value=False,
            info=global_ffmpeg_info,
            interactive=global_ffmpeg_available,
        )

        # Tuning sliders; defaults mirror the pipeline's documented behavior.
        with gr.Column():
            silent_speed_input = gr.Slider(
                minimum=1.0,
                maximum=10.0,
                value=4.0,
                step=0.1,
                label="Silent speed",
            )
            sounded_speed_input = gr.Slider(
                minimum=0.5,
                maximum=3.0,
                value=1.0,
                step=0.05,
                label="Sounded speed",
            )
            silent_threshold_input = gr.Slider(
                minimum=0.0,
                maximum=1.0,
                value=0.05,
                step=0.01,
                label="Silent threshold",
            )

        video_output = gr.Video(label="Processed video")
        summary_output = gr.Markdown()
        download_output = gr.File(label="Download processed file", interactive=False)
        log_output = gr.Textbox(label="Log", lines=12, interactive=False)

        # Trigger processing on upload; the inputs list order must match the
        # positional parameters of process_video.
        file_input.upload(
            process_video,
            inputs=[
                file_input,
                small_checkbox,
                small_480_checkbox,
                codec_dropdown,
                use_global_ffmpeg_checkbox,
                silent_threshold_input,
                sounded_speed_input,
                silent_speed_input,
            ],
            outputs=[video_output, log_output, summary_output, download_output],
            queue=True,
            api_name="process_video",
        )

    # One job at a time so concurrent uploads do not compete for FFmpeg.
    demo.queue(default_concurrency_limit=1)
    return demo
555

556

557
def _build_server_arg_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for the web server launcher."""

    parser = argparse.ArgumentParser(description="Launch the Talks Reducer web UI.")
    parser.add_argument(
        "--host", dest="host", default="0.0.0.0", help="Custom host to bind."
    )
    parser.add_argument(
        "--port",
        dest="port",
        type=int,
        default=9005,
        help="Port number for the web server (default: 9005).",
    )
    parser.add_argument(
        "--share",
        action="store_true",
        help="Create a temporary public Gradio link.",
    )
    parser.add_argument(
        "--no-browser",
        action="store_true",
        help="Do not automatically open the browser window.",
    )
    return parser


def main(argv: Optional[Sequence[str]] = None) -> None:
    """Launch the Gradio server from the command line."""

    args = _build_server_arg_parser().parse_args(argv)

    demo = build_interface()
    demo.launch(
        server_name=args.host,
        server_port=args.port,
        share=args.share,
        inbrowser=not args.no_browser,
        favicon_path=_FAVICON_PATH_STR,
    )
592

593

594
# Remove any leftover per-request workspaces when the interpreter exits.
atexit.register(_cleanup_workspaces)


# Public API of this module.
__all__ = [
    "GradioProgressReporter",
    "build_interface",
    "main",
    "process_video",
]


if __name__ == "__main__":  # pragma: no cover - convenience entry point
    main()
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc