• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

paulmthompson / WhiskerToolbox / 17956774620

23 Sep 2025 07:21PM UTC coverage: 68.919% (+0.06%) from 68.862%
17956774620

push

github

paulmthompson
clang tidy fixes for common analysis dashboard widgets

52 of 73 new or added lines in 6 files covered. (71.23%)

576 existing lines in 4 files now uncovered.

41684 of 60483 relevant lines covered (68.92%)

1138.46 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

19.22
/src/WhiskerToolbox/Media_Widget/Media_Window/Media_Window.cpp
1
#include "Media_Window.hpp"
2

3
#include "CoreGeometry/line_geometry.hpp"
4
#include "CoreGeometry/lines.hpp"
5
#include "CoreGeometry/masks.hpp"
6
#include "DataManager/DataManager.hpp"
7
#include "DataManager/DigitalTimeSeries/Digital_Interval_Series.hpp"
8
#include "DataManager/Lines/Line_Data.hpp"
9
#include "DataManager/Masks/Mask_Data.hpp"
10
#include "DataManager/Media/Media_Data.hpp"
11
#include "DataManager/Points/Point_Data.hpp"
12
#include "ImageProcessing/OpenCVUtility.hpp"
13
#include "Media_Widget/DisplayOptions/DisplayOptions.hpp"
14
#include "Media_Widget/MediaProcessing_Widget/MediaProcessing_Widget.hpp"
15
#include "Media_Widget/MediaText_Widget/MediaText_Widget.hpp"
16
#include "Media_Widget/Media_Widget.hpp"
17
#include "TimeFrame/TimeFrame.hpp"
18

19
//https://stackoverflow.com/questions/72533139/libtorch-errors-when-used-with-qt-opencv-and-point-cloud-library
20
#undef slots
21
#include "DataManager/Tensors/Tensor_Data.hpp"
22
#define slots Q_SLOTS
23

24
#include <QElapsedTimer>
25
#include <QFont>
26
#include <QGraphicsPixmapItem>
27
#include <QGraphicsSceneMouseEvent>
28
#include <QGraphicsTextItem>
29
#include <QImage>
30
#include <QPainter>
31
#include <algorithm>
32

33
#include <iostream>
34

35
/*
36

37
The Media_Window class
38

39
*/
40

41

42
Media_Window::Media_Window(std::shared_ptr<DataManager> data_manager, QObject * parent)
    : QGraphicsScene(parent),
      _data_manager{std::move(data_manager)} {

    // Re-sync scene contents whenever the data manager reports a change.
    _data_manager->addObserver([this]() {
        _addRemoveData();
    });

    // Back the scene with a blank ARGB canvas and its pixmap item.
    _canvasImage = QImage(_canvasWidth, _canvasHeight, QImage::Format_ARGB32);
    _canvasPixmap = addPixmap(QPixmap::fromImage(_canvasImage));
}
53

54
Media_Window::~Media_Window() {
    // QGraphicsScene::clear() removes AND deletes every item in the scene.
    clear();

    // The tracked pointers now dangle; just drop them — no deletion here.
    _line_paths.clear();
    _masks.clear();
    _mask_bounding_boxes.clear();
    _mask_outlines.clear();
    _points.clear();
    _intervals.clear();
    _tensors.clear();
    _text_items.clear();
}
68

69
void Media_Window::addMediaDataToScene(std::string const & media_key) {
3✔
70
    auto media_config = std::make_unique<MediaDisplayOptions>();
3✔
71

72
    _media_configs[media_key] = std::move(media_config);
3✔
73

74
    UpdateCanvas();
3✔
75
}
6✔
76

77
void Media_Window::_clearMedia() {
16✔
78
    // Set to black
79
    _canvasImage.fill(Qt::black);
16✔
80
    _canvasPixmap->setPixmap(QPixmap::fromImage(_canvasImage));
16✔
81
}
16✔
82

UNCOV
83
void Media_Window::removeMediaDataFromScene(std::string const & media_key) {
×
UNCOV
84
    auto mediaItem = _media_configs.find(media_key);
×
UNCOV
85
    if (mediaItem != _media_configs.end()) {
×
86
        _media_configs.erase(mediaItem);
×
87
    }
88

89
    UpdateCanvas();
×
UNCOV
90
}
×
91

92
void Media_Window::addLineDataToScene(std::string const & line_key) {
×
93
    auto line_config = std::make_unique<LineDisplayOptions>();
×
94

95
    // Assign color based on the current number of line configs
96
    line_config->hex_color = DefaultDisplayValues::getColorForIndex(_line_configs.size());
×
97

UNCOV
98
    _line_configs[line_key] = std::move(line_config);
×
99

UNCOV
100
    UpdateCanvas();
×
101
}
×
102

103
void Media_Window::_clearLines() {
16✔
104
    for (auto pathItem: _line_paths) {
16✔
UNCOV
105
        removeItem(pathItem);
×
106
    }
107
    for (auto pathItem: _line_paths) {
16✔
108
        delete pathItem;
×
109
    }
110
    _line_paths.clear();
16✔
111
}
16✔
112

UNCOV
113
void Media_Window::removeLineDataFromScene(std::string const & line_key) {
×
UNCOV
114
    auto lineItem = _line_configs.find(line_key);
×
UNCOV
115
    if (lineItem != _line_configs.end()) {
×
116
        _line_configs.erase(lineItem);
×
117
    }
118

119
    UpdateCanvas();
×
UNCOV
120
}
×
121

122
void Media_Window::addMaskDataToScene(std::string const & mask_key) {
3✔
123
    auto mask_config = std::make_unique<MaskDisplayOptions>();
3✔
124

125
    // Assign color based on the current number of mask configs
126
    mask_config->hex_color = DefaultDisplayValues::getColorForIndex(_mask_configs.size());
3✔
127

128
    _mask_configs[mask_key] = std::move(mask_config);
3✔
129
    UpdateCanvas();
3✔
130
}
6✔
131

132
void Media_Window::_clearMasks() {
16✔
133
    for (auto maskItem: _masks) {
16✔
UNCOV
134
        removeItem(maskItem);
×
135
    }
136

137
    for (auto maskItem: _masks) {
16✔
UNCOV
138
        delete maskItem;
×
139
    }
140
    _masks.clear();
16✔
141
}
16✔
142

143
void Media_Window::_clearMaskBoundingBoxes() {
16✔
144
    for (auto boundingBoxItem: _mask_bounding_boxes) {
16✔
UNCOV
145
        removeItem(boundingBoxItem);
×
146
    }
147

148
    for (auto boundingBoxItem: _mask_bounding_boxes) {
16✔
UNCOV
149
        delete boundingBoxItem;
×
150
    }
151
    _mask_bounding_boxes.clear();
16✔
152
}
16✔
153

154
void Media_Window::_clearMaskOutlines() {
16✔
155
    for (auto outlineItem: _mask_outlines) {
16✔
UNCOV
156
        removeItem(outlineItem);
×
157
    }
158

159
    for (auto outlineItem: _mask_outlines) {
16✔
UNCOV
160
        delete outlineItem;
×
161
    }
162
    _mask_outlines.clear();
16✔
163
}
16✔
164

UNCOV
165
void Media_Window::removeMaskDataFromScene(std::string const & mask_key) {
×
UNCOV
166
    auto maskItem = _mask_configs.find(mask_key);
×
UNCOV
167
    if (maskItem != _mask_configs.end()) {
×
168
        _mask_configs.erase(maskItem);
×
169
    }
170

171
    UpdateCanvas();
×
UNCOV
172
}
×
173

174
void Media_Window::addPointDataToScene(std::string const & point_key) {
×
175
    auto point_config = std::make_unique<PointDisplayOptions>();
×
176

177
    // Assign color based on the current number of point configs
178
    point_config->hex_color = DefaultDisplayValues::getColorForIndex(_point_configs.size());
×
179

UNCOV
180
    _point_configs[point_key] = std::move(point_config);
×
181
    UpdateCanvas();
×
UNCOV
182
}
×
183

184
void Media_Window::_clearPoints() {
16✔
185
    if (_debug_performance) {
16✔
UNCOV
186
        std::cout << "CLEARING POINTS - Count before: " << _points.size() << std::endl;
×
187
    }
188

189
    for (auto pathItem: _points) {
16✔
UNCOV
190
        removeItem(pathItem);
×
191
    }
192
    for (auto pathItem: _points) {
16✔
193
        delete pathItem;
×
194
    }
195
    _points.clear();
16✔
196

197
    if (_debug_performance) {
16✔
UNCOV
198
        std::cout << "  Points cleared. Count after: " << _points.size() << std::endl;
×
UNCOV
199
        std::cout << "  Hover circle item still exists: " << (_hover_circle_item ? "YES" : "NO") << std::endl;
×
200
    }
201
}
16✔
202

UNCOV
203
void Media_Window::removePointDataFromScene(std::string const & point_key) {
×
UNCOV
204
    auto pointItem = _point_configs.find(point_key);
×
UNCOV
205
    if (pointItem != _point_configs.end()) {
×
206
        _point_configs.erase(pointItem);
×
207
    }
208

209
    UpdateCanvas();
×
UNCOV
210
}
×
211

212
void Media_Window::addDigitalIntervalSeries(std::string const & key) {
×
213
    auto interval_config = std::make_unique<DigitalIntervalDisplayOptions>();
×
214

215
    // Assign color based on the current number of interval configs
216
    interval_config->hex_color = DefaultDisplayValues::getColorForIndex(_interval_configs.size());
×
217

UNCOV
218
    _interval_configs[key] = std::move(interval_config);
×
219
    UpdateCanvas();
×
UNCOV
220
}
×
221

222
void Media_Window::removeDigitalIntervalSeries(std::string const & key) {
×
223
    auto item = _interval_configs.find(key);
×
UNCOV
224
    if (item != _interval_configs.end()) {
×
225
        _interval_configs.erase(item);
×
226
    }
227

228
    UpdateCanvas();
×
UNCOV
229
}
×
230

231
void Media_Window::_clearIntervals() {
16✔
232
    for (auto item: _intervals) {
16✔
UNCOV
233
        removeItem(item);
×
234
    }
235

236
    for (auto item: _intervals) {
16✔
UNCOV
237
        delete item;
×
238
    }
239
    _intervals.clear();
16✔
240
}
16✔
241

UNCOV
242
void Media_Window::addTensorDataToScene(std::string const & tensor_key) {
×
UNCOV
243
    auto tensor_config = std::make_unique<TensorDisplayOptions>();
×
244

245
    // Assign color based on the current number of tensor configs
246
    tensor_config->hex_color = DefaultDisplayValues::getColorForIndex(_tensor_configs.size());
×
247

UNCOV
248
    _tensor_configs[tensor_key] = std::move(tensor_config);
×
249

UNCOV
250
    UpdateCanvas();
×
251
}
×
252

253
void Media_Window::removeTensorDataFromScene(std::string const & tensor_key) {
×
254
    auto tensorItem = _tensor_configs.find(tensor_key);
×
UNCOV
255
    if (tensorItem != _tensor_configs.end()) {
×
256
        _tensor_configs.erase(tensorItem);
×
257
    }
258

259
    UpdateCanvas();
×
UNCOV
260
}
×
261

262
void Media_Window::_clearTensors() {
16✔
263
    for (auto item: _tensors) {
16✔
UNCOV
264
        removeItem(item);
×
265
    }
266

267
    for (auto item: _tensors) {
16✔
UNCOV
268
        delete item;
×
269
    }
270
    _tensors.clear();
16✔
271
}
16✔
272

273
void Media_Window::setTextWidget(MediaText_Widget * text_widget) {
    // Store the widget that supplies the enabled text overlays for plotting.
    _text_widget = text_widget;
}
276

277
void Media_Window::_plotTextOverlays() {
16✔
278
    if (!_text_widget) {
16✔
UNCOV
279
        return;
×
280
    }
281

282
    // Get enabled text overlays from the widget
283
    auto text_overlays = _text_widget->getEnabledTextOverlays();
16✔
284

285
    for (auto const & overlay: text_overlays) {
16✔
UNCOV
286
        if (!overlay.enabled) {
×
UNCOV
287
            continue;
×
288
        }
289

290
        // Calculate position based on relative coordinates (0.0-1.0)
UNCOV
291
        float const x_pos = overlay.x_position * static_cast<float>(_canvasWidth);
×
UNCOV
292
        float const y_pos = overlay.y_position * static_cast<float>(_canvasHeight);
×
293

294
        // Create text item
295
        auto text_item = addText(overlay.text);
×
296

297
        // Set font and size
298
        QFont font = text_item->font();
×
UNCOV
299
        font.setPointSize(overlay.font_size);
×
UNCOV
300
        text_item->setFont(font);
×
301

302
        // Set color
303
        QColor const text_color(overlay.color);
×
UNCOV
304
        text_item->setDefaultTextColor(text_color);
×
305

306
        // Handle orientation
307
        if (overlay.orientation == TextOrientation::Vertical) {
×
UNCOV
308
            text_item->setRotation(90.0);// Rotate 90 degrees for vertical text
×
309
        }
310

311
        // Set position
UNCOV
312
        text_item->setPos(x_pos, y_pos);
×
313

314
        // Add to our collection for cleanup
315
        _text_items.append(text_item);
×
UNCOV
316
    }
×
317
}
16✔
318

319
void Media_Window::_clearTextOverlays() {
16✔
320
    for (auto text_item: _text_items) {
16✔
UNCOV
321
        removeItem(text_item);
×
322
    }
323
    for (auto text_item: _text_items) {
16✔
324
        delete text_item;
×
325
    }
326
    _text_items.clear();
16✔
327
}
16✔
328

UNCOV
329
void Media_Window::LoadFrame(int frame_id) {
×
330
    // Get MediaData using the active media key
UNCOV
331
    for (auto const & [media_key, media_config]: _media_configs) {
×
332
        if (!media_config.get()->is_visible) {
×
UNCOV
333
            continue;
×
334
        }
335

336
        auto media = _data_manager->getData<MediaData>(media_key);
×
UNCOV
337
        if (!media) {
×
UNCOV
338
            std::cerr << "Warning: No media data found for key '" << media_key << "'" << std::endl;
×
339
            return;
×
340
        }
341
        media->LoadFrame(frame_id);
×
342
    }
×
343

344
    // Clear any accumulated drawing points when changing frames
345
    // This ensures no cross-frame accumulation and explains why lag disappears on frame change
UNCOV
346
    _drawing_points.clear();
×
UNCOV
347
    _is_drawing = false;
×
348

349
    UpdateCanvas();
×
350
}
351

352
void Media_Window::UpdateCanvas() {
16✔
353

354
    if (_debug_performance) {
16✔
UNCOV
355
        std::cout << "========== Update Canvas called ==========" << std::endl;
×
356

357
        // Debug: Show current item counts before clearing
358
        std::cout << "BEFORE CLEAR - Items in scene: " << items().size() << std::endl;
×
UNCOV
359
        std::cout << "  Lines: " << _line_paths.size() << std::endl;
×
UNCOV
360
        std::cout << "  Points: " << _points.size() << std::endl;
×
361
        std::cout << "  Masks: " << _masks.size() << std::endl;
×
362
        std::cout << "  Mask bounding boxes: " << _mask_bounding_boxes.size() << std::endl;
×
363
        std::cout << "  Mask outlines: " << _mask_outlines.size() << std::endl;
×
364
        std::cout << "  Intervals: " << _intervals.size() << std::endl;
×
365
        std::cout << "  Tensors: " << _tensors.size() << std::endl;
×
366
        std::cout << "  Text items: " << _text_items.size() << std::endl;
×
367
        std::cout << "  Drawing points accumulated: " << _drawing_points.size() << std::endl;
×
368
        std::cout << "  Hover circle item exists: " << (_hover_circle_item ? "YES" : "NO") << std::endl;
×
369
    }
370

371
    _clearLines();
16✔
372
    _clearPoints();
16✔
373
    _clearMasks();
16✔
374
    _clearMaskBoundingBoxes();
16✔
375
    _clearMaskOutlines();
16✔
376
    _clearIntervals();
16✔
377
    _clearTensors();
16✔
378
    _clearTextOverlays();
16✔
379
    _clearMedia();
16✔
380

381
    _plotMediaData();
16✔
382

383
    _plotLineData();
16✔
384

385
    _plotMaskData();
16✔
386

387
    _plotPointData();
16✔
388

389
    _plotDigitalIntervalSeries();
16✔
390

391
    _plotDigitalIntervalBorders();
16✔
392

393
    _plotTensorData();
16✔
394

395
    _plotTextOverlays();
16✔
396

397
    // Note: Hover circle is now handled efficiently via _updateHoverCirclePosition()
398
    // and doesn't need to be redrawn on every UpdateCanvas() call
399

400
    if (_debug_performance) {
16✔
401
        // Debug: Show item counts after plotting
UNCOV
402
        std::cout << "AFTER PLOTTING - Items in scene: " << items().size() << std::endl;
×
UNCOV
403
        std::cout << "  Lines plotted: " << _line_paths.size() << std::endl;
×
UNCOV
404
        std::cout << "  Points plotted: " << _points.size() << std::endl;
×
405
        std::cout << "  Masks plotted: " << _masks.size() << std::endl;
×
406
        std::cout << "  Mask bounding boxes plotted: " << _mask_bounding_boxes.size() << std::endl;
×
407
        std::cout << "  Mask outlines plotted: " << _mask_outlines.size() << std::endl;
×
408
        std::cout << "  Intervals plotted: " << _intervals.size() << std::endl;
×
409
        std::cout << "  Tensors plotted: " << _tensors.size() << std::endl;
×
410
        std::cout << "  Text items plotted: " << _text_items.size() << std::endl;
×
411
    }
412

413
    // Save the entire QGraphicsScene as an image
414
    QImage scene_image(_canvasWidth, _canvasHeight, QImage::Format_ARGB32);
16✔
415
    scene_image.fill(Qt::transparent);// Optional: fill with transparent background
16✔
416
    QPainter painter(&scene_image);
16✔
417

418
    // Set the scene rect to match the canvas dimensions
419
    this->setSceneRect(0, 0, _canvasWidth, _canvasHeight);
16✔
420

421
    // Render the scene with proper viewport mapping
422
    this->render(&painter, QRectF(0, 0, _canvasWidth, _canvasHeight),
48✔
423
                 QRect(0, 0, _canvasWidth, _canvasHeight));
32✔
424

425
    emit canvasUpdated(scene_image);
16✔
426
}
32✔
427

428

UNCOV
429
QImage::Format Media_Window::_getQImageFormat(std::string const & media_key) {

    auto media = _data_manager->getData<MediaData>(media_key);
    if (!media) {
        // No media registered under this key: fall back to 8-bit grayscale.
        return QImage::Format_Grayscale8;
    }

    if (media->getFormat() != MediaData::DisplayFormat::Gray) {
        // Anything that is not grayscale is displayed as RGBA.
        return QImage::Format_RGBA8888;
    }

    // Grayscale: 32-bit sources are shown at 16-bit for higher precision,
    // everything else at the default 8-bit depth.
    return media->is32Bit() ? QImage::Format_Grayscale16 : QImage::Format_Grayscale8;
}
449

450
void Media_Window::_plotMediaData() {
16✔
451

452
    auto const current_time = _data_manager->getCurrentTime();
16✔
453

454
    auto video_timeframe = _data_manager->getTime(TimeKey("time"));
16✔
455

456
    int total_visible_media = 0;
16✔
457
    std::string active_media_key;
16✔
458
    for (auto const & [media_key, _media_config]: _media_configs) {
32✔
459
        if (!_media_config.get()->is_visible) continue;
16✔
UNCOV
460
        total_visible_media++;
×
UNCOV
461
        active_media_key = media_key;
×
462
    }
463

464
    if (total_visible_media == 0) {
16✔
465
        return;
16✔
466
    }
467

UNCOV
468
    QImage unscaled_image;
×
469

UNCOV
470
    if (total_visible_media == 1) {
×
471
        auto media = _data_manager->getData<MediaData>(active_media_key);
×
UNCOV
472
        if (!media) {
×
473
            std::cerr << "Warning: No media data found for key '" << active_media_key << "'" << std::endl;
×
474
            return;
×
475
        }
476

477
        if (media->getFormat() == MediaData::DisplayFormat::Gray) {
×
478
            // Handle grayscale images with potential colormap application
UNCOV
479
            bool apply_colormap = _media_configs[active_media_key].get()->colormap_options.active &&
×
480
                                  _media_configs[active_media_key].get()->colormap_options.colormap != ColormapType::None;
×
481

482
            if (media->is8Bit()) {
×
483
                // 8-bit grayscale processing
UNCOV
484
                auto unscaled_image_data_8bit = media->getProcessedData8(current_time);
×
485

UNCOV
486
                if (apply_colormap) {
×
487
                    auto colormap_data = ImageProcessing::apply_colormap_for_display(
×
488
                            unscaled_image_data_8bit,
489
                            media->getImageSize(),
490
                            _media_configs[active_media_key].get()->colormap_options);
×
491

492
                    // Apply colormap and get BGRA data (OpenCV returns BGRA format)
493
                    unscaled_image = QImage(colormap_data.data(),
×
494
                                            media->getWidth(),
495
                                            media->getHeight(),
496
                                            QImage::Format_ARGB32)
UNCOV
497
                                             .copy();
×
UNCOV
498
                } else {
×
499
                    // No colormap, use original 8-bit grayscale data
500
                    unscaled_image = QImage(unscaled_image_data_8bit.data(),
×
501
                                            media->getWidth(),
502
                                            media->getHeight(),
503
                                            QImage::Format_Grayscale8)
UNCOV
504
                                             .copy();
×
505
                }
UNCOV
506
            } else if (media->is32Bit()) {
×
507
                // 32-bit float processing
UNCOV
508
                auto unscaled_image_data_32bit = media->getProcessedData32(current_time);
×
509

UNCOV
510
                if (apply_colormap) {
×
511
                    // TODO: Need to implement apply_colormap_for_display for float data
512
                    // For now, convert to 8-bit and apply colormap
UNCOV
513
                    std::vector<uint8_t> converted_8bit;
×
514
                    converted_8bit.reserve(unscaled_image_data_32bit.size());
×
515

UNCOV
516
                    for (float pixel_value: unscaled_image_data_32bit) {
×
517
                        // Clamp to 0-255 range and convert to uint8_t
UNCOV
518
                        uint8_t byte_value = static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, pixel_value)));
×
519
                        converted_8bit.push_back(byte_value);
×
520
                    }
521

UNCOV
522
                    auto colormap_data = ImageProcessing::apply_colormap_for_display(
×
523
                            converted_8bit,
524
                            media->getImageSize(),
UNCOV
525
                            _media_configs[active_media_key].get()->colormap_options);
×
526

527
                    // Apply colormap and get BGRA data - make a deep copy to avoid use-after-free
UNCOV
528
                    unscaled_image = QImage(colormap_data.data(),
×
529
                                            media->getWidth(),
530
                                            media->getHeight(),
531
                                            QImage::Format_ARGB32)
532
                                             .copy();
×
533
                } else {
×
534
                    // No colormap, convert 32-bit float to 16-bit for higher precision display
535
                    std::vector<uint16_t> converted_16bit;
×
536
                    converted_16bit.reserve(unscaled_image_data_32bit.size());
×
537

538
                    for (float pixel_value: unscaled_image_data_32bit) {
×
539
                        // Scale from 0-255 range to 0-65535 range
540
                        uint16_t value_16bit = static_cast<uint16_t>(std::max(0.0f, std::min(255.0f, pixel_value)) * 257.0f);
×
541
                        converted_16bit.push_back(value_16bit);
×
542
                    }
543

544
                    // Create QImage and make a deep copy to avoid use-after-free
545
                    unscaled_image = QImage(reinterpret_cast<uchar const *>(converted_16bit.data()),
×
546
                                            media->getWidth(),
547
                                            media->getHeight(),
548
                                            media->getWidth() * sizeof(uint16_t),
×
549
                                            QImage::Format_Grayscale16)
550
                                             .copy();
×
551
                }
×
UNCOV
552
            }
×
553
        } else {
554
            // Color image processing (always 8-bit for now)
555
            auto unscaled_image_data = media->getProcessedData8(current_time);
×
UNCOV
556
            unscaled_image = QImage(unscaled_image_data.data(),
×
557
                                    media->getWidth(),
558
                                    media->getHeight(),
559
                                    QImage::Format_RGBA8888);
×
560
        }
×
UNCOV
561
    }
×
562

563

564
    // Check for multi-channel mode (multiple enabled grayscale media)
UNCOV
565
    if (total_visible_media > 1) {
×
566
        // Multi-channel mode: combine multiple media with colormaps
UNCOV
567
        unscaled_image = _combineMultipleMedia();
×
568
    }
569

UNCOV
570
    auto new_image = unscaled_image.scaled(
×
571
            _canvasWidth,
572
            _canvasHeight,
573
            Qt::IgnoreAspectRatio,
UNCOV
574
            Qt::SmoothTransformation);
×
575

UNCOV
576
    std::cout << "Scaled image" << std::endl;
×
577

578
    // Check if any masks are in transparency mode
579
    bool has_transparency_mask = false;
×
580
    for (auto const & [mask_key, mask_config]: _mask_configs) {
×
581
        if (mask_config->is_visible && mask_config->use_as_transparency) {
×
582
            has_transparency_mask = true;
×
UNCOV
583
            break;
×
584
        }
585
    }
586

587
    // If we have transparency masks, modify the new_image
588
    if (has_transparency_mask) {
×
UNCOV
589
        new_image = _applyTransparencyMasks(new_image);
×
590
    }
591

592
    _canvasPixmap->setPixmap(QPixmap::fromImage(new_image));
×
UNCOV
593
    _canvasImage = new_image;
×
594
}
32✔
595

596

UNCOV
597
QImage Media_Window::_combineMultipleMedia() {
×
598

UNCOV
599
    auto current_time = _data_manager->getCurrentTime();
×
600

601
    // Loop through configs and get the largest image size
602
    std::vector<ImageSize> media_sizes;
×
603
    for (auto const & [media_key, media_config]: _media_configs) {
×
UNCOV
604
        if (!media_config->is_visible) continue;
×
605

606
        auto media = _data_manager->getData<MediaData>(media_key);
×
UNCOV
607
        if (!media) continue;
×
608

609
        media_sizes.push_back(media->getImageSize());
×
UNCOV
610
    }
×
611

UNCOV
612
    if (media_sizes.empty()) return QImage();
×
613

614
    // Find the maximum width and height
615
    int width = 0;
×
616
    int height = 0;
×
617
    for (auto const & size: media_sizes) {
×
618
        width = std::max(width, size.width);
×
UNCOV
619
        height = std::max(height, size.height);
×
620
    }
621

622
    // Create combined RGBA image
623
    QImage combined_image(width, height, QImage::Format_RGBA8888);
×
UNCOV
624
    combined_image.fill(qRgba(0, 0, 0, 255));// Start with black background
×
625

626
    for (auto const & [media_key, media_config]: _media_configs) {
×
UNCOV
627
        if (!media_config->is_visible) continue;
×
628

629
        auto media = _data_manager->getData<MediaData>(media_key);
×
630
        if (!media || media->getFormat() != MediaData::DisplayFormat::Gray) {
×
UNCOV
631
            continue;// Skip non-grayscale media
×
632
        }
633

634
        bool apply_colormap = media_config.get()->colormap_options.active &&
×
UNCOV
635
                              media_config.get()->colormap_options.colormap != ColormapType::None;
×
636

UNCOV
637
        if (media->is8Bit()) {
×
638
            // Handle 8-bit media data
UNCOV
639
            auto media_data_8bit = media->getProcessedData8(current_time);
×
640

641
            if (apply_colormap) {
×
UNCOV
642
                auto colormap_data = ImageProcessing::apply_colormap_for_display(
×
643
                        media_data_8bit,
644
                        media->getImageSize(),
UNCOV
645
                        media_config.get()->colormap_options);
×
646

647
                // Use colormap data (BGRA format from OpenCV)
648
                for (int y = 0; y < media->getHeight(); ++y) {
×
649
                    for (int x = 0; x < media->getWidth(); ++x) {
×
UNCOV
650
                        int const pixel_idx = (y * media->getWidth() + x) * 4;
×
651

652
                        uint8_t const b = colormap_data[pixel_idx];    // Blue channel
×
653
                        uint8_t const g = colormap_data[pixel_idx + 1];// Green channel
×
654
                        uint8_t const r = colormap_data[pixel_idx + 2];// Red channel
×
UNCOV
655
                        uint8_t const a = colormap_data[pixel_idx + 3];// Alpha channel
×
656

657
                        // Get current pixel from combined image
UNCOV
658
                        QRgb current_pixel = combined_image.pixel(x, y);
×
659

660
                        // Additive blending (common for multi-channel microscopy)
661
                        uint8_t const new_r = std::min(255, qRed(current_pixel) + r);
×
662
                        uint8_t const new_g = std::min(255, qGreen(current_pixel) + g);
×
UNCOV
663
                        uint8_t const new_b = std::min(255, qBlue(current_pixel) + b);
×
664

UNCOV
665
                        combined_image.setPixel(x, y, qRgba(new_r, new_g, new_b, 255));
×
666
                    }
667
                }
UNCOV
668
            } else {
×
669
                // Use 8-bit grayscale data directly (no colormap)
670
                for (int y = 0; y < media->getHeight(); ++y) {
×
671
                    for (int x = 0; x < media->getWidth(); ++x) {
×
672
                        int const pixel_idx = y * media->getWidth() + x;
×
UNCOV
673
                        uint8_t const gray_value = media_data_8bit[pixel_idx];
×
674

675
                        // Get current pixel from combined image
UNCOV
676
                        QRgb current_pixel = combined_image.pixel(x, y);
×
677

678
                        // Additive blending
679
                        uint8_t const new_r = std::min(255, qRed(current_pixel) + gray_value);
×
680
                        uint8_t const new_g = std::min(255, qGreen(current_pixel) + gray_value);
×
UNCOV
681
                        uint8_t const new_b = std::min(255, qBlue(current_pixel) + gray_value);
×
682

UNCOV
683
                        combined_image.setPixel(x, y, qRgba(new_r, new_g, new_b, 255));
×
684
                    }
685
                }
686
            }
UNCOV
687
        } else if (media->is32Bit()) {
×
688
            // Handle 32-bit float media data
UNCOV
689
            auto media_data_32bit = media->getProcessedData32(current_time);
×
690

UNCOV
691
            if (apply_colormap) {
×
692
                // Convert to 8-bit for colormap application (temporary until float colormap is implemented)
693
                std::vector<uint8_t> converted_8bit;
×
UNCOV
694
                converted_8bit.reserve(media_data_32bit.size());
×
695

696
                for (float pixel_value: media_data_32bit) {
×
697
                    uint8_t byte_value = static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, pixel_value)));
×
UNCOV
698
                    converted_8bit.push_back(byte_value);
×
699
                }
700

UNCOV
701
                auto colormap_data = ImageProcessing::apply_colormap_for_display(
×
702
                        converted_8bit,
703
                        media->getImageSize(),
UNCOV
704
                        media_config.get()->colormap_options);
×
705

706
                // Use colormap data (BGRA format from OpenCV)
707
                for (int y = 0; y < media->getHeight(); ++y) {
×
708
                    for (int x = 0; x < media->getWidth(); ++x) {
×
UNCOV
709
                        int const pixel_idx = (y * media->getWidth() + x) * 4;
×
710

711
                        uint8_t const b = colormap_data[pixel_idx];    // Blue channel
×
712
                        uint8_t const g = colormap_data[pixel_idx + 1];// Green channel
×
713
                        uint8_t const r = colormap_data[pixel_idx + 2];// Red channel
×
UNCOV
714
                        uint8_t const a = colormap_data[pixel_idx + 3];// Alpha channel
×
715

716
                        // Get current pixel from combined image
UNCOV
717
                        QRgb current_pixel = combined_image.pixel(x, y);
×
718

719
                        // Additive blending
720
                        uint8_t const new_r = std::min(255, qRed(current_pixel) + r);
×
721
                        uint8_t const new_g = std::min(255, qGreen(current_pixel) + g);
×
UNCOV
722
                        uint8_t const new_b = std::min(255, qBlue(current_pixel) + b);
×
723

UNCOV
724
                        combined_image.setPixel(x, y, qRgba(new_r, new_g, new_b, 255));
×
725
                    }
726
                }
UNCOV
727
            } else {
×
728
                // Use 32-bit float data directly (no colormap)
729
                for (int y = 0; y < media->getHeight(); ++y) {
×
730
                    for (int x = 0; x < media->getWidth(); ++x) {
×
731
                        int const pixel_idx = y * media->getWidth() + x;
×
732
                        float const float_value = media_data_32bit[pixel_idx];
×
UNCOV
733
                        uint8_t const gray_value = static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, float_value)));
×
734

735
                        // Get current pixel from combined image
UNCOV
736
                        QRgb current_pixel = combined_image.pixel(x, y);
×
737

738
                        // Additive blending
739
                        uint8_t const new_r = std::min(255, qRed(current_pixel) + gray_value);
×
740
                        uint8_t const new_g = std::min(255, qGreen(current_pixel) + gray_value);
×
UNCOV
741
                        uint8_t const new_b = std::min(255, qBlue(current_pixel) + gray_value);
×
742

UNCOV
743
                        combined_image.setPixel(x, y, qRgba(new_r, new_g, new_b, 255));
×
744
                    }
745
                }
746
            }
747
        }
×
UNCOV
748
    }
×
749

750
    return combined_image;
×
UNCOV
751
}
×
752

753
/**
 * @brief Handles mouse button presses on the media scene.
 *
 * Left/right presses optionally start a drawing stroke (when drawing mode is
 * active) and emit both the legacy qreal click signals and the strong-typed
 * canvas/media coordinate signals. Any other button falls through to the
 * default QGraphicsScene handling.
 */
void Media_Window::mousePressEvent(QGraphicsSceneMouseEvent * event) {
    if (_debug_performance) {
        std::cout << "Mouse PRESS - Button: " << (event->button() == Qt::LeftButton ? "LEFT" : "RIGHT")
                  << ", Drawing mode: " << _drawing_mode << ", Current drawing points: " << _drawing_points.size() << std::endl;
    }

    auto const button = event->button();

    if (button != Qt::LeftButton && button != Qt::RightButton) {
        // Unhandled buttons get the default scene behavior.
        QGraphicsScene::mousePressEvent(event);
        return;
    }

    auto const scene_pos = event->scenePos();

    // In drawing mode, any press restarts the stroke at the press position.
    if (_drawing_mode) {
        _drawing_points.clear();
        _drawing_points.push_back(scene_pos);
        _is_drawing = true;
        if (button == Qt::LeftButton && _debug_performance) {
            std::cout << "  Started drawing - cleared and added first point" << std::endl;
        }
    }

    // Strong-typed coordinates: raw canvas position and the position mapped
    // back into media pixel space via the current aspect scaling.
    CanvasCoordinates const canvas_coords(static_cast<float>(scene_pos.x()),
                                          static_cast<float>(scene_pos.y()));
    MediaCoordinates const media_coords(static_cast<float>(scene_pos.x() / getXAspect()),
                                        static_cast<float>(scene_pos.y() / getYAspect()));

    if (button == Qt::LeftButton) {
        // Legacy qreal signals first, strong-typed equivalents after.
        emit leftClick(scene_pos.x(), scene_pos.y());
        emit leftClickMedia(scene_pos.x() / getXAspect(),
                            scene_pos.y() / getYAspect());
        emit leftClickCanvas(canvas_coords);
        emit leftClickMediaCoords(media_coords);
    } else {
        emit rightClick(scene_pos.x(), scene_pos.y());
        emit rightClickMedia(scene_pos.x() / getXAspect(),
                             scene_pos.y() / getYAspect());
        emit rightClickCanvas(canvas_coords);
        emit rightClickMediaCoords(media_coords);
    }
}
810
/**
 * @brief Handles mouse button releases.
 *
 * Always emits the plain release signal for the button; additionally, when a
 * drawing stroke was in progress, ends it and emits the corresponding
 * *ReleaseDrawing signal. Base-class handling always runs afterwards.
 */
void Media_Window::mouseReleaseEvent(QGraphicsSceneMouseEvent * event) {
    if (_debug_performance) {
        std::cout << "Mouse RELEASE - Button: " << (event->button() == Qt::LeftButton ? "LEFT" : "RIGHT")
                  << ", Was drawing: " << _is_drawing << ", Drawing points: " << _drawing_points.size() << std::endl;
    }

    bool const finishing_stroke = _is_drawing;

    if (event->button() == Qt::LeftButton) {
        emit leftRelease();

        // Drawing-specific signal fires only when a stroke was in progress.
        if (finishing_stroke) {
            _is_drawing = false;
            emit leftReleaseDrawing();
            if (_debug_performance) {
                std::cout << "  Drawing finished - emitted leftReleaseDrawing signal" << std::endl;
            }
        }
    } else if (event->button() == Qt::RightButton) {
        emit rightRelease();

        if (finishing_stroke) {
            _is_drawing = false;
            emit rightReleaseDrawing();
        }
    }

    QGraphicsScene::mouseReleaseEvent(event);
}
UNCOV
840
/**
 * @brief Tracks hover position and, while drawing, accumulates stroke points.
 *
 * Emits both the legacy qreal mouseMove signal and the strong-typed
 * mouseMoveCanvas signal on every move.
 */
void Media_Window::mouseMoveEvent(QGraphicsSceneMouseEvent * event) {
    // Monotone counter used solely to throttle debug logging.
    static int move_count = 0;
    ++move_count;

    auto const pos = event->scenePos();

    _hover_position = pos;

    if (_is_drawing) {
        _drawing_points.push_back(pos);
        // Log only every 10th move while drawing to avoid console spam.
        if (_debug_performance && move_count % 10 == 0) {
            std::cout << "Mouse MOVE #" << move_count << " - Drawing: adding point (total: "
                      << _drawing_points.size() << ")" << std::endl;
        }
    } else if (_debug_performance && move_count % 50 == 0) {
        std::cout << "Mouse MOVE #" << move_count << " - Hover only" << std::endl;
    }

    // Legacy signal.
    emit mouseMove(pos.x(), pos.y());

    // Strong-typed coordinate signal.
    CanvasCoordinates const canvas_coords(static_cast<float>(pos.x()),
                                          static_cast<float>(pos.y()));
    emit mouseMoveCanvas(canvas_coords);

    QGraphicsScene::mouseMoveEvent(event);
}
868

869
float Media_Window::getXAspect() const {
16✔
870

871
    std::string active_media_key;
16✔
872
    for (auto const & [config_key, config]: _media_configs) {
32✔
873
        if (config->is_visible) {
16✔
874
            active_media_key = config_key;
×
UNCOV
875
            break;
×
876
        }
877
    }
878
    if (active_media_key.empty()) {
16✔
879
        // No active media, return default aspect ratio
880
        return 1.0f;
16✔
881
    }
882

883
    auto _media = _data_manager->getData<MediaData>(active_media_key);
×
884
    if (!_media) {
×
UNCOV
885
        return 1.0f;// Default aspect ratio
×
886
    }
887

UNCOV
888
    float const scale_width = static_cast<float>(_canvasWidth) / static_cast<float>(_media->getWidth());
×
889

UNCOV
890
    return scale_width;
×
891
}
16✔
892

893
float Media_Window::getYAspect() const {
16✔
894

895
    std::string active_media_key;
16✔
896
    for (auto const & [config_key, config]: _media_configs) {
32✔
897
        if (config->is_visible) {
16✔
898
            active_media_key = config_key;
×
UNCOV
899
            break;
×
900
        }
901
    }
902
    if (active_media_key.empty()) {
16✔
903
        // No active media, return default aspect ratio
904
        return 1.0f;
16✔
905
    }
906

907
    auto _media = _data_manager->getData<MediaData>(active_media_key);
×
908
    if (!_media) {
×
UNCOV
909
        return 1.0f;// Default aspect ratio
×
910
    }
911

UNCOV
912
    float const scale_height = static_cast<float>(_canvasHeight) / static_cast<float>(_media->getHeight());
×
913

UNCOV
914
    return scale_height;
×
915
}
16✔
916

917
void Media_Window::_plotLineData() {
16✔
918
    auto const current_time = _data_manager->getCurrentTime();
16✔
919

920
    auto video_timeframe = _data_manager->getTime(TimeKey("time"));
16✔
921

922
    auto xAspect = getXAspect();
16✔
923
    auto yAspect = getYAspect();
16✔
924

925
    for (auto const & [line_key, _line_config]: _line_configs) {
16✔
926

UNCOV
927
        if (!_line_config.get()->is_visible) continue;
×
928

UNCOV
929
        auto plot_color = plot_color_with_alpha(_line_config.get());
×
930

931
        auto line_timeframe_key = _data_manager->getTimeKey(line_key);
×
UNCOV
932
        auto line_timeframe = _data_manager->getTime(line_timeframe_key);
×
933

934
        auto line_data = _data_manager->getData<LineData>(line_key);
×
UNCOV
935
        auto lineData = line_data->getAtTime(TimeFrameIndex(current_time), video_timeframe.get(), line_timeframe.get());
×
936

937
        // Check for line-specific image size scaling
UNCOV
938
        auto image_size = line_data->getImageSize();
×
939

940
        if (image_size.height != -1) {
×
941
            auto const line_height = static_cast<float>(image_size.height);
×
UNCOV
942
            yAspect = static_cast<float>(_canvasHeight) / line_height;
×
943
        }
944

945
        if (image_size.width != -1) {
×
946
            auto const line_width = static_cast<float>(image_size.width);
×
UNCOV
947
            xAspect = static_cast<float>(_canvasWidth) / line_width;
×
948
        }
949

950
        if (lineData.empty()) {
×
UNCOV
951
            continue;
×
952
        }
953

954
        for (int line_idx = 0; line_idx < static_cast<int>(lineData.size()); ++line_idx) {
×
UNCOV
955
            auto const & single_line = lineData[line_idx];
×
956

957
            if (single_line.empty()) {
×
UNCOV
958
                continue;
×
959
            }
960

961
            // Check if this line is selected
UNCOV
962
            bool const is_selected = (_line_config.get()->selected_line_index == line_idx);
×
963

964
            // Use segment if enabled, otherwise use full line
965
            Line2D line_to_plot;
×
966
            if (_line_config.get()->show_segment) {
×
967
                float const start_percentage = static_cast<float>(_line_config.get()->segment_start_percentage) / 100.0f;
×
968
                float const end_percentage = static_cast<float>(_line_config.get()->segment_end_percentage) / 100.0f;
×
UNCOV
969
                line_to_plot = get_segment_between_percentages(single_line, start_percentage, end_percentage);
×
970

971
                // If segment is empty (invalid percentages), skip this line
972
                if (line_to_plot.empty()) {
×
UNCOV
973
                    continue;
×
974
                }
975
            } else {
UNCOV
976
                line_to_plot = single_line;
×
977
            }
978

UNCOV
979
            QPainterPath path = QPainterPath();
×
980

UNCOV
981
            auto single_line_thres = 1000.0;
×
982

UNCOV
983
            path.moveTo(QPointF(static_cast<float>(line_to_plot[0].x) * xAspect, static_cast<float>(line_to_plot[0].y) * yAspect));
×
984

985
            for (size_t i = 1; i < line_to_plot.size(); i++) {
×
986
                auto dx = line_to_plot[i].x - line_to_plot[i - 1].x;
×
987
                auto dy = line_to_plot[i].y - line_to_plot[i - 1].y;
×
988
                auto d = std::sqrt((dx * dx) + (dy * dy));
×
989
                if (d > single_line_thres) {
×
UNCOV
990
                    path.moveTo(QPointF(static_cast<float>(line_to_plot[i].x) * xAspect, static_cast<float>(line_to_plot[i].y) * yAspect));
×
991
                } else {
UNCOV
992
                    path.lineTo(QPointF(static_cast<float>(line_to_plot[i].x) * xAspect, static_cast<float>(line_to_plot[i].y) * yAspect));
×
993
                }
994
            }
995

996
            // Create pen with configurable thickness - selected lines are thicker and have different color
997
            QPen linePen;
×
998
            if (is_selected) {
×
999
                linePen.setColor(QColor(255, 0, 0));                     // Red for selected lines
×
1000
                linePen.setWidth(_line_config.get()->line_thickness + 2);// Thicker for selected
×
UNCOV
1001
                linePen.setStyle(Qt::DashLine);                          // Dashed line for selected
×
1002
            } else {
1003
                linePen.setColor(plot_color);
×
UNCOV
1004
                linePen.setWidth(_line_config.get()->line_thickness);
×
1005
            }
1006

1007
            auto linePath = addPath(path, linePen);
×
UNCOV
1008
            _line_paths.append(linePath);
×
1009

1010
            // Add dot at line base (always filled) - selected lines have red dot
1011
            QColor const dot_color = is_selected ? QColor(255, 0, 0) : plot_color;
×
1012
            auto ellipse = addEllipse(
×
1013
                    static_cast<float>(line_to_plot[0].x) * xAspect - 2.5,
×
UNCOV
1014
                    static_cast<float>(line_to_plot[0].y) * yAspect - 2.5,
×
1015
                    5.0, 5.0,
1016
                    QPen(dot_color),
×
1017
                    QBrush(dot_color));
×
UNCOV
1018
            _points.append(ellipse);
×
1019

1020
            // If show_points is enabled, add open circles at each point on the line
UNCOV
1021
            if (_line_config.get()->show_points) {
×
1022
                // Create pen and brush for open circles
1023
                QPen pointPen(dot_color);
×
UNCOV
1024
                pointPen.setWidth(1);
×
1025

1026
                // Empty brush for open circles
UNCOV
1027
                QBrush const emptyBrush(Qt::NoBrush);
×
1028

1029
                // Start from the second point (first one is already shown as filled)
1030
                for (size_t i = 1; i < line_to_plot.size(); i++) {
×
1031
                    auto ellipse = addEllipse(
×
1032
                            static_cast<float>(line_to_plot[i].x) * xAspect - 2.5,
×
UNCOV
1033
                            static_cast<float>(line_to_plot[i].y) * yAspect - 2.5,
×
1034
                            5.0, 5.0,
1035
                            pointPen,
1036
                            emptyBrush);
UNCOV
1037
                    _points.append(ellipse);
×
1038
                }
UNCOV
1039
            }
×
1040

1041
            // If position marker is enabled, add a marker at the specified percentage
1042
            if (_line_config.get()->show_position_marker) {
×
1043
                float const percentage = static_cast<float>(_line_config.get()->position_percentage) / 100.0f;
×
UNCOV
1044
                Point2D<float> const marker_pos = get_position_at_percentage(line_to_plot, percentage);
×
1045

1046
                float const marker_x = marker_pos.x * xAspect;
×
UNCOV
1047
                float const marker_y = marker_pos.y * yAspect;
×
1048

1049
                // Create a distinctive marker (filled circle with border)
1050
                QPen markerPen(QColor(255, 255, 255));// White border
×
1051
                markerPen.setWidth(2);
×
UNCOV
1052
                QBrush const markerBrush(dot_color);// Same color as line but filled
×
1053

1054
                auto marker = addEllipse(
×
1055
                        marker_x - 4.0f,
×
UNCOV
1056
                        marker_y - 4.0f,
×
1057
                        8.0f, 8.0f,
1058
                        markerPen,
1059
                        markerBrush);
1060
                _points.append(marker);
×
1061
            }
×
1062
        }
×
UNCOV
1063
    }
×
1064
}
32✔
1065

1066
void Media_Window::_plotMaskData() {
16✔
1067
    auto const current_time = _data_manager->getCurrentTime();
16✔
1068

1069
    auto video_timeframe = _data_manager->getTime(TimeKey("time"));
16✔
1070

1071
    for (auto const & [mask_key, _mask_config]: _mask_configs) {
29✔
1072
        if (!_mask_config.get()->is_visible) continue;
13✔
1073

UNCOV
1074
        auto plot_color = plot_color_with_alpha(_mask_config.get());
×
1075

1076
        auto mask = _data_manager->getData<MaskData>(mask_key);
×
UNCOV
1077
        auto image_size = mask->getImageSize();
×
1078

1079
        auto mask_timeframe_key = _data_manager->getTimeKey(mask_key);
×
UNCOV
1080
        auto mask_timeframe = _data_manager->getTime(mask_timeframe_key);
×
1081

1082
        // Check for preview data first
1083
        std::vector<Mask2D> maskData;
×
UNCOV
1084
        std::vector<Mask2D> maskData2;
×
1085

UNCOV
1086
        if (_mask_preview_active && _preview_mask_data.count(mask_key) > 0) {
×
1087
            // Use preview data
1088
            maskData = _preview_mask_data[mask_key];
×
UNCOV
1089
            maskData2.clear();// No time -1 data for preview
×
1090
        } else {
1091
            // Use original data
1092
            maskData = mask->getAtTime(TimeFrameIndex(current_time), video_timeframe.get(), mask_timeframe.get());
×
UNCOV
1093
            maskData2 = mask->getAtTime(TimeFrameIndex(-1));
×
1094
        }
1095

1096
        _plotSingleMaskData(maskData, image_size, plot_color, _mask_config.get());
×
UNCOV
1097
        _plotSingleMaskData(maskData2, image_size, plot_color, _mask_config.get());
×
1098

1099
        // Plot bounding boxes if enabled
UNCOV
1100
        if (_mask_config.get()->show_bounding_box) {
×
1101
            // Calculate scaling factors based on mask image size, not media aspect ratio
1102
            float const xAspect = static_cast<float>(_canvasWidth) / static_cast<float>(image_size.width);
×
UNCOV
1103
            float const yAspect = static_cast<float>(_canvasHeight) / static_cast<float>(image_size.height);
×
1104

1105
            // For current time masks
1106
            for (auto const & single_mask: maskData) {
×
1107
                if (!single_mask.empty()) {
×
1108
                    auto bounding_box = get_bounding_box(single_mask);
×
1109
                    auto min_point = bounding_box.first;
×
UNCOV
1110
                    auto max_point = bounding_box.second;
×
1111

1112
                    // Scale coordinates to canvas using mask image size
1113
                    float const min_x = static_cast<float>(min_point.x) * xAspect;
×
1114
                    float const min_y = static_cast<float>(min_point.y) * yAspect;
×
1115
                    float const max_x = static_cast<float>(max_point.x) * xAspect;
×
UNCOV
1116
                    float const max_y = static_cast<float>(max_point.y) * yAspect;
×
1117

1118
                    // Draw bounding box rectangle (no fill, just outline)
1119
                    QPen boundingBoxPen(plot_color);
×
1120
                    boundingBoxPen.setWidth(2);
×
UNCOV
1121
                    QBrush const emptyBrush(Qt::NoBrush);
×
1122

UNCOV
1123
                    auto boundingBoxRect = addRect(min_x, min_y, max_x - min_x, max_y - min_y,
×
1124
                                                   boundingBoxPen, emptyBrush);
1125
                    _mask_bounding_boxes.append(boundingBoxRect);
×
UNCOV
1126
                }
×
1127
            }
1128

1129
            // For time -1 masks
1130
            for (auto const & single_mask: maskData2) {
×
1131
                if (!single_mask.empty()) {
×
1132
                    auto bounding_box = get_bounding_box(single_mask);
×
1133
                    auto min_point = bounding_box.first;
×
UNCOV
1134
                    auto max_point = bounding_box.second;
×
1135

1136
                    // Scale coordinates to canvas using mask image size
1137
                    float const min_x = static_cast<float>(min_point.x) * xAspect;
×
1138
                    float const min_y = static_cast<float>(min_point.y) * yAspect;
×
1139
                    float const max_x = static_cast<float>(max_point.x) * xAspect;
×
UNCOV
1140
                    float const max_y = static_cast<float>(max_point.y) * yAspect;
×
1141

1142
                    // Draw bounding box rectangle (no fill, just outline)
1143
                    QPen boundingBoxPen(plot_color);
×
1144
                    boundingBoxPen.setWidth(2);
×
UNCOV
1145
                    QBrush const emptyBrush(Qt::NoBrush);
×
1146

UNCOV
1147
                    auto boundingBoxRect = addRect(min_x, min_y, max_x - min_x, max_y - min_y,
×
1148
                                                   boundingBoxPen, emptyBrush);
1149
                    _mask_bounding_boxes.append(boundingBoxRect);
×
UNCOV
1150
                }
×
1151
            }
1152
        }
1153

1154
        // Plot outlines if enabled
UNCOV
1155
        if (_mask_config.get()->show_outline) {
×
1156
            // Create a slightly darker color for outlines
UNCOV
1157
            QRgb const outline_color = plot_color;
×
1158

1159
            // For current time masks
1160
            for (auto const & single_mask: maskData) {
×
UNCOV
1161
                if (!single_mask.empty()) {
×
1162
                    // Generate outline mask with thickness of 2 pixels
UNCOV
1163
                    auto outline_mask = generate_outline_mask(single_mask, 2, image_size.width, image_size.height);
×
1164

UNCOV
1165
                    if (!outline_mask.empty()) {
×
1166
                        // Plot the outline mask using the same approach as regular masks
UNCOV
1167
                        _plotSingleMaskData({outline_mask}, image_size, outline_color, _mask_config.get());
×
1168
                    }
UNCOV
1169
                }
×
1170
            }
1171

1172
            // For time -1 masks
1173
            for (auto const & single_mask: maskData2) {
×
UNCOV
1174
                if (!single_mask.empty()) {
×
1175
                    // Generate outline mask with thickness of 2 pixels
UNCOV
1176
                    auto outline_mask = generate_outline_mask(single_mask, 2, image_size.width, image_size.height);
×
1177

UNCOV
1178
                    if (!outline_mask.empty()) {
×
1179
                        // Plot the outline mask using the same approach as regular masks
UNCOV
1180
                        _plotSingleMaskData({outline_mask}, image_size, outline_color, _mask_config.get());
×
1181
                    }
UNCOV
1182
                }
×
1183
            }
1184
        }
UNCOV
1185
    }
×
1186
}
32✔
1187

UNCOV
1188
void Media_Window::_plotSingleMaskData(std::vector<Mask2D> const & maskData, ImageSize mask_size, QRgb plot_color, MaskDisplayOptions const * mask_config) {
×
1189
    // Skip transparency masks as they are handled at the media level
1190
    if (mask_config && mask_config->use_as_transparency) {
×
UNCOV
1191
        return;
×
1192
    }
1193

UNCOV
1194
    for (auto const & single_mask: maskData) {
×
1195
        // Normal mode: overlay mask on top of media
1196
        QImage unscaled_mask_image(mask_size.width, mask_size.height, QImage::Format::Format_ARGB32);
×
UNCOV
1197
        unscaled_mask_image.fill(0);
×
1198

1199
        for (auto const point: single_mask) {
×
1200
            unscaled_mask_image.setPixel(
×
UNCOV
1201
                    QPoint(static_cast<int>(point.x), static_cast<int>(point.y)),
×
1202
                    plot_color);
1203
        }
1204

1205
        auto scaled_mask_image = unscaled_mask_image.scaled(_canvasWidth, _canvasHeight);
×
1206
        auto maskPixmap = addPixmap(QPixmap::fromImage(scaled_mask_image));
×
1207
        _masks.append(maskPixmap);
×
UNCOV
1208
    }
×
1209
}
1210

1211
/**
 * @brief Blacks out every media pixel not covered by a visible transparency mask.
 *
 * For each mask config marked use_as_transparency, the mask for the current
 * frame is rasterized, scaled to canvas size, and used as a keep-mask: pixels
 * covered by the mask keep the media image, all others become opaque black.
 *
 * @param media_image the already-composited media image (canvas-sized)
 * @return the media image with transparency masking applied
 */
QImage Media_Window::_applyTransparencyMasks(QImage const & media_image) {
    // Debug output is gated on _debug_performance, consistent with the other
    // rendering paths, instead of printing unconditionally each frame.
    if (_debug_performance) {
        std::cout << "Applying transparency masks..." << std::endl;
        std::cout << "Media image size: " << media_image.width() << "x" << media_image.height() << std::endl;
        std::cout << "Canvas dimensions: " << _canvasWidth << "x" << _canvasHeight << std::endl;
    }

    auto video_timeframe = _data_manager->getTime(TimeKey("time"));

    QImage final_image = media_image;

    // Process all transparency masks.
    for (auto const & [mask_key, mask_config]: _mask_configs) {
        if (!mask_config->is_visible || !mask_config->use_as_transparency) {
            continue;
        }

        auto mask_data = _data_manager->getData<MaskData>(mask_key);
        if (!mask_data) {
            continue;// config without backing data — skip safely
        }
        auto image_size = mask_data->getImageSize();

        auto mask_timeframe_key = _data_manager->getTimeKey(mask_key);
        auto mask_timeframe = _data_manager->getTime(mask_timeframe_key);

        auto const current_time = _data_manager->getCurrentTime();
        auto maskData = mask_data->getAtTime(TimeFrameIndex(current_time), video_timeframe.get(), mask_timeframe.get());

        if (_debug_performance) {
            std::cout << "Processing transparency mask: " << mask_key << std::endl;
            std::cout << "Mask image size: " << image_size.width << "x" << image_size.height << std::endl;
            std::cout << "Mask data size: " << maskData.size() << std::endl;
        }

        // Rasterize all mask points as white on a transparent background.
        QImage unscaled_mask_image(image_size.width, image_size.height, QImage::Format::Format_ARGB32);
        unscaled_mask_image.fill(0);
        for (auto const & single_mask: maskData) {
            for (auto const point: single_mask) {
                unscaled_mask_image.setPixel(
                        QPoint(static_cast<int>(point.x), static_cast<int>(point.y)),
                        qRgba(255, 255, 255, 255));
            }
        }

        QImage const scaled_mask_image = unscaled_mask_image.scaled(_canvasWidth, _canvasHeight);

        // Keep media pixels where the mask is white; black out everything else.
        // (The previous version also wrote a pixel back onto itself in the
        // kept branch — a per-pixel no-op that has been removed.)
        for (int y = 0; y < _canvasHeight; ++y) {
            for (int x = 0; x < _canvasWidth; ++x) {
                if (scaled_mask_image.pixel(x, y) != qRgba(255, 255, 255, 255)) {
                    final_image.setPixel(x, y, qRgba(0, 0, 0, 255));
                }
            }
        }
    }

    return final_image;
}
1279

1280
void Media_Window::_plotPointData() {
16✔
1281

1282
    auto const current_time = TimeFrameIndex(_data_manager->getCurrentTime());
16✔
1283
    auto video_timeframe = _data_manager->getTime(TimeKey("time"));
16✔
1284

1285
    if (!video_timeframe) {
16✔
1286
        std::cerr << "Error: Could not get video timeframe 'time' for point conversion" << std::endl;
×
UNCOV
1287
        return;
×
1288
    }
1289

1290
    for (auto const & [point_key, _point_config]: _point_configs) {
16✔
UNCOV
1291
        if (!_point_config.get()->is_visible) continue;
×
1292

UNCOV
1293
        auto plot_color = plot_color_with_alpha(_point_config.get());
×
1294

UNCOV
1295
        auto point = _data_manager->getData<PointData>(point_key);
×
1296

1297
        auto point_timeframe_key = _data_manager->getTimeKey(point_key);
×
1298
        if (point_timeframe_key.empty()) {
×
1299
            std::cerr << "Error: No timeframe found for point data: " << point_key << std::endl;
×
UNCOV
1300
            continue;
×
1301
        }
1302

UNCOV
1303
        auto point_timeframe = _data_manager->getTime(point_timeframe_key);
×
1304

1305
        auto xAspect = getXAspect();
×
UNCOV
1306
        auto yAspect = getYAspect();
×
1307

UNCOV
1308
        auto image_size = point->getImageSize();
×
1309

1310
        if (image_size.height != -1) {
×
1311
            auto const mask_height = static_cast<float>(image_size.height);
×
UNCOV
1312
            yAspect = static_cast<float>(_canvasHeight) / mask_height;
×
1313
        }
1314

1315
        if (image_size.width != -1) {
×
1316
            auto const mask_width = static_cast<float>(image_size.width);
×
UNCOV
1317
            xAspect = static_cast<float>(_canvasWidth) / mask_width;
×
1318
        }
1319

UNCOV
1320
        auto pointData = point->getAtTime(current_time, video_timeframe.get(), point_timeframe.get());
×
1321

1322
        // Get configurable point size
UNCOV
1323
        float const point_size = static_cast<float>(_point_config.get()->point_size);
×
1324

1325
        for (auto const & single_point: pointData) {
×
1326
            float const x_pos = single_point.x * xAspect;
×
UNCOV
1327
            float const y_pos = single_point.y * yAspect;
×
1328

1329
            // Create the appropriate marker shape based on configuration
1330
            switch (_point_config.get()->marker_shape) {
×
1331
                case PointMarkerShape::Circle: {
×
1332
                    QPen pen(plot_color);
×
1333
                    pen.setWidth(2);
×
1334
                    QBrush const brush(plot_color);
×
UNCOV
1335
                    auto ellipse = addEllipse(x_pos - point_size / 2, y_pos - point_size / 2,
×
1336
                                              point_size, point_size, pen, brush);
1337
                    _points.append(ellipse);
×
1338
                    break;
×
1339
                }
×
1340
                case PointMarkerShape::Square: {
×
1341
                    QPen pen(plot_color);
×
1342
                    pen.setWidth(2);
×
1343
                    QBrush const brush(plot_color);
×
UNCOV
1344
                    auto rect = addRect(x_pos - point_size / 2, y_pos - point_size / 2,
×
1345
                                        point_size, point_size, pen, brush);
1346
                    _points.append(rect);
×
1347
                    break;
×
1348
                }
×
1349
                case PointMarkerShape::Triangle: {
×
1350
                    QPen pen(plot_color);
×
1351
                    pen.setWidth(2);
×
UNCOV
1352
                    QBrush const brush(plot_color);
×
1353

1354
                    // Create triangle polygon
1355
                    QPolygonF triangle;
×
1356
                    float const half_size = point_size / 2;
×
1357
                    triangle << QPointF(x_pos, y_pos - half_size)             // Top point
×
1358
                             << QPointF(x_pos - half_size, y_pos + half_size) // Bottom left
×
UNCOV
1359
                             << QPointF(x_pos + half_size, y_pos + half_size);// Bottom right
×
1360

1361
                    auto polygon = addPolygon(triangle, pen, brush);
×
1362
                    _points.append(polygon);
×
1363
                    break;
×
1364
                }
×
1365
                case PointMarkerShape::Cross: {
×
1366
                    QPen pen(plot_color);
×
UNCOV
1367
                    pen.setWidth(3);
×
1368

UNCOV
1369
                    float const half_size = point_size / 2;
×
1370
                    // Draw horizontal line
1371
                    auto hLine = addLine(x_pos - half_size, y_pos, x_pos + half_size, y_pos, pen);
×
UNCOV
1372
                    _points.append(hLine);
×
1373

1374
                    // Draw vertical line
1375
                    auto vLine = addLine(x_pos, y_pos - half_size, x_pos, y_pos + half_size, pen);
×
1376
                    _points.append(vLine);
×
1377
                    break;
×
1378
                }
×
1379
                case PointMarkerShape::X: {
×
1380
                    QPen pen(plot_color);
×
UNCOV
1381
                    pen.setWidth(3);
×
1382

UNCOV
1383
                    float const half_size = point_size / 2;
×
1384
                    // Draw diagonal line (\)
1385
                    auto dLine1 = addLine(x_pos - half_size, y_pos - half_size,
×
1386
                                          x_pos + half_size, y_pos + half_size, pen);
×
UNCOV
1387
                    _points.append(dLine1);
×
1388

1389
                    // Draw diagonal line (/)
1390
                    auto dLine2 = addLine(x_pos - half_size, y_pos + half_size,
×
1391
                                          x_pos + half_size, y_pos - half_size, pen);
×
1392
                    _points.append(dLine2);
×
1393
                    break;
×
1394
                }
×
1395
                case PointMarkerShape::Diamond: {
×
1396
                    QPen pen(plot_color);
×
1397
                    pen.setWidth(2);
×
UNCOV
1398
                    QBrush brush(plot_color);
×
1399

1400
                    // Create diamond polygon (rotated square)
1401
                    QPolygonF diamond;
×
1402
                    float const half_size = point_size / 2;
×
1403
                    diamond << QPointF(x_pos, y_pos - half_size) // Top
×
1404
                            << QPointF(x_pos + half_size, y_pos) // Right
×
1405
                            << QPointF(x_pos, y_pos + half_size) // Bottom
×
UNCOV
1406
                            << QPointF(x_pos - half_size, y_pos);// Left
×
1407

1408
                    auto polygon = addPolygon(diamond, pen, brush);
×
1409
                    _points.append(polygon);
×
1410
                    break;
×
UNCOV
1411
                }
×
1412
            }
1413
        }
UNCOV
1414
    }
×
1415
}
16✔
1416

1417
void Media_Window::_plotDigitalIntervalSeries() {
16✔
1418
    auto const current_time = _data_manager->getCurrentTime();
16✔
1419
    auto video_timeframe = _data_manager->getTime(TimeKey("time"));
16✔
1420

1421
    for (auto const & [key, _interval_config]: _interval_configs) {
16✔
UNCOV
1422
        if (!_interval_config.get()->is_visible) continue;
×
1423

1424
        // Only render if using Box plotting style
UNCOV
1425
        if (_interval_config.get()->plotting_style != IntervalPlottingStyle::Box) continue;
×
1426

UNCOV
1427
        auto plot_color = plot_color_with_alpha(_interval_config.get());
×
1428

UNCOV
1429
        auto interval_series = _data_manager->getData<DigitalIntervalSeries>(key);
×
1430

1431
        // Get the timeframes for conversion
1432
        auto interval_timeframe_key = _data_manager->getTimeKey(key);
×
1433
        if (interval_timeframe_key.empty()) {
×
1434
            std::cerr << "Error: No timeframe found for digital interval series: " << key << std::endl;
×
UNCOV
1435
            continue;
×
1436
        }
1437

UNCOV
1438
        auto interval_timeframe = _data_manager->getTime(interval_timeframe_key);
×
1439

1440
        if (!video_timeframe) {
×
1441
            std::cerr << "Error: Could not get video timeframe 'time' for interval conversion" << std::endl;
×
UNCOV
1442
            continue;
×
1443
        }
1444
        if (!interval_timeframe) {
×
1445
            std::cerr << "Error: Could not get interval timeframe '" << interval_timeframe_key
×
1446
                      << "' for series: " << key << std::endl;
×
UNCOV
1447
            continue;
×
1448
        }
1449

UNCOV
1450
        bool const needs_conversion = _needsTimeFrameConversion(video_timeframe, interval_timeframe);
×
1451

1452
        // Generate relative times based on frame range setting
1453
        std::vector<int> relative_times;
×
1454
        int const frame_range = _interval_config->frame_range;
×
1455
        for (int i = -frame_range; i <= frame_range; ++i) {
×
UNCOV
1456
            relative_times.push_back(i);
×
1457
        }
1458

UNCOV
1459
        int const square_size = _interval_config->box_size;
×
1460

1461
        // Calculate position based on location setting
1462
        int start_x, start_y;
1463
        switch (_interval_config->location) {
×
1464
            case IntervalLocation::TopLeft:
×
1465
                start_x = 0;
×
1466
                start_y = 0;
×
1467
                break;
×
1468
            case IntervalLocation::TopRight:
×
1469
                start_x = _canvasWidth - square_size * static_cast<int>(relative_times.size());
×
1470
                start_y = 0;
×
1471
                break;
×
1472
            case IntervalLocation::BottomLeft:
×
1473
                start_x = 0;
×
1474
                start_y = _canvasHeight - square_size;
×
1475
                break;
×
1476
            case IntervalLocation::BottomRight:
×
1477
                start_x = _canvasWidth - square_size * static_cast<int>(relative_times.size());
×
1478
                start_y = _canvasHeight - square_size;
×
UNCOV
1479
                break;
×
1480
        }
1481

1482
        for (size_t i = 0; i < relative_times.size(); ++i) {
×
1483
            int const video_time = current_time + relative_times[i];
×
UNCOV
1484
            int query_time = video_time;// Default: no conversion needed
×
1485

UNCOV
1486
            if (needs_conversion) {
×
1487
                // Convert from video timeframe ("time") to interval series timeframe
1488
                // 1. Convert video time index to actual time value
UNCOV
1489
                int const video_time_value = video_timeframe->getTimeAtIndex(TimeFrameIndex(video_time));
×
1490

1491
                // 2. Convert time value to index in interval series timeframe
UNCOV
1492
                query_time = interval_timeframe->getIndexAtTime(static_cast<float>(video_time_value)).getValue();
×
1493
            }
1494

UNCOV
1495
            bool const event_present = interval_series->isEventAtTime(TimeFrameIndex(query_time));
×
1496

UNCOV
1497
            auto color = event_present ? plot_color : QColor(255, 255, 255, 10);// Transparent if no event
×
1498

1499
            auto intervalPixmap = addRect(
×
UNCOV
1500
                    start_x + i * square_size,
×
1501
                    start_y,
1502
                    square_size,
1503
                    square_size,
1504
                    QPen(Qt::black),// Black border
×
1505
                    QBrush(color)   // Fill with color if event is present
×
UNCOV
1506
            );
×
1507

UNCOV
1508
            _intervals.append(intervalPixmap);
×
1509
        }
UNCOV
1510
    }
×
1511
}
32✔
1512

1513
void Media_Window::_plotDigitalIntervalBorders() {
16✔
1514
    auto const current_time = _data_manager->getCurrentTime();
16✔
1515

1516
    for (auto const & [key, _interval_config]: _interval_configs) {
16✔
UNCOV
1517
        if (!_interval_config.get()->is_visible) continue;
×
1518

1519
        // Only render if using Border plotting style
UNCOV
1520
        if (_interval_config.get()->plotting_style != IntervalPlottingStyle::Border) continue;
×
1521

UNCOV
1522
        auto interval_series = _data_manager->getData<DigitalIntervalSeries>(key);
×
1523

1524
        // Get the timeframes for conversion
1525
        auto interval_timeframe_key = _data_manager->getTimeKey(key);
×
1526
        if (interval_timeframe_key.empty()) {
×
1527
            std::cerr << "Error: No timeframe found for digital interval series: " << key << std::endl;
×
UNCOV
1528
            continue;
×
1529
        }
1530

1531
        auto video_timeframe = _data_manager->getTime(TimeKey("time"));
×
UNCOV
1532
        auto interval_timeframe = _data_manager->getTime(interval_timeframe_key);
×
1533

1534
        if (!video_timeframe) {
×
1535
            std::cerr << "Error: Could not get video timeframe 'time' for interval conversion" << std::endl;
×
UNCOV
1536
            continue;
×
1537
        }
1538
        if (!interval_timeframe) {
×
1539
            std::cerr << "Error: Could not get interval timeframe '" << interval_timeframe_key
×
1540
                      << "' for series: " << key << std::endl;
×
UNCOV
1541
            continue;
×
1542
        }
1543

UNCOV
1544
        bool const needs_conversion = _needsTimeFrameConversion(video_timeframe, interval_timeframe);
×
1545

1546
        // Check if an interval is present at the current frame
1547
        bool interval_present = false;
×
UNCOV
1548
        if (needs_conversion) {
×
1549
            // Convert current video time to interval timeframe
1550
            auto video_time = video_timeframe->getTimeAtIndex(TimeFrameIndex(current_time));
×
1551
            auto interval_index = interval_timeframe->getIndexAtTime(video_time);
×
UNCOV
1552
            interval_present = interval_series->isEventAtTime(interval_index);
×
1553
        } else {
1554
            // Direct comparison (no timeframe conversion needed)
UNCOV
1555
            interval_present = interval_series->isEventAtTime(TimeFrameIndex(current_time));
×
1556
        }
1557

1558
        // If an interval is present, draw a border around the entire image
1559
        if (interval_present) {
×
UNCOV
1560
            auto plot_color = plot_color_with_alpha(_interval_config.get());
×
1561

1562
            // Get border thickness from config
UNCOV
1563
            int const thickness = _interval_config->border_thickness;
×
1564

1565
            QPen border_pen(plot_color);
×
UNCOV
1566
            border_pen.setWidth(thickness);
×
1567

1568
            // Draw border as 4 rectangles around the edges of the canvas
1569
            // Top border
1570
            auto top_border = addRect(0, 0, _canvasWidth, thickness, border_pen, QBrush(plot_color));
×
UNCOV
1571
            _intervals.append(top_border);
×
1572

1573
            // Bottom border
1574
            auto bottom_border = addRect(0, _canvasHeight - thickness, _canvasWidth, thickness, border_pen, QBrush(plot_color));
×
UNCOV
1575
            _intervals.append(bottom_border);
×
1576

1577
            // Left border
1578
            auto left_border = addRect(0, 0, thickness, _canvasHeight, border_pen, QBrush(plot_color));
×
UNCOV
1579
            _intervals.append(left_border);
×
1580

1581
            // Right border
1582
            auto right_border = addRect(_canvasWidth - thickness, 0, thickness, _canvasHeight, border_pen, QBrush(plot_color));
×
1583
            _intervals.append(right_border);
×
1584
        }
×
UNCOV
1585
    }
×
1586
}
16✔
1587

1588
void Media_Window::_plotTensorData() {
16✔
1589

1590
    auto const current_time = _data_manager->getCurrentTime();
16✔
1591

1592
    for (auto const & [key, config]: _tensor_configs) {
16✔
UNCOV
1593
        if (!config.get()->is_visible) continue;
×
1594

UNCOV
1595
        auto tensor_data = _data_manager->getData<TensorData>(key);
×
1596

UNCOV
1597
        auto tensor_shape = tensor_data->getFeatureShape();
×
1598

UNCOV
1599
        auto tensor_slice = tensor_data->getChannelSlice(TimeFrameIndex(current_time), config->display_channel);
×
1600

1601
        // Create a QImage from the tensor data
1602
        QImage tensor_image(static_cast<int>(tensor_shape[1]), static_cast<int>(tensor_shape[0]), QImage::Format::Format_ARGB32);
×
1603
        for (size_t y = 0; y < tensor_shape[0]; ++y) {
×
1604
            for (size_t x = 0; x < tensor_shape[1]; ++x) {
×
UNCOV
1605
                float const value = tensor_slice[y * tensor_shape[1] + x];
×
1606
                //int const pixel_value = static_cast<int>(value * 255);// Assuming the tensor values are normalized between 0 and 1
1607

1608
                // Use the config color with alpha
1609
                QColor const color(QString::fromStdString(config->hex_color));
×
1610
                int const alpha = std::lround(config->alpha * 255.0f * (value > 0 ? 1.0f : 0.0f));
×
UNCOV
1611
                QRgb const rgb = qRgba(color.red(), color.green(), color.blue(), alpha);
×
1612

UNCOV
1613
                tensor_image.setPixel(x, y, rgb);
×
1614
            }
1615
        }
1616

1617
        // Scale the tensor image to the size of the canvas
UNCOV
1618
        QImage const scaled_tensor_image = tensor_image.scaled(_canvasWidth, _canvasHeight, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
×
1619

UNCOV
1620
        auto tensor_pixmap = addPixmap(QPixmap::fromImage(scaled_tensor_image));
×
1621

1622
        _tensors.append(tensor_pixmap);
×
UNCOV
1623
    }
×
1624
}
16✔
1625

UNCOV
1626
std::vector<uint8_t> Media_Window::getDrawingMask() {
×
1627
    // Create a QImage with _canvasWidth and _canvasHeight
1628
    QImage maskImage(_canvasWidth, _canvasHeight, QImage::Format_Grayscale8);
×
UNCOV
1629
    maskImage.fill(0);
×
1630

1631
    QPainter painter(&maskImage);
×
1632
    painter.setPen(Qt::white);
×
UNCOV
1633
    painter.setBrush(QBrush(Qt::white));// Fill the circles with white
×
1634

UNCOV
1635
    for (auto const & point: _drawing_points) {
×
1636
        // Draw a filled circle with the current brush size (hover circle radius)
1637
        float const radius = static_cast<float>(_hover_circle_radius);
×
UNCOV
1638
        painter.drawEllipse(point, radius, radius);
×
1639
    }
UNCOV
1640
    painter.end();
×
1641

1642
    // Scale the QImage to the size of the media
1643
    auto media = _data_manager->getData<MediaData>("media");
×
1644
    int const mediaWidth = media->getWidth();
×
1645
    int const mediaHeight = media->getHeight();
×
UNCOV
1646
    QImage scaledMaskImage = maskImage.scaled(mediaWidth, mediaHeight);
×
1647

1648
    // Convert the QImage to a std::vector<uint8_t>
UNCOV
1649
    std::vector<uint8_t> mask(scaledMaskImage.bits(), scaledMaskImage.bits() + scaledMaskImage.sizeInBytes());
×
1650

1651
    return mask;
×
UNCOV
1652
}
×
1653

1654
void Media_Window::setShowHoverCircle(bool show) {
16✔
1655
    _show_hover_circle = show;
16✔
1656
    if (_show_hover_circle) {
16✔
1657
        if (_debug_performance) {
2✔
UNCOV
1658
            std::cout << "Hover circle enabled" << std::endl;
×
1659
        }
1660

1661
        // Create the hover circle item if it doesn't exist
1662
        if (!_hover_circle_item) {
2✔
1663
            QPen circlePen(Qt::red);
2✔
1664
            circlePen.setWidth(2);
2✔
1665
            _hover_circle_item = addEllipse(0, 0, _hover_circle_radius * 2, _hover_circle_radius * 2, circlePen);
2✔
1666
            _hover_circle_item->setVisible(false);// Initially hidden until mouse moves
2✔
1667
            // DO NOT add to _points vector - hover circle is managed separately
1668
            if (_debug_performance) {
2✔
UNCOV
1669
                std::cout << "  Created new hover circle item" << std::endl;
×
1670
            }
1671
        }
2✔
1672

1673
        // Connect mouse move to efficient hover circle update instead of full canvas update
1674
        connect(this, &Media_Window::mouseMove, this, &Media_Window::_updateHoverCirclePosition);
2✔
1675
    } else {
1676
        if (_debug_performance) {
14✔
UNCOV
1677
            std::cout << "Hover circle disabled" << std::endl;
×
1678
        }
1679

1680
        // Remove the hover circle item
1681
        if (_hover_circle_item) {
14✔
1682
            removeItem(_hover_circle_item);
2✔
1683
            delete _hover_circle_item;
2✔
1684
            _hover_circle_item = nullptr;
2✔
1685
            if (_debug_performance) {
2✔
UNCOV
1686
                std::cout << "  Deleted hover circle item" << std::endl;
×
1687
            }
1688
        }
1689

1690
        // Disconnect the mouse move signal
1691
        disconnect(this, &Media_Window::mouseMove, this, &Media_Window::_updateHoverCirclePosition);
14✔
1692
    }
1693
}
16✔
1694

1695
void Media_Window::setHoverCircleRadius(int radius) {
2✔
1696
    _hover_circle_radius = radius;
2✔
1697

1698
    // Update the existing hover circle item if it exists
1699
    if (_hover_circle_item && _show_hover_circle) {
2✔
1700
        qreal const x = _hover_position.x() - _hover_circle_radius;
2✔
1701
        qreal const y = _hover_position.y() - _hover_circle_radius;
2✔
1702
        _hover_circle_item->setRect(x, y, _hover_circle_radius * 2, _hover_circle_radius * 2);
2✔
1703
    }
1704
}
2✔
1705

UNCOV
1706
void Media_Window::_updateHoverCirclePosition() {
×
1707
    static int call_count = 0;
UNCOV
1708
    call_count++;
×
1709

UNCOV
1710
    if (_hover_circle_item && _show_hover_circle) {
×
1711
        // Update the position of the existing hover circle item
1712
        qreal const x = _hover_position.x() - _hover_circle_radius;
×
1713
        qreal const y = _hover_position.y() - _hover_circle_radius;
×
1714
        _hover_circle_item->setRect(x, y, _hover_circle_radius * 2, _hover_circle_radius * 2);
×
UNCOV
1715
        _hover_circle_item->setVisible(true);
×
1716

1717
        if (_debug_performance) {
×
1718
            std::cout << "Hover circle updated (call #" << call_count << ") at ("
×
UNCOV
1719
                      << _hover_position.x() << ", " << _hover_position.y() << ")" << std::endl;
×
1720
        }
1721
    } else {
×
1722
        if (_debug_performance) {
×
1723
            std::cout << "Hover circle update skipped (call #" << call_count << ") - item: "
×
UNCOV
1724
                      << (_hover_circle_item ? "exists" : "null") << ", show: " << _show_hover_circle << std::endl;
×
1725
        }
1726
    }
UNCOV
1727
}
×
1728

1729
void Media_Window::_addRemoveData() {
    // Intentionally a no-op. Called when a new data key is added (or removed);
    // this is where we may want to repopulate a custom table in the future.
}
1✔
1732

UNCOV
1733
bool Media_Window::_needsTimeFrameConversion(std::shared_ptr<TimeFrame> video_timeframe,
×
1734
                                             std::shared_ptr<TimeFrame> const & interval_timeframe) {
1735
    // If either timeframe is null, no conversion is possible/needed
1736
    if (!video_timeframe || !interval_timeframe) {
×
UNCOV
1737
        return false;
×
1738
    }
1739

1740
    // Conversion is needed if the timeframes are different objects
UNCOV
1741
    return video_timeframe.get() != interval_timeframe.get();
×
1742
}
1743

1744

1745
QRgb plot_color_with_alpha(BaseDisplayOptions const * opts) {
    // Builds an ARGB value from the option's hex color string, replacing the
    // parsed color's alpha with the configured [0, 1] opacity scaled to 0-255.
    QColor const base_color(QString::fromStdString(opts->hex_color));
    int const alpha = std::lround(opts->alpha * 255.0f);
    return qRgba(base_color.red(), base_color.green(), base_color.blue(), alpha);
}
1751

1752
bool Media_Window::hasPreviewMaskData(std::string const & mask_key) const {
×
UNCOV
1753
    return _mask_preview_active && _preview_mask_data.count(mask_key) > 0;
×
1754
}
1755

UNCOV
1756
std::vector<Mask2D> Media_Window::getPreviewMaskData(std::string const & mask_key) const {
×
1757

1758
    if (hasPreviewMaskData(mask_key)) {
×
UNCOV
1759
        return _preview_mask_data.at(mask_key);
×
1760
    }
UNCOV
1761
    return {};
×
1762
}
1763

UNCOV
1764
void Media_Window::setPreviewMaskData(std::string const & mask_key,
×
1765
                                      std::vector<std::vector<Point2D<uint32_t>>> const & preview_data,
1766
                                      bool active) {
1767
    if (active) {
×
1768
        _preview_mask_data[mask_key] = preview_data;
×
UNCOV
1769
        _mask_preview_active = true;
×
1770
    } else {
1771
        _preview_mask_data.erase(mask_key);
×
UNCOV
1772
        _mask_preview_active = !_preview_mask_data.empty();
×
1773
    }
UNCOV
1774
}
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc