paulmthompson / WhiskerToolbox / 17846711083

19 Sep 2025 02:28AM UTC · coverage: 72.02% (+0.08% from 71.942%)
Build 17846711083 · push · github · paulmthompson
Commit: "event in interval computer works with entity ids"

259 of 280 new or added lines in 6 files covered (92.5%).
268 existing lines in 17 files are now uncovered.
40247 of 55883 relevant lines covered (72.02%).
1227.29 hits per line.
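As a quick check, the headline percentages follow directly from the reported counts: 40247 / 55883 ≈ 0.7202, i.e. 72.02% overall coverage, and 259 / 280 ≈ 0.925, i.e. 92.5% of the new or changed lines.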

Source File: /src/DataManager/transforms/Media/whisker_tracing.cpp
#include "whisker_tracing.hpp"

#include "Masks/Mask_Data.hpp"
#include "whiskertracker.hpp"

#include <omp.h>

#include <algorithm>
#include <chrono>
#include <cmath>
#include <iostream>
#include <memory>
#include <vector>

namespace {
constexpr uint8_t MASK_TRUE_VALUE = 255;
constexpr int PROGRESS_COMPLETE = 100;
constexpr double PROGRESS_SCALE = 100.0;
}// namespace

// Convert whisker::Line2D to Line2D
Line2D WhiskerTracingOperation::convert_to_Line2D(whisker::Line2D const & whisker_line) {
    Line2D line;

    for (auto const & point: whisker_line) {
        line.push_back(Point2D<float>{point.x, point.y});
    }

    return line;
}

// Convert mask data to binary mask format for whisker tracker
std::vector<uint8_t> convert_mask_to_binary(MaskData const * mask_data,
                                            int time_index,
                                            ImageSize const & image_size) {
    std::vector<uint8_t> binary_mask(static_cast<size_t>(image_size.width * image_size.height), 0);

    if (!mask_data) {
        return binary_mask;// Return empty mask if no mask data
    }

    // Get mask at the specified time
    auto const & masks_at_time = mask_data->getAtTime(TimeFrameIndex(time_index));
    if (masks_at_time.empty()) {
        return binary_mask;// Return empty mask if no mask at this time
    }

    // Source mask image size (may differ from target media image size)
    auto const src_size = mask_data->getImageSize();

    // Fast path: identical sizes, just map points directly
    if (src_size.width == image_size.width && src_size.height == image_size.height) {
        for (auto const & mask: masks_at_time) {
            for (auto const & point: mask) {
                if (point.x < image_size.width && point.y < image_size.height) {
                    auto const index = static_cast<size_t>(point.y) * static_cast<size_t>(image_size.width)
                                     + static_cast<size_t>(point.x);
                    if (index < binary_mask.size()) {
                        binary_mask[index] = MASK_TRUE_VALUE;// Set to 255 for true pixels
                    }
                }
            }
        }
        return binary_mask;
    }

    // Build a source binary mask for nearest-neighbor scaling when sizes differ
    std::vector<uint8_t> src_binary(static_cast<size_t>(src_size.width * src_size.height), 0);
    for (auto const & mask: masks_at_time) {
        for (auto const & point: mask) {
            if (point.x < src_size.width && point.y < src_size.height) {
                auto const src_index = static_cast<size_t>(point.y) * static_cast<size_t>(src_size.width)
                                     + static_cast<size_t>(point.x);
                if (src_index < src_binary.size()) {
                    src_binary[src_index] = MASK_TRUE_VALUE;
                }
            }
        }
    }

    // Nearest-neighbor scale from src_binary (src_size) to binary_mask (image_size)
    auto const src_w = std::max(1, src_size.width);
    auto const src_h = std::max(1, src_size.height);
    auto const dst_w = std::max(1, image_size.width);
    auto const dst_h = std::max(1, image_size.height);

    // Precompute ratios; use (N-1) mapping to preserve endpoints
    auto const rx = (dst_w > 1 && src_w > 1)
                            ? (static_cast<double>(src_w - 1) / static_cast<double>(dst_w - 1))
                            : 0.0;
    auto const ry = (dst_h > 1 && src_h > 1)
                            ? (static_cast<double>(src_h - 1) / static_cast<double>(dst_h - 1))
                            : 0.0;

    for (int y = 0; y < dst_h; ++y) {
        int const ys = (dst_h > 1 && src_h > 1)
                               ? static_cast<int>(std::round(static_cast<double>(y) * ry))
                               : 0;
        for (int x = 0; x < dst_w; ++x) {
            int const xs = (dst_w > 1 && src_w > 1)
                                   ? static_cast<int>(std::round(static_cast<double>(x) * rx))
                                   : 0;

            auto const src_index = static_cast<size_t>(ys) * static_cast<size_t>(src_w)
                                 + static_cast<size_t>(xs);
            auto const dst_index = static_cast<size_t>(y) * static_cast<size_t>(dst_w)
                                 + static_cast<size_t>(x);

            if (src_index < src_binary.size() && dst_index < binary_mask.size()) {
                binary_mask[dst_index] = src_binary[src_index] ? MASK_TRUE_VALUE : 0;
            }
        }
    }

    return binary_mask;
}
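// Illustrative sketch (not part of the original file): the endpoint-preserving
// (N-1) nearest-neighbor mapping above, factored into a small helper for
// clarity. With src_extent = 3 and dst_extent = 5, destination columns 0..4
// map to source columns 0, 1, 1, 2, 2, so the first and last columns of the
// source and destination line up exactly.
namespace {
[[maybe_unused]] int nearest_source_index(int dst_coord, int dst_extent, int src_extent) {
    if (dst_extent <= 1 || src_extent <= 1) {
        return 0;// Degenerate axis: everything maps to the first source pixel
    }
    double const ratio = static_cast<double>(src_extent - 1) / static_cast<double>(dst_extent - 1);
    return static_cast<int>(std::round(static_cast<double>(dst_coord) * ratio));
}
}// namespace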

// Clip whisker line by removing points from the end
void WhiskerTracingOperation::clip_whisker(Line2D & line, int clip_length) {
    if (line.size() <= static_cast<std::size_t>(clip_length)) {
        return;
    }

    line.erase(line.end() - clip_length, line.end());
}

// Trace whiskers in a single image
std::vector<Line2D> WhiskerTracingOperation::trace_single_image(
        whisker::WhiskerTracker & whisker_tracker,
        std::vector<uint8_t> const & image_data,
        ImageSize const & image_size,
        int clip_length,
        MaskData const * mask_data,
        int time_index) {

    std::vector<Line2D> whisker_lines;

    if (mask_data) {
        // Use mask-based tracing
        auto binary_mask = convert_mask_to_binary(mask_data, time_index, image_size);
        auto whiskers = whisker_tracker.trace_with_mask(image_data, binary_mask, image_size.height, image_size.width);

        whisker_lines.reserve(whiskers.size());
        for (auto const & whisker: whiskers) {
            Line2D line = convert_to_Line2D(whisker);
            clip_whisker(line, clip_length);
            whisker_lines.push_back(std::move(line));
        }
    } else {
        // Use standard tracing
        auto whiskers = whisker_tracker.trace(image_data, image_size.height, image_size.width);

        whisker_lines.reserve(whiskers.size());
        for (auto const & whisker: whiskers) {
            Line2D line = convert_to_Line2D(whisker);
            clip_whisker(line, clip_length);
            whisker_lines.push_back(std::move(line));
        }
    }

    return whisker_lines;
}

// Trace whiskers in multiple images in parallel
std::vector<std::vector<Line2D>> WhiskerTracingOperation::trace_multiple_images(
        whisker::WhiskerTracker & whisker_tracker,
        std::vector<std::vector<uint8_t>> const & images,
        ImageSize const & image_size,
        int clip_length,
        MaskData const * mask_data,
        std::vector<int> const & time_indices) {

    std::vector<std::vector<Line2D>> result;
    result.reserve(images.size());

    if (mask_data && !time_indices.empty()) {
        // Use mask-based parallel tracing

        auto t0 = std::chrono::high_resolution_clock::now();

        std::vector<std::vector<uint8_t>> masks;
        masks.reserve(images.size());

        for (size_t i = 0; i < images.size(); ++i) {
            int const time_idx = (i < time_indices.size()) ? time_indices[i] : 0;
            auto binary_mask = convert_mask_to_binary(mask_data, time_idx, image_size);
            masks.push_back(std::move(binary_mask));
        }

        //auto t1 = std::chrono::high_resolution_clock::now();

        //std::cout << "Mask Generation: " << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count() << "ms" << std::endl;

        auto whiskers_batch = whisker_tracker.trace_multiple_images_with_masks(images, masks, image_size.height, image_size.width);

        //auto t2 = std::chrono::high_resolution_clock::now();

        //std::cout << "Mask Tracing: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << "ms" << std::endl;

        for (auto const & whiskers: whiskers_batch) {
            std::vector<Line2D> whisker_lines;
            whisker_lines.reserve(whiskers.size());

            for (auto const & whisker: whiskers) {
                Line2D line = convert_to_Line2D(whisker);
                clip_whisker(line, clip_length);
                whisker_lines.push_back(std::move(line));
            }

            result.push_back(std::move(whisker_lines));
        }
    } else {
        // Use standard parallel tracing
        auto whiskers_batch = whisker_tracker.trace_multiple_images(images, image_size.height, image_size.width);

        for (auto const & whiskers: whiskers_batch) {
            std::vector<Line2D> whisker_lines;
            whisker_lines.reserve(whiskers.size());

            for (auto const & whisker: whiskers) {
                Line2D line = convert_to_Line2D(whisker);
                clip_whisker(line, clip_length);
                whisker_lines.push_back(std::move(line));
            }

            result.push_back(std::move(whisker_lines));
        }
    }

    return result;
}

// Producer thread function that loads frames from media_data
void WhiskerTracingOperation::producer_thread(std::shared_ptr<MediaData> media_data,
                                              FrameQueue& frame_queue,
                                              WhiskerTracingParameters const* params,
                                              int total_frames,
                                              std::atomic<int>& progress_atomic) {
    try {
        for (int frame_idx = 0; frame_idx < total_frames; ++frame_idx) {
            std::vector<uint8_t> image_data;

            if (params->use_processed_data) {
                image_data = media_data->getProcessedData8(frame_idx);
            } else {
                image_data = media_data->getRawData8(frame_idx);
            }

            if (!image_data.empty()) {
                FrameData frame(std::move(image_data), frame_idx);
                frame_queue.push(std::move(frame));
            }

            progress_atomic.fetch_add(1);
        }

        // Signal end of data
        frame_queue.push_end_marker();
    } catch (const std::exception& e) {
        std::cerr << "Producer thread error: " << e.what() << std::endl;
        frame_queue.push_end_marker();
    }
}

// Consumer function that processes frames from the queue
void WhiskerTracingOperation::consumer_processing(FrameQueue& frame_queue,
                                                  whisker::WhiskerTracker& tracker,
                                                  ImageSize const& image_size,
                                                  WhiskerTracingParameters const* params,
                                                  std::shared_ptr<LineData> traced_whiskers,
                                                  std::atomic<int>& progress_atomic,
                                                  int total_frames,
                                                  ProgressCallback progressCallback) {
    std::vector<std::vector<uint8_t>> batch_images;
    std::vector<int> batch_times;
    batch_images.reserve(params->batch_size);
    batch_times.reserve(params->batch_size);

    int processed_frames = 0;

    while (processed_frames < total_frames) {
        FrameData frame;

        // Try to get a frame with timeout
        bool got_frame = frame_queue.pop(frame, std::chrono::milliseconds(1000));

        if (!got_frame) {
            std::cerr << "Consumer timeout waiting for frame" << std::endl;
            break;
        }

        // Check for end marker
        if (frame.is_end_marker) {
            break;
        }

        batch_images.push_back(std::move(frame.image_data));
        batch_times.push_back(frame.time_index);

        // Process batch when we have enough frames or we've reached the end
        // Use adaptive batch sizing: smaller batches when queue is empty, larger when full
        int adaptive_batch_size = (frame_queue.size() > 10) ? params->batch_size : std::min(params->batch_size, 20);
        if (batch_images.size() >= static_cast<size_t>(adaptive_batch_size) ||
            processed_frames + batch_images.size() >= total_frames) {

            if (!batch_images.empty()) {
                // Trace whiskers in parallel for this batch
                auto batch_results = trace_multiple_images(tracker,
                                                           batch_images,
                                                           image_size,
                                                           params->clip_length,
                                                           params->use_mask_data ? params->mask_data.get() : nullptr,
                                                           batch_times);

                // Add results to LineData
                for (size_t j = 0; j < batch_results.size(); ++j) {
                    for (auto const & line: batch_results[j]) {
                        traced_whiskers->addAtTime(TimeFrameIndex(batch_times[j]), line, false);
                    }
                }

                processed_frames += batch_images.size();

                // Update progress from consumer thread
                if (progressCallback) {
                    int const current_progress = static_cast<int>(std::round(static_cast<double>(processed_frames) / static_cast<double>(total_frames) * PROGRESS_SCALE));
                    progressCallback(current_progress);
                }

                // Clear batch for next iteration
                batch_images.clear();
                batch_times.clear();
            }
        }
    }
}
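// --------------------------------------------------------------------------
// Note (sketch, not part of this file): FrameData and FrameQueue are declared
// in the project headers; the producer/consumer code above only relies on the
// small interface below. A plausible bounded, thread-safe queue would look
// roughly like this (an assumption, not the real definition):
//
//   struct FrameData {
//       std::vector<uint8_t> image_data;
//       int time_index = 0;
//       bool is_end_marker = false;
//
//       FrameData() = default;
//       FrameData(std::vector<uint8_t> data, int time);
//   };
//
//   class FrameQueue {
//   public:
//       explicit FrameQueue(std::size_t max_size);
//       void push(FrameData frame);             // blocks while the queue is full
//       void push_end_marker();                 // enqueues a FrameData with is_end_marker = true
//       bool pop(FrameData & out,
//                std::chrono::milliseconds timeout);// false on timeout, true on success
//       std::size_t size() const;               // sampled above for adaptive batch sizing
//   private:
//       std::mutex mutex_;
//       std::condition_variable not_empty_;
//       std::condition_variable not_full_;
//       std::deque<FrameData> queue_;
//       std::size_t max_size_;
//   };
// --------------------------------------------------------------------------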

std::string WhiskerTracingOperation::getName() const {
    return "Whisker Tracing";
}

std::type_index WhiskerTracingOperation::getTargetInputTypeIndex() const {
    return typeid(std::shared_ptr<MediaData>);
}

bool WhiskerTracingOperation::canApply(DataTypeVariant const & dataVariant) const {
    if (!std::holds_alternative<std::shared_ptr<MediaData>>(dataVariant)) {
        return false;
    }

    auto const * ptr_ptr = std::get_if<std::shared_ptr<MediaData>>(&dataVariant);
    return ptr_ptr && *ptr_ptr;
}

std::unique_ptr<TransformParametersBase> WhiskerTracingOperation::getDefaultParameters() const {
    return std::make_unique<WhiskerTracingParameters>();
}

DataTypeVariant WhiskerTracingOperation::execute(DataTypeVariant const & dataVariant,
                                                 TransformParametersBase const * transformParameters) {
    return execute(dataVariant, transformParameters, [](int) {});
}

DataTypeVariant WhiskerTracingOperation::execute(DataTypeVariant const & dataVariant,
                                                 TransformParametersBase const * transformParameters,
                                                 ProgressCallback progressCallback) {
    auto const * ptr_ptr = std::get_if<std::shared_ptr<MediaData>>(&dataVariant);
    if (!ptr_ptr || !(*ptr_ptr)) {
        std::cerr << "WhiskerTracingOperation::execute: Incompatible variant type or null data." << std::endl;
        if (progressCallback) progressCallback(PROGRESS_COMPLETE);
        return {};
    }

    auto media_data = *ptr_ptr;

    auto const * typed_params =
            transformParameters ? dynamic_cast<WhiskerTracingParameters const *>(transformParameters) : nullptr;

    if (!typed_params) {
        std::cerr << "WhiskerTracingOperation::execute: Invalid parameters." << std::endl;
        if (progressCallback) progressCallback(PROGRESS_COMPLETE);
        return {};
    }

    // Allow caller (tests) to pass an already-initialized tracker to avoid heavy setup
    std::shared_ptr<whisker::WhiskerTracker> tracker_ptr = typed_params->tracker;
    if (!tracker_ptr) {
        tracker_ptr = std::make_shared<whisker::WhiskerTracker>();
        std::cout << "Whisker Tracker Initialized" << std::endl;
    }
    tracker_ptr->setWhiskerLengthThreshold(typed_params->whisker_length_threshold);
    // Disable whisker pad exclusion by using a large radius by default
    tracker_ptr->setWhiskerPadRadius(1000.0f);

    if (progressCallback) progressCallback(0);

    // Create new LineData for the traced whiskers
    auto traced_whiskers = std::make_shared<LineData>();
    traced_whiskers->setImageSize(media_data->getImageSize());

    // Get times with data
    auto total_frame_count = media_data->getTotalFrameCount();
    if (total_frame_count <= 0) {
        std::cerr << "WhiskerTracingOperation::execute: No data available in media." << std::endl;
        if (progressCallback) progressCallback(PROGRESS_COMPLETE);
        return {};
    }

    auto total_time_points = static_cast<size_t>(total_frame_count);
    size_t processed_time_points = 0;

    // Process frames using producer-consumer pattern for parallel processing
    if (typed_params->use_parallel_processing && typed_params->batch_size > 1) {

        auto max_threads = omp_get_max_threads();
        // Reserve threads for producer, use rest for OpenMP processing
        int omp_threads = std::max(1, max_threads - typed_params->producer_threads);
        omp_set_num_threads(omp_threads);
        std::cout << "Total CPU cores: " << max_threads
                  << ", OpenMP threads: " << omp_threads
                  << ", Producer threads: " << typed_params->producer_threads << std::endl;

        // Create frame queue for producer-consumer pattern
        FrameQueue frame_queue(typed_params->queue_size);

        // Atomic counter for progress tracking
        std::atomic<int> progress_atomic{0};

        // Start producer thread
        std::thread producer([&]() {
            producer_thread(media_data, frame_queue, typed_params,
                            static_cast<int>(total_time_points), progress_atomic);
        });

        // Consumer processing (runs in main thread)
        consumer_processing(frame_queue, *tracker_ptr, media_data->getImageSize(),
                            typed_params, traced_whiskers, progress_atomic,
                            static_cast<int>(total_time_points), progressCallback);

        // Wait for producer to finish
        producer.join();

        processed_time_points = total_time_points;

    } else {
        // Process frames one by one (original sequential approach)
        for (size_t time = 0; time < total_time_points; ++time) {
            std::vector<uint8_t> image_data;

            if (typed_params->use_processed_data) {
                image_data = media_data->getProcessedData8(static_cast<int>(time));
            } else {
                image_data = media_data->getRawData8(static_cast<int>(time));
            }

            if (!image_data.empty()) {
                auto whisker_lines = trace_single_image(*tracker_ptr, image_data, media_data->getImageSize(),
                                                        typed_params->clip_length,
                                                        typed_params->use_mask_data ? typed_params->mask_data.get() : nullptr,
                                                        static_cast<int>(time));

                for (auto const & line: whisker_lines) {
                    traced_whiskers->addAtTime(TimeFrameIndex(static_cast<int64_t>(time)), line, false);
                }
            }

            processed_time_points++;
            if (progressCallback) {
                int const current_progress = static_cast<int>(std::round(static_cast<double>(processed_time_points) / static_cast<double>(total_time_points) * PROGRESS_SCALE));
                progressCallback(current_progress);
            }
        }
    }

    if (progressCallback) progressCallback(PROGRESS_COMPLETE);

    std::cout << "WhiskerTracingOperation executed successfully. Traced "
              << traced_whiskers->GetAllLinesAsRange().size() << " whiskers across "
              << total_frame_count << " time points." << std::endl;

    return traced_whiskers;
}
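The operation above plugs into the project's generic transform interface (canApply / getDefaultParameters / execute over a DataTypeVariant). Below is a rough usage sketch, not code from the repository: it assumes the WhiskerToolbox headers are on the include path, that `media` is an already-loaded std::shared_ptr<MediaData>, and that the wrapper function run_whisker_tracing is a hypothetical name.

#include "whisker_tracing.hpp"

#include <iostream>
#include <memory>
#include <variant>

// Hypothetical driver: run whisker tracing on loaded media and report progress.
void run_whisker_tracing(std::shared_ptr<MediaData> media) {
    WhiskerTracingOperation op;

    // Default parameters; fields such as batch_size, clip_length, and
    // use_parallel_processing (referenced in execute above) can be adjusted here.
    auto params = std::make_unique<WhiskerTracingParameters>();

    DataTypeVariant input = media;
    if (!op.canApply(input)) {
        std::cerr << "Input is not usable MediaData" << std::endl;
        return;
    }

    DataTypeVariant result = op.execute(input, params.get(), [](int pct) {
        std::cout << "whisker tracing: " << pct << "%" << std::endl;
    });

    if (auto const * traced = std::get_if<std::shared_ptr<LineData>>(&result)) {
        std::cout << "Tracing produced a LineData result: " << (*traced ? "yes" : "empty") << std::endl;
    }
}

As the comment in execute notes, tests can preassign WhiskerTracingParameters::tracker with an already-initialized whisker::WhiskerTracker to skip the heavyweight tracker setup.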