• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

openmc-dev / openmc / 22590555373

02 Mar 2026 06:44PM UTC coverage: 81.331% (-0.2%) from 81.508%
22590555373

Pull #3550

github

web-flow
Merge d11f2e51a into 823b4c96c
Pull Request #3550: [Point Detector] Add distribution get_pdf functionality

17484 of 25271 branches covered (69.19%)

Branch coverage included in aggregate %.

53 of 164 new or added lines in 12 files covered. (32.32%)

33 existing lines in 3 files now uncovered.

57704 of 67176 relevant lines covered (85.9%)

44912746.91 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

85.13
/include/openmc/tensor.h
1
//! \file tensor.h
2
//! \brief Multi-dimensional tensor types for OpenMC.
3
//!
4
//! Tensor<T> is the primary type: a dynamic-rank owning container that stores
5
//! elements contiguously in row-major order.  View<T> is a lightweight
6
//! non-owning reference into a Tensor's storage, returned by the slice()
7
//! method and flat().  StaticTensor2D<T,R,C> is a small stack-allocated 2D
8
//! array used only for simulation::global_tallies.
9
//!
10
//! Slicing follows numpy conventions: each axis takes an index (rank-reducing),
11
//! All (keep entire axis), or Range (keep sub-range).  For example,
12
//! arr.slice(0, all, range(2, 5)) is equivalent to numpy's arr[0, :, 2:5].
13
//!
14
//! View is declared before Tensor because Tensor's methods return View objects.
15

16
#ifndef OPENMC_TENSOR_H
17
#define OPENMC_TENSOR_H
18

19
#include "openmc/vector.h"
20

21
#include <algorithm>
22
#include <array>
23
#include <cmath>
24
#include <cstddef>
25
#include <cstdint>
26
#include <initializer_list>
27
#include <limits>
28
#include <type_traits>
29

30
namespace openmc {
31
namespace tensor {
32

33
//==============================================================================
34
// Forward declarations
35
//==============================================================================
36

37
template<typename T>
38
class Tensor;
39

40
template<typename T, size_t R, size_t C>
41
class StaticTensor2D;
42

43
//==============================================================================
44
// Storage type mapping
45
//
46
// std::vector<bool> is a bit-packed specialization that returns proxy objects
47
// instead of real references, which breaks generic code.  storage_type_map
48
// redirects bool to unsigned char so that Tensor<bool> stores one byte per
49
// element with normal reference semantics.
50
//==============================================================================
51

52
// Primary template: store T exactly as given.
template<typename T>
struct storage_type_map {
  using type = T;
};
// Specialization: bool is stored as unsigned char (one byte per element) so
// element access yields real references, unlike std::vector<bool>'s proxies.
template<>
struct storage_type_map<bool> {
  using type = unsigned char;
};
// Convenience alias: the element type actually held in a Tensor<T>'s storage.
template<typename T>
using storage_type = typename storage_type_map<T>::type;
62

63
//==============================================================================
64
// Slice argument types
65
//
66
// Used with the variadic slice() method on Tensor, View, and StaticTensor2D.
67
// Each argument corresponds to one axis: a plain integer fixes that axis at
68
// a single index (rank-reducing), All keeps the entire axis, and Range keeps
69
// a sub-range.
70
//==============================================================================
71

72
//! Keep an entire axis (equivalent to numpy's ':' or xtensor's xt::all())
struct All {};
constexpr All all {};

//! Sub-range along an axis [start, end)
struct Range {
  size_t start; // first index kept along the axis (inclusive)
  size_t end;   // SIZE_MAX means "to end of axis"
};
81

82
//! Create a Range [start, end)
83
inline Range range(size_t start, size_t end)
171,796,430✔
84
{
85
  return {start, end};
73,527,722✔
86
}
87

88
//! Create a Range [0, end)
89
inline Range range(size_t end)
90
{
91
  return {0, end};
92
}
93

94
namespace detail {

//! Internal: normalized representation of a per-axis slice argument.
//! INDEX fixes the axis at `start` (rank-reducing), ALL keeps the whole
//! axis, RANGE keeps [start, end).
struct SliceArg {
  enum Kind { INDEX, ALL, RANGE } kind;
  size_t start;
  size_t end;
};

//! Normalize an All argument (start/end are unused for ALL).
inline SliceArg to_slice_arg(All)
{
  return {SliceArg::ALL, 0, 0};
}
//! Normalize a Range argument.
inline SliceArg to_slice_arg(Range r)
{
  return {SliceArg::RANGE, r.start, r.end};
}

//! Normalize an integral (or enum) index argument.  The enable_if keeps this
//! template from competing with the All/Range overloads above.
template<typename I>
inline
  typename std::enable_if<std::is_integral<I>::value || std::is_enum<I>::value,
    SliceArg>::type
  to_slice_arg(I i)
{
  return {SliceArg::INDEX, static_cast<size_t>(i), 0};
}

//! Result of a slice computation: pointer offset + new shape/strides
struct SliceResult {
  size_t ptr_offset; // element offset of the sliced view's first element
  vector<size_t> shape;
  vector<size_t> strides;
};

//! Compute the result of applying slice arguments to shape/strides.
//! \param shape   per-axis extents of the tensor/view being sliced
//! \param strides per-axis element strides of the tensor/view being sliced
//! \param first, rest... one slice argument per leading axis (int/All/Range)
//! \return base-pointer offset plus the sliced view's shape and strides
template<typename First, typename... Rest>
SliceResult compute_slice(const vector<size_t>& shape,
  const vector<size_t>& strides, First first, Rest... rest)
{
  const size_t n = 1 + sizeof...(Rest);
  // Normalize the heterogeneous argument pack into a uniform array.
  SliceArg args[1 + sizeof...(Rest)] = {
    to_slice_arg(first), to_slice_arg(rest)...};

  size_t offset = 0;
  vector<size_t> new_shape;
  vector<size_t> new_strides;

  for (size_t a = 0; a < n; ++a) {
    switch (args[a].kind) {
    case SliceArg::INDEX:
      // Fixing an axis adds to the base offset and drops the axis entirely.
      offset += args[a].start * strides[a];
      break;
    case SliceArg::ALL:
      // Axis kept unchanged.
      new_shape.push_back(shape[a]);
      new_strides.push_back(strides[a]);
      break;
    case SliceArg::RANGE: {
      offset += args[a].start * strides[a];
      // SIZE_MAX is the "to end of axis" sentinel (see Range::end).
      size_t end = (args[a].end == SIZE_MAX) ? shape[a] : args[a].end;
      new_shape.push_back(end - args[a].start);
      new_strides.push_back(strides[a]);
      break;
    }
    }
  }

  // Trailing axes not covered by arguments are implicitly All.
  // This matches numpy: a[i] on a 2D array returns a 1D row.
  for (size_t a = n; a < shape.size(); ++a) {
    new_shape.push_back(shape[a]);
    new_strides.push_back(strides[a]);
  }

  return {offset, std::move(new_shape), std::move(new_strides)};
}

} // namespace detail
171

172
//==============================================================================
173
// View<T>: a non-owning N-dimensional view into a tensor's storage.
174
//
175
// Holds a base pointer, shape, and strides (in elements).  Supports arbitrary
176
// rank and multi-axis slicing via the variadic slice() method.
177
//==============================================================================
178

179
template<typename T>
class View {
public:
  //--------------------------------------------------------------------------
  // Constructors

  //! Construct from a raw base pointer plus per-axis shape and strides
  //! (strides are measured in elements, not bytes).
  View(T* data, vector<size_t> shape, vector<size_t> strides)
    : data_(data), shape_(std::move(shape)), strides_(std::move(strides))
  {}

  // Explicitly default copy/move constructors (declaring copy assignment
  // below would otherwise suppress the implicit move constructor).
  View(const View&) = default;
  View(View&&) = default;

  //--------------------------------------------------------------------------
  // Indexing

  //! Multi-index element access (1D, 2D, 3D, ...)
  template<typename... Indices>
  T& operator()(Indices... indices)
  {
    const size_t idx[] = {static_cast<size_t>(indices)...};
    size_t off = 0;
    // Physical offset = dot product of the index tuple with the strides.
    for (size_t d = 0; d < sizeof...(Indices); ++d)
      off += idx[d] * strides_[d];
    return data_[off];
  }

  template<typename... Indices>
  const T& operator()(Indices... indices) const
  {
    const size_t idx[] = {static_cast<size_t>(indices)...};
    size_t off = 0;
    for (size_t d = 0; d < sizeof...(Indices); ++d)
      off += idx[d] * strides_[d];
    return data_[off];
  }

  //! Flat logical index (row-major order)
  T& operator[](size_t i) { return data_[flat_to_offset(i)]; }
  const T& operator[](size_t i) const { return data_[flat_to_offset(i)]; }

  //--------------------------------------------------------------------------
  // Accessors

  //! Total number of elements (product of the shape entries)
  size_t size() const
  {
    size_t s = 1;
    for (auto d : shape_)
      s *= d;
    return s;
  }
  //! Number of dimensions
  size_t ndim() const { return shape_.size(); }
  //! Extent along one axis
  size_t shape(size_t axis) const { return shape_[axis]; }
  //! Full shape vector
  const vector<size_t>& shape_vec() const { return shape_; }
  //! Base pointer to the view's first element (elements may be strided)
  T* data() { return data_; }
  const T* data() const { return data_; }

  //--------------------------------------------------------------------------
  // View accessors

  //! Multi-axis slice.  Each argument corresponds to one axis and is either:
  //!   - an integer (fixes that axis, rank-reducing)
  //!   - All (keeps entire axis)
  //!   - Range (keeps sub-range along that axis)
  //! Example: v.slice(0, all, range(2, 5)) == numpy v[0, :, 2:5]
  template<typename First, typename... Rest>
  View<T> slice(First first, Rest... rest)
  {
    auto r = detail::compute_slice(shape_, strides_, first, rest...);
    return {data_ + r.ptr_offset, std::move(r.shape), std::move(r.strides)};
  }

  //! Const overload: yields a read-only View<const T>.
  template<typename First, typename... Rest>
  View<const T> slice(First first, Rest... rest) const
  {
    auto r = detail::compute_slice(shape_, strides_, first, rest...);
    return {data_ + r.ptr_offset, std::move(r.shape), std::move(r.strides)};
  }

  //--------------------------------------------------------------------------
  // Assignment operators

  //! Copy assignment: element-wise deep copy (writes through data pointer).
  //! Without this, the compiler's implicit copy assignment just copies the
  //! View metadata (pointer, shape, strides) instead of the viewed data.
  View& operator=(const View& other)
  {
    size_t n = size();
    for (size_t i = 0; i < n; ++i)
      data_[flat_to_offset(i)] = other[i];
    return *this;
  }

  //! Fill all elements with a scalar
  View& operator=(T val)
  {
    size_t n = size();
    for (size_t i = 0; i < n; ++i)
      data_[flat_to_offset(i)] = val;
    return *this;
  }

  //! Assignment from initializer_list (for 1D views)
  View& operator=(std::initializer_list<T> vals)
  {
    auto it = vals.begin();
    // Copies stop at whichever runs out first: the view or the list.
    for (size_t i = 0; i < size() && it != vals.end(); ++i, ++it)
      data_[flat_to_offset(i)] = *it;
    return *this;
  }

  //! Assignment from another View (possibly of a different element type)
  template<typename U>
  View& operator=(const View<U>& other)
  {
    size_t n = size();
    for (size_t i = 0; i < n; ++i)
      data_[flat_to_offset(i)] = other[i];
    return *this;
  }

  //! Assignment from Tensor (forward-declared, defined after Tensor)
  template<typename U>
  View& operator=(const Tensor<U>& other);

  //! Compound addition from Tensor (forward-declared, defined after Tensor)
  template<typename U>
  View& operator+=(const Tensor<U>& o);

  //! Compound multiply by scalar
  View& operator*=(T val)
  {
    size_t n = size();
    for (size_t i = 0; i < n; ++i)
      data_[flat_to_offset(i)] *= val;
    return *this;
  }

  //! Compound divide by scalar
  View& operator/=(T val)
  {
    size_t n = size();
    for (size_t i = 0; i < n; ++i)
      data_[flat_to_offset(i)] /= val;
    return *this;
  }

  //--------------------------------------------------------------------------
  // Reductions

  //! Sum of all elements
  T sum() const
  {
    // remove_const needed so accumulator is mutable when T is const-qualified
    std::remove_const_t<T> s = 0;
    size_t n = size();
    for (size_t i = 0; i < n; ++i)
      s += data_[flat_to_offset(i)];
    return s;
  }

  //--------------------------------------------------------------------------
  // Iterators
  //
  // Lightweight row-major iterator parameterized on pointer type (Ptr).
  // Stores a flat logical position and converts to a physical offset on each
  // dereference via divmod over shape/strides.  For contiguous 1D views (the
  // common case) the divmod chain reduces to a single multiply-by-1, which
  // the compiler optimizes away.
  //
  // view_iterator<T*>       = mutable iterator  (from non-const View)
  // view_iterator<const T*> = read-only iterator (from const View)

  template<typename Ptr>
  class view_iterator {
    Ptr base_;              // the view's base data pointer
    size_t count_;          // flat logical position (row-major)
    const size_t* shape_;   // borrowed from the parent View's shape vector
    const size_t* strides_; // borrowed from the parent View's stride vector
    size_t ndim_;

  public:
    using iterator_category = std::random_access_iterator_tag;
    using value_type = std::remove_const_t<T>;
    using difference_type = std::ptrdiff_t;
    using pointer = Ptr;
    using reference = decltype(*std::declval<Ptr>());

    // NOTE(review): shape_/strides_ point into the parent View's vectors, so
    // an iterator must not outlive the View it was created from.
    view_iterator(Ptr base, size_t count, const View* v)
      : base_(base), count_(count), shape_(v->shape_.data()),
        strides_(v->strides_.data()), ndim_(v->shape_.size())
    {}

    reference operator*() const { return base_[offset()]; }
    reference operator[](difference_type n) const
    {
      return base_[offset_of(count_ + n)];
    }
    view_iterator& operator++()
    {
      ++count_;
      return *this;
    }
    view_iterator operator++(int)
    {
      auto tmp = *this;
      ++count_;
      return tmp;
    }
    view_iterator& operator--()
    {
      --count_;
      return *this;
    }
    view_iterator operator+(difference_type n) const
    {
      auto tmp = *this;
      tmp.count_ += n;
      return tmp;
    }
    view_iterator operator-(difference_type n) const
    {
      auto tmp = *this;
      tmp.count_ -= n;
      return tmp;
    }
    //! Iterator distance is the difference of flat logical positions.
    difference_type operator-(const view_iterator& o) const
    {
      return static_cast<difference_type>(count_) -
             static_cast<difference_type>(o.count_);
    }
    view_iterator& operator+=(difference_type n)
    {
      count_ += n;
      return *this;
    }
    view_iterator& operator-=(difference_type n)
    {
      count_ -= n;
      return *this;
    }
    bool operator==(const view_iterator& o) const { return count_ == o.count_; }
    bool operator!=(const view_iterator& o) const { return count_ != o.count_; }
    bool operator<(const view_iterator& o) const { return count_ < o.count_; }
    bool operator>(const view_iterator& o) const { return count_ > o.count_; }
    bool operator<=(const view_iterator& o) const { return count_ <= o.count_; }
    bool operator>=(const view_iterator& o) const { return count_ >= o.count_; }
    friend view_iterator operator+(difference_type n, const view_iterator& it)
    {
      return it + n;
    }

  private:
    size_t offset() const { return offset_of(count_); }
    //! Convert a flat row-major position to a physical element offset
    //! (same divmod scheme as View::flat_to_offset).
    size_t offset_of(size_t flat) const
    {
      size_t off = 0;
      for (int d = static_cast<int>(ndim_) - 1; d >= 0; --d) {
        off += (flat % shape_[d]) * strides_[d];
        flat /= shape_[d];
      }
      return off;
    }
  };

  using iterator = view_iterator<T*>;
  using const_iterator = view_iterator<const T*>;

  iterator begin() { return {data_, 0, this}; }
  iterator end() { return {data_, size(), this}; }
  const_iterator begin() const { return cbegin(); }
  const_iterator end() const { return cend(); }
  const_iterator cbegin() const { return {data_, 0, this}; }
  const_iterator cend() const { return {data_, size(), this}; }

private:
  //! Convert a logical flat index (row-major) to a physical element offset
  size_t flat_to_offset(size_t flat) const
  {
    size_t off = 0;
    // Walk axes from innermost to outermost, peeling off one coordinate per
    // divmod and accumulating its strided contribution.
    for (int d = static_cast<int>(shape_.size()) - 1; d >= 0; --d) {
      off += (flat % shape_[d]) * strides_[d];
      flat /= shape_[d];
    }
    return off;
  }

  T* data_;                // non-owning pointer into some tensor's storage
  vector<size_t> shape_;   // extent of each axis
  vector<size_t> strides_; // element stride of each axis
};
472

473
//==============================================================================
474
// Tensor<T>: dynamic-rank N-dimensional tensor.
475
//
476
// Stores elements in a contiguous row-major vector<storage_type<T>>
477
// with a dynamic shape.
478
//==============================================================================
479

480
template<typename T>
class Tensor {
public:
  using value_type = T;
  //! Element type actually held in storage (bool is stored as unsigned char)
  using stored_type = storage_type<T>;
  using iterator = typename vector<stored_type>::iterator;
  using const_iterator = typename vector<stored_type>::const_iterator;

  //--------------------------------------------------------------------------
  // Constructors

  Tensor() = default;

  //! Construct with shape (uninitialized for arithmetic types via vector
  //! resize)
  explicit Tensor(vector<size_t> shape)
    : shape_(std::move(shape)), data_(compute_size())
  {}

  //! Construct with shape and fill value
  Tensor(vector<size_t> shape, T fill)
    : shape_(std::move(shape)), data_(compute_size(), fill)
  {}

  //! Construct from initializer_list shape
  explicit Tensor(std::initializer_list<size_t> shape)
    : shape_(shape), data_(compute_size())
  {}

  //! Construct from initializer_list shape with fill
  Tensor(std::initializer_list<size_t> shape, T fill)
    : shape_(shape), data_(compute_size(), fill)
  {}

  //! 1D copy from raw pointer + count
  Tensor(const T* ptr, size_t count) : shape_({count}), data_(ptr, ptr + count)
  {}

  //! Copy from View (preserves view's shape)
  template<typename U>
  explicit Tensor(const View<U>& v) : shape_(v.shape_vec())
  {
    size_t n = v.size();
    data_.resize(n);
    // v[i] goes through the view's flat indexing, so strided views are
    // copied into contiguous row-major storage here.
    for (size_t i = 0; i < n; ++i)
      data_[i] = v[i];
  }

  //--------------------------------------------------------------------------
  // Assignment

  //! Assignment from View (adopts the view's shape, deep-copies elements)
  template<typename U>
  Tensor& operator=(const View<U>& v)
  {
    shape_ = v.shape_vec();
    size_t n = v.size();
    data_.resize(n);
    for (size_t i = 0; i < n; ++i)
      data_[i] = v[i];
    return *this;
  }

  //! Assignment from initializer_list of values (1D)
  Tensor& operator=(std::initializer_list<T> vals)
  {
    shape_ = {vals.size()};
    data_.assign(vals.begin(), vals.end());
    return *this;
  }

  //--------------------------------------------------------------------------
  // Accessors

  stored_type* data() { return data_.data(); }
  const stored_type* data() const { return data_.data(); }
  size_t size() const { return data_.size(); }
  const vector<size_t>& shape() const { return shape_; }
  //! Extent along one axis; returns 0 for an out-of-range axis
  size_t shape(size_t dim) const
  {
    return dim < shape_.size() ? shape_[dim] : 0;
  }
  size_t ndim() const { return shape_.size(); }
  bool empty() const { return data_.empty(); }

  //--------------------------------------------------------------------------
  // Indexing (row-major)

  template<typename... Indices>
  stored_type& operator()(Indices... indices)
  {
    const size_t idx[] = {static_cast<size_t>(indices)...};
    size_t off = 0;
    // Horner-style accumulation of the row-major flat offset.
    for (size_t d = 0; d < sizeof...(Indices); ++d)
      off = off * shape_[d] + idx[d];
    return data_[off];
  }

  template<typename... Indices>
  const stored_type& operator()(Indices... indices) const
  {
    const size_t idx[] = {static_cast<size_t>(indices)...};
    size_t off = 0;
    for (size_t d = 0; d < sizeof...(Indices); ++d)
      off = off * shape_[d] + idx[d];
    return data_[off];
  }

  //! Flat index access into contiguous storage (no divmod needed here)
  stored_type& operator[](size_t i) { return data_[i]; }
  const stored_type& operator[](size_t i) const { return data_[i]; }

  //! First and last element
  stored_type& front() { return data_.front(); }
  const stored_type& front() const { return data_.front(); }
  stored_type& back() { return data_.back(); }
  const stored_type& back() const { return data_.back(); }

  //--------------------------------------------------------------------------
  // Iterators

  iterator begin() { return data_.begin(); }
  iterator end() { return data_.end(); }
  const_iterator begin() const { return data_.begin(); }
  const_iterator end() const { return data_.end(); }
  const_iterator cbegin() const { return data_.cbegin(); }
  const_iterator cend() const { return data_.cend(); }

  //--------------------------------------------------------------------------
  // Mutation

  //! Change shape and resize storage to match (vector::resize semantics:
  //! the existing flat prefix is retained, new elements value-initialized).
  void resize(const vector<size_t>& shape)
  {
    shape_ = shape;
    data_.resize(compute_size());
  }

  void resize(std::initializer_list<size_t> shape)
  {
    shape_.assign(shape.begin(), shape.end());
    data_.resize(compute_size());
  }

  //! Reinterpret the existing storage under a new shape.  NOTE(review): no
  //! check that new_shape's element count matches the current data size.
  void reshape(const vector<size_t>& new_shape) { shape_ = new_shape; }

  //! Set every element to val
  void fill(T val) { std::fill(data_.begin(), data_.end(), val); }

  //--------------------------------------------------------------------------
  // View accessors

  //! Multi-axis slice.  Each argument corresponds to one axis and is either:
  //!   - an integer (fixes that axis, rank-reducing)
  //!   - All (keeps entire axis)
  //!   - Range (keeps sub-range along that axis)
  //! Example: t.slice(0, all, range(2, 5)) == numpy t[0, :, 2:5]
  template<typename First, typename... Rest>
  View<stored_type> slice(First first, Rest... rest)
  {
    auto strides = compute_strides();
    auto r = detail::compute_slice(shape_, strides, first, rest...);
    return {
      data_.data() + r.ptr_offset, std::move(r.shape), std::move(r.strides)};
  }

  //! Const overload: yields a read-only View<const stored_type>.
  template<typename First, typename... Rest>
  View<const stored_type> slice(First first, Rest... rest) const
  {
    auto strides = compute_strides();
    auto r = detail::compute_slice(shape_, strides, first, rest...);
    return {
      data_.data() + r.ptr_offset, std::move(r.shape), std::move(r.strides)};
  }

  //! Flat 1D view of all elements
  View<stored_type> flat()
  {
    return {data_.data(), {data_.size()}, {size_t(1)}};
  }
  View<const stored_type> flat() const
  {
    return {data_.data(), {data_.size()}, {size_t(1)}};
  }

  //--------------------------------------------------------------------------
  // Reductions and transforms

  //! Sum of all elements
  T sum() const
  {
    T s = T(0);
    for (size_t i = 0; i < data_.size(); ++i)
      s += data_[i];
    return s;
  }

  //! Sum along an axis, reducing rank by 1 (defined out-of-line below)
  Tensor<T> sum(size_t axis) const;

  //! Product of all elements
  T prod() const
  {
    T p = T(1);
    for (size_t i = 0; i < data_.size(); ++i)
      p *= data_[i];
    return p;
  }

  //! True if any element is nonzero
  bool any() const
  {
    for (size_t i = 0; i < data_.size(); ++i)
      if (data_[i])
        return true;
    return false;
  }

  //! True if all elements are nonzero
  bool all() const
  {
    for (size_t i = 0; i < data_.size(); ++i)
      if (!data_[i])
        return false;
    return true;
  }

  //! Flat index of the minimum element
  size_t argmin() const
  {
    return static_cast<size_t>(std::distance(data_.data(),
      std::min_element(data_.data(), data_.data() + data_.size())));
  }

  //! Reverse element order along an axis (e.g. flip(0) reverses rows)
  Tensor flip(size_t axis) const
  {
    // Decompose the row-major layout into (outer, axis, inner) extents so
    // the flip becomes three nested loops over contiguous storage.
    size_t outer_size = 1;
    for (size_t d = 0; d < axis; ++d)
      outer_size *= shape_[d];
    size_t axis_size = shape_[axis];
    size_t inner_size = 1;
    for (size_t d = axis + 1; d < shape_.size(); ++d)
      inner_size *= shape_[d];

    Tensor r(shape_);
    for (size_t o = 0; o < outer_size; ++o)
      for (size_t a = 0; a < axis_size; ++a)
        for (size_t i = 0; i < inner_size; ++i)
          r.data_[(o * axis_size + (axis_size - 1 - a)) * inner_size + i] =
            data_[(o * axis_size + a) * inner_size + i];
    return r;
  }

  //--------------------------------------------------------------------------
  // Operators
  //
  // NOTE(review): the element-wise tensor-tensor operators assume the two
  // operands hold the same number of elements; no shape check is performed.

  Tensor& operator+=(T val)
  {
    for (auto& x : data_)
      x += val;
    return *this;
  }
  Tensor& operator-=(T val)
  {
    for (auto& x : data_)
      x -= val;
    return *this;
  }
  Tensor& operator*=(T val)
  {
    for (auto& x : data_)
      x *= val;
    return *this;
  }
  Tensor& operator/=(T val)
  {
    for (auto& x : data_)
      x /= val;
    return *this;
  }
  Tensor& operator+=(const Tensor& o)
  {
    for (size_t i = 0; i < data_.size(); ++i)
      data_[i] += o.data_[i];
    return *this;
  }
  Tensor& operator-=(const Tensor& o)
  {
    for (size_t i = 0; i < data_.size(); ++i)
      data_[i] -= o.data_[i];
    return *this;
  }
  Tensor& operator*=(const Tensor& o)
  {
    for (size_t i = 0; i < data_.size(); ++i)
      data_[i] *= o.data_[i];
    return *this;
  }
  Tensor& operator/=(const Tensor& o)
  {
    for (size_t i = 0; i < data_.size(); ++i)
      data_[i] /= o.data_[i];
    return *this;
  }

  Tensor operator+(const Tensor& o) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] + o.data_[i];
    return r;
  }
  Tensor operator-(const Tensor& o) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] - o.data_[i];
    return r;
  }
  Tensor operator/(const Tensor& o) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] / o.data_[i];
    return r;
  }
  Tensor operator*(const Tensor& o) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] * o.data_[i];
    return r;
  }

  Tensor operator+(T val) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] + val;
    return r;
  }
  Tensor operator-(T val) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] - val;
    return r;
  }
  Tensor operator*(T val) const
  {
    Tensor r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data_[i] = data_[i] * val;
    return r;
  }

  // Element-wise comparisons yield a boolean mask with the same shape.
  Tensor<bool> operator<=(T val) const
  {
    Tensor<bool> r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data()[i] = data_[i] <= val;
    return r;
  }
  Tensor<bool> operator<(T val) const
  {
    Tensor<bool> r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data()[i] = data_[i] < val;
    return r;
  }
  Tensor<bool> operator>=(T val) const
  {
    Tensor<bool> r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data()[i] = data_[i] >= val;
    return r;
  }
  Tensor<bool> operator>(T val) const
  {
    Tensor<bool> r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data()[i] = data_[i] > val;
    return r;
  }
  Tensor<bool> operator<(const Tensor& o) const
  {
    Tensor<bool> r(shape_);
    for (size_t i = 0; i < data_.size(); ++i)
      r.data()[i] = data_[i] < o.data_[i];
    return r;
  }

private:
  //! Product of shape entries (1 for an empty/rank-0 shape)
  size_t compute_size() const
  {
    size_t s = 1;
    for (auto d : shape_)
      s *= d;
    return s;
  }

  //! Compute row-major strides from shape
  vector<size_t> compute_strides() const
  {
    vector<size_t> strides(shape_.size());
    if (!shape_.empty()) {
      // Innermost axis is contiguous; each outer stride is the product of
      // all inner extents.
      strides.back() = 1;
      for (int d = static_cast<int>(shape_.size()) - 2; d >= 0; --d)
        strides[d] = strides[d + 1] * shape_[d + 1];
    }
    return strides;
  }

  //--------------------------------------------------------------------------
  // Data members

  vector<size_t> shape_;         // extent of each axis
  vector<storage_type<T>> data_; // contiguous row-major element storage
};
898

899
//==============================================================================
900
// Non-member operators (scalar op tensor)
901
//==============================================================================
902

903
template<typename T>
904
Tensor<T> operator*(T val, const Tensor<T>& arr)
23,847✔
905
{
906
  return arr * val;
23,847✔
907
}
908

909
template<typename T>
910
Tensor<T> operator+(T val, const Tensor<T>& arr)
911
{
912
  return arr + val;
913
}
914

915
// Mixed-type arithmetic: Tensor<T1> op Tensor<T2> -> Tensor<double>
916
// A SFINAE guard is used here, as without !is_same Tensor<T> * Tensor<T>
917
// would be ambiguous between the member operator* and this non-member function.
918
template<typename T1, typename T2,
919
  typename = std::enable_if_t<!std::is_same<T1, T2>::value>>
920
Tensor<double> operator*(const Tensor<T1>& a, const Tensor<T2>& b)
68✔
921
{
922
  Tensor<double> r(a.shape());
68✔
923
  for (size_t i = 0; i < a.size(); ++i)
227✔
924
    r.data()[i] =
159✔
925
      static_cast<double>(a.data()[i]) * static_cast<double>(b.data()[i]);
159✔
926
  return r;
68✔
927
}
928

929
// Same SFINAE guard as operator* above.
930
template<typename T1, typename T2,
931
  typename = std::enable_if_t<!std::is_same<T1, T2>::value>>
932
Tensor<double> operator/(const Tensor<T1>& a, const Tensor<T2>& b)
2,278✔
933
{
934
  Tensor<double> r(a.shape());
2,278✔
935
  for (size_t i = 0; i < a.size(); ++i)
8,610✔
936
    r.data()[i] =
6,332✔
937
      static_cast<double>(a.data()[i]) / static_cast<double>(b.data()[i]);
6,332✔
938
  return r;
2,278✔
939
}
940

941
//==============================================================================
942
// Out-of-line method definitions (require complete types)
943
//==============================================================================
944

945
template<typename T>
946
template<typename U>
947
View<T>& View<T>::operator=(const Tensor<U>& other)
45,213✔
948
{
949
  size_t n = size();
22,358,650✔
950
  for (size_t i = 0; i < n; ++i)
22,358,650✔
951
    data_[flat_to_offset(i)] = static_cast<T>(other.data()[i]);
22,313,437✔
952
  return *this;
45,213✔
953
}
954

955
template<typename T>
956
template<typename U>
957
View<T>& View<T>::operator+=(const Tensor<U>& o)
1,585,729✔
958
{
959
  size_t n = size();
2,147,483,647✔
960
  for (size_t i = 0; i < n; ++i)
2,147,483,647✔
961
    data_[flat_to_offset(i)] += o.data()[i];
2,147,483,647✔
962
  return *this;
1,585,729✔
963
}
964

965
template<typename T>
966
Tensor<T> Tensor<T>::sum(size_t axis) const
2,782✔
967
{
968
  // Build output shape (all dims except the summed axis)
969
  vector<size_t> out_shape;
2,782✔
970
  for (size_t d = 0; d < shape_.size(); ++d)
9,857✔
971
    if (d != axis)
7,075✔
972
      out_shape.push_back(shape_[d]);
4,293✔
973

974
  // Split dimensions into three zones: outer | axis | inner
975
  size_t outer_size = 1;
976
  for (size_t d = 0; d < axis; ++d)
6,940✔
977
    outer_size *= shape_[d];
4,158✔
978
  size_t axis_size = shape_[axis];
2,782✔
979
  size_t inner_size = 1;
2,782✔
980
  for (size_t d = axis + 1; d < shape_.size(); ++d)
2,917✔
981
    inner_size *= shape_[d];
135✔
982

983
  Tensor<T> result(out_shape, T(0));
2,782✔
984
  for (size_t o = 0; o < outer_size; ++o)
4,833✔
985
    for (size_t a = 0; a < axis_size; ++a)
8,653✔
986
      for (size_t i = 0; i < inner_size; ++i)
13,534✔
987
        result.data()[o * inner_size + i] +=
6,932✔
988
          data_[(o * axis_size + a) * inner_size + i];
6,932✔
989

990
  return result;
2,782✔
991
}
2,782✔
992

993
//==============================================================================
994
// StaticTensor2D<T, R, C>: compile-time fixed 2D tensor.
995
//==============================================================================
996

997
//! Compile-time fixed-size 2D tensor with R rows and C columns, stored
//! row-major in a stack-allocated array that is value-initialized (zeroed).
template<typename T, size_t R, size_t C>
class StaticTensor2D {
public:
  using value_type = T;

  //--------------------------------------------------------------------------
  // Indexing

  //! Templated to accept enum class indices (e.g. GlobalTally, TallyResult)
  //! which don't implicitly convert to integer types.
  template<typename I0, typename I1>
  T& operator()(I0 i, I1 j)
  {
    return data_[static_cast<size_t>(i) * C + static_cast<size_t>(j)];
  }
  //! Const overload of 2D element access.
  template<typename I0, typename I1>
  const T& operator()(I0 i, I1 j) const
  {
    return data_[static_cast<size_t>(i) * C + static_cast<size_t>(j)];
  }

  //--------------------------------------------------------------------------
  // Accessors

  //! Pointer to the first element of the row-major storage.
  T* data() { return data_; }
  const T* data() const { return data_; }
  //! Total number of elements (R * C).
  constexpr size_t size() const { return R * C; }
  //! Shape as {rows, cols}, mirroring Tensor::shape().
  std::array<size_t, 2> shape() const { return {R, C}; }

  //--------------------------------------------------------------------------
  // Mutation

  //! Set every element to val.
  void fill(T val) { std::fill(data_, data_ + R * C, val); }

  //--------------------------------------------------------------------------
  // Iterators

  T* begin() { return data_; }
  T* end() { return data_ + R * C; }
  const T* begin() const { return data_; }
  const T* end() const { return data_ + R * C; }

  //--------------------------------------------------------------------------
  // View accessors

  //! Multi-axis slice (same interface as Tensor/View).
  template<typename First, typename... Rest>
  View<T> slice(First first, Rest... rest)
  {
    // Fixed shape {R, C} and row-major strides {C, 1} for a 2D array.
    vector<size_t> sh = {R, C};
    vector<size_t> st = {C, 1};
    auto r = detail::compute_slice(sh, st, first, rest...);
    return {data_ + r.ptr_offset, std::move(r.shape), std::move(r.strides)};
  }
  //! Const overload returning a read-only view.
  template<typename First, typename... Rest>
  View<const T> slice(First first, Rest... rest) const
  {
    vector<size_t> sh = {R, C};
    vector<size_t> st = {C, 1};
    auto r = detail::compute_slice(sh, st, first, rest...);
    return {data_ + r.ptr_offset, std::move(r.shape), std::move(r.strides)};
  }

  //! Flat view (1D, contiguous)
  View<T> flat() { return {data_, {R * C}, {size_t(1)}}; }
  View<const T> flat() const { return {data_, {R * C}, {size_t(1)}}; }

private:
  //--------------------------------------------------------------------------
  // Data members

  //! Row-major element storage; zero-initialized by default.
  T data_[R * C] = {};
};
1070

1071
//==============================================================================
1072
// Non-member functions
1073
//==============================================================================
1074

1075
template<typename T>
1076
Tensor<T> zeros(std::initializer_list<size_t> shape)
138,645✔
1077
{
1078
  vector<size_t> s(shape);
138,645✔
1079
  return Tensor<T>(std::move(s), T(0));
277,290✔
1080
}
138,645✔
1081

1082
template<typename T>
1083
Tensor<T> zeros(const vector<size_t>& shape)
56,477✔
1084
{
1085
  return Tensor<T>(shape, T(0));
112,954✔
1086
}
1087

1088
template<typename T>
1089
Tensor<T> ones(std::initializer_list<size_t> shape)
5,230✔
1090
{
1091
  vector<size_t> s(shape);
5,230✔
1092
  return Tensor<T>(std::move(s), T(1));
10,460✔
1093
}
5,230✔
1094

1095
template<typename T>
1096
Tensor<T> ones(const vector<size_t>& shape)
1097
{
1098
  return Tensor<T>(shape, T(1));
1099
}
1100

1101
template<typename T>
1102
Tensor<T> zeros_like(const Tensor<T>& o)
11✔
1103
{
1104
  return Tensor<T>(o.shape(), T(0));
22✔
1105
}
1106

1107
template<typename T, typename V>
1108
Tensor<T> full_like(const Tensor<T>& o, V val)
1109
{
1110
  return Tensor<T>(o.shape(), static_cast<T>(val));
1111
}
1112

1113
//! Return a 1D tensor of n evenly spaced values from start to stop (inclusive)
1114
template<typename T>
1115
Tensor<T> linspace(T start, T stop, size_t n)
34,802✔
1116
{
1117
  Tensor<T> result({n});
34,802✔
1118
  if (n < 2) {
34,802!
1119
    result[0] = start;
×
1120
    return result;
×
1121
  }
1122
  for (size_t i = 0; i < n; ++i) {
232,033,691✔
1123
    result[i] =
231,998,889✔
1124
      start + static_cast<T>(i) * (stop - start) / static_cast<T>(n - 1);
231,998,889✔
1125
  }
1126
  return result;
1127
}
1128

1129
//! Concatenate two 1D tensors end-to-end
1130
template<typename T>
1131
Tensor<T> concatenate(const Tensor<T>& a, const Tensor<T>& b)
11✔
1132
{
1133
  size_t total = a.size() + b.size();
11✔
1134
  Tensor<T> result({total});
11✔
1135
  std::copy(a.data(), a.data() + a.size(), result.data());
11✔
1136
  std::copy(b.data(), b.data() + b.size(), result.data() + a.size());
11✔
1137
  return result;
11✔
1138
}
1139

1140
//! Element-wise natural logarithm
1141
template<typename T>
1142
Tensor<T> log(const Tensor<T>& a)
10,039✔
1143
{
1144
  Tensor<T> r(a.shape());
10,039✔
1145
  for (size_t i = 0; i < a.size(); ++i)
12,712,063✔
1146
    r.data()[i] = std::log(a.data()[i]);
12,702,024✔
1147
  return r;
10,039✔
1148
}
1149

1150
//! Element-wise absolute value
1151
template<typename T>
1152
Tensor<T> abs(const Tensor<T>& a)
127,041,210✔
1153
{
1154
  Tensor<T> r(a.shape());
127,041,210✔
1155
  for (size_t i = 0; i < a.size(); ++i)
258,827,779✔
1156
    r.data()[i] = std::abs(a.data()[i]);
131,786,569✔
1157
  return r;
127,041,210✔
1158
}
1159

1160
//! Element-wise conditional: select from true_val where cond is true,
1161
//! otherwise use false_val
1162
template<typename T, typename V>
1163
Tensor<T> where(
2,838✔
1164
  const Tensor<bool>& cond, const Tensor<T>& true_val, V false_val)
1165
{
1166
  Tensor<T> r(cond.shape());
9,097✔
1167
  for (size_t i = 0; i < cond.size(); ++i)
11,570,860✔
1168
    r.data()[i] =
11,561,763✔
1169
      cond.data()[i] ? true_val.data()[i] : static_cast<T>(false_val);
11,561,763✔
1170
  return r;
9,097✔
1171
}
1172

1173
//! Replace NaN/Inf values with finite substitutes
1174
template<typename T>
1175
Tensor<T> nan_to_num(const Tensor<T>& a, T nan_val = T(0),
2,931✔
1176
  T posinf_val = std::numeric_limits<T>::max(),
1177
  T neginf_val = std::numeric_limits<T>::lowest())
1178
{
1179
  Tensor<T> r(a.shape());
2,931✔
1180
  for (size_t i = 0; i < a.size(); ++i) {
259,461✔
1181
    T val = a.data()[i];
256,530✔
1182
    if (std::isnan(val))
256,530✔
1183
      r.data()[i] = nan_val;
214,397✔
1184
    else if (std::isinf(val))
42,133!
1185
      r.data()[i] = val > 0 ? posinf_val : neginf_val;
×
1186
    else
1187
      r.data()[i] = val;
42,133✔
1188
  }
1189
  return r;
2,931✔
1190
}
1191

1192
//==============================================================================
1193
// Type traits
1194
//==============================================================================
1195

1196
//! Type trait that is true for Tensor and StaticTensor2D.
//! Used by hdf5_interface.h to select the correct write_dataset overload.
//! Primary template: anything else is not a tensor.
template<typename T>
struct is_tensor : std::false_type {};

//! Partial specialization: any Tensor<T> is a tensor.
template<typename T>
struct is_tensor<Tensor<T>> : std::true_type {};

//! Partial specialization: any StaticTensor2D<T, R, C> is a tensor.
template<typename T, size_t R, size_t C>
struct is_tensor<StaticTensor2D<T, R, C>> : std::true_type {};
1206

1207
} // namespace tensor
1208
} // namespace openmc
1209

1210
#endif // OPENMC_TENSOR_H
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc