NVIDIA / MatX, build 780
Pull Request #741 (CI: jenkins): optimize our iterator to avoid an unnecessary constructor call
28 Aug 2024 06:04PM UTC. Coverage: 93.39% (6622 of 7091 relevant lines covered), unchanged from the previous build. 20,869,734.09 hits per line.

Source file: /include/matx/core/iterator.h (98.11% of lines covered)

////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
//    contributors may be used to endorse or promote products derived from
//    this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////

#pragma once

#include "matx/core/defines.h"
#include "matx/core/tensor_utils.h"

namespace matx {
/**
 * @brief Iterator around operators for libraries that can take iterators as input (CUB).
 *
 * @tparam OperatorType Type of the operator to iterate over
 * @tparam ConvertType If true, MatX types are converted to their underlying types via detail::convert_matx_type_t
 */
template <typename OperatorType, bool ConvertType = true>
struct RandomOperatorIterator {
  using self_type = RandomOperatorIterator<OperatorType, ConvertType>;
  using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
  // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
  //                         index_t>;
  using stride_type = index_t;
  using pointer = value_type*;
  using reference = value_type&;
  using iterator_category = std::random_access_iterator_tag;
  using difference_type = index_t;
  using OperatorBaseType = typename detail::base_type_t<OperatorType>;

  __MATX_INLINE__ RandomOperatorIterator(const RandomOperatorIterator &) = default;
  __MATX_INLINE__ RandomOperatorIterator(RandomOperatorIterator &&) = default;
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorIterator(const OperatorType &t) : t_(t), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorIterator(OperatorType &&t) : t_(t), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorIterator(const OperatorType &t, stride_type offset) : t_(t), offset_(offset) {}
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorIterator(OperatorType &&t, stride_type offset) : t_(t), offset_(offset) {}

  template<typename T = OperatorType, std::enable_if_t<!std::is_same<T, OperatorBaseType>::value, bool> = true>
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorIterator(const OperatorBaseType &t, stride_type offset) : t_(t), offset_(offset) {}

  template<typename T = OperatorType, std::enable_if_t<!std::is_same<T, OperatorBaseType>::value, bool> = true>
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorIterator(OperatorBaseType &&t, stride_type offset) : t_(t), offset_(offset) {}

  /**
   * @brief Dereference value at a pre-computed offset
   *
   * @return Value at offset
   */
  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ value_type operator*() const
  {
    if constexpr (OperatorType::Rank() == 0) {
      const auto tmp = t_.operator()();
      return tmp;
    }
    else {
      auto arrs = detail::GetIdxFromAbs(t_, offset_);
      return cuda::std::apply([&](auto &&...args) -> value_type {
          const auto tmp = t_.operator()(args...);
          return tmp;
        }, arrs);
    }
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator=(const self_type &rhs)
  {
    if constexpr (is_tensor_view_v<OperatorType>) {
      t_.copy(rhs.t_);
    }
    offset_ = rhs.offset_;
    return *this;
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator+(difference_type offset) const
  {
    return self_type{t_, offset_ + offset};
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ value_type operator[](difference_type offset) const
  {
    if constexpr (OperatorType::Rank() == 0) {
      return static_cast<value_type>(t_.operator()());
    }
    else {
      auto arrs = detail::GetIdxFromAbs(t_, offset_ + offset);
      return cuda::std::apply([&](auto &&...args) -> value_type {
          return static_cast<value_type>(t_.operator()(args...));
        }, arrs);
    }
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++(int)
  {
      self_type retval = *this;
      offset_++;
      return retval;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++()
  {
      offset_++;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ difference_type offset()
  {
      return offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator+=(difference_type offset)
  {
      offset_ += offset;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator-=(difference_type offset)
  {
      offset_ -= offset;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator!=(const self_type &a, const self_type &b)
  {
    return a.offset_ != b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator==(const self_type &a, const self_type &b)
  {
    return a.offset_ == b.offset_;
  }

  static __MATX_INLINE__ constexpr __MATX_HOST__ __MATX_DEVICE__ int32_t Rank() {
    return OperatorType::Rank();
  }

  constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const
  {
    return t_.Size(dim);
  }

  OperatorBaseType t_;
  stride_type offset_;
};
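
// Usage sketch (illustrative, not part of the original source): this iterator
// lets CUB device-wide algorithms read any operator as a flat input range.
// The operator `op`, the temp-storage pointers, `d_out`, and `stream` below
// are assumed names for the example only.
//
//   auto in = matx::RandomOperatorIterator{op};  // linearized read-only view
//   cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
//                          in, d_out, static_cast<int>(TotalSize(op)), stream);
//
// Dereferencing converts the linear offset_ back to per-dimension indices via
// detail::GetIdxFromAbs() and invokes op(...), so no buffer is materialized.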

template <typename OperatorType, bool ConvertType = true>
__MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t operator-(const RandomOperatorIterator<OperatorType, ConvertType> &a, const RandomOperatorIterator<OperatorType, ConvertType> &b)
{
  return a.offset_ - b.offset_;
}

/**
 * @brief Iterator around operators for libraries that can take iterators as output (CUB).
 *
 * @tparam OperatorType Type of the operator to iterate over
 * @tparam ConvertType If true, MatX types are converted to their underlying types via detail::convert_matx_type_t
 */
template <typename OperatorType, bool ConvertType = true>
struct RandomOperatorOutputIterator {
  using self_type = RandomOperatorOutputIterator<OperatorType, ConvertType>;
  using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
  // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
  //                         index_t>;
  using stride_type = index_t;
  using pointer = value_type*;
  using reference = value_type&;
  using iterator_category = std::random_access_iterator_tag;
  using difference_type = index_t;
  using OperatorBaseType = typename detail::base_type_t<OperatorType>;

  __MATX_INLINE__ RandomOperatorOutputIterator(RandomOperatorOutputIterator &&) = default;
  __MATX_INLINE__ RandomOperatorOutputIterator(const RandomOperatorOutputIterator &) = default;
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorOutputIterator(OperatorType &&t) : t_(t), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorOutputIterator(const OperatorType &t) : t_(t), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorOutputIterator(const OperatorType &t, stride_type offset) : t_(t), offset_(offset) {}
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorOutputIterator(OperatorType &&t, stride_type offset) : t_(t), offset_(offset) {}

  template<typename T = OperatorType, std::enable_if_t<!std::is_same<T, OperatorBaseType>::value, bool> = true>
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorOutputIterator(const OperatorBaseType &t, stride_type offset) : t_(t), offset_(offset) {}

  template<typename T = OperatorType, std::enable_if_t<!std::is_same<T, OperatorBaseType>::value, bool> = true>
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorOutputIterator(OperatorBaseType &&t, stride_type offset) : t_(t), offset_(offset) {}

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ reference operator*()
  {
    if constexpr (OperatorType::Rank() == 0) {
      auto &tmp = t_.operator()();
      return tmp;
    }
    else {
      auto arrs = detail::GetIdxFromAbs(t_, offset_);

      return cuda::std::apply([&](auto &&...args) -> reference {
          auto &tmp = t_.operator()(args...);
          return tmp;
        }, arrs);
    }
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator+(difference_type offset) const
  {
    return self_type{t_, offset_ + offset};
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ reference operator[](difference_type offset)
  {
    return *self_type{t_, offset_ + offset};
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++(int)
  {
      self_type retval = *this;
      offset_++;
      return retval;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++()
  {
      offset_++;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator+=(difference_type offset)
  {
      offset_ += offset;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator=(const self_type &rhs)
  {
    t_.copy(rhs.t_);
    offset_ = rhs.offset_;
    return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator-(difference_type offset) const
  {
      return self_type{t_, offset_ - offset};
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator-=(difference_type offset)
  {
      offset_ -= offset;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator--() {
    --offset_;
    return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator!=(const self_type &a, const self_type &b)
  {
    return a.offset_ != b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator==(const self_type &a, const self_type &b)
  {
    return a.offset_ == b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator<(const self_type &a, const self_type &b) {
    return a.offset_ < b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator>(const self_type &a, const self_type &b) {
    return a.offset_ > b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator<=(const self_type &a, const self_type &b) {
    return a.offset_ <= b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator>=(const self_type &a, const self_type &b) {
    return a.offset_ >= b.offset_;
  }

  static __MATX_INLINE__ constexpr __MATX_HOST__ __MATX_DEVICE__ int32_t Rank() {
    return OperatorType::Rank();
  }

  constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const
  {
    return t_.Size(dim);
  }

  OperatorBaseType t_;
  stride_type offset_;
};
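
// Usage sketch (illustrative, not part of the original source): because
// operator* returns a mutable reference, CUB can write results through an
// arbitrary operator. `in`, `op_out`, the temp-storage pointers, and
// `num_items` are assumed names for the example only.
//
//   auto out = matx::RandomOperatorOutputIterator{op_out};
//   cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes,
//                                 in, out, num_items);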
/**
 * @brief Iterator around operators for libraries that can take iterators as input/output (Thrust).
 *
 * @tparam OperatorType Type of the operator to iterate over
 * @tparam ConvertType If true, MatX types are converted to their underlying types via detail::convert_matx_type_t
 */
template <typename OperatorType, bool ConvertType = true>
struct RandomOperatorThrustIterator {
  using self_type = RandomOperatorThrustIterator<OperatorType, ConvertType>;
  using const_strip_type = remove_cvref_t<typename OperatorType::value_type>;
  using value_type = typename std::conditional_t<ConvertType,
              detail::convert_matx_type_t<const_strip_type>,
              const_strip_type>;
  // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
  //                         index_t>;
  using stride_type = index_t;
  using pointer = cuda::std::remove_const_t<value_type>*;
  using reference = cuda::std::remove_const_t<value_type>&;
  using const_reference = cuda::std::remove_const_t<value_type>&;
  using iterator_category = std::random_access_iterator_tag;
  using difference_type = index_t;
  using OperatorBaseType = typename detail::base_type_t<OperatorType>;

  __MATX_INLINE__ RandomOperatorThrustIterator(RandomOperatorThrustIterator &&) = default;
  __MATX_INLINE__ RandomOperatorThrustIterator(const RandomOperatorThrustIterator &) = default;
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorThrustIterator(OperatorType &&t) : t_(t), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorThrustIterator(const OperatorType &t) : t_(t), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorThrustIterator(const OperatorType &t, stride_type offset) : t_(t), offset_(offset) {}
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorThrustIterator(OperatorType &&t, stride_type offset) : t_(t), offset_(offset) {}

  template<typename T = OperatorType, std::enable_if_t<!std::is_same<T, OperatorBaseType>::value, bool> = true>
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorThrustIterator(const OperatorBaseType &t, stride_type offset) : t_(t), offset_(offset) {}

  template<typename T = OperatorType, std::enable_if_t<!std::is_same<T, OperatorBaseType>::value, bool> = true>
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ RandomOperatorThrustIterator(OperatorBaseType &&t, stride_type offset) : t_(t), offset_(offset) {}

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ reference operator*() const
  {
    if constexpr (OperatorType::Rank() == 0) {
      auto &tmp = const_cast<const_strip_type&>(t_.operator()());
      return tmp;
    }
    else {
      auto arrs = detail::GetIdxFromAbs(t_, offset_);

      return cuda::std::apply([&](auto &&...args) -> reference {
          auto &tmp = const_cast<const_strip_type&>(t_.operator()(args...));
          return tmp;
        }, arrs);
    }
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator+(difference_type offset) const
  {
    return self_type{t_, offset_ + offset};
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ reference operator[](difference_type offset)
  {
    return *self_type{t_, offset_ + offset};
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++(int)
  {
      self_type retval = *this;
      offset_++;
      return retval;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++()
  {
      offset_++;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator+=(difference_type offset)
  {
      offset_ += offset;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator=(const self_type &rhs)
  {
    t_.copy(rhs.t_);
    offset_ = rhs.offset_;
    return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator-(difference_type offset) const
  {
      return self_type{t_, offset_ - offset};
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator-=(difference_type offset)
  {
      offset_ -= offset;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator--() {
    --offset_;
    return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator!=(const self_type &a, const self_type &b)
  {
    return a.offset_ != b.offset_;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ friend bool operator==(const self_type &a, const self_type &b)
  {
    return a.offset_ == b.offset_;
  }

  static __MATX_INLINE__ constexpr __MATX_HOST__ __MATX_DEVICE__ int32_t Rank() {
    return OperatorType::Rank();
  }

  constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const
  {
    return t_.Size(dim);
  }

  OperatorBaseType t_;
  stride_type offset_;
};
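
// Usage sketch (illustrative, not part of the original source): the Thrust
// variant strips const from value_type/reference so that mutating Thrust
// algorithms type-check. `t` is an assumed device-accessible operator backed
// by real storage (e.g. a tensor view).
//
//   thrust::sort(thrust::device,
//                RandomOperatorThrustIterator{t},
//                RandomOperatorThrustIterator{t, TotalSize(t)});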

template <typename OperatorType>
struct BeginOffset {
  using self_type = BeginOffset<OperatorType>;
  using value_type = index_t;
  // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
  //                         index_t>;
  using stride_type = index_t;
  using pointer = value_type*;
  using reference = value_type;
  using iterator_category = std::random_access_iterator_tag;
  using difference_type = index_t;

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ BeginOffset(const OperatorType &t) : size_(t.Size(t.Rank() - 1)), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ BeginOffset(const OperatorType &t, stride_type offset) : size_(t.Size(t.Rank() - 1)), offset_(offset) {}
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ BeginOffset(stride_type size, stride_type offset) : size_(size), offset_(offset) {}

  /**
   * @brief Dereference value at a pre-computed offset
   *
   * @return Value at offset
   */
  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ stride_type operator*() const
  {
    return offset_ * size_;
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator+(difference_type offset) const
  {
    return self_type{size_, offset_ + offset};
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ stride_type operator[](difference_type offset) const
  {
    return *self_type{size_, offset_ + offset};
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++(int)
  {
      self_type retval = *this;
      offset_++;
      return retval;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator++()
  {
      offset_++;
      return *this;
  }

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type& operator+=(difference_type offset)
  {
      offset_ += offset;
      return *this;
  }

  stride_type size_;
  stride_type offset_;
};

template <typename OperatorType>
struct EndOffset {
  using self_type = EndOffset<OperatorType>;
  using value_type = index_t;
  // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
  //                         index_t>;
  using stride_type = index_t;
  using pointer = value_type*;
  using reference = value_type;
  using iterator_category = std::random_access_iterator_tag;
  using difference_type = index_t;

  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ EndOffset(const OperatorType &t) : size_(t.Size(t.Rank() - 1)), offset_(0) { }
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ EndOffset(const OperatorType &t, stride_type offset) : size_(t.Size(t.Rank() - 1)), offset_(offset) {}
  __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ EndOffset(stride_type size, stride_type offset) : size_(size), offset_(offset) {}

  /**
   * @brief Dereference value at a pre-computed offset
   *
   * @return Value at offset
   */
  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ stride_type operator*() const
  {
    return (offset_ + 1) * size_;
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ self_type operator+(difference_type offset) const
  {
    return self_type{size_, offset_ + offset};
  }

  [[nodiscard]] __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ stride_type operator[](difference_type offset) const
  {
    return (offset + 1) * size_;
  }

  stride_type size_;
  stride_type offset_;
};
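
// Usage sketch (illustrative, not part of the original source): BeginOffset and
// EndOffset dereference to i * size and (i + 1) * size over the innermost
// dimension, which is the shape CUB's segmented algorithms expect for their
// segment-offset iterators. `t`, `d_out`, the temp-storage pointers, and
// `stream` are assumed names; the rows of `t` are treated as the segments.
//
//   int num_segments = static_cast<int>(TotalSize(t) / t.Size(t.Rank() - 1));
//   cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
//                                   RandomOperatorIterator{t}, d_out,
//                                   num_segments, BeginOffset{t}, EndOffset{t},
//                                   stream);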

template <typename OperatorType>
__MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t operator-(const RandomOperatorOutputIterator<OperatorType> &a, const RandomOperatorOutputIterator<OperatorType> &b)
{
  return a.offset_ - b.offset_;
}

template <typename Op>
auto __MATX_INLINE__ __MATX_HOST__ cbegin(Op &&op) {
  return RandomOperatorIterator{static_cast<typename detail::base_type_t<Op>>(op)};
}

template <typename Op>
auto __MATX_INLINE__ __MATX_HOST__ cend(Op &&op) {
  return RandomOperatorIterator{static_cast<typename detail::base_type_t<Op>>(op), TotalSize(op)};
}

template <typename Op>
auto __MATX_INLINE__ __MATX_HOST__ begin(Op &&op) {
  return RandomOperatorOutputIterator{static_cast<typename detail::base_type_t<Op>>(op)};
}

template <typename Op>
auto __MATX_INLINE__ __MATX_HOST__ end(Op &&op) {
  return RandomOperatorOutputIterator{static_cast<typename detail::base_type_t<Op>>(op), TotalSize(op)};
}
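
// Usage sketch (illustrative, not part of the original source): the
// begin()/end() and cbegin()/cend() helpers make an operator usable with
// standard iterator-pair algorithms. `op` is an assumed host-accessible
// operator yielding float.
//
//   std::vector<float> host_vec(TotalSize(op));
//   std::copy(matx::cbegin(op), matx::cend(op), host_vec.begin());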

} // namespace matx