• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

JuliaLang / julia / #37998

01 Feb 2025 04:36AM UTC coverage: 25.515% (+8.2%) from 17.283%
#37998

push

local

web-flow
🤖 [master] Bump the LinearAlgebra stdlib from da6d052 to 57e9a0d (#57177)

Stdlib: LinearAlgebra
URL: https://github.com/JuliaLang/LinearAlgebra.jl.git
Stdlib branch: master
Julia branch: master
Old commit: da6d052
New commit: 57e9a0d
Julia version: 1.12.0-DEV
LinearAlgebra version: 1.12.0
Bump invoked by: @ViralBShah
Powered by:
[BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl)

Diff:
https://github.com/JuliaLang/LinearAlgebra.jl/compare/da6d05213...57e9a0d19

```
$ git log --oneline da6d052..57e9a0d
57e9a0d Reduce allocations and improve performance in `syevr!` (#1176)
8bb9f6b fix error messages (#1171)
97a712f Update .ci/Manifest.toml (#1179)
```

Co-authored-by: ViralBShah <744411+ViralBShah@users.noreply.github.com>
Co-authored-by: Viral B. Shah <viral@juliacomputing.com>

12529 of 49105 relevant lines covered (25.51%)

393354.34 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

6.44
/base/reinterpretarray.jl
1
# This file is a part of Julia. License is MIT: https://julialang.org/license
2

3
"""
4
Gives a reinterpreted view (of element type T) of the underlying array (of element type S).
5
If the size of `T` differs from the size of `S`, the array will be compressed/expanded in
6
the first dimension. The variant `reinterpret(reshape, T, a)` instead adds or consumes the first dimension
7
depending on the ratio of element sizes.
8
"""
9
struct ReinterpretArray{T,N,S,A<:AbstractArray{S},IsReshaped} <: AbstractArray{T, N}
10
    parent::A
11
    readable::Bool
12
    writable::Bool
13

14
    function throwbits(S::Type, T::Type, U::Type)
×
15
        @noinline
×
16
        throw(ArgumentError(LazyString("cannot reinterpret `", S, "` as `", T, "`, type `", U, "` is not a bits type")))
×
17
    end
18
    function throwsize0(S::Type, T::Type, msg)
×
19
        @noinline
×
20
        throw(ArgumentError(LazyString("cannot reinterpret a zero-dimensional `", S, "` array to `", T,
×
21
            "` which is of a ", msg, " size")))
22
    end
23
    function throwsingleton(S::Type, T::Type)
×
24
        @noinline
×
25
        throw(ArgumentError(LazyString("cannot reinterpret a `", S, "` array to `", T, "` which is a singleton type")))
×
26
    end
27

28
    global reinterpret
29

30
    @doc """
31
        reinterpret(T::DataType, A::AbstractArray)
32

33
    Construct a view of the array with the same binary data as the given
34
    array, but with `T` as element type.
35

36
    This function also works on "lazy" arrays whose elements are not computed until they are explicitly retrieved.
37
    For instance, `reinterpret` on the range `1:6` works similarly as on the dense vector `collect(1:6)`:
38

39
    ```jldoctest
40
    julia> reinterpret(Float32, UInt32[1 2 3 4 5])
41
    1×5 reinterpret(Float32, ::Matrix{UInt32}):
42
     1.0f-45  3.0f-45  4.0f-45  6.0f-45  7.0f-45
43

44
    julia> reinterpret(Complex{Int}, 1:6)
45
    3-element reinterpret(Complex{$Int}, ::UnitRange{$Int}):
46
     1 + 2im
47
     3 + 4im
48
     5 + 6im
49
    ```
50

51
    If the location of padding bits does not line up between `T` and `eltype(A)`, the resulting array will be
52
    read-only or write-only, to prevent invalid bits from being written to or read from, respectively.
53

54
    ```jldoctest
55
    julia> a = reinterpret(Tuple{UInt8, UInt32}, UInt32[1, 2])
56
    1-element reinterpret(Tuple{UInt8, UInt32}, ::Vector{UInt32}):
57
     (0x01, 0x00000002)
58

59
    julia> a[1] = 3
60
    ERROR: Padding of type Tuple{UInt8, UInt32} is not compatible with type UInt32.
61

62
    julia> b = reinterpret(UInt32, Tuple{UInt8, UInt32}[(0x01, 0x00000002)]); # showing will error
63

64
    julia> b[1]
65
    ERROR: Padding of type UInt32 is not compatible with type Tuple{UInt8, UInt32}.
66
    ```
67
    """
68
    function reinterpret(::Type{T}, a::A) where {T,N,S,A<:AbstractArray{S, N}}
69
        function thrownonint(S::Type, T::Type, dim)
1✔
70
            @noinline
×
71
            throw(ArgumentError(LazyString(
×
72
                "cannot reinterpret an `", S, "` array to `", T, "` whose first dimension has size `", dim,
73
                "`. The resulting array would have a non-integral first dimension.")))
74
        end
75
        function throwaxes1(S::Type, T::Type, ax1)
1✔
76
            @noinline
77
            throw(ArgumentError(LazyString("cannot reinterpret a `", S, "` array to `", T,
78
                "` when the first axis is ", ax1, ". Try reshaping first.")))
79
        end
80
        isbitstype(T) || throwbits(S, T, T)
1✔
81
        isbitstype(S) || throwbits(S, T, S)
1✔
82
        (N != 0 || sizeof(T) == sizeof(S)) || throwsize0(S, T, "different")
1✔
83
        if N != 0 && sizeof(S) != sizeof(T)
1✔
84
            ax1 = axes(a)[1]
17✔
85
            dim = length(ax1)
1✔
86
            if issingletontype(T)
1✔
87
                issingletontype(S) || throwsingleton(S, T)
×
88
            else
89
                rem(dim*sizeof(S),sizeof(T)) == 0 || thrownonint(S, T, dim)
17✔
90
            end
91
            first(ax1) == 1 || throwaxes1(S, T, ax1)
1✔
92
        end
93
        readable = array_subpadding(T, S)
1✔
94
        writable = array_subpadding(S, T)
1✔
95
        new{T, N, S, A, false}(a, readable, writable)
17✔
96
    end
97
    reinterpret(::Type{T}, a::AbstractArray{T}) where {T} = a
×
98

99
    # With reshaping
100
    function reinterpret(::typeof(reshape), ::Type{T}, a::A) where {T,S,A<:AbstractArray{S}}
×
101
        function throwintmult(S::Type, T::Type)
×
102
            @noinline
×
103
            throw(ArgumentError(LazyString("`reinterpret(reshape, T, a)` requires that one of `sizeof(T)` (got ",
×
104
                sizeof(T), ") and `sizeof(eltype(a))` (got ", sizeof(S), ") be an integer multiple of the other")))
105
        end
106
        function throwsize1(a::AbstractArray, T::Type)
×
107
            @noinline
×
108
            throw(ArgumentError(LazyString("`reinterpret(reshape, ", T, ", a)` where `eltype(a)` is ", eltype(a),
×
109
                " requires that `axes(a, 1)` (got ", axes(a, 1), ") be equal to 1:",
110
                sizeof(T) ÷ sizeof(eltype(a)), " (from the ratio of element sizes)")))
111
        end
112
        function throwfromsingleton(S, T)
×
113
            @noinline
×
114
            throw(ArgumentError(LazyString("`reinterpret(reshape, ", T, ", a)` where `eltype(a)` is ", S,
×
115
                " requires that ", T, " be a singleton type, since ", S, " is one")))
116
        end
117
        isbitstype(T) || throwbits(S, T, T)
×
118
        isbitstype(S) || throwbits(S, T, S)
×
119
        if sizeof(S) == sizeof(T)
×
120
            N = ndims(a)
×
121
        elseif sizeof(S) > sizeof(T)
×
122
            issingletontype(T) && throwsingleton(S, T)
×
123
            rem(sizeof(S), sizeof(T)) == 0 || throwintmult(S, T)
×
124
            N = ndims(a) + 1
×
125
        else
126
            issingletontype(S) && throwfromsingleton(S, T)
×
127
            rem(sizeof(T), sizeof(S)) == 0 || throwintmult(S, T)
×
128
            N = ndims(a) - 1
×
129
            N > -1 || throwsize0(S, T, "larger")
×
130
            axes(a, 1) == OneTo(sizeof(T) ÷ sizeof(S)) || throwsize1(a, T)
×
131
        end
132
        readable = array_subpadding(T, S)
×
133
        writable = array_subpadding(S, T)
×
134
        new{T, N, S, A, true}(a, readable, writable)
×
135
    end
136
    reinterpret(::typeof(reshape), ::Type{T}, a::AbstractArray{T}) where {T} = a
×
137
end
138

139
ReshapedReinterpretArray{T,N,S,A<:AbstractArray{S}} = ReinterpretArray{T,N,S,A,true}
140
NonReshapedReinterpretArray{T,N,S,A<:AbstractArray{S, N}} = ReinterpretArray{T,N,S,A,false}
141

142
"""
143
    reinterpret(reshape, T, A::AbstractArray{S}) -> B
144

145
Change the type-interpretation of `A` while consuming or adding a "channel dimension."
146

147
If `sizeof(T) = n*sizeof(S)` for `n>1`, `A`'s first dimension must be
148
of size `n` and `B` lacks `A`'s first dimension. Conversely, if `sizeof(S) = n*sizeof(T)` for `n>1`,
149
`B` gets a new first dimension of size `n`. The dimensionality is unchanged if `sizeof(T) == sizeof(S)`.
150

151
!!! compat "Julia 1.6"
152
    This method requires at least Julia 1.6.
153

154
# Examples
155

156
```jldoctest
157
julia> A = [1 2; 3 4]
158
2×2 Matrix{$Int}:
159
 1  2
160
 3  4
161

162
julia> reinterpret(reshape, Complex{Int}, A)    # the result is a vector
163
2-element reinterpret(reshape, Complex{$Int}, ::Matrix{$Int}) with eltype Complex{$Int}:
164
 1 + 3im
165
 2 + 4im
166

167
julia> a = [(1,2,3), (4,5,6)]
168
2-element Vector{Tuple{$Int, $Int, $Int}}:
169
 (1, 2, 3)
170
 (4, 5, 6)
171

172
julia> reinterpret(reshape, Int, a)             # the result is a matrix
173
3×2 reinterpret(reshape, $Int, ::Vector{Tuple{$Int, $Int, $Int}}) with eltype $Int:
174
 1  4
175
 2  5
176
 3  6
177
```
178
"""
179
reinterpret(::typeof(reshape), T::Type, a::AbstractArray)
180

181
reinterpret(::Type{T}, a::NonReshapedReinterpretArray) where {T} = reinterpret(T, a.parent)
×
182
reinterpret(::typeof(reshape), ::Type{T}, a::ReshapedReinterpretArray) where {T} = reinterpret(reshape, T, a.parent)
×
183

184
# Definition of StridedArray
185
StridedFastContiguousSubArray{T,N,A<:DenseArray} = FastContiguousSubArray{T,N,A}
186
StridedReinterpretArray{T,N,A<:Union{DenseArray,StridedFastContiguousSubArray},IsReshaped} = ReinterpretArray{T,N,S,A,IsReshaped} where S
187
StridedReshapedArray{T,N,A<:Union{DenseArray,StridedFastContiguousSubArray,StridedReinterpretArray}} = ReshapedArray{T,N,A}
188
StridedSubArray{T,N,A<:Union{DenseArray,StridedReshapedArray,StridedReinterpretArray},
189
    I<:Tuple{Vararg{Union{RangeIndex, ReshapedUnitRange, AbstractCartesianIndex}}}} = SubArray{T,N,A,I}
190
StridedArray{T,N} = Union{DenseArray{T,N}, StridedSubArray{T,N}, StridedReshapedArray{T,N}, StridedReinterpretArray{T,N}}
191
StridedVector{T} = StridedArray{T,1}
192
StridedMatrix{T} = StridedArray{T,2}
193
StridedVecOrMat{T} = Union{StridedVector{T}, StridedMatrix{T}}
194

195
strides(a::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}) = size_to_strides(1, size(a)...)
×
196
stride(A::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}, k::Integer) =
×
197
    k ≤ ndims(A) ? strides(A)[k] : length(A)
198

199
function strides(a::ReinterpretArray{T,<:Any,S,<:AbstractArray{S},IsReshaped}) where {T,S,IsReshaped}
×
200
    _checkcontiguous(Bool, a) && return size_to_strides(1, size(a)...)
×
201
    stp = strides(parent(a))
×
202
    els, elp = sizeof(T), sizeof(S)
×
203
    els == elp && return stp # 0dim parent is also handled here.
×
204
    IsReshaped && els < elp && return (1, _checked_strides(stp, els, elp)...)
×
205
    stp[1] == 1 || throw(ArgumentError("Parent must be contiguous in the 1st dimension!"))
×
206
    st′ = _checked_strides(tail(stp), els, elp)
×
207
    return IsReshaped ? st′ : (1, st′...)
×
208
end
209

210
@inline function _checked_strides(stp::Tuple, els::Integer, elp::Integer)
×
211
    if elp > els && rem(elp, els) == 0
×
212
        N = div(elp, els)
×
213
        return map(i -> N * i, stp)
×
214
    end
215
    drs = map(i -> divrem(elp * i, els), stp)
×
216
    all(i->iszero(i[2]), drs) ||
×
217
        throw(ArgumentError("Parent's strides could not be exactly divided!"))
218
    map(first, drs)
×
219
end
220

221
_checkcontiguous(::Type{Bool}, A::ReinterpretArray) = _checkcontiguous(Bool, parent(A))
×
222

223
similar(a::ReinterpretArray, T::Type, d::Dims) = similar(a.parent, T, d)
16✔
224

225
function check_readable(a::ReinterpretArray{T, N, S} where N) where {T,S}
226
    # See comment in check_writable
227
    if !a.readable && !array_subpadding(T, S)
4✔
228
        throw(PaddingError(T, S))
×
229
    end
230
end
231

232
function check_writable(a::ReinterpretArray{T, N, S} where N) where {T,S}
×
233
    # `array_subpadding` is relatively expensive (compared to a simple arrayref),
234
    # so it is cached in the array. However, it is computable at compile time if,
235
    # inference has the types available. By using this form of the check, we can
236
    # get the best of both worlds for the success case. If the types were not
237
    # available to inference, we simply need to check the field (relatively cheap)
238
    # and if they were we should be able to fold this check away entirely.
239
    if !a.writable && !array_subpadding(S, T)
×
240
        throw(PaddingError(T, S))
×
241
    end
242
end
243

244
## IndexStyle specializations
245

246
# For `reinterpret(reshape, T, a)` where we're adding a channel dimension and with
247
# `IndexStyle(a) == IndexLinear()`, it's advantageous to retain pseudo-linear indexing.
248
struct IndexSCartesian2{K} <: IndexStyle end   # K = sizeof(S) ÷ sizeof(T), a static-sized 2d cartesian iterator
249

250
IndexStyle(::Type{ReinterpretArray{T,N,S,A,false}}) where {T,N,S,A<:AbstractArray{S,N}} = IndexStyle(A)
×
251
function IndexStyle(::Type{ReinterpretArray{T,N,S,A,true}}) where {T,N,S,A<:AbstractArray{S}}
×
252
    if sizeof(T) < sizeof(S)
×
253
        IndexStyle(A) === IndexLinear() && return IndexSCartesian2{sizeof(S) ÷ sizeof(T)}()
×
254
        return IndexCartesian()
×
255
    end
256
    return IndexStyle(A)
×
257
end
258
IndexStyle(::IndexSCartesian2{K}, ::IndexSCartesian2{K}) where {K} = IndexSCartesian2{K}()
×
259

260
struct SCartesianIndex2{K}   # can't make <:AbstractCartesianIndex without N, and 2 would be a bit misleading
261
    i::Int
262
    j::Int
263
end
264
to_index(i::SCartesianIndex2) = i
×
265

266
struct SCartesianIndices2{K,R<:AbstractUnitRange{Int}} <: AbstractMatrix{SCartesianIndex2{K}}
267
    indices2::R
268
end
269
SCartesianIndices2{K}(indices2::AbstractUnitRange{Int}) where {K} = (@assert K::Int > 1; SCartesianIndices2{K,typeof(indices2)}(indices2))
×
270

271
eachindex(::IndexSCartesian2{K}, A::ReshapedReinterpretArray) where {K} = SCartesianIndices2{K}(eachindex(IndexLinear(), parent(A)))
×
272
@inline function eachindex(style::IndexSCartesian2{K}, A::AbstractArray, B::AbstractArray...) where {K}
×
273
    iter = eachindex(style, A)
×
274
    _all_match_first(C->eachindex(style, C), iter, B...) || throw_eachindex_mismatch_indices(IndexSCartesian2{K}(), axes(A), axes.(B)...)
×
275
    return iter
×
276
end
277

278
size(iter::SCartesianIndices2{K}) where K = (K, length(iter.indices2))
×
279
axes(iter::SCartesianIndices2{K}) where K = (OneTo(K), iter.indices2)
×
280

281
first(iter::SCartesianIndices2{K}) where {K} = SCartesianIndex2{K}(1, first(iter.indices2))
×
282
last(iter::SCartesianIndices2{K}) where {K}  = SCartesianIndex2{K}(K, last(iter.indices2))
×
283

284
@inline function getindex(iter::SCartesianIndices2{K}, i::Int, j::Int) where {K}
×
285
    @boundscheck checkbounds(iter, i, j)
×
286
    return SCartesianIndex2{K}(i, iter.indices2[j])
×
287
end
288

289
function iterate(iter::SCartesianIndices2{K}) where {K}
×
290
    ret = iterate(iter.indices2)
×
291
    ret === nothing && return nothing
×
292
    item2, state2 = ret
×
293
    return SCartesianIndex2{K}(1, item2), (1, item2, state2)
×
294
end
295

296
function iterate(iter::SCartesianIndices2{K}, (state1, item2, state2)) where {K}
×
297
    if state1 < K
×
298
        item1 = state1 + 1
×
299
        return SCartesianIndex2{K}(item1, item2), (item1, item2, state2)
×
300
    end
301
    ret = iterate(iter.indices2, state2)
×
302
    ret === nothing && return nothing
×
303
    item2, state2 = ret
×
304
    return SCartesianIndex2{K}(1, item2), (1, item2, state2)
×
305
end
306

307
SimdLoop.simd_outer_range(iter::SCartesianIndices2) = iter.indices2
×
308
SimdLoop.simd_inner_length(::SCartesianIndices2{K}, ::Any) where K = K
×
309
@inline function SimdLoop.simd_index(::SCartesianIndices2{K}, Ilast::Int, I1::Int) where {K}
×
310
    SCartesianIndex2{K}(I1+1, Ilast)
×
311
end
312

313
_maybe_reshape(::IndexSCartesian2, A::ReshapedReinterpretArray, I...) = A
×
314

315
# fallbacks
316
function _getindex(::IndexSCartesian2, A::AbstractArray{T,N}, I::Vararg{Int, N}) where {T,N}
×
317
    @_propagate_inbounds_meta
×
318
    getindex(A, I...)
×
319
end
320
function _setindex!(::IndexSCartesian2, A::AbstractArray{T,N}, v, I::Vararg{Int, N}) where {T,N}
×
321
    @_propagate_inbounds_meta
×
322
    setindex!(A, v, I...)
×
323
end
324
# fallbacks for array types that use "pass-through" indexing (e.g., `IndexStyle(A) = IndexStyle(parent(A))`)
325
# but which don't handle SCartesianIndex2
326
function _getindex(::IndexSCartesian2, A::AbstractArray{T,N}, ind::SCartesianIndex2) where {T,N}
×
327
    @_propagate_inbounds_meta
×
328
    J = _ind2sub(tail(axes(A)), ind.j)
×
329
    getindex(A, ind.i, J...)
×
330
end
331
function _setindex!(::IndexSCartesian2, A::AbstractArray{T,N}, v, ind::SCartesianIndex2) where {T,N}
×
332
    @_propagate_inbounds_meta
×
333
    J = _ind2sub(tail(axes(A)), ind.j)
×
334
    setindex!(A, v, ind.i, J...)
×
335
end
336
eachindex(style::IndexSCartesian2, A::AbstractArray) = eachindex(style, parent(A))
×
337

338
## AbstractArray interface
339

340
parent(a::ReinterpretArray) = a.parent
4✔
341
dataids(a::ReinterpretArray) = dataids(a.parent)
×
342
unaliascopy(a::NonReshapedReinterpretArray{T}) where {T} = reinterpret(T, unaliascopy(a.parent))
×
343
unaliascopy(a::ReshapedReinterpretArray{T}) where {T} = reinterpret(reshape, T, unaliascopy(a.parent))
×
344

345
function size(a::NonReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
346
    psize = size(a.parent)
1✔
347
    size1 = issingletontype(T) ? psize[1] : div(psize[1]*sizeof(S), sizeof(T))
1✔
348
    tuple(size1, tail(psize)...)
1✔
349
end
350
function size(a::ReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
×
351
    psize = size(a.parent)
×
352
    sizeof(S) > sizeof(T) && return (div(sizeof(S), sizeof(T)), psize...)
×
353
    sizeof(S) < sizeof(T) && return tail(psize)
×
354
    return psize
×
355
end
356
size(a::NonReshapedReinterpretArray{T,0}) where {T} = ()
×
357

358
function axes(a::NonReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
359
    paxs = axes(a.parent)
2,449✔
360
    f, l = first(paxs[1]), length(paxs[1])
5✔
361
    size1 = issingletontype(T) ? l : div(l*sizeof(S), sizeof(T))
2,449✔
362
    tuple(oftype(paxs[1], f:f+size1-1), tail(paxs)...)
2,449✔
363
end
364
function axes(a::ReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
×
365
    paxs = axes(a.parent)
×
366
    sizeof(S) > sizeof(T) && return (OneTo(div(sizeof(S), sizeof(T))), paxs...)
×
367
    sizeof(S) < sizeof(T) && return tail(paxs)
×
368
    return paxs
×
369
end
370
axes(a::NonReshapedReinterpretArray{T,0}) where {T} = ()
×
371

372
has_offset_axes(a::ReinterpretArray) = has_offset_axes(a.parent)
×
373

374
elsize(::Type{<:ReinterpretArray{T}}) where {T} = sizeof(T)
4✔
375
cconvert(::Type{Ptr{T}}, a::ReinterpretArray{T,N,S} where N) where {T,S} = cconvert(Ptr{S}, a.parent)
2,416✔
376
unsafe_convert(::Type{Ptr{T}}, a::ReinterpretArray{T,N,S} where N) where {T,S} = Ptr{T}(unsafe_convert(Ptr{S},a.parent))
×
377

378
@propagate_inbounds function getindex(a::NonReshapedReinterpretArray{T,0,S}) where {T,S}
×
379
    if isprimitivetype(T) && isprimitivetype(S)
×
380
        reinterpret(T, a.parent[])
×
381
    else
382
        a[firstindex(a)]
×
383
    end
384
end
385

386
check_ptr_indexable(a::ReinterpretArray, sz = elsize(a)) = check_ptr_indexable(parent(a), sz)
8✔
387
check_ptr_indexable(a::ReshapedArray, sz) = check_ptr_indexable(parent(a), sz)
×
388
check_ptr_indexable(a::FastContiguousSubArray, sz) = check_ptr_indexable(parent(a), sz)
×
389
check_ptr_indexable(a::Array, sz) = sizeof(eltype(a)) !== sz
×
390
check_ptr_indexable(a::Memory, sz) = true
×
391
check_ptr_indexable(a::AbstractArray, sz) = false
×
392

393
@propagate_inbounds getindex(a::ReinterpretArray) = a[firstindex(a)]
×
394

395
@propagate_inbounds isassigned(a::ReinterpretArray, inds::Integer...) = checkbounds(Bool, a, inds...) && (check_ptr_indexable(a) || _isassigned_ra(a, inds...))
×
396
@propagate_inbounds isassigned(a::ReinterpretArray, inds::SCartesianIndex2) = isassigned(a.parent, inds.j)
×
397
@propagate_inbounds _isassigned_ra(a::ReinterpretArray, inds...) = true # that is not entirely true, but computing exactly which indexes will be accessed in the parent requires a lot of duplication from the _getindex_ra code
×
398

399
@propagate_inbounds function getindex(a::ReinterpretArray{T,N,S}, inds::Vararg{Int, N}) where {T,N,S}
400
    check_readable(a)
4✔
401
    check_ptr_indexable(a) && return _getindex_ptr(a, inds...)
2,416✔
402
    _getindex_ra(a, inds[1], tail(inds))
×
403
end
404

405
@propagate_inbounds function getindex(a::ReinterpretArray{T,N,S}, i::Int) where {T,N,S}
×
406
    check_readable(a)
×
407
    check_ptr_indexable(a) && return _getindex_ptr(a, i)
×
408
    if isa(IndexStyle(a), IndexLinear)
×
409
        return _getindex_ra(a, i, ())
×
410
    end
411
    # Convert to full indices here, to avoid needing multiple conversions in
412
    # the loop in _getindex_ra
413
    inds = _to_subscript_indices(a, i)
×
414
    isempty(inds) ? _getindex_ra(a, 1, ()) : _getindex_ra(a, inds[1], tail(inds))
×
415
end
416

417
@propagate_inbounds function getindex(a::ReshapedReinterpretArray{T,N,S}, ind::SCartesianIndex2) where {T,N,S}
×
418
    check_readable(a)
×
419
    s = Ref{S}(a.parent[ind.j])
×
420
    tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
×
421
    GC.@preserve s return unsafe_load(tptr, ind.i)
×
422
end
423

424
@inline function _getindex_ptr(a::ReinterpretArray{T}, inds...) where {T}
425
    @boundscheck checkbounds(a, inds...)
2,416✔
426
    li = _to_linear_index(a, inds...)
4✔
427
    ap = cconvert(Ptr{T}, a)
2,416✔
428
    p = unsafe_convert(Ptr{T}, ap) + sizeof(T) * (li - 1)
2,416✔
429
    GC.@preserve ap return unsafe_load(p)
2,416✔
430
end
431

432
@propagate_inbounds function _getindex_ra(a::NonReshapedReinterpretArray{T,N,S}, i1::Int, tailinds::TT) where {T,N,S,TT}
433
    # Make sure to match the scalar reinterpret if that is applicable
434
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
×
435
        if issingletontype(T) # singleton types
×
436
            @boundscheck checkbounds(a, i1, tailinds...)
×
437
            return T.instance
×
438
        end
439
        return reinterpret(T, a.parent[i1, tailinds...])
×
440
    else
441
        @boundscheck checkbounds(a, i1, tailinds...)
×
442
        ind_start, sidx = divrem((i1-1)*sizeof(T), sizeof(S))
×
443
        # Optimizations that avoid branches
444
        if sizeof(T) % sizeof(S) == 0
×
445
            # T is bigger than S and contains an integer number of them
446
            n = sizeof(T) ÷ sizeof(S)
×
447
            t = Ref{T}()
×
448
            GC.@preserve t begin
×
449
                sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
×
450
                for i = 1:n
×
451
                     s = a.parent[ind_start + i, tailinds...]
×
452
                     unsafe_store!(sptr, s, i)
×
453
                end
×
454
            end
455
            return t[]
×
456
        elseif sizeof(S) % sizeof(T) == 0
×
457
            # S is bigger than T and contains an integer number of them
458
            s = Ref{S}(a.parent[ind_start + 1, tailinds...])
×
459
            GC.@preserve s begin
×
460
                tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
×
461
                return unsafe_load(tptr + sidx)
×
462
            end
463
        else
464
            i = 1
×
465
            nbytes_copied = 0
×
466
            # This is a bit complicated to deal with partial elements
467
            # at both the start and the end. LLVM will fold as appropriate,
468
            # once it knows the data layout
469
            s = Ref{S}()
×
470
            t = Ref{T}()
×
471
            GC.@preserve s t begin
×
472
                sptr = Ptr{S}(unsafe_convert(Ref{S}, s))
×
473
                tptr = Ptr{T}(unsafe_convert(Ref{T}, t))
×
474
                while nbytes_copied < sizeof(T)
×
475
                    s[] = a.parent[ind_start + i, tailinds...]
×
476
                    nb = min(sizeof(S) - sidx, sizeof(T)-nbytes_copied)
×
477
                    memcpy(tptr + nbytes_copied, sptr + sidx, nb)
×
478
                    nbytes_copied += nb
×
479
                    sidx = 0
×
480
                    i += 1
×
481
                end
×
482
            end
483
            return t[]
×
484
        end
485
    end
486
end
487

488
@propagate_inbounds function _getindex_ra(a::ReshapedReinterpretArray{T,N,S}, i1::Int, tailinds::TT) where {T,N,S,TT}
×
489
    # Make sure to match the scalar reinterpret if that is applicable
490
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
×
491
        if issingletontype(T) # singleton types
×
492
            @boundscheck checkbounds(a, i1, tailinds...)
×
493
            return T.instance
×
494
        end
495
        return reinterpret(T, a.parent[i1, tailinds...])
×
496
    end
497
    @boundscheck checkbounds(a, i1, tailinds...)
×
498
    if sizeof(T) >= sizeof(S)
×
499
        t = Ref{T}()
×
500
        GC.@preserve t begin
×
501
            sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
×
502
            if sizeof(T) > sizeof(S)
×
503
                # Extra dimension in the parent array
504
                n = sizeof(T) ÷ sizeof(S)
×
505
                if isempty(tailinds) && IndexStyle(a.parent) === IndexLinear()
×
506
                    offset = n * (i1 - firstindex(a))
×
507
                    for i = 1:n
×
508
                        s = a.parent[i + offset]
×
509
                        unsafe_store!(sptr, s, i)
×
510
                    end
×
511
                else
512
                    for i = 1:n
×
513
                        s = a.parent[i, i1, tailinds...]
×
514
                        unsafe_store!(sptr, s, i)
×
515
                    end
×
516
                end
517
            else
518
                # No extra dimension
519
                s = a.parent[i1, tailinds...]
×
520
                unsafe_store!(sptr, s)
×
521
            end
522
        end
523
        return t[]
×
524
    end
525
    # S is bigger than T and contains an integer number of them
526
    # n = sizeof(S) ÷ sizeof(T)
527
    s = Ref{S}()
×
528
    GC.@preserve s begin
×
529
        tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
×
530
        s[] = a.parent[tailinds...]
×
531
        return unsafe_load(tptr, i1)
×
532
    end
533
end
534

535
@propagate_inbounds function setindex!(a::NonReshapedReinterpretArray{T,0,S}, v) where {T,S}
×
536
    if isprimitivetype(S) && isprimitivetype(T)
×
537
        a.parent[] = reinterpret(S, v)
×
538
        return a
×
539
    end
540
    setindex!(a, v, firstindex(a))
×
541
end
542

543
@propagate_inbounds setindex!(a::ReinterpretArray, v) = setindex!(a, v, firstindex(a))
×
544

545
@propagate_inbounds function setindex!(a::ReinterpretArray{T,N,S}, v, inds::Vararg{Int, N}) where {T,N,S}
546
    check_writable(a)
×
547
    check_ptr_indexable(a) && return _setindex_ptr!(a, v, inds...)
×
548
    _setindex_ra!(a, v, inds[1], tail(inds))
×
549
end
550

551
@propagate_inbounds function setindex!(a::ReinterpretArray{T,N,S}, v, i::Int) where {T,N,S}
×
552
    check_writable(a)
×
553
    check_ptr_indexable(a) && return _setindex_ptr!(a, v, i)
×
554
    if isa(IndexStyle(a), IndexLinear)
×
555
        return _setindex_ra!(a, v, i, ())
×
556
    end
557
    inds = _to_subscript_indices(a, i)
×
558
    _setindex_ra!(a, v, inds[1], tail(inds))
×
559
end
560

561
@propagate_inbounds function setindex!(a::ReshapedReinterpretArray{T,N,S}, v, ind::SCartesianIndex2) where {T,N,S}
×
562
    check_writable(a)
×
563
    v = convert(T, v)::T
×
564
    s = Ref{S}(a.parent[ind.j])
×
565
    GC.@preserve s begin
×
566
        tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
×
567
        unsafe_store!(tptr, v, ind.i)
×
568
    end
569
    a.parent[ind.j] = s[]
×
570
    return a
×
571
end
572

573
@inline function _setindex_ptr!(a::ReinterpretArray{T}, v, inds...) where {T}
×
574
    @boundscheck checkbounds(a, inds...)
×
575
    li = _to_linear_index(a, inds...)
×
576
    ap = cconvert(Ptr{T}, a)
×
577
    p = unsafe_convert(Ptr{T}, ap) + sizeof(T) * (li - 1)
×
578
    GC.@preserve ap unsafe_store!(p, v)
×
579
    return a
×
580
end
581

582
@propagate_inbounds function _setindex_ra!(a::NonReshapedReinterpretArray{T,N,S}, v, i1::Int, tailinds::TT) where {T,N,S,TT}
583
    v = convert(T, v)::T
×
584
    # Make sure to match the scalar reinterpret if that is applicable
585
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
×
586
        if issingletontype(T) # singleton types
×
587
            @boundscheck checkbounds(a, i1, tailinds...)
×
588
            # setindex! is a noop except for the index check
589
        else
590
            setindex!(a.parent, reinterpret(S, v), i1, tailinds...)
×
591
        end
592
    else
593
        @boundscheck checkbounds(a, i1, tailinds...)
×
594
        ind_start, sidx = divrem((i1-1)*sizeof(T), sizeof(S))
×
595
        # Optimizations that avoid branches
596
        if sizeof(T) % sizeof(S) == 0
×
597
            # T is bigger than S and contains an integer number of them
598
            t = Ref{T}(v)
×
599
            GC.@preserve t begin
×
600
                sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
×
601
                n = sizeof(T) ÷ sizeof(S)
×
602
                for i = 1:n
×
603
                    s = unsafe_load(sptr, i)
×
604
                    a.parent[ind_start + i, tailinds...] = s
×
605
                end
×
606
            end
607
        elseif sizeof(S) % sizeof(T) == 0
×
608
            # S is bigger than T and contains an integer number of them
609
            s = Ref{S}(a.parent[ind_start + 1, tailinds...])
×
610
            GC.@preserve s begin
×
611
                tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
×
612
                unsafe_store!(tptr + sidx, v)
×
613
                a.parent[ind_start + 1, tailinds...] = s[]
×
614
            end
615
        else
616
            t = Ref{T}(v)
×
617
            s = Ref{S}()
×
618
            GC.@preserve t s begin
×
619
                tptr = Ptr{UInt8}(unsafe_convert(Ref{T}, t))
×
620
                sptr = Ptr{UInt8}(unsafe_convert(Ref{S}, s))
×
621
                nbytes_copied = 0
×
622
                i = 1
×
623
                # Deal with any partial elements at the start. We'll have to copy in the
624
                # element from the original array and overwrite the relevant parts
625
                if sidx != 0
×
626
                    s[] = a.parent[ind_start + i, tailinds...]
×
627
                    nb = min((sizeof(S) - sidx) % UInt, sizeof(T) % UInt)
×
628
                    memcpy(sptr + sidx, tptr, nb)
×
629
                    nbytes_copied += nb
×
630
                    a.parent[ind_start + i, tailinds...] = s[]
×
631
                    i += 1
×
632
                    sidx = 0
×
633
                end
634
                # Deal with the main body of elements
635
                while nbytes_copied < sizeof(T) && (sizeof(T) - nbytes_copied) > sizeof(S)
×
636
                    nb = min(sizeof(S), sizeof(T) - nbytes_copied)
×
637
                    memcpy(sptr, tptr + nbytes_copied, nb)
×
638
                    nbytes_copied += nb
×
639
                    a.parent[ind_start + i, tailinds...] = s[]
×
640
                    i += 1
×
641
                end
×
642
                # Deal with trailing partial elements
643
                if nbytes_copied < sizeof(T)
×
644
                    s[] = a.parent[ind_start + i, tailinds...]
×
645
                    nb = min(sizeof(S), sizeof(T) - nbytes_copied)
×
646
                    memcpy(sptr, tptr + nbytes_copied, nb)
×
647
                    a.parent[ind_start + i, tailinds...] = s[]
×
648
                end
649
            end
650
        end
651
    end
652
    return a
×
653
end
654

655
@propagate_inbounds function _setindex_ra!(a::ReshapedReinterpretArray{T,N,S}, v, i1::Int, tailinds::TT) where {T,N,S,TT}
    v = convert(T, v)::T
    # Make sure to match the scalar reinterpret if that is applicable:
    # when both element types are primitive (no fields) and the same size,
    # the store can be delegated directly to the parent array.
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            # setindex! is a noop except for the index check
        else
            setindex!(a.parent, reinterpret(S, v), i1, tailinds...)
        end
        # Return here so we do not fall through to the byte-copy path below,
        # which would redundantly re-perform the same store.
        return a
    end
    @boundscheck checkbounds(a, i1, tailinds...)
    if sizeof(T) >= sizeof(S)
        t = Ref{T}(v)
        GC.@preserve t begin
            sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
            if sizeof(T) > sizeof(S)
                # Extra dimension in the parent array: one T covers n consecutive
                # S elements along the parent's leading dimension.
                n = sizeof(T) ÷ sizeof(S)
                if isempty(tailinds) && IndexStyle(a.parent) === IndexLinear()
                    offset = n * (i1 - firstindex(a))
                    for i = 1:n
                        s = unsafe_load(sptr, i)
                        a.parent[i + offset] = s
                    end
                else
                    for i = 1:n
                        s = unsafe_load(sptr, i)
                        a.parent[i, i1, tailinds...] = s
                    end
                end
            else # sizeof(T) == sizeof(S)
                # No extra dimension
                s = unsafe_load(sptr)
                a.parent[i1, tailinds...] = s
            end
        end
    else
        # S is bigger than T and contains an integer number of them:
        # read-modify-write the single parent element selected by tailinds,
        # storing v at slot i1 inside it.
        s = Ref{S}()
        GC.@preserve s begin
            tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
            s[] = a.parent[tailinds...]
            unsafe_store!(tptr, v, i1)
            a.parent[tailinds...] = s[]
        end
    end
    return a
end
704

705
# Padding
706
"""
    Padding(offset, size)

Describes one run of padding bytes inside the layout of an isbits type.
"""
struct Padding
    offset::Int # 0-indexed offset of the next valid byte; sizeof(T) indicates trailing padding
    size::Int   # bytes of padding before a valid byte
end
710
function intersect(p1::Padding, p2::Padding)
    # Overlap of two padding runs, as a (possibly zero-size) Padding.
    lo = max(p1.offset, p2.offset)
    hi = min(p1.offset + p1.size, p2.offset + p2.size)
    return Padding(lo, max(0, hi - lo))
end
715

716
# Thrown when two types' padding layouts are incompatible for reinterpretation.
struct PaddingError <: Exception
    S::Type
    T::Type
end
720

721
function showerror(io::IO, err::PaddingError)
    # Explain that the two layouts' padding bytes do not line up.
    print(io, "Padding of type $(err.S) is not compatible with type $(err.T).")
end
724

725
"""
    CyclePadding(padding, total_size)

Cycles an iterator of `Padding` structs, restarting the padding at `total_size`.
E.g. if `padding` is all the padding in a struct and `total_size` is the total
aligned size of that array, `CyclePadding` will correspond to the padding in an
infinite vector of such structs.
"""
struct CyclePadding{P}
    padding::P      # iterable of `Padding` records for one aligned element
    total_size::Int # aligned element size; padding offsets repeat with this period
end
737
# Iteration traits: a CyclePadding yields `Padding` records forever (it cycles),
# and is empty exactly when the underlying padding collection is empty.
eltype(::Type{<:CyclePadding}) = Padding
IteratorSize(::Type{<:CyclePadding}) = IsInfinite()
isempty(cp::CyclePadding) = isempty(cp.padding)
740
function iterate(cp::CyclePadding)
    # Start the cycle at offset shift 0; an empty padding list never yields.
    first_item = iterate(cp.padding)
    first_item === nothing && return nothing
    pad, inner = first_item
    return pad, (0, inner)
end
745
function iterate(cp::CyclePadding, state::Tuple)
    # state[1] is the accumulated byte shift for the current repetition;
    # the remainder of the tuple (possibly empty) is the inner iterator state.
    shift = state[1]
    inner = iterate(cp.padding, tail(state)...)
    if inner === nothing
        # Exhausted one period: wrap around, advancing the shift by total_size.
        return iterate(cp, (shift + cp.total_size,))
    end
    pad = inner[1]
    return Padding(pad.offset + shift, pad.size), (shift, tail(inner)...)
end
750

751
"""
    Compute the location of padding in an isbits datatype. Recursive over the fields of that type.
"""
@assume_effects :foldable function padding(T::DataType, baseoffset::Int = 0)
    pads = Padding[]
    # End (exclusive) of the previously seen field, in absolute bytes.
    last_end::Int = baseoffset
    for i = 1:fieldcount(T)
        offset = baseoffset + Int(fieldoffset(T, i))
        fT = fieldtype(T, i)
        # Padding interior to the field itself.
        append!(pads, padding(fT, offset))
        # Any gap between the previous field's end and this field's start is padding.
        if offset != last_end
            push!(pads, Padding(offset, offset-last_end))
        end
        last_end = offset + sizeof(fT)
    end
    # Trailing padding after the last field, up to sizeof(T).
    if 0 < last_end - baseoffset < sizeof(T)
        push!(pads, Padding(baseoffset + sizeof(T), sizeof(T) - last_end + baseoffset))
    end
    return Core.svec(pads...)
end
771

772
function CyclePadding(T::DataType)
    # Element stride in an array is sizeof(T) rounded up to the alignment.
    align = datatype_alignment(T)
    sz = sizeof(T)
    aligned_sz = sz + (align - (sz % align)) % align
    pads = padding(T)
    if sz != aligned_sz
        # Alignment introduces trailing padding between consecutive elements.
        pads = Core.svec(pads..., Padding(sz, aligned_sz - sz))
    end
    return CyclePadding(pads, aligned_sz)
end
781

782
# Return `true` when every padding byte of `T` lands on a padding byte of `S`
# over one full lcm(sizeof(S), sizeof(T)) period, i.e. when an array of `S`
# can safely be viewed as an array of `T` without exposing data where `T`
# expects padding.
@assume_effects :total function array_subpadding(S, T)
    lcm_size = lcm(sizeof(S), sizeof(T))
    s, t = CyclePadding(S), CyclePadding(T)
    checked_size = 0
    # use of Stateful harms inference and makes this vulnerable to invalidation
    (pad, tstate) = let
        it = iterate(t)
        it === nothing && return true   # T has no padding: nothing to check
        it
    end
    (ps, sstate) = let
        it = iterate(s)
        it === nothing && return false  # T has padding but S has none
        it
    end
    while checked_size < lcm_size
        while true
            # See if there's corresponding padding in S
            ps.offset > pad.offset && return false
            intersect(ps, pad) == pad && break
            ps, sstate = iterate(s, sstate)
        end
        checked_size = pad.offset + pad.size
        pad, tstate = iterate(t, tstate)
    end
    return true
end
809

810
@assume_effects :foldable function struct_subpadding(::Type{Out}, ::Type{In}) where {Out, In}
    # Byte-copy between the two types is valid iff their padding agrees exactly.
    return padding(Out) == padding(In)
end
813

814
@assume_effects :foldable function packedsize(::Type{T}) where T
    # Data bytes = total size minus the sum of all padding runs.
    padbytes = sum((p.size for p ∈ padding(T)), init = 0)
    return sizeof(T) - padbytes
end
818

819
# A type is "packed" when its layout contains no padding bytes at all.
@assume_effects :foldable ispacked(::Type{T}) where T = isempty(padding(T))
×
820

821
# Copy the data bytes of a (possibly padded) `In` at `ptr_in` into a fully
# packed representation at `ptr_out`, field by field, recursing into nested
# padded field types. Both pointers must remain valid for the duration
# (callers use GC.@preserve).
function _copytopacked!(ptr_out::Ptr{Out}, ptr_in::Ptr{In}) where {Out, In}
    writeoffset = 0
    for i ∈ 1:fieldcount(In)
        readoffset = fieldoffset(In, i)
        fT = fieldtype(In, i)
        if ispacked(fT)
            # Field has no padding: copy its bytes verbatim.
            readsize = sizeof(fT)
            memcpy(ptr_out + writeoffset, ptr_in + readoffset, readsize)
            writeoffset += readsize
        else # nested padded type
            _copytopacked!(ptr_out + writeoffset, Ptr{fT}(ptr_in + readoffset))
            writeoffset += packedsize(fT)
        end
    end
end
836

837
# Inverse of `_copytopacked!`: scatter the packed bytes at `ptr_in` into the
# (possibly padded) `Out` layout at `ptr_out`, recursing into nested padded
# field types. Padding bytes of `Out` are left unwritten.
function _copyfrompacked!(ptr_out::Ptr{Out}, ptr_in::Ptr{In}) where {Out, In}
    readoffset = 0
    for i ∈ 1:fieldcount(Out)
        writeoffset = fieldoffset(Out, i)
        fT = fieldtype(Out, i)
        if ispacked(fT)
            # Field has no padding: copy its bytes verbatim.
            writesize = sizeof(fT)
            memcpy(ptr_out + writeoffset, ptr_in + readoffset, writesize)
            readoffset += writesize
        else # nested padded type
            _copyfrompacked!(Ptr{fT}(ptr_out + writeoffset), ptr_in + readoffset)
            readoffset += packedsize(fT)
        end
    end
end
852

853
# Scalar reinterpret between isbits types that may contain padding.
# Requires both types to have the same number of data (non-padding) bytes.
# When the padding layouts match, this is a plain memcpy; otherwise it falls
# back to the padding-aware copy in `_reinterpret_padding`.
@inline function _reinterpret(::Type{Out}, x::In) where {Out, In}
    # handle non-primitive types
    isbitstype(Out) || throw(ArgumentError("Target type for `reinterpret` must be isbits"))
    isbitstype(In) || throw(ArgumentError("Source type for `reinterpret` must be isbits"))
    inpackedsize = packedsize(In)
    outpackedsize = packedsize(Out)
    inpackedsize == outpackedsize ||
        throw(ArgumentError(LazyString("Packed sizes of types ", Out, " and ", In,
            " do not match; got ", outpackedsize, " and ", inpackedsize, ", respectively.")))
    in = Ref{In}(x)
    out = Ref{Out}()
    if struct_subpadding(Out, In)
        # if packed the same, just copy
        GC.@preserve in out begin
            ptr_in = unsafe_convert(Ptr{In}, in)
            ptr_out = unsafe_convert(Ptr{Out}, out)
            memcpy(ptr_out, ptr_in, sizeof(Out))
        end
        return out[]
    else
        # mismatched padding
        return _reinterpret_padding(Out, x)
    end
end
877

878
# If the code reaches this part, it needs to handle padding and is unlikely
879
# to compile to a noop. Therefore, we don't forcibly inline it.
880
# Padding-aware slow path for `_reinterpret`: copies only data bytes between
# mismatched layouts, using a packed intermediate buffer when neither side is
# itself packed.
function _reinterpret_padding(::Type{Out}, x::In) where {Out, In}
    inpackedsize = packedsize(In)
    in = Ref{In}(x)
    out = Ref{Out}()
    GC.@preserve in out begin
        ptr_in = unsafe_convert(Ptr{In}, in)
        ptr_out = unsafe_convert(Ptr{Out}, out)

        if fieldcount(In) > 0 && ispacked(Out)
            # Destination is packed: gather In's data bytes straight into it.
            _copytopacked!(ptr_out, ptr_in)
        elseif fieldcount(Out) > 0 && ispacked(In)
            # Source is packed: scatter its bytes into Out's padded layout.
            _copyfrompacked!(ptr_out, ptr_in)
        else
            # Neither side is packed: go through a packed byte buffer.
            packed = Ref{NTuple{inpackedsize, UInt8}}()
            GC.@preserve packed begin
                ptr_packed = unsafe_convert(Ptr{NTuple{inpackedsize, UInt8}}, packed)
                _copytopacked!(ptr_packed, ptr_in)
                _copyfrompacked!(ptr_out, ptr_packed)
            end
        end
    end
    return out[]
end
903

904

905
# Reductions with IndexSCartesian2
906

907
# mapreduce entry point for arrays indexed with IndexSCartesian2: dispatch to
# the pairwise implementation, or to the empty-reduction handler when the
# second (column) extent is zero.
function _mapreduce(f::F, op::OP, style::IndexSCartesian2{K}, A::AbstractArrayOrBroadcasted) where {F,OP,K}
    inds = eachindex(style, A)
    ncols = size(inds)[2]
    ncols == 0 && return mapreduce_empty_iter(f, op, A, IteratorEltype(A))
    return mapreduce_impl(f, op, A, first(inds), last(inds))
end
916

917
# Pairwise mapreduce over a block of SCartesianIndex2 indices: small column
# ranges are reduced sequentially; larger ones are split at the middle column
# and combined, bounding floating-point error accumulation.
# NOTE(review): the sequential branch reads A[ifirst] and A[SCI(2, ifirst.j)]
# first — it appears to assume ifirst.i == 1 and K >= 2; confirm with callers.
@noinline function mapreduce_impl(f::F, op::OP, A::AbstractArrayOrBroadcasted,
                                  ifirst::SCI, ilast::SCI, blksize::Int) where {F,OP,SCI<:SCartesianIndex2{K}} where K
    if ilast.j - ifirst.j < blksize
        # sequential portion
        @inbounds a1 = A[ifirst]
        @inbounds a2 = A[SCI(2,ifirst.j)]
        v = op(f(a1), f(a2))
        @simd for i = ifirst.i + 2 : K
            @inbounds ai = A[SCI(i,ifirst.j)]
            v = op(v, f(ai))
        end
        # Remaining columns
        for j = ifirst.j+1 : ilast.j
            @simd for i = 1:K
                @inbounds ai = A[SCI(i,j)]
                v = op(v, f(ai))
            end
        end
        return v
    else
        # pairwise portion
        jmid = ifirst.j + (ilast.j - ifirst.j) >> 1
        v1 = mapreduce_impl(f, op, A, ifirst, SCI(K,jmid), blksize)
        v2 = mapreduce_impl(f, op, A, SCI(1,jmid+1), ilast, blksize)
        return op(v1, v2)
    end
end
944

945
# Convenience method: supply the default pairwise block size.
function mapreduce_impl(f::F, op::OP, A::AbstractArrayOrBroadcasted, ifirst::SCartesianIndex2, ilast::SCartesianIndex2) where {F,OP}
    return mapreduce_impl(f, op, A, ifirst, ilast, pairwise_blocksize(f, op))
end
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc