• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

JuliaLang / julia / #38162

06 Aug 2025 08:25PM UTC coverage: 25.688% (-43.6%) from 69.336%
#38162

push

local

web-flow
fix runtime cglobal builtin function implementation (#59210)

This had failed to be updated for the LazyLibrary changes to codegen.

12976 of 50513 relevant lines covered (25.69%)

676965.51 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

2.41
/base/reinterpretarray.jl
1
# This file is a part of Julia. License is MIT: https://julialang.org/license
2

3
"""
4
Gives a reinterpreted view (of element type T) of the underlying array (of element type S).
5
If the size of `T` differs from the size of `S`, the array will be compressed/expanded in
6
the first dimension. The variant `reinterpret(reshape, T, a)` instead adds or consumes the first dimension
7
depending on the ratio of element sizes.
8
"""
9
struct ReinterpretArray{T,N,S,A<:AbstractArray{S},IsReshaped} <: AbstractArray{T, N}
10
    parent::A
11
    readable::Bool
12
    writable::Bool
13

14
    function throwbits(S::Type, T::Type, U::Type)
×
15
        @noinline
×
16
        throw(ArgumentError(LazyString("cannot reinterpret `", S, "` as `", T, "`, type `", U, "` is not a bits type")))
×
17
    end
18
    function throwsize0(S::Type, T::Type, msg)
×
19
        @noinline
×
20
        throw(ArgumentError(LazyString("cannot reinterpret a zero-dimensional `", S, "` array to `", T,
×
21
            "` which is of a ", msg, " size")))
22
    end
23
    function throwsingleton(S::Type, T::Type)
×
24
        @noinline
×
25
        throw(ArgumentError(LazyString("cannot reinterpret a `", S, "` array to `", T, "` which is a singleton type")))
×
26
    end
27

28
    global reinterpret
29

30
    @doc """
31
        reinterpret(T::DataType, A::AbstractArray)
32

33
    Construct a view of the array with the same binary data as the given
34
    array, but with `T` as element type.
35

36
    This function also works on "lazy" array whose elements are not computed until they are explicitly retrieved.
37
    For instance, `reinterpret` on the range `1:6` works similarly as on the dense vector `collect(1:6)`:
38

39
    ```jldoctest
40
    julia> reinterpret(Float32, UInt32[1 2 3 4 5])
41
    1×5 reinterpret(Float32, ::Matrix{UInt32}):
42
     1.0f-45  3.0f-45  4.0f-45  6.0f-45  7.0f-45
43

44
    julia> reinterpret(Complex{Int}, 1:6)
45
    3-element reinterpret(Complex{$Int}, ::UnitRange{$Int}):
46
     1 + 2im
47
     3 + 4im
48
     5 + 6im
49
    ```
50

51
    If the location of padding bits does not line up between `T` and `eltype(A)`, the resulting array will be
52
    read-only or write-only, to prevent invalid bits from being written to or read from, respectively.
53

54
    ```jldoctest
55
    julia> a = reinterpret(Tuple{UInt8, UInt32}, UInt32[1, 2])
56
    1-element reinterpret(Tuple{UInt8, UInt32}, ::Vector{UInt32}):
57
     (0x01, 0x00000002)
58

59
    julia> a[1] = 3
60
    ERROR: Padding of type Tuple{UInt8, UInt32} is not compatible with type UInt32.
61

62
    julia> b = reinterpret(UInt32, Tuple{UInt8, UInt32}[(0x01, 0x00000002)]); # showing will error
63

64
    julia> b[1]
65
    ERROR: Padding of type UInt32 is not compatible with type Tuple{UInt8, UInt32}.
66
    ```
67
    """
68
    function reinterpret(::Type{T}, a::A) where {T,N,S,A<:AbstractArray{S, N}}
69
        function thrownonint(S::Type, T::Type, dim)
×
70
            @noinline
×
71
            throw(ArgumentError(LazyString(
×
72
                "cannot reinterpret an `", S, "` array to `", T, "` whose first dimension has size `", dim,
73
                "`. The resulting array would have a non-integral first dimension.")))
74
        end
75
        function throwaxes1(S::Type, T::Type, ax1)
×
76
            @noinline
77
            throw(ArgumentError(LazyString("cannot reinterpret a `", S, "` array to `", T,
78
                "` when the first axis is ", ax1, ". Try reshaping first.")))
79
        end
80
        isbitstype(T) || throwbits(S, T, T)
×
81
        isbitstype(S) || throwbits(S, T, S)
×
82
        (N != 0 || sizeof(T) == sizeof(S)) || throwsize0(S, T, "different")
×
83
        if N != 0 && sizeof(S) != sizeof(T)
×
84
            ax1 = axes(a)[1]
8✔
85
            dim = length(ax1)
×
86
            if issingletontype(T)
×
87
                issingletontype(S) || throwsingleton(S, T)
×
88
            else
89
                rem(dim*sizeof(S),sizeof(T)) == 0 || thrownonint(S, T, dim)
8✔
90
            end
91
            first(ax1) == 1 || throwaxes1(S, T, ax1)
×
92
        end
93
        readable = array_subpadding(T, S)
×
94
        writable = array_subpadding(S, T)
×
95
        new{T, N, S, A, false}(a, readable, writable)
8✔
96
    end
97
    reinterpret(::Type{T}, a::AbstractArray{T}) where {T} = a
×
98

99
    # With reshaping
100
    function reinterpret(::typeof(reshape), ::Type{T}, a::A) where {T,S,A<:AbstractArray{S}}
×
101
        function throwintmult(S::Type, T::Type)
×
102
            @noinline
×
103
            throw(ArgumentError(LazyString("`reinterpret(reshape, T, a)` requires that one of `sizeof(T)` (got ",
×
104
                sizeof(T), ") and `sizeof(eltype(a))` (got ", sizeof(S), ") be an integer multiple of the other")))
105
        end
106
        function throwsize1(a::AbstractArray, T::Type)
×
107
            @noinline
×
108
            throw(ArgumentError(LazyString("`reinterpret(reshape, ", T, ", a)` where `eltype(a)` is ", eltype(a),
×
109
                " requires that `axes(a, 1)` (got ", axes(a, 1), ") be equal to 1:",
110
                sizeof(T) ÷ sizeof(eltype(a)), " (from the ratio of element sizes)")))
111
        end
112
        function throwfromsingleton(S, T)
×
113
            @noinline
×
114
            throw(ArgumentError(LazyString("`reinterpret(reshape, ", T, ", a)` where `eltype(a)` is ", S,
×
115
                " requires that ", T, " be a singleton type, since ", S, " is one")))
116
        end
117
        isbitstype(T) || throwbits(S, T, T)
×
118
        isbitstype(S) || throwbits(S, T, S)
×
119
        if sizeof(S) == sizeof(T)
×
120
            N = ndims(a)
×
121
        elseif sizeof(S) > sizeof(T)
×
122
            issingletontype(T) && throwsingleton(S, T)
×
123
            rem(sizeof(S), sizeof(T)) == 0 || throwintmult(S, T)
×
124
            N = ndims(a) + 1
×
125
        else
126
            issingletontype(S) && throwfromsingleton(S, T)
×
127
            rem(sizeof(T), sizeof(S)) == 0 || throwintmult(S, T)
×
128
            N = ndims(a) - 1
×
129
            N > -1 || throwsize0(S, T, "larger")
×
130
            axes(a, 1) == OneTo(sizeof(T) ÷ sizeof(S)) || throwsize1(a, T)
×
131
        end
132
        readable = array_subpadding(T, S)
×
133
        writable = array_subpadding(S, T)
×
134
        new{T, N, S, A, true}(a, readable, writable)
×
135
    end
136
    reinterpret(::typeof(reshape), ::Type{T}, a::AbstractArray{T}) where {T} = a
×
137
end
138

139
# Convenience aliases distinguishing the two flavors by the `IsReshaped` parameter.
ReshapedReinterpretArray{T,N,S,A<:AbstractArray{S}} = ReinterpretArray{T,N,S,A,true}
NonReshapedReinterpretArray{T,N,S,A<:AbstractArray{S, N}} = ReinterpretArray{T,N,S,A,false}
141

142
"""
143
    reinterpret(reshape, T, A::AbstractArray{S}) -> B
144

145
Change the type-interpretation of `A` while consuming or adding a "channel dimension."
146

147
If `sizeof(T) = n*sizeof(S)` for `n>1`, `A`'s first dimension must be
148
of size `n` and `B` lacks `A`'s first dimension. Conversely, if `sizeof(S) = n*sizeof(T)` for `n>1`,
149
`B` gets a new first dimension of size `n`. The dimensionality is unchanged if `sizeof(T) == sizeof(S)`.
150

151
!!! compat "Julia 1.6"
152
    This method requires at least Julia 1.6.
153

154
# Examples
155

156
```jldoctest
157
julia> A = [1 2; 3 4]
158
2×2 Matrix{$Int}:
159
 1  2
160
 3  4
161

162
julia> reinterpret(reshape, Complex{Int}, A)    # the result is a vector
163
2-element reinterpret(reshape, Complex{$Int}, ::Matrix{$Int}) with eltype Complex{$Int}:
164
 1 + 3im
165
 2 + 4im
166

167
julia> a = [(1,2,3), (4,5,6)]
168
2-element Vector{Tuple{$Int, $Int, $Int}}:
169
 (1, 2, 3)
170
 (4, 5, 6)
171

172
julia> reinterpret(reshape, Int, a)             # the result is a matrix
173
3×2 reinterpret(reshape, $Int, ::Vector{Tuple{$Int, $Int, $Int}}) with eltype $Int:
174
 1  4
175
 2  5
176
 3  6
177
```
178
"""
179
reinterpret(::typeof(reshape), T::Type, a::AbstractArray)
180

181
reinterpret(::Type{T}, a::NonReshapedReinterpretArray) where {T} = reinterpret(T, a.parent)
×
182
reinterpret(::typeof(reshape), ::Type{T}, a::ReshapedReinterpretArray) where {T} = reinterpret(reshape, T, a.parent)
×
183

184
# Definition of StridedArray: the family of array types with a strided memory
# layout, built up from dense arrays, contiguous views, reshapes, and reinterprets.
StridedFastContiguousSubArray{T,N,A<:DenseArray} = FastContiguousSubArray{T,N,A}
StridedReinterpretArray{T,N,A<:Union{DenseArray,StridedFastContiguousSubArray},IsReshaped} = ReinterpretArray{T,N,S,A,IsReshaped} where S
StridedReshapedArray{T,N,A<:Union{DenseArray,StridedFastContiguousSubArray,StridedReinterpretArray}} = ReshapedArray{T,N,A}
StridedSubArray{T,N,A<:Union{DenseArray,StridedReshapedArray,StridedReinterpretArray},
    I<:Tuple{Vararg{Union{RangeIndex, ReshapedUnitRange, AbstractCartesianIndex}}}} = SubArray{T,N,A,I}
StridedArray{T,N} = Union{DenseArray{T,N}, StridedSubArray{T,N}, StridedReshapedArray{T,N}, StridedReinterpretArray{T,N}}
StridedVector{T} = StridedArray{T,1}
StridedMatrix{T} = StridedArray{T,2}
StridedVecOrMat{T} = Union{StridedVector{T}, StridedMatrix{T}}
194

195
# Dense-layout wrappers have the canonical column-major strides.
strides(a::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}) = size_to_strides(1, size(a)...)
function stride(A::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}, k::Integer)
    # Strides past the last dimension equal the total length, as for dense arrays.
    return k ≤ ndims(A) ? strides(A)[k] : length(A)
end
198

199
function strides(a::ReinterpretArray{T,<:Any,S,<:AbstractArray{S},IsReshaped}) where {T,S,IsReshaped}
    # Fully contiguous views reduce to the canonical dense strides.
    _checkcontiguous(Bool, a) && return size_to_strides(1, size(a)...)
    pstrides = strides(parent(a))
    szT, szS = sizeof(T), sizeof(S)
    szT == szS && return pstrides # 0dim parent is also handled here.
    if IsReshaped && szT < szS
        # A channel dimension of stride 1 is prepended.
        return (1, _checked_strides(pstrides, szT, szS)...)
    end
    pstrides[1] == 1 || throw(ArgumentError("Parent must be contiguous in the 1st dimension!"))
    rest = _checked_strides(tail(pstrides), szT, szS)
    return IsReshaped ? rest : (1, rest...)
end
209

210
# Rescale parent strides `stp` (in units of elements of size `elp`) to units of
# elements of size `els`, throwing if any stride is not exactly divisible.
@inline function _checked_strides(stp::Tuple, els::Integer, elp::Integer)
    if elp > els && rem(elp, els) == 0
        # Parent elements are an exact multiple: simple integer scaling.
        factor = div(elp, els)
        return map(s -> factor * s, stp)
    end
    quotrems = map(s -> divrem(elp * s, els), stp)
    all(qr -> iszero(qr[2]), quotrems) ||
        throw(ArgumentError("Parent's strides could not be exactly divided!"))
    return map(first, quotrems)
end
220

221
# A reinterpret view is contiguous exactly when its parent is.
_checkcontiguous(::Type{Bool}, A::ReinterpretArray) = _checkcontiguous(Bool, parent(A))

# Allocation requests are forwarded to the parent's storage type.
similar(a::ReinterpretArray, T::Type, d::Dims) = similar(a.parent, T, d)
8✔
224

225
function check_readable(a::ReinterpretArray{T, N, S} where N) where {T,S}
    # See comment in check_writable for why both the cached flag and the
    # recomputed predicate are consulted.
    if !a.readable && !array_subpadding(T, S)
        throw(PaddingError(T, S))
    end
end
231

232
function check_writable(a::ReinterpretArray{T, N, S} where N) where {T,S}
    # `array_subpadding` is relatively expensive (compared to a simple arrayref),
    # so it is cached in the array. However, it is computable at compile time if
    # inference has the types available. By using this form of the check, we can
    # get the best of both worlds for the success case. If the types were not
    # available to inference, we simply need to check the field (relatively cheap)
    # and if they were we should be able to fold this check away entirely.
    if !a.writable && !array_subpadding(S, T)
        throw(PaddingError(T, S))
    end
end
243

244
## IndexStyle specializations

# For `reinterpret(reshape, T, a)` where we're adding a channel dimension and with
# `IndexStyle(a) == IndexLinear()`, it's advantageous to retain pseudo-linear indexing.
struct IndexSCartesian2{K} <: IndexStyle end   # K = sizeof(S) ÷ sizeof(T), a static-sized 2d cartesian iterator

# Non-reshaped views simply inherit the parent's indexing style.
IndexStyle(::Type{ReinterpretArray{T,N,S,A,false}}) where {T,N,S,A<:AbstractArray{S,N}} = IndexStyle(A)
function IndexStyle(::Type{ReinterpretArray{T,N,S,A,true}}) where {T,N,S,A<:AbstractArray{S}}
    # Only the splitting direction (several Ts per S) needs special treatment.
    sizeof(T) < sizeof(S) || return IndexStyle(A)
    return IndexStyle(A) === IndexLinear() ? IndexSCartesian2{sizeof(S) ÷ sizeof(T)}() : IndexCartesian()
end
IndexStyle(::IndexSCartesian2{K}, ::IndexSCartesian2{K}) where {K} = IndexSCartesian2{K}()
259

260
struct SCartesianIndex2{K}   # can't make <:AbstractCartesianIndex without N, and 2 would be a bit misleading
    i::Int  # position within a single parent element (1:K)
    j::Int  # linear index into the parent array
end
# Already a fully-resolved index; nothing to convert.
to_index(ind::SCartesianIndex2) = ind
×
265

266
# Matrix-shaped iterator of SCartesianIndex2{K}: K sub-positions by parent indices.
struct SCartesianIndices2{K,R<:AbstractUnitRange{Int}} <: AbstractMatrix{SCartesianIndex2{K}}
    indices2::R
end
SCartesianIndices2{K}(indices2::AbstractUnitRange{Int}) where {K} = (@assert K::Int > 1; SCartesianIndices2{K,typeof(indices2)}(indices2))
×
270

271
eachindex(::IndexSCartesian2{K}, A::ReshapedReinterpretArray) where {K} = SCartesianIndices2{K}(eachindex(IndexLinear(), parent(A)))
@inline function eachindex(style::IndexSCartesian2{K}, A::AbstractArray, B::AbstractArray...) where {K}
    # All arrays must agree on the shared iterator, else the axes mismatch.
    ref = eachindex(style, A)
    others = map(C->eachindex(style, C), B)
    all(==(ref), others) || throw_eachindex_mismatch_indices("axes", axes(A), map(axes, B)...)
    return ref
end
278

279
# AbstractMatrix interface: K rows of sub-positions, one column per parent index.
size(iter::SCartesianIndices2{K}) where K = (K, length(iter.indices2))
axes(iter::SCartesianIndices2{K}) where K = (OneTo(K), iter.indices2)

first(iter::SCartesianIndices2{K}) where {K} = SCartesianIndex2{K}(1, first(iter.indices2))
last(iter::SCartesianIndices2{K}) where {K}  = SCartesianIndex2{K}(K, last(iter.indices2))

@inline function getindex(iter::SCartesianIndices2{K}, i::Int, j::Int) where {K}
    @boundscheck checkbounds(iter, i, j)
    return SCartesianIndex2{K}(i, iter.indices2[j])
end
289

290
function iterate(iter::SCartesianIndices2{K}) where {K}
    next = iterate(iter.indices2)
    next === nothing && return nothing
    col, colstate = next
    # State is (row, column value, column iterator state).
    return SCartesianIndex2{K}(1, col), (1, col, colstate)
end

function iterate(iter::SCartesianIndices2{K}, (row, col, colstate)) where {K}
    if row < K
        # Advance within the current column.
        nextrow = row + 1
        return SCartesianIndex2{K}(nextrow, col), (nextrow, col, colstate)
    end
    # Column exhausted: move to the next parent index.
    next = iterate(iter.indices2, colstate)
    next === nothing && return nothing
    col, colstate = next
    return SCartesianIndex2{K}(1, col), (1, col, colstate)
end
307

308
# @simd support: the parent index is the outer loop, the K sub-positions the inner.
SimdLoop.simd_outer_range(iter::SCartesianIndices2) = iter.indices2
SimdLoop.simd_inner_length(::SCartesianIndices2{K}, ::Any) where K = K
@inline function SimdLoop.simd_index(::SCartesianIndices2{K}, Ilast::Int, I1::Int) where {K}
    return SCartesianIndex2{K}(I1+1, Ilast)
end

_maybe_reshape(::IndexSCartesian2, A::AbstractArray, I...) = _maybe_reshape(IndexCartesian(), A, I...)
_maybe_reshape(::IndexSCartesian2, A::ReshapedReinterpretArray, I...) = A
×
316

317
# fallbacks: integer indexing under the pseudo-linear style defers to cartesian.
function _getindex(::IndexSCartesian2, A::AbstractArray, I::Vararg{Int, N}) where {N}
    @_propagate_inbounds_meta
    return _getindex(IndexCartesian(), A, I...)
end
function _setindex!(::IndexSCartesian2, A::AbstractArray, v, I::Vararg{Int, N}) where {N}
    @_propagate_inbounds_meta
    return _setindex!(IndexCartesian(), A, v, I...)
end
326
# fallbacks for array types that use "pass-through" indexing (e.g., `IndexStyle(A) = IndexStyle(parent(A))`)
# but which don't handle SCartesianIndex2
function _getindex(::IndexSCartesian2, A::AbstractArray{T,N}, ind::SCartesianIndex2) where {T,N}
    @_propagate_inbounds_meta
    # Expand the pseudo-linear column index into trailing subscripts.
    subs = _ind2sub(tail(axes(A)), ind.j)
    return getindex(A, ind.i, subs...)
end

function _getindex(::IndexSCartesian2{2}, A::AbstractArray{T,2}, ind::SCartesianIndex2) where {T}
    @_propagate_inbounds_meta
    # 2-d case: the column index maps directly onto the second axis.
    col = first(axes(A, 2)) + ind.j - 1
    return getindex(A, ind.i, col)
end

function _setindex!(::IndexSCartesian2, A::AbstractArray{T,N}, v, ind::SCartesianIndex2) where {T,N}
    @_propagate_inbounds_meta
    subs = _ind2sub(tail(axes(A)), ind.j)
    return setindex!(A, v, ind.i, subs...)
end

function _setindex!(::IndexSCartesian2{2}, A::AbstractArray{T,2}, v, ind::SCartesianIndex2) where {T}
    @_propagate_inbounds_meta
    col = first(axes(A, 2)) + ind.j - 1
    return setindex!(A, v, ind.i, col)
end

# Wrappers delegate to their parent; terminates at the ReshapedReinterpretArray method.
eachindex(style::IndexSCartesian2, A::AbstractArray) = eachindex(style, parent(A))
×
353

354
## AbstractArray interface

parent(a::ReinterpretArray) = a.parent
dataids(a::ReinterpretArray) = dataids(a.parent)
# Unaliasing copies the parent, then re-wraps with the matching reinterpret flavor.
unaliascopy(a::NonReshapedReinterpretArray{T}) where {T} = reinterpret(T, unaliascopy(a.parent))
unaliascopy(a::ReshapedReinterpretArray{T}) where {T} = reinterpret(reshape, T, unaliascopy(a.parent))
×
360

361
function size(a::NonReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    psize = size(a.parent)
    # The first dimension is rescaled by the byte-size ratio; singletons keep it.
    dim1 = issingletontype(T) ? psize[1] : div(psize[1]*sizeof(S), sizeof(T))
    return tuple(dim1, tail(psize)...)
end
function size(a::ReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    psize = size(a.parent)
    # Splitting prepends a channel dimension; fusing drops the parent's first.
    sizeof(S) > sizeof(T) && return (div(sizeof(S), sizeof(T)), psize...)
    sizeof(S) < sizeof(T) && return tail(psize)
    return psize
end
size(a::NonReshapedReinterpretArray{T,0}) where {T} = ()
×
373

374
function axes(a::NonReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    paxs = axes(a.parent)
    start, len = first(paxs[1]), length(paxs[1])
    # Rescale the first axis length by the byte-size ratio, keeping its offset.
    len1 = issingletontype(T) ? len : div(len*sizeof(S), sizeof(T))
    return tuple(oftype(paxs[1], start:start+len1-1), tail(paxs)...)
end
function axes(a::ReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    paxs = axes(a.parent)
    sizeof(S) > sizeof(T) && return (OneTo(div(sizeof(S), sizeof(T))), paxs...)
    sizeof(S) < sizeof(T) && return tail(paxs)
    return paxs
end
axes(a::NonReshapedReinterpretArray{T,0}) where {T} = ()
×
387

388
has_offset_axes(a::ReinterpretArray) = has_offset_axes(a.parent)

elsize(::Type{<:ReinterpretArray{T}}) where {T} = sizeof(T)
# Pointer conversion passes through to the parent's storage and retypes the pointer.
cconvert(::Type{Ptr{T}}, a::ReinterpretArray{T,N,S} where N) where {T,S} = cconvert(Ptr{S}, a.parent)
unsafe_convert(::Type{Ptr{T}}, a::ReinterpretArray{T,N,S} where N) where {T,S} = Ptr{T}(unsafe_convert(Ptr{S},a.parent))
×
393

394
@propagate_inbounds function getindex(a::NonReshapedReinterpretArray{T,0,S}) where {T,S}
    # Primitive-to-primitive can use the scalar reinterpret directly.
    return isprimitivetype(T) && isprimitivetype(S) ? reinterpret(T, a.parent[]) : a[firstindex(a)]
end
401

402
# Whether the fast pointer-based load/store path is usable; wrappers delegate
# to their parent storage.
check_ptr_indexable(a::ReinterpretArray, sz = elsize(a)) = check_ptr_indexable(parent(a), sz)
check_ptr_indexable(a::ReshapedArray, sz) = check_ptr_indexable(parent(a), sz)
check_ptr_indexable(a::FastContiguousSubArray, sz) = check_ptr_indexable(parent(a), sz)
check_ptr_indexable(a::Array, sz) = sizeof(eltype(a)) !== sz
check_ptr_indexable(a::Memory, sz) = true
check_ptr_indexable(a::AbstractArray, sz) = false
×
408

409
@propagate_inbounds getindex(a::ReshapedReinterpretArray{T,0}) where {T} = a[firstindex(a)]

@propagate_inbounds isassigned(a::ReinterpretArray, inds::Integer...) = checkbounds(Bool, a, inds...) && (check_ptr_indexable(a) || _isassigned_ra(a, inds...))
@propagate_inbounds isassigned(a::ReinterpretArray, inds::SCartesianIndex2) = isassigned(a.parent, inds.j)
@propagate_inbounds _isassigned_ra(a::ReinterpretArray, inds...) = true # that is not entirely true, but computing exactly which indexes will be accessed in the parent requires a lot of duplication from the _getindex_ra code
×
414

415
@propagate_inbounds function getindex(a::ReinterpretArray{T,N,S}, inds::Vararg{Int, N}) where {T,N,S}
    check_readable(a)
    # Prefer the raw-pointer fast path when the storage supports it.
    check_ptr_indexable(a) && return _getindex_ptr(a, inds...)
    return _getindex_ra(a, inds[1], tail(inds))
end
420

421
@propagate_inbounds function getindex(a::ReinterpretArray{T,N,S}, i::Int) where {T,N,S}
    check_readable(a)
    check_ptr_indexable(a) && return _getindex_ptr(a, i)
    isa(IndexStyle(a), IndexLinear) && return _getindex_ra(a, i, ())
    # Convert to full indices here, to avoid needing multiple conversions in
    # the loop in _getindex_ra
    subs = _to_subscript_indices(a, i)
    return isempty(subs) ? _getindex_ra(a, firstindex(a), ()) : _getindex_ra(a, subs[1], tail(subs))
end
432

433
@propagate_inbounds function getindex(a::ReshapedReinterpretArray{T,N,S}, ind::SCartesianIndex2) where {T,N,S}
    check_readable(a)
    # Load the parent element into a box, then read the ind.i-th T out of its bytes.
    box = Ref{S}(a.parent[ind.j])
    p = Ptr{T}(unsafe_convert(Ref{S}, box))
    GC.@preserve box return unsafe_load(p, ind.i)
end
439

440
@inline function _getindex_ptr(a::ReinterpretArray{T}, inds...) where {T}
    @boundscheck checkbounds(a, inds...)
    # Read directly from the parent's memory at the retyped linear offset.
    li = _to_linear_index(a, inds...)
    guard = cconvert(Ptr{T}, a)
    p = unsafe_convert(Ptr{T}, guard) + sizeof(T) * (li - 1)
    GC.@preserve guard return unsafe_load(p)
end
447

448
@propagate_inbounds function _getindex_ra(a::NonReshapedReinterpretArray{T,N,S}, i1::Int, tailinds::TT) where {T,N,S,TT}
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            return T.instance
        end
        return reinterpret(T, a.parent[i1, tailinds...])
    else
        @boundscheck checkbounds(a, i1, tailinds...)
        # base: number of whole S elements before the requested T;
        # off: byte offset of the T within the next S element.
        base, off = divrem((i1-1)*sizeof(T), sizeof(S))
        # Optimizations that avoid branches
        if sizeof(T) % sizeof(S) == 0
            # T is bigger than S and contains an integer number of them
            nper = sizeof(T) ÷ sizeof(S)
            tref = Ref{T}()
            GC.@preserve tref begin
                sptr = Ptr{S}(unsafe_convert(Ref{T}, tref))
                for k = 1:nper
                     elem = a.parent[base + k, tailinds...]
                     unsafe_store!(sptr, elem, k)
                end
            end
            return tref[]
        elseif sizeof(S) % sizeof(T) == 0
            # S is bigger than T and contains an integer number of them
            sref = Ref{S}(a.parent[base + 1, tailinds...])
            GC.@preserve sref begin
                tptr = Ptr{T}(unsafe_convert(Ref{S}, sref))
                return unsafe_load(tptr + off)
            end
        else
            k = 1
            copied = 0
            # This is a bit complicated to deal with partial elements
            # at both the start and the end. LLVM will fold as appropriate,
            # once it knows the data layout
            sref = Ref{S}()
            tref = Ref{T}()
            GC.@preserve sref tref begin
                sptr = Ptr{S}(unsafe_convert(Ref{S}, sref))
                tptr = Ptr{T}(unsafe_convert(Ref{T}, tref))
                while copied < sizeof(T)
                    sref[] = a.parent[base + k, tailinds...]
                    chunk = min(sizeof(S) - off, sizeof(T)-copied)
                    memcpy(tptr + copied, sptr + off, chunk)
                    copied += chunk
                    off = 0
                    k += 1
                end
            end
            return tref[]
        end
    end
end
503

504
@propagate_inbounds function _getindex_ra(a::ReshapedReinterpretArray{T,N,S}, i1::Int, tailinds::TT) where {T,N,S,TT}
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            return T.instance
        end
        return reinterpret(T, a.parent[i1, tailinds...])
    end
    @boundscheck checkbounds(a, i1, tailinds...)
    if sizeof(T) >= sizeof(S)
        tref = Ref{T}()
        GC.@preserve tref begin
            sptr = Ptr{S}(unsafe_convert(Ref{T}, tref))
            if sizeof(T) > sizeof(S)
                # Extra dimension in the parent array
                nper = sizeof(T) ÷ sizeof(S)
                if isempty(tailinds) && IndexStyle(a.parent) === IndexLinear()
                    base = nper * (i1 - firstindex(a))
                    for k = 1:nper
                        elem = a.parent[k + base]
                        unsafe_store!(sptr, elem, k)
                    end
                else
                    for k = 1:nper
                        elem = a.parent[k, i1, tailinds...]
                        unsafe_store!(sptr, elem, k)
                    end
                end
            else
                # No extra dimension
                elem = a.parent[i1, tailinds...]
                unsafe_store!(sptr, elem)
            end
        end
        return tref[]
    end
    # S is bigger than T and contains an integer number of them
    sref = Ref{S}()
    GC.@preserve sref begin
        tptr = Ptr{T}(unsafe_convert(Ref{S}, sref))
        sref[] = a.parent[tailinds...]
        return unsafe_load(tptr, i1)
    end
end
550

551
@propagate_inbounds function setindex!(a::NonReshapedReinterpretArray{T,0,S}, v) where {T,S}
    # Primitive-to-primitive can use the scalar reinterpret directly.
    if isprimitivetype(S) && isprimitivetype(T)
        a.parent[] = reinterpret(S, convert(T, v)::T)
        return a
    end
    return setindex!(a, v, firstindex(a))
end

@propagate_inbounds setindex!(a::ReshapedReinterpretArray{T,0}, v) where {T} = setindex!(a, v, firstindex(a))
×
560

561
@propagate_inbounds function setindex!(a::ReinterpretArray{T,N,S}, v, inds::Vararg{Int, N}) where {T,N,S}
    check_writable(a)
    # Prefer the raw-pointer fast path when the storage supports it.
    check_ptr_indexable(a) && return _setindex_ptr!(a, v, inds...)
    return _setindex_ra!(a, v, inds[1], tail(inds))
end
566

567
@propagate_inbounds function setindex!(a::ReinterpretArray{T,N,S}, v, i::Int) where {T,N,S}
    check_writable(a)
    check_ptr_indexable(a) && return _setindex_ptr!(a, v, i)
    isa(IndexStyle(a), IndexLinear) && return _setindex_ra!(a, v, i, ())
    # Convert to full subscripts up front, mirroring the getindex path.
    subs = _to_subscript_indices(a, i)
    return isempty(subs) ? _setindex_ra!(a, v, firstindex(a), ()) : _setindex_ra!(a, v, subs[1], tail(subs))
end
576

577
@propagate_inbounds function setindex!(a::ReshapedReinterpretArray{T,N,S}, v, ind::SCartesianIndex2) where {T,N,S}
    check_writable(a)
    v = convert(T, v)::T
    # Read-modify-write: patch the ind.i-th T inside a boxed copy of the S element.
    box = Ref{S}(a.parent[ind.j])
    GC.@preserve box begin
        p = Ptr{T}(unsafe_convert(Ref{S}, box))
        unsafe_store!(p, v, ind.i)
    end
    a.parent[ind.j] = box[]
    return a
end
588

589
@inline function _setindex_ptr!(a::ReinterpretArray{T}, v, inds...) where {T}
    @boundscheck checkbounds(a, inds...)
    # Write directly into the parent's memory at the retyped linear offset.
    li = _to_linear_index(a, inds...)
    guard = cconvert(Ptr{T}, a)
    p = unsafe_convert(Ptr{T}, guard) + sizeof(T) * (li - 1)
    GC.@preserve guard unsafe_store!(p, v)
    return a
end
597

598
@propagate_inbounds function _setindex_ra!(a::NonReshapedReinterpretArray{T,N,S}, v, i1::Int, tailinds::TT) where {T,N,S,TT}
    v = convert(T, v)::T
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            # setindex! is a noop except for the index check
        else
            setindex!(a.parent, reinterpret(S, v), i1, tailinds...)
        end
    else
        @boundscheck checkbounds(a, i1, tailinds...)
        # base: number of whole S elements before the written T;
        # off: byte offset of the T within the next S element.
        base, off = divrem((i1-1)*sizeof(T), sizeof(S))
        # Optimizations that avoid branches
        if sizeof(T) % sizeof(S) == 0
            # T is bigger than S and contains an integer number of them
            tref = Ref{T}(v)
            GC.@preserve tref begin
                sptr = Ptr{S}(unsafe_convert(Ref{T}, tref))
                nper = sizeof(T) ÷ sizeof(S)
                for k = 1:nper
                    elem = unsafe_load(sptr, k)
                    a.parent[base + k, tailinds...] = elem
                end
            end
        elseif sizeof(S) % sizeof(T) == 0
            # S is bigger than T and contains an integer number of them
            sref = Ref{S}(a.parent[base + 1, tailinds...])
            GC.@preserve sref begin
                tptr = Ptr{T}(unsafe_convert(Ref{S}, sref))
                unsafe_store!(tptr + off, v)
                a.parent[base + 1, tailinds...] = sref[]
            end
        else
            tref = Ref{T}(v)
            sref = Ref{S}()
            GC.@preserve tref sref begin
                tptr = Ptr{UInt8}(unsafe_convert(Ref{T}, tref))
                sptr = Ptr{UInt8}(unsafe_convert(Ref{S}, sref))
                copied = 0
                k = 1
                # Deal with any partial elements at the start. We'll have to copy in the
                # element from the original array and overwrite the relevant parts
                if off != 0
                    sref[] = a.parent[base + k, tailinds...]
                    chunk = min((sizeof(S) - off) % UInt, sizeof(T) % UInt)
                    memcpy(sptr + off, tptr, chunk)
                    copied += chunk
                    a.parent[base + k, tailinds...] = sref[]
                    k += 1
                    off = 0
                end
                # Deal with the main body of elements
                while copied < sizeof(T) && (sizeof(T) - copied) > sizeof(S)
                    chunk = min(sizeof(S), sizeof(T) - copied)
                    memcpy(sptr, tptr + copied, chunk)
                    copied += chunk
                    a.parent[base + k, tailinds...] = sref[]
                    k += 1
                end
                # Deal with trailing partial elements
                if copied < sizeof(T)
                    sref[] = a.parent[base + k, tailinds...]
                    chunk = min(sizeof(S), sizeof(T) - copied)
                    memcpy(sptr, tptr + copied, chunk)
                    a.parent[base + k, tailinds...] = sref[]
                end
            end
        end
    end
    return a
end
670

671
# Store `v` (converted to `T`) at position (i1, tailinds...) of a reshaped
# reinterpret array, writing through to the parent array of eltype `S`.
# Three layout cases: T and S same size (scalar reinterpret), T larger than S
# (one T spans an extra leading dimension of S's), and T smaller than S
# (several T's packed inside one S element).
@propagate_inbounds function _setindex_ra!(a::ReshapedReinterpretArray{T,N,S}, v, i1::Int, tailinds::TT) where {T,N,S,TT}
    v = convert(T, v)::T
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            # setindex! is a noop except for the index check
        else
            setindex!(a.parent, reinterpret(S, v), i1, tailinds...)
        end
        # Fix: return here so the fast path does not fall through into the
        # generic pointer-based path below, which would redundantly re-write
        # the element (and attempt a zero-size unsafe_load for singleton T).
        return a
    end
    @boundscheck checkbounds(a, i1, tailinds...)
    if sizeof(T) >= sizeof(S)
        t = Ref{T}(v)
        GC.@preserve t begin
            # View the bytes of `v` as a sequence of `S` values.
            sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
            if sizeof(T) > sizeof(S)
                # Extra dimension in the parent array
                n = sizeof(T) ÷ sizeof(S)
                if isempty(tailinds) && IndexStyle(a.parent) === IndexLinear()
                    offset = n * (i1 - firstindex(a))
                    for i = 1:n
                        s = unsafe_load(sptr, i)
                        a.parent[i + offset] = s
                    end
                else
                    for i = 1:n
                        s = unsafe_load(sptr, i)
                        a.parent[i, i1, tailinds...] = s
                    end
                end
            else # sizeof(T) == sizeof(S)
                # No extra dimension
                s = unsafe_load(sptr)
                a.parent[i1, tailinds...] = s
            end
        end
    else
        # S is bigger than T and contains an integer number of them:
        # read-modify-write the single parent element, patching in `v`.
        s = Ref{S}()
        GC.@preserve s begin
            tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
            s[] = a.parent[tailinds...]
            unsafe_store!(tptr, v, i1)
            a.parent[tailinds...] = s[]
        end
    end
    return a
end
720

721
# Padding
# Describes one contiguous run of padding bytes inside an isbits layout.
struct Padding
    offset::Int # 0-indexed offset of the next valid byte; sizeof(T) indicates trailing padding
    size::Int   # bytes of padding before a valid byte
end
726
# Overlap of two padding runs; a zero-`size` `Padding` marks an empty intersection.
function intersect(a::Padding, b::Padding)
    lo = max(a.offset, b.offset)
    hi = min(a.offset + a.size, b.offset + b.size)
    return Padding(lo, max(0, hi - lo))
end
731

732
# Thrown when a reinterpret between types is rejected because the padding
# layout of `S` is incompatible with that of `T`.
struct PaddingError <: Exception
    S::Type
    T::Type
end
736

737
# Render a `PaddingError` as a human-readable message on `io`.
function showerror(io::IO, err::PaddingError)
    print(io, "Padding of type $(err.S) is not compatible with type $(err.T).")
end
740

741
"""
742
    CyclePadding(padding, total_size)
743

744
Cycles an iterator of `Padding` structs, restarting the padding at `total_size`.
745
E.g. if `padding` is all the padding in a struct and `total_size` is the total
746
aligned size of that array, `CyclePadding` will correspond to the padding in an
747
infinite vector of such structs.
748
"""
749
struct CyclePadding{P}
750
    padding::P
751
    total_size::Int
752
end
753
# Iteration traits: a `CyclePadding` is an infinite iterator of `Padding`s,
# empty only when the wrapped per-element padding is empty.
eltype(::Type{<:CyclePadding}) = Padding
IteratorSize(::Type{<:CyclePadding}) = IsInfinite()
isempty(cp::CyclePadding) = isempty(cp.padding)
756
# Start iteration. State is `(cycle_offset, inner_state...)`; the first pass
# over the wrapped padding uses cycle offset 0.
function iterate(cp::CyclePadding)
    y = iterate(cp.padding)
    y === nothing && return nothing
    y[1], (0, y[2])
end
761
# Continue iteration. When the wrapped padding iterator is exhausted, restart
# it with the cycle offset advanced by `total_size` (the next repeated element).
function iterate(cp::CyclePadding, state::Tuple)
    y = iterate(cp.padding, tail(state)...)
    y === nothing && return iterate(cp, (state[1]+cp.total_size,))
    # Report offsets shifted by the accumulated cycle offset in state[1].
    Padding(y[1].offset+state[1], y[1].size), (state[1], tail(y)...)
end
766

767
"""
768
    Compute the location of padding in an isbits datatype. Recursive over the fields of that type.
769
"""
770
@assume_effects :foldable function padding(T::DataType, baseoffset::Int = 0)
×
771
    pads = Padding[]
×
772
    last_end::Int = baseoffset
×
773
    for i = 1:fieldcount(T)
×
774
        offset = baseoffset + Int(fieldoffset(T, i))
×
775
        fT = fieldtype(T, i)
×
776
        append!(pads, padding(fT, offset))
×
777
        if offset != last_end
×
778
            push!(pads, Padding(offset, offset-last_end))
×
779
        end
780
        last_end = offset + sizeof(fT)
×
781
    end
×
782
    if 0 < last_end - baseoffset < sizeof(T)
×
783
        push!(pads, Padding(baseoffset + sizeof(T), sizeof(T) - last_end + baseoffset))
×
784
    end
785
    return Core.svec(pads...)
×
786
end
787

788
# Build the cyclic padding description for an array of `T`: the intra-struct
# padding plus any inter-element alignment padding at the tail.
function CyclePadding(T::DataType)
    sz = sizeof(T)
    al = datatype_alignment(T)
    aligned = sz + (al - (sz % al)) % al
    pads = padding(T)
    if sz != aligned
        # Tail alignment gap becomes explicit trailing padding.
        pads = Core.svec(pads..., Padding(sz, aligned - sz))
    end
    return CyclePadding(pads, aligned)
end
797

798
# Can an array of eltype `T` be reinterpreted as eltype `S` as far as padding
# is concerned? Every padding byte of `T`, cycled over one lcm-sized period,
# must lie inside some padding run of `S`.
@assume_effects :total function array_subpadding(S, T)
    lcm_size = lcm(sizeof(S), sizeof(T))
    s, t = CyclePadding(S), CyclePadding(T)
    checked_size = 0
    # use of Stateful harms inference and makes this vulnerable to invalidation
    (pad, tstate) = let
        it = iterate(t)
        it === nothing && return true   # T has no padding at all
        it
    end
    (ps, sstate) = let
        it = iterate(s)
        it === nothing && return false  # T has padding but S has none
        it
    end
    while checked_size < lcm_size
        while true
            # See if there's corresponding padding in S
            ps.offset > pad.offset && return false
            intersect(ps, pad) == pad && break
            ps, sstate = iterate(s, sstate)
        end
        checked_size = pad.offset + pad.size
        pad, tstate = iterate(t, tstate)
    end
    return true
end
825

826
# `Out` and `In` are byte-compatible iff their padding layouts coincide exactly.
@assume_effects :foldable function struct_subpadding(::Type{Out}, ::Type{In}) where {Out, In}
    return padding(Out) == padding(In)
end
829

830
# Size of `T` with all padding bytes removed.
@assume_effects :foldable function packedsize(::Type{T}) where T
    total = sizeof(T)
    for p ∈ padding(T)
        total -= p.size
    end
    return total
end
834

835
# `true` when the layout of `T` contains no padding bytes at all.
@assume_effects :foldable function ispacked(::Type{T}) where T
    return isempty(padding(T))
end
836

837
# Recursively copy the `In` value behind `ptr_in` to `ptr_out`, dropping all
# padding bytes, so `ptr_out` receives the packed byte representation.
function _copytopacked!(ptr_out::Ptr{Out}, ptr_in::Ptr{In}) where {Out, In}
    writeoffset = 0
    for i ∈ 1:fieldcount(In)
        readoffset = fieldoffset(In, i)
        fT = fieldtype(In, i)
        if ispacked(fT)
            # Padding-free field: copy its bytes verbatim.
            readsize = sizeof(fT)
            memcpy(ptr_out + writeoffset, ptr_in + readoffset, readsize)
            writeoffset += readsize
        else # nested padded type
            _copytopacked!(ptr_out + writeoffset, Ptr{fT}(ptr_in + readoffset))
            writeoffset += packedsize(fT)
        end
    end
end
852

853
# Inverse of `_copytopacked!`: scatter the packed bytes behind `ptr_in` into
# the (possibly padded) field layout of `Out` behind `ptr_out`.
function _copyfrompacked!(ptr_out::Ptr{Out}, ptr_in::Ptr{In}) where {Out, In}
    readoffset = 0
    for i ∈ 1:fieldcount(Out)
        writeoffset = fieldoffset(Out, i)
        fT = fieldtype(Out, i)
        if ispacked(fT)
            # Padding-free field: copy its bytes verbatim.
            writesize = sizeof(fT)
            memcpy(ptr_out + writeoffset, ptr_in + readoffset, writesize)
            readoffset += writesize
        else # nested padded type
            _copyfrompacked!(Ptr{fT}(ptr_out + writeoffset), ptr_in + readoffset)
            readoffset += packedsize(fT)
        end
    end
end
868

869
# Scalar reinterpret between isbits types; requires equal packed (padding-free)
# sizes. Identical padding layouts allow a plain byte copy; otherwise defers to
# the slower padding-aware path.
@inline function _reinterpret(::Type{Out}, x::In) where {Out, In}
    # handle non-primitive types
    isbitstype(Out) || throw(ArgumentError("Target type for `reinterpret` must be isbits"))
    isbitstype(In) || throw(ArgumentError("Source type for `reinterpret` must be isbits"))
    inpackedsize = packedsize(In)
    outpackedsize = packedsize(Out)
    inpackedsize == outpackedsize ||
        throw(ArgumentError(LazyString("Packed sizes of types ", Out, " and ", In,
            " do not match; got ", outpackedsize, " and ", inpackedsize, ", respectively.")))
    in = Ref{In}(x)
    out = Ref{Out}()
    if struct_subpadding(Out, In)
        # if packed the same, just copy
        GC.@preserve in out begin
            ptr_in = unsafe_convert(Ptr{In}, in)
            ptr_out = unsafe_convert(Ptr{Out}, out)
            memcpy(ptr_out, ptr_in, sizeof(Out))
        end
        return out[]
    else
        # mismatched padding
        return _reinterpret_padding(Out, x)
    end
end
893

894
# If the code reaches this part, it needs to handle padding and is unlikely
# to compile to a noop. Therefore, we don't forcibly inline it.
function _reinterpret_padding(::Type{Out}, x::In) where {Out, In}
    inpackedsize = packedsize(In)
    in = Ref{In}(x)
    out = Ref{Out}()
    GC.@preserve in out begin
        ptr_in = unsafe_convert(Ptr{In}, in)
        ptr_out = unsafe_convert(Ptr{Out}, out)

        if fieldcount(In) > 0 && ispacked(Out)
            # `Out` is padding-free: pack `In` directly into it.
            _copytopacked!(ptr_out, ptr_in)
        elseif fieldcount(Out) > 0 && ispacked(In)
            # `In` is padding-free: scatter its bytes into `Out`'s layout.
            _copyfrompacked!(ptr_out, ptr_in)
        else
            # Both have padding: route through an intermediate packed buffer.
            packed = Ref{NTuple{inpackedsize, UInt8}}()
            GC.@preserve packed begin
                ptr_packed = unsafe_convert(Ptr{NTuple{inpackedsize, UInt8}}, packed)
                _copytopacked!(ptr_packed, ptr_in)
                _copyfrompacked!(ptr_out, ptr_packed)
            end
        end
    end
    return out[]
end
919

920

921
# Reductions with IndexSCartesian2
922

923
# Reduction entry point for arrays indexed with `IndexSCartesian2`.
function _mapreduce(f::F, op::OP, style::IndexSCartesian2{K}, A::AbstractArrayOrBroadcasted) where {F,OP,K}
    idxs = eachindex(style, A)
    ncols = size(idxs)[2]
    # An empty index range requires the empty-reduction fallback.
    ncols == 0 && return mapreduce_empty_iter(f, op, A, IteratorEltype(A))
    return mapreduce_impl(f, op, A, first(idxs), last(idxs))
end
932

933
# Blocked pairwise reduction over an `SCartesianIndex2` range: spans narrower
# than `blksize` columns are reduced sequentially; wider spans are split in
# half recursively (pairwise summation limits error accumulation).
@noinline function mapreduce_impl(f::F, op::OP, A::AbstractArrayOrBroadcasted,
                                  ifirst::SCI, ilast::SCI, blksize::Int) where {F,OP,SCI<:SCartesianIndex2{K}} where K
    if ilast.j - ifirst.j < blksize
        # sequential portion
        @inbounds a1 = A[ifirst]
        @inbounds a2 = A[SCI(2,ifirst.j)]
        v = op(f(a1), f(a2))
        # Finish the first (possibly partial) column.
        @simd for i = ifirst.i + 2 : K
            @inbounds ai = A[SCI(i,ifirst.j)]
            v = op(v, f(ai))
        end
        # Remaining columns
        for j = ifirst.j+1 : ilast.j
            @simd for i = 1:K
                @inbounds ai = A[SCI(i,j)]
                v = op(v, f(ai))
            end
        end
        return v
    else
        # pairwise portion
        jmid = ifirst.j + (ilast.j - ifirst.j) >> 1
        v1 = mapreduce_impl(f, op, A, ifirst, SCI(K,jmid), blksize)
        v2 = mapreduce_impl(f, op, A, SCI(1,jmid+1), ilast, blksize)
        return op(v1, v2)
    end
end
960

961
# Convenience method: delegate with the standard pairwise blocking threshold.
mapreduce_impl(f::F, op::OP, A::AbstractArrayOrBroadcasted, ifirst::SCartesianIndex2, ilast::SCartesianIndex2) where {F,OP} =
    mapreduce_impl(f, op, A, ifirst, ilast, pairwise_blocksize(f, op))
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc