• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

JuliaLang / julia / #37997

29 Jan 2025 02:08AM UTC coverage: 17.283% (-68.7%) from 85.981%
#37997

push

local

web-flow
bpart: Start enforcing min_world for global variable definitions (#57150)

This is the analog of #57102 for global variables. Unlike for constants,
there is no automatic global backdate mechanism. The reasoning for this
is that global variables can be declared at any time, unlike constants
which can only be declared once their value is available. As a result
code patterns using `Core.eval` to declare globals are rarer and likely
incorrect.

1 of 22 new or added lines in 3 files covered. (4.55%)

31430 existing lines in 188 files now uncovered.

7903 of 45728 relevant lines covered (17.28%)

98663.7 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

6.3
/base/reinterpretarray.jl
1
# This file is a part of Julia. License is MIT: https://julialang.org/license
2

3
"""
4
Gives a reinterpreted view (of element type T) of the underlying array (of element type S).
5
If the size of `T` differs from the size of `S`, the array will be compressed/expanded in
6
the first dimension. The variant `reinterpret(reshape, T, a)` instead adds or consumes the first dimension
7
depending on the ratio of element sizes.
8
"""
9
struct ReinterpretArray{T,N,S,A<:AbstractArray{S},IsReshaped} <: AbstractArray{T, N}
10
    parent::A
11
    readable::Bool
12
    writable::Bool
13

UNCOV
14
    function throwbits(S::Type, T::Type, U::Type)
×
UNCOV
15
        @noinline
×
UNCOV
16
        throw(ArgumentError(LazyString("cannot reinterpret `", S, "` as `", T, "`, type `", U, "` is not a bits type")))
×
17
    end
UNCOV
18
    function throwsize0(S::Type, T::Type, msg)
×
UNCOV
19
        @noinline
×
UNCOV
20
        throw(ArgumentError(LazyString("cannot reinterpret a zero-dimensional `", S, "` array to `", T,
×
21
            "` which is of a ", msg, " size")))
22
    end
UNCOV
23
    function throwsingleton(S::Type, T::Type)
×
UNCOV
24
        @noinline
×
UNCOV
25
        throw(ArgumentError(LazyString("cannot reinterpret a `", S, "` array to `", T, "` which is a singleton type")))
×
26
    end
27

28
    global reinterpret
29

30
    @doc """
31
        reinterpret(T::DataType, A::AbstractArray)
32

33
    Construct a view of the array with the same binary data as the given
34
    array, but with `T` as element type.
35

36
    This function also works on "lazy" array whose elements are not computed until they are explicitly retrieved.
37
    For instance, `reinterpret` on the range `1:6` works similarly as on the dense vector `collect(1:6)`:
38

39
    ```jldoctest
40
    julia> reinterpret(Float32, UInt32[1 2 3 4 5])
41
    1×5 reinterpret(Float32, ::Matrix{UInt32}):
42
     1.0f-45  3.0f-45  4.0f-45  6.0f-45  7.0f-45
43

44
    julia> reinterpret(Complex{Int}, 1:6)
45
    3-element reinterpret(Complex{$Int}, ::UnitRange{$Int}):
46
     1 + 2im
47
     3 + 4im
48
     5 + 6im
49
    ```
50

51
    If the location of padding bits does not line up between `T` and `eltype(A)`, the resulting array will be
52
    read-only or write-only, to prevent invalid bits from being written to or read from, respectively.
53

54
    ```jldoctest
55
    julia> a = reinterpret(Tuple{UInt8, UInt32}, UInt32[1, 2])
56
    1-element reinterpret(Tuple{UInt8, UInt32}, ::Vector{UInt32}):
57
     (0x01, 0x00000002)
58

59
    julia> a[1] = 3
60
    ERROR: Padding of type Tuple{UInt8, UInt32} is not compatible with type UInt32.
61

62
    julia> b = reinterpret(UInt32, Tuple{UInt8, UInt32}[(0x01, 0x00000002)]); # showing will error
63

64
    julia> b[1]
65
    ERROR: Padding of type UInt32 is not compatible with type Tuple{UInt8, UInt32}.
66
    ```
67
    """
68
    function reinterpret(::Type{T}, a::A) where {T,N,S,A<:AbstractArray{S, N}}
69
        function thrownonint(S::Type, T::Type, dim)
1✔
70
            @noinline
71
            throw(ArgumentError(LazyString(
72
                "cannot reinterpret an `", S, "` array to `", T, "` whose first dimension has size `", dim,
73
                "`. The resulting array would have a non-integral first dimension.")))
74
        end
75
        function throwaxes1(S::Type, T::Type, ax1)
1✔
76
            @noinline
77
            throw(ArgumentError(LazyString("cannot reinterpret a `", S, "` array to `", T,
78
                "` when the first axis is ", ax1, ". Try reshaping first.")))
79
        end
80
        isbitstype(T) || throwbits(S, T, T)
1✔
81
        isbitstype(S) || throwbits(S, T, S)
1✔
82
        (N != 0 || sizeof(T) == sizeof(S)) || throwsize0(S, T, "different")
1✔
83
        if N != 0 && sizeof(S) != sizeof(T)
1✔
84
            ax1 = axes(a)[1]
1✔
85
            dim = length(ax1)
1✔
86
            if issingletontype(T)
1✔
UNCOV
87
                issingletontype(S) || throwsingleton(S, T)
×
88
            else
89
                rem(dim*sizeof(S),sizeof(T)) == 0 || thrownonint(S, T, dim)
1✔
90
            end
91
            first(ax1) == 1 || throwaxes1(S, T, ax1)
1✔
92
        end
93
        readable = array_subpadding(T, S)
1✔
94
        writable = array_subpadding(S, T)
1✔
95
        new{T, N, S, A, false}(a, readable, writable)
1✔
96
    end
UNCOV
97
    reinterpret(::Type{T}, a::AbstractArray{T}) where {T} = a
×
98

99
    # With reshaping
UNCOV
100
    function reinterpret(::typeof(reshape), ::Type{T}, a::A) where {T,S,A<:AbstractArray{S}}
×
UNCOV
101
        function throwintmult(S::Type, T::Type)
×
UNCOV
102
            @noinline
×
UNCOV
103
            throw(ArgumentError(LazyString("`reinterpret(reshape, T, a)` requires that one of `sizeof(T)` (got ",
×
104
                sizeof(T), ") and `sizeof(eltype(a))` (got ", sizeof(S), ") be an integer multiple of the other")))
105
        end
UNCOV
106
        function throwsize1(a::AbstractArray, T::Type)
×
UNCOV
107
            @noinline
×
UNCOV
108
            throw(ArgumentError(LazyString("`reinterpret(reshape, ", T, ", a)` where `eltype(a)` is ", eltype(a),
×
109
                " requires that `axes(a, 1)` (got ", axes(a, 1), ") be equal to 1:",
110
                sizeof(T) ÷ sizeof(eltype(a)), " (from the ratio of element sizes)")))
111
        end
UNCOV
112
        function throwfromsingleton(S, T)
×
UNCOV
113
            @noinline
×
UNCOV
114
            throw(ArgumentError(LazyString("`reinterpret(reshape, ", T, ", a)` where `eltype(a)` is ", S,
×
115
                " requires that ", T, " be a singleton type, since ", S, " is one")))
116
        end
UNCOV
117
        isbitstype(T) || throwbits(S, T, T)
×
UNCOV
118
        isbitstype(S) || throwbits(S, T, S)
×
UNCOV
119
        if sizeof(S) == sizeof(T)
×
UNCOV
120
            N = ndims(a)
×
UNCOV
121
        elseif sizeof(S) > sizeof(T)
×
UNCOV
122
            issingletontype(T) && throwsingleton(S, T)
×
UNCOV
123
            rem(sizeof(S), sizeof(T)) == 0 || throwintmult(S, T)
×
UNCOV
124
            N = ndims(a) + 1
×
125
        else
UNCOV
126
            issingletontype(S) && throwfromsingleton(S, T)
×
UNCOV
127
            rem(sizeof(T), sizeof(S)) == 0 || throwintmult(S, T)
×
UNCOV
128
            N = ndims(a) - 1
×
UNCOV
129
            N > -1 || throwsize0(S, T, "larger")
×
UNCOV
130
            axes(a, 1) == OneTo(sizeof(T) ÷ sizeof(S)) || throwsize1(a, T)
×
131
        end
UNCOV
132
        readable = array_subpadding(T, S)
×
UNCOV
133
        writable = array_subpadding(S, T)
×
UNCOV
134
        new{T, N, S, A, true}(a, readable, writable)
×
135
    end
UNCOV
136
    reinterpret(::typeof(reshape), ::Type{T}, a::AbstractArray{T}) where {T} = a
×
137
end
138

139
# Convenience aliases distinguishing the two flavors via the `IsReshaped` type
# parameter; note the non-reshaped alias additionally ties the parent's
# dimensionality to `N`.
ReshapedReinterpretArray{T,N,S,A<:AbstractArray{S}} = ReinterpretArray{T,N,S,A,true}
NonReshapedReinterpretArray{T,N,S,A<:AbstractArray{S, N}} = ReinterpretArray{T,N,S,A,false}
141

142
"""
143
    reinterpret(reshape, T, A::AbstractArray{S}) -> B
144

145
Change the type-interpretation of `A` while consuming or adding a "channel dimension."
146

147
If `sizeof(T) = n*sizeof(S)` for `n>1`, `A`'s first dimension must be
148
of size `n` and `B` lacks `A`'s first dimension. Conversely, if `sizeof(S) = n*sizeof(T)` for `n>1`,
149
`B` gets a new first dimension of size `n`. The dimensionality is unchanged if `sizeof(T) == sizeof(S)`.
150

151
!!! compat "Julia 1.6"
152
    This method requires at least Julia 1.6.
153

154
# Examples
155

156
```jldoctest
157
julia> A = [1 2; 3 4]
158
2×2 Matrix{$Int}:
159
 1  2
160
 3  4
161

162
julia> reinterpret(reshape, Complex{Int}, A)    # the result is a vector
163
2-element reinterpret(reshape, Complex{$Int}, ::Matrix{$Int}) with eltype Complex{$Int}:
164
 1 + 3im
165
 2 + 4im
166

167
julia> a = [(1,2,3), (4,5,6)]
168
2-element Vector{Tuple{$Int, $Int, $Int}}:
169
 (1, 2, 3)
170
 (4, 5, 6)
171

172
julia> reinterpret(reshape, Int, a)             # the result is a matrix
173
3×2 reinterpret(reshape, $Int, ::Vector{Tuple{$Int, $Int, $Int}}) with eltype $Int:
174
 1  4
175
 2  5
176
 3  6
177
```
178
"""
179
reinterpret(::typeof(reshape), T::Type, a::AbstractArray)
180

UNCOV
181
reinterpret(::Type{T}, a::NonReshapedReinterpretArray) where {T} = reinterpret(T, a.parent)
×
UNCOV
182
reinterpret(::typeof(reshape), ::Type{T}, a::ReshapedReinterpretArray) where {T} = reinterpret(reshape, T, a.parent)
×
183

184
# Definition of StridedArray: the union of array types for which a strided
# memory layout (and hence pointer-based BLAS-style access) is guaranteed.
StridedFastContiguousSubArray{T,N,A<:DenseArray} = FastContiguousSubArray{T,N,A}
StridedReinterpretArray{T,N,A<:Union{DenseArray,StridedFastContiguousSubArray},IsReshaped} = ReinterpretArray{T,N,S,A,IsReshaped} where S
StridedReshapedArray{T,N,A<:Union{DenseArray,StridedFastContiguousSubArray,StridedReinterpretArray}} = ReshapedArray{T,N,A}
StridedSubArray{T,N,A<:Union{DenseArray,StridedReshapedArray,StridedReinterpretArray},
    I<:Tuple{Vararg{Union{RangeIndex, ReshapedUnitRange, AbstractCartesianIndex}}}} = SubArray{T,N,A,I}
StridedArray{T,N} = Union{DenseArray{T,N}, StridedSubArray{T,N}, StridedReshapedArray{T,N}, StridedReinterpretArray{T,N}}
StridedVector{T} = StridedArray{T,1}
StridedMatrix{T} = StridedArray{T,2}
StridedVecOrMat{T} = Union{StridedVector{T}, StridedMatrix{T}}

# Dense-like arrays get the canonical column-major strides computed from size.
strides(a::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}) = size_to_strides(1, size(a)...)
# For k beyond ndims, report the stride of a trailing singleton dimension.
stride(A::Union{DenseArray,StridedReshapedArray,StridedReinterpretArray}, k::Integer) =
    k ≤ ndims(A) ? strides(A)[k] : length(A)
198

UNCOV
199
# Strides of a reinterpreted view, derived from the parent's strides scaled by
# the element-size ratio. Throws when the parent's layout cannot be expressed
# exactly in units of the new element size.
function strides(a::ReinterpretArray{T,<:Any,S,<:AbstractArray{S},IsReshaped}) where {T,S,IsReshaped}
    _checkcontiguous(Bool, a) && return size_to_strides(1, size(a)...)
    stp = strides(parent(a))
    els, elp = sizeof(T), sizeof(S)
    els == elp && return stp # 0dim parent is also handled here.
    # Reshaped view with smaller elements: the added leading dimension has
    # stride 1 and all parent strides scale up.
    IsReshaped && els < elp && return (1, _checked_strides(stp, els, elp)...)
    stp[1] == 1 || throw(ArgumentError("Parent must be contiguous in the 1st dimension!"))
    st′ = _checked_strides(tail(stp), els, elp)
    # Non-reshaped views keep a unit-stride first dimension; reshaped views
    # (with larger elements) dropped the leading dimension.
    return IsReshaped ? st′ : (1, st′...)
end
209

UNCOV
210
# Rescale the parent stride tuple `stp` (in parent elements of `elp` bytes)
# into strides measured in view elements of `els` bytes.
# Throws `ArgumentError` when a stride is not exactly representable.
@inline function _checked_strides(stp::Tuple, els::Integer, elp::Integer)
    # Fast path: parent elements are an exact multiple of the view elements,
    # so every stride scales by the integer ratio.
    if elp > els && rem(elp, els) == 0
        ratio = div(elp, els)
        return map(st -> ratio * st, stp)
    end
    # General path: each byte-distance must divide evenly by the new element size.
    quotrems = map(st -> divrem(elp * st, els), stp)
    if !all(qr -> iszero(qr[2]), quotrems)
        throw(ArgumentError("Parent's strides could not be exactly divided!"))
    end
    return map(first, quotrems)
end
220

UNCOV
221
# Contiguity of a reinterpreted view is inherited from its parent.
_checkcontiguous(::Type{Bool}, A::ReinterpretArray) = _checkcontiguous(Bool, parent(A))

# Allocate a fresh (ordinary) array from the parent; the reinterpretation
# itself is not preserved in the result.
similar(a::ReinterpretArray, T::Type, d::Dims) = similar(a.parent, T, d)
224

225
# Throw `PaddingError` unless elements of the view may be read (i.e. T's
# padding is a superset of S's; see `array_subpadding`).
function check_readable(a::ReinterpretArray{T, N, S} where N) where {T,S}
    # See comment in check_writable
    if !a.readable && !array_subpadding(T, S)
        throw(PaddingError(T, S))
    end
end

# Throw `PaddingError` unless elements of the view may be written.
function check_writable(a::ReinterpretArray{T, N, S} where N) where {T,S}
    # `array_subpadding` is relatively expensive (compared to a simple arrayref),
    # so it is cached in the array. However, it is computable at compile time if,
    # inference has the types available. By using this form of the check, we can
    # get the best of both worlds for the success case. If the types were not
    # available to inference, we simply need to check the field (relatively cheap)
    # and if they were we should be able to fold this check away entirely.
    if !a.writable && !array_subpadding(S, T)
        throw(PaddingError(T, S))
    end
end
243

244
## IndexStyle specializations

# For `reinterpret(reshape, T, a)` where we're adding a channel dimension and with
# `IndexStyle(a) == IndexLinear()`, it's advantageous to retain pseudo-linear indexing.
struct IndexSCartesian2{K} <: IndexStyle end   # K = sizeof(S) ÷ sizeof(T), a static-sized 2d cartesian iterator

# Non-reshaped views simply inherit the parent's index style.
IndexStyle(::Type{ReinterpretArray{T,N,S,A,false}}) where {T,N,S,A<:AbstractArray{S,N}} = IndexStyle(A)
function IndexStyle(::Type{ReinterpretArray{T,N,S,A,true}}) where {T,N,S,A<:AbstractArray{S}}
    if sizeof(T) < sizeof(S)
        # Added channel dimension: use the static 2d style over a linear parent.
        IndexStyle(A) === IndexLinear() && return IndexSCartesian2{sizeof(S) ÷ sizeof(T)}()
        return IndexCartesian()
    end
    return IndexStyle(A)
end
# Combining two identical SCartesian2 styles keeps that style.
IndexStyle(::IndexSCartesian2{K}, ::IndexSCartesian2{K}) where {K} = IndexSCartesian2{K}()

# A static-channel index: `i` addresses the channel (1:K), `j` the parent element.
struct SCartesianIndex2{K}   # can't make <:AbstractCartesianIndex without N, and 2 would be a bit misleading
    i::Int
    j::Int
end
to_index(i::SCartesianIndex2) = i

# Lazy matrix of SCartesianIndex2 covering channels 1:K × parent indices.
struct SCartesianIndices2{K,R<:AbstractUnitRange{Int}} <: AbstractMatrix{SCartesianIndex2{K}}
    indices2::R
end
SCartesianIndices2{K}(indices2::AbstractUnitRange{Int}) where {K} = (@assert K::Int > 1; SCartesianIndices2{K,typeof(indices2)}(indices2))

eachindex(::IndexSCartesian2{K}, A::ReshapedReinterpretArray) where {K} = SCartesianIndices2{K}(eachindex(IndexLinear(), parent(A)))
@inline function eachindex(style::IndexSCartesian2{K}, A::AbstractArray, B::AbstractArray...) where {K}
    iter = eachindex(style, A)
    # All arrays iterated together must agree on their index set.
    _all_match_first(C->eachindex(style, C), iter, B...) || throw_eachindex_mismatch_indices(IndexSCartesian2{K}(), axes(A), axes.(B)...)
    return iter
end
277

UNCOV
278
# AbstractMatrix interface for the index iterator: K channels × parent indices.
size(iter::SCartesianIndices2{K}) where K = (K, length(iter.indices2))
axes(iter::SCartesianIndices2{K}) where K = (OneTo(K), iter.indices2)

first(iter::SCartesianIndices2{K}) where {K} = SCartesianIndex2{K}(1, first(iter.indices2))
last(iter::SCartesianIndices2{K}) where {K}  = SCartesianIndex2{K}(K, last(iter.indices2))

@inline function getindex(iter::SCartesianIndices2{K}, i::Int, j::Int) where {K}
    @boundscheck checkbounds(iter, i, j)
    return SCartesianIndex2{K}(i, iter.indices2[j])
end

# Iteration runs the channel index (1:K) innermost, then advances indices2.
function iterate(iter::SCartesianIndices2{K}) where {K}
    ret = iterate(iter.indices2)
    ret === nothing && return nothing
    item2, state2 = ret
    return SCartesianIndex2{K}(1, item2), (1, item2, state2)
end

function iterate(iter::SCartesianIndices2{K}, (state1, item2, state2)) where {K}
    if state1 < K
        # Still inside the current parent element: bump the channel index.
        item1 = state1 + 1
        return SCartesianIndex2{K}(item1, item2), (item1, item2, state2)
    end
    # Channels exhausted: advance to the next parent index and reset channel to 1.
    ret = iterate(iter.indices2, state2)
    ret === nothing && return nothing
    item2, state2 = ret
    return SCartesianIndex2{K}(1, item2), (1, item2, state2)
end

# @simd support: outer loop over parent indices, inner (length-K) over channels.
SimdLoop.simd_outer_range(iter::SCartesianIndices2) = iter.indices2
SimdLoop.simd_inner_length(::SCartesianIndices2{K}, ::Any) where K = K
@inline function SimdLoop.simd_index(::SCartesianIndices2{K}, Ilast::Int, I1::Int) where {K}
    SCartesianIndex2{K}(I1+1, Ilast)
end
312

313
# Under IndexSCartesian2, the reshaped view is indexed directly — no reshape needed.
_maybe_reshape(::IndexSCartesian2, A::ReshapedReinterpretArray, I...) = A

# fallbacks
function _getindex(::IndexSCartesian2, A::AbstractArray{T,N}, I::Vararg{Int, N}) where {T,N}
    @_propagate_inbounds_meta
    getindex(A, I...)
end
function _setindex!(::IndexSCartesian2, A::AbstractArray{T,N}, v, I::Vararg{Int, N}) where {T,N}
    @_propagate_inbounds_meta
    setindex!(A, v, I...)
end
# fallbacks for array types that use "pass-through" indexing (e.g., `IndexStyle(A) = IndexStyle(parent(A))`)
# but which don't handle SCartesianIndex2
function _getindex(::IndexSCartesian2, A::AbstractArray{T,N}, ind::SCartesianIndex2) where {T,N}
    @_propagate_inbounds_meta
    # Split the linear trailing index `ind.j` into cartesian subscripts.
    J = _ind2sub(tail(axes(A)), ind.j)
    getindex(A, ind.i, J...)
end
function _setindex!(::IndexSCartesian2, A::AbstractArray{T,N}, v, ind::SCartesianIndex2) where {T,N}
    @_propagate_inbounds_meta
    J = _ind2sub(tail(axes(A)), ind.j)
    setindex!(A, v, ind.i, J...)
end
# Pass-through wrappers delegate SCartesian2 iteration to their parent.
eachindex(style::IndexSCartesian2, A::AbstractArray) = eachindex(style, parent(A))
×
337

338
## AbstractArray interface

parent(a::ReinterpretArray) = a.parent
# Aliasing detection sees through the view to the parent's storage.
dataids(a::ReinterpretArray) = dataids(a.parent)
unaliascopy(a::NonReshapedReinterpretArray{T}) where {T} = reinterpret(T, unaliascopy(a.parent))
unaliascopy(a::ReshapedReinterpretArray{T}) where {T} = reinterpret(reshape, T, unaliascopy(a.parent))

# Non-reshaped: only the first dimension is rescaled by the element-size ratio.
function size(a::NonReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    psize = size(a.parent)
    size1 = issingletontype(T) ? psize[1] : div(psize[1]*sizeof(S), sizeof(T))
    tuple(size1, tail(psize)...)
end
# Reshaped: a leading dimension is added (smaller T) or dropped (larger T).
function size(a::ReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    psize = size(a.parent)
    sizeof(S) > sizeof(T) && return (div(sizeof(S), sizeof(T)), psize...)
    sizeof(S) < sizeof(T) && return tail(psize)
    return psize
end
size(a::NonReshapedReinterpretArray{T,0}) where {T} = ()

# axes mirror `size` above, preserving the parent's first-axis offset.
function axes(a::NonReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    paxs = axes(a.parent)
    f, l = first(paxs[1]), length(paxs[1])
    size1 = issingletontype(T) ? l : div(l*sizeof(S), sizeof(T))
    tuple(oftype(paxs[1], f:f+size1-1), tail(paxs)...)
end
function axes(a::ReshapedReinterpretArray{T,N,S} where {N}) where {T,S}
    paxs = axes(a.parent)
    sizeof(S) > sizeof(T) && return (OneTo(div(sizeof(S), sizeof(T))), paxs...)
    sizeof(S) < sizeof(T) && return tail(paxs)
    return paxs
end
axes(a::NonReshapedReinterpretArray{T,0}) where {T} = ()

has_offset_axes(a::ReinterpretArray) = has_offset_axes(a.parent)

elsize(::Type{<:ReinterpretArray{T}}) where {T} = sizeof(T)
# Pointer conversion goes through the parent's storage; the returned pointer is
# simply retyped from S to T.
cconvert(::Type{Ptr{T}}, a::ReinterpretArray{T,N,S} where N) where {T,S} = cconvert(Ptr{S}, a.parent)
unsafe_convert(::Type{Ptr{T}}, a::ReinterpretArray{T,N,S} where N) where {T,S} = Ptr{T}(unsafe_convert(Ptr{S},a.parent))
×
377

UNCOV
378
# Scalar getindex for a 0-dimensional non-reshaped view.
@propagate_inbounds function getindex(a::NonReshapedReinterpretArray{T,0,S}) where {T,S}
    if isprimitivetype(T) && isprimitivetype(S)
        # Both primitive: a direct scalar bit-reinterpret suffices.
        reinterpret(T, a.parent[])
    else
        # Otherwise fall back to the generic element path.
        a[firstindex(a)]
    end
end

# Whether element access can go through a raw pointer into the parent's
# storage: true for `Memory`, and for `Array` when the element sizes differ
# (recursing through reshape/contiguous-subarray/reinterpret wrappers).
check_ptr_indexable(a::ReinterpretArray, sz = elsize(a)) = check_ptr_indexable(parent(a), sz)
check_ptr_indexable(a::ReshapedArray, sz) = check_ptr_indexable(parent(a), sz)
check_ptr_indexable(a::FastContiguousSubArray, sz) = check_ptr_indexable(parent(a), sz)
check_ptr_indexable(a::Array, sz) = sizeof(eltype(a)) !== sz
check_ptr_indexable(a::Memory, sz) = true
check_ptr_indexable(a::AbstractArray, sz) = false

@propagate_inbounds getindex(a::ReinterpretArray) = a[firstindex(a)]

@propagate_inbounds isassigned(a::ReinterpretArray, inds::Integer...) = checkbounds(Bool, a, inds...) && (check_ptr_indexable(a) || _isassigned_ra(a, inds...))
@propagate_inbounds isassigned(a::ReinterpretArray, inds::SCartesianIndex2) = isassigned(a.parent, inds.j)
@propagate_inbounds _isassigned_ra(a::ReinterpretArray, inds...) = true # that is not entirely true, but computing exactly which indexes will be accessed in the parent requires a lot of duplication from the _getindex_ra code
×
398

399
# N-subscript getindex: dispatch to the pointer fast path when available,
# otherwise to the element-reassembly path.
@propagate_inbounds function getindex(a::ReinterpretArray{T,N,S}, inds::Vararg{Int, N}) where {T,N,S}
    check_readable(a)
    check_ptr_indexable(a) && return _getindex_ptr(a, inds...)
    _getindex_ra(a, inds[1], tail(inds))
end

# Linear-index getindex.
@propagate_inbounds function getindex(a::ReinterpretArray{T,N,S}, i::Int) where {T,N,S}
    check_readable(a)
    check_ptr_indexable(a) && return _getindex_ptr(a, i)
    if isa(IndexStyle(a), IndexLinear)
        return _getindex_ra(a, i, ())
    end
    # Convert to full indices here, to avoid needing multiple conversions in
    # the loop in _getindex_ra
    inds = _to_subscript_indices(a, i)
    isempty(inds) ? _getindex_ra(a, 1, ()) : _getindex_ra(a, inds[1], tail(inds))
end

# Channel-indexed getindex for reshaped views: load the parent element into a
# temporary, then read the ind.i-th T from its bytes.
@propagate_inbounds function getindex(a::ReshapedReinterpretArray{T,N,S}, ind::SCartesianIndex2) where {T,N,S}
    check_readable(a)
    s = Ref{S}(a.parent[ind.j])
    tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
    GC.@preserve s return unsafe_load(tptr, ind.i)
end

# Pointer fast path: compute the linear offset and load directly from the
# parent's memory, retyped to T.
@inline function _getindex_ptr(a::ReinterpretArray{T}, inds...) where {T}
    @boundscheck checkbounds(a, inds...)
    li = _to_linear_index(a, inds...)
    ap = cconvert(Ptr{T}, a)
    p = unsafe_convert(Ptr{T}, ap) + sizeof(T) * (li - 1)
    GC.@preserve ap return unsafe_load(p)
end
431

UNCOV
432
# Element-reassembly read for non-reshaped views: gather the bytes of the
# requested T element from one or more S elements of the parent.
@propagate_inbounds function _getindex_ra(a::NonReshapedReinterpretArray{T,N,S}, i1::Int, tailinds::TT) where {T,N,S,TT}
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            return T.instance
        end
        return reinterpret(T, a.parent[i1, tailinds...])
    else
        @boundscheck checkbounds(a, i1, tailinds...)
        # ind_start: how many whole S elements precede the requested T;
        # sidx: byte offset into the next S element where T's bytes begin.
        ind_start, sidx = divrem((i1-1)*sizeof(T), sizeof(S))
        # Optimizations that avoid branches
        if sizeof(T) % sizeof(S) == 0
            # T is bigger than S and contains an integer number of them
            n = sizeof(T) ÷ sizeof(S)
            t = Ref{T}()
            GC.@preserve t begin
                sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
                for i = 1:n
                    s = a.parent[ind_start + i, tailinds...]
                    unsafe_store!(sptr, s, i)
                end
            end
            return t[]
        elseif sizeof(S) % sizeof(T) == 0
            # S is bigger than T and contains an integer number of them
            s = Ref{S}(a.parent[ind_start + 1, tailinds...])
            GC.@preserve s begin
                tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
                return unsafe_load(tptr + sidx)
            end
        else
            i = 1
            nbytes_copied = 0
            # This is a bit complicated to deal with partial elements
            # at both the start and the end. LLVM will fold as appropriate,
            # once it knows the data layout
            s = Ref{S}()
            t = Ref{T}()
            GC.@preserve s t begin
                sptr = Ptr{S}(unsafe_convert(Ref{S}, s))
                tptr = Ptr{T}(unsafe_convert(Ref{T}, t))
                while nbytes_copied < sizeof(T)
                    s[] = a.parent[ind_start + i, tailinds...]
                    # Copy whatever part of this S element overlaps the target T.
                    nb = min(sizeof(S) - sidx, sizeof(T)-nbytes_copied)
                    memcpy(tptr + nbytes_copied, sptr + sidx, nb)
                    nbytes_copied += nb
                    # After the first (possibly partial) element, subsequent
                    # elements are consumed from their start.
                    sidx = 0
                    i += 1
                end
            end
            return t[]
        end
    end
end
487

UNCOV
488
# Element-reassembly read for reshaped views; the leading subscript addresses
# either the added channel dimension (small T) or the consumed one (large T).
@propagate_inbounds function _getindex_ra(a::ReshapedReinterpretArray{T,N,S}, i1::Int, tailinds::TT) where {T,N,S,TT}
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            return T.instance
        end
        return reinterpret(T, a.parent[i1, tailinds...])
    end
    @boundscheck checkbounds(a, i1, tailinds...)
    if sizeof(T) >= sizeof(S)
        t = Ref{T}()
        GC.@preserve t begin
            sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
            if sizeof(T) > sizeof(S)
                # Extra dimension in the parent array
                n = sizeof(T) ÷ sizeof(S)
                if isempty(tailinds) && IndexStyle(a.parent) === IndexLinear()
                    # Linear parent: the n source elements are consecutive.
                    offset = n * (i1 - firstindex(a))
                    for i = 1:n
                        s = a.parent[i + offset]
                        unsafe_store!(sptr, s, i)
                    end
                else
                    for i = 1:n
                        s = a.parent[i, i1, tailinds...]
                        unsafe_store!(sptr, s, i)
                    end
                end
            else
                # No extra dimension
                s = a.parent[i1, tailinds...]
                unsafe_store!(sptr, s)
            end
        end
        return t[]
    end
    # S is bigger than T and contains an integer number of them
    # n = sizeof(S) ÷ sizeof(T)
    s = Ref{S}()
    GC.@preserve s begin
        tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
        # i1 is the channel index into the single parent element at tailinds.
        s[] = a.parent[tailinds...]
        return unsafe_load(tptr, i1)
    end
end
534

UNCOV
535
# Scalar setindex! for a 0-dimensional non-reshaped view.
@propagate_inbounds function setindex!(a::NonReshapedReinterpretArray{T,0,S}, v) where {T,S}
    if isprimitivetype(S) && isprimitivetype(T)
        # Both primitive: store a direct scalar bit-reinterpret.
        a.parent[] = reinterpret(S, v)
        return a
    end
    setindex!(a, v, firstindex(a))
end

@propagate_inbounds setindex!(a::ReinterpretArray, v) = setindex!(a, v, firstindex(a))

# N-subscript setindex!: pointer fast path when available, otherwise the
# element-scatter path.
@propagate_inbounds function setindex!(a::ReinterpretArray{T,N,S}, v, inds::Vararg{Int, N}) where {T,N,S}
    check_writable(a)
    check_ptr_indexable(a) && return _setindex_ptr!(a, v, inds...)
    _setindex_ra!(a, v, inds[1], tail(inds))
end

# Linear-index setindex!.
@propagate_inbounds function setindex!(a::ReinterpretArray{T,N,S}, v, i::Int) where {T,N,S}
    check_writable(a)
    check_ptr_indexable(a) && return _setindex_ptr!(a, v, i)
    if isa(IndexStyle(a), IndexLinear)
        return _setindex_ra!(a, v, i, ())
    end
    inds = _to_subscript_indices(a, i)
    _setindex_ra!(a, v, inds[1], tail(inds))
end

# Channel-indexed setindex! for reshaped views: read-modify-write of the
# parent element containing the target T.
@propagate_inbounds function setindex!(a::ReshapedReinterpretArray{T,N,S}, v, ind::SCartesianIndex2) where {T,N,S}
    check_writable(a)
    v = convert(T, v)::T
    s = Ref{S}(a.parent[ind.j])
    GC.@preserve s begin
        tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
        unsafe_store!(tptr, v, ind.i)
    end
    a.parent[ind.j] = s[]
    return a
end
572

UNCOV
573
# Pointer fast path for writes: compute the linear offset and store directly
# into the parent's memory, retyped to T.
@inline function _setindex_ptr!(a::ReinterpretArray{T}, v, inds...) where {T}
    @boundscheck checkbounds(a, inds...)
    li = _to_linear_index(a, inds...)
    ap = cconvert(Ptr{T}, a)
    p = unsafe_convert(Ptr{T}, ap) + sizeof(T) * (li - 1)
    GC.@preserve ap unsafe_store!(p, v)
    return a
end
581

UNCOV
582
# Element-scatter write for non-reshaped views: spread the bytes of `v` over
# the one or more S elements of the parent that it overlaps.
@propagate_inbounds function _setindex_ra!(a::NonReshapedReinterpretArray{T,N,S}, v, i1::Int, tailinds::TT) where {T,N,S,TT}
    v = convert(T, v)::T
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            # setindex! is a noop except for the index check
        else
            setindex!(a.parent, reinterpret(S, v), i1, tailinds...)
        end
    else
        @boundscheck checkbounds(a, i1, tailinds...)
        # ind_start: whole S elements before the target T; sidx: byte offset
        # within the next S element where T's bytes begin.
        ind_start, sidx = divrem((i1-1)*sizeof(T), sizeof(S))
        # Optimizations that avoid branches
        if sizeof(T) % sizeof(S) == 0
            # T is bigger than S and contains an integer number of them
            t = Ref{T}(v)
            GC.@preserve t begin
                sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
                n = sizeof(T) ÷ sizeof(S)
                for i = 1:n
                    s = unsafe_load(sptr, i)
                    a.parent[ind_start + i, tailinds...] = s
                end
            end
        elseif sizeof(S) % sizeof(T) == 0
            # S is bigger than T and contains an integer number of them
            # Read-modify-write: only T's bytes inside the S element change.
            s = Ref{S}(a.parent[ind_start + 1, tailinds...])
            GC.@preserve s begin
                tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
                unsafe_store!(tptr + sidx, v)
                a.parent[ind_start + 1, tailinds...] = s[]
            end
        else
            t = Ref{T}(v)
            s = Ref{S}()
            GC.@preserve t s begin
                tptr = Ptr{UInt8}(unsafe_convert(Ref{T}, t))
                sptr = Ptr{UInt8}(unsafe_convert(Ref{S}, s))
                nbytes_copied = 0
                i = 1
                # Deal with any partial elements at the start. We'll have to copy in the
                # element from the original array and overwrite the relevant parts
                if sidx != 0
                    s[] = a.parent[ind_start + i, tailinds...]
                    nb = min((sizeof(S) - sidx) % UInt, sizeof(T) % UInt)
                    memcpy(sptr + sidx, tptr, nb)
                    nbytes_copied += nb
                    a.parent[ind_start + i, tailinds...] = s[]
                    i += 1
                    sidx = 0
                end
                # Deal with the main body of elements
                while nbytes_copied < sizeof(T) && (sizeof(T) - nbytes_copied) > sizeof(S)
                    nb = min(sizeof(S), sizeof(T) - nbytes_copied)
                    memcpy(sptr, tptr + nbytes_copied, nb)
                    nbytes_copied += nb
                    a.parent[ind_start + i, tailinds...] = s[]
                    i += 1
                end
                # Deal with trailing partial elements
                if nbytes_copied < sizeof(T)
                    # Read-modify-write the final S element so bytes past the
                    # end of T are preserved.
                    s[] = a.parent[ind_start + i, tailinds...]
                    nb = min(sizeof(S), sizeof(T) - nbytes_copied)
                    memcpy(sptr, tptr + nbytes_copied, nb)
                    a.parent[ind_start + i, tailinds...] = s[]
                end
            end
        end
    end
    return a
end
654

UNCOV
655
# Scalar `setindex!` backend for a reshaped reinterpret array: stores `v`,
# converted to `T`, at logical position (i1, tailinds...) by writing its bytes
# into the corresponding element(s) of the parent array (eltype `S`).
@propagate_inbounds function _setindex_ra!(a::ReshapedReinterpretArray{T,N,S}, v, i1::Int, tailinds::TT) where {T,N,S,TT}
    v = convert(T, v)::T
    # Make sure to match the scalar reinterpret if that is applicable
    if sizeof(T) == sizeof(S) && (fieldcount(T) + fieldcount(S)) == 0
        if issingletontype(T) # singleton types
            @boundscheck checkbounds(a, i1, tailinds...)
            # setindex! is a noop except for the index check
        else
            setindex!(a.parent, reinterpret(S, v), i1, tailinds...)
        end
        # NOTE(review): this fast path falls through to the generic pointer
        # path below rather than returning; the subsequent code appears to
        # store the same value again. Confirm whether an early `return a`
        # was intended here.
    end
    @boundscheck checkbounds(a, i1, tailinds...)
    if sizeof(T) >= sizeof(S)
        # One `T` spans one or more whole `S` elements of the parent.
        t = Ref{T}(v)
        GC.@preserve t begin
            sptr = Ptr{S}(unsafe_convert(Ref{T}, t))
            if sizeof(T) > sizeof(S)
                # Extra dimension in the parent array
                n = sizeof(T) ÷ sizeof(S)
                if isempty(tailinds) && IndexStyle(a.parent) === IndexLinear()
                    # Linear-indexing fast path: the n target elements are
                    # contiguous in the parent.
                    offset = n * (i1 - firstindex(a))
                    for i = 1:n
                        s = unsafe_load(sptr, i)
                        a.parent[i + offset] = s
                    end
                else
                    for i = 1:n
                        s = unsafe_load(sptr, i)
                        a.parent[i, i1, tailinds...] = s
                    end
                end
            else # sizeof(T) == sizeof(S)
                # No extra dimension
                s = unsafe_load(sptr)
                a.parent[i1, tailinds...] = s
            end
        end
    else
        # S is bigger than T and contains an integer number of them
        # Read-modify-write: load the whole S element, overwrite the i1-th
        # T-sized slot inside it, then store the S element back.
        s = Ref{S}()
        GC.@preserve s begin
            tptr = Ptr{T}(unsafe_convert(Ref{S}, s))
            s[] = a.parent[tailinds...]
            unsafe_store!(tptr, v, i1)
            a.parent[tailinds...] = s[]
        end
    end
    return a
end
704

705
# Padding
# Describes one run of padding bytes within a type's byte layout.
struct Padding
    offset::Int # 0-indexed offset of the next valid byte; sizeof(T) indicates trailing padding
    size::Int   # bytes of padding before a valid byte
end

# Intersection of two paddings, treating each as the half-open byte range
# [offset, offset + size). Disjoint inputs produce a zero-size Padding
# anchored at the later offset.
function intersect(p1::Padding, p2::Padding)
    lo = max(p1.offset, p2.offset)
    hi = min(p1.offset + p1.size, p2.offset + p2.size)
    return Padding(lo, max(0, hi - lo))
end
715

716
# Thrown when the padding layouts of two types make a reinterpret invalid.
struct PaddingError <: Exception
    S::Type
    T::Type
end

# Render the error message; prints the two offending types.
function showerror(io::IO, p::PaddingError)
    print(io, "Padding of type ", p.S, " is not compatible with type ", p.T, ".")
end
724

725
"""
726
    CyclePadding(padding, total_size)
727

728
Cycles an iterator of `Padding` structs, restarting the padding at `total_size`.
729
E.g. if `padding` is all the padding in a struct and `total_size` is the total
730
aligned size of that array, `CyclePadding` will correspond to the padding in an
731
infinite vector of such structs.
732
"""
733
struct CyclePadding{P}
734
    padding::P
735
    total_size::Int
736
end
737
# Iteration traits: a CyclePadding yields `Padding` values indefinitely (the
# pattern repeats forever), and is empty exactly when its one-period padding
# collection is empty.
eltype(::Type{<:CyclePadding}) = Padding
IteratorSize(::Type{<:CyclePadding}) = IsInfinite()
isempty(cp::CyclePadding) = isempty(cp.padding)
UNCOV
740
# Start iterating a CyclePadding: the first pass over the underlying padding
# carries a cumulative offset shift of 0, so elements come through unshifted.
function iterate(cp::CyclePadding)
    first_step = iterate(cp.padding)
    first_step === nothing && return nothing
    pad, inner_state = first_step
    return pad, (0, inner_state)
end
UNCOV
745
# Advance a CyclePadding. `state` is `(shift, inner_state...)`: `shift` is the
# offset accumulated over completed passes; the (possibly absent) remainder is
# the underlying iterator's state.
function iterate(cp::CyclePadding, state::Tuple)
    shift = state[1]
    nxt = iterate(cp.padding, tail(state)...)
    if nxt === nothing
        # One full period consumed: restart the inner iterator with the
        # shift advanced by one period (`total_size` bytes).
        return iterate(cp, (shift + cp.total_size,))
    end
    pad = nxt[1]
    return Padding(pad.offset + shift, pad.size), (shift, tail(nxt)...)
end
750

751
"""
752
    Compute the location of padding in an isbits datatype. Recursive over the fields of that type.
753
"""
UNCOV
754
@assume_effects :foldable function padding(T::DataType, baseoffset::Int = 0)
×
UNCOV
755
    pads = Padding[]
×
756
    last_end::Int = baseoffset
×
UNCOV
757
    for i = 1:fieldcount(T)
×
UNCOV
758
        offset = baseoffset + Int(fieldoffset(T, i))
×
UNCOV
759
        fT = fieldtype(T, i)
×
UNCOV
760
        append!(pads, padding(fT, offset))
×
UNCOV
761
        if offset != last_end
×
UNCOV
762
            push!(pads, Padding(offset, offset-last_end))
×
763
        end
UNCOV
764
        last_end = offset + sizeof(fT)
×
UNCOV
765
    end
×
UNCOV
766
    if 0 < last_end - baseoffset < sizeof(T)
×
UNCOV
767
        push!(pads, Padding(baseoffset + sizeof(T), sizeof(T) - last_end + baseoffset))
×
768
    end
UNCOV
769
    return Core.svec(pads...)
×
770
end
771

UNCOV
772
function CyclePadding(T::DataType)
×
UNCOV
773
    a, s = datatype_alignment(T), sizeof(T)
×
UNCOV
774
    as = s + (a - (s % a)) % a
×
UNCOV
775
    pad = padding(T)
×
UNCOV
776
    if s != as
×
777
        pad = Core.svec(pad..., Padding(s, as - s))
×
778
    end
UNCOV
779
    CyclePadding(pad, as)
×
780
end
781

UNCOV
782
# Decide whether an array of `T` may expose its bytes as an array of `S`
# without revealing padding: every padding region of `T` must lie inside a
# padding region of `S`, checked over one full period
# (`lcm(sizeof(S), sizeof(T))` bytes) of the two repeating layouts.
@assume_effects :total function array_subpadding(S, T)
    lcm_size = lcm(sizeof(S), sizeof(T))
    s, t = CyclePadding(S), CyclePadding(T)
    checked_size = 0
    # use of Stateful harms inference and makes this vulnerable to invalidation
    (pad, tstate) = let
        it = iterate(t)
        # T has no padding at all, so nothing can be exposed.
        it === nothing && return true
        it
    end
    (ps, sstate) = let
        it = iterate(s)
        # T has padding but S has none, so T's padding cannot be covered.
        it === nothing && return false
        it
    end
    # Both iterators are infinite cycles from here on, so the destructuring
    # assignments below never see `nothing`.
    while checked_size < lcm_size
        while true
            # See if there's corresponding padding in S
            ps.offset > pad.offset && return false
            intersect(ps, pad) == pad && break
            ps, sstate = iterate(s, sstate)
        end
        checked_size = pad.offset + pad.size
        pad, tstate = iterate(t, tstate)
    end
    return true
end
809

UNCOV
810
# Two struct types can be byte-copied into one another exactly when their
# padding layouts coincide.
@assume_effects :foldable struct_subpadding(::Type{Out}, ::Type{In}) where {Out, In} =
    padding(Out) == padding(In)
813

UNCOV
814
# Number of data (non-padding) bytes in `T`'s layout.
@assume_effects :foldable function packedsize(::Type{T}) where T
    padbytes = 0
    for p in padding(T)
        padbytes += p.size
    end
    return sizeof(T) - padbytes
end
818

UNCOV
819
# A type is "packed" when its layout contains no padding bytes at all.
@assume_effects :foldable function ispacked(::Type{T}) where T
    return isempty(padding(T))
end
×
820

UNCOV
821
# Copy a (possibly padded) value of type `In` at `ptr_in` into a packed byte
# layout at `ptr_out`: field data bytes are written contiguously, skipping
# padding. Recurses into fields whose own layout contains padding.
function _copytopacked!(ptr_out::Ptr{Out}, ptr_in::Ptr{In}) where {Out, In}
    writeoffset = 0
    for i ∈ 1:fieldcount(In)
        readoffset = fieldoffset(In, i)
        fT = fieldtype(In, i)
        if ispacked(fT)
            # Field has no internal padding: copy its bytes wholesale.
            readsize = sizeof(fT)
            memcpy(ptr_out + writeoffset, ptr_in + readoffset, readsize)
            writeoffset += readsize
        else # nested padded type
            _copytopacked!(ptr_out + writeoffset, Ptr{fT}(ptr_in + readoffset))
            writeoffset += packedsize(fT)
        end
    end
end
836

UNCOV
837
# Inverse of `_copytopacked!`: read contiguous data bytes from the packed
# layout at `ptr_in` and scatter them to the field offsets of `Out` at
# `ptr_out`, leaving padding bytes untouched. Recurses into padded fields.
function _copyfrompacked!(ptr_out::Ptr{Out}, ptr_in::Ptr{In}) where {Out, In}
    readoffset = 0
    for i ∈ 1:fieldcount(Out)
        writeoffset = fieldoffset(Out, i)
        fT = fieldtype(Out, i)
        if ispacked(fT)
            # Field has no internal padding: copy its bytes wholesale.
            writesize = sizeof(fT)
            memcpy(ptr_out + writeoffset, ptr_in + readoffset, writesize)
            readoffset += writesize
        else # nested padded type
            _copyfrompacked!(Ptr{fT}(ptr_out + writeoffset), ptr_in + readoffset)
            readoffset += packedsize(fT)
        end
    end
end
852

UNCOV
853
# Bitwise conversion of `x::In` to an `Out`. Both must be isbits and carry the
# same number of data (non-padding) bytes; the two types may lay out those
# bytes differently.
@inline function _reinterpret(::Type{Out}, x::In) where {Out, In}
    # handle non-primitive types
    isbitstype(Out) || throw(ArgumentError("Target type for `reinterpret` must be isbits"))
    isbitstype(In) || throw(ArgumentError("Source type for `reinterpret` must be isbits"))
    inpackedsize = packedsize(In)
    outpackedsize = packedsize(Out)
    # The data bytes must correspond one-to-one between the two layouts.
    inpackedsize == outpackedsize ||
        throw(ArgumentError(LazyString("Packed sizes of types ", Out, " and ", In,
            " do not match; got ", outpackedsize, " and ", inpackedsize, ", respectively.")))
    in = Ref{In}(x)
    out = Ref{Out}()
    if struct_subpadding(Out, In)
        # if packed the same, just copy
        GC.@preserve in out begin
            ptr_in = unsafe_convert(Ptr{In}, in)
            ptr_out = unsafe_convert(Ptr{Out}, out)
            memcpy(ptr_out, ptr_in, sizeof(Out))
        end
        return out[]
    else
        # mismatched padding
        GC.@preserve in out begin
            ptr_in = unsafe_convert(Ptr{In}, in)
            ptr_out = unsafe_convert(Ptr{Out}, out)

            if fieldcount(In) > 0 && ispacked(Out)
                # Output has no padding: compact the input's data bytes into it.
                _copytopacked!(ptr_out, ptr_in)
            elseif fieldcount(Out) > 0 && ispacked(In)
                # Input has no padding: scatter its bytes into the padded output.
                _copyfrompacked!(ptr_out, ptr_in)
            else
                # General case: go through an intermediate packed byte buffer.
                packed = Ref{NTuple{inpackedsize, UInt8}}()
                GC.@preserve packed begin
                    ptr_packed = unsafe_convert(Ptr{NTuple{inpackedsize, UInt8}}, packed)
                    _copytopacked!(ptr_packed, ptr_in)
                    _copyfrompacked!(ptr_out, ptr_packed)
                end
            end
        end
        return out[]
    end
end
894

895

896
# Reductions with IndexSCartesian2
897

UNCOV
898
# mapreduce entry point for arrays indexed with the IndexSCartesian2 style.
function _mapreduce(f::F, op::OP, style::IndexSCartesian2{K}, A::AbstractArrayOrBroadcasted) where {F,OP,K}
    inds = eachindex(style, A)
    ncols = size(inds)[2]
    # No columns means nothing to reduce; defer to the empty-case handler.
    ncols == 0 && return mapreduce_empty_iter(f, op, A, IteratorEltype(A))
    return mapreduce_impl(f, op, A, first(inds), last(inds))
end
907

UNCOV
908
# Pairwise mapreduce over a span of SCartesianIndex2 indices: columns of
# height K (i ∈ 1:K within each j). Spans of fewer than `blksize` columns are
# reduced sequentially; larger spans are halved and combined with `op`
# (pairwise reduction).
@noinline function mapreduce_impl(f::F, op::OP, A::AbstractArrayOrBroadcasted,
                                  ifirst::SCI, ilast::SCI, blksize::Int) where {F,OP,SCI<:SCartesianIndex2{K}} where K
    if ilast.j - ifirst.j < blksize
        # sequential portion
        # NOTE(review): the two seed reads take elements (ifirst.i, ifirst.j)
        # and (2, ifirst.j), and the loop resumes at ifirst.i + 2 — this
        # assumes ifirst.i == 1 and K >= 2; confirm callers always split at
        # column boundaries.
        @inbounds a1 = A[ifirst]
        @inbounds a2 = A[SCI(2,ifirst.j)]
        v = op(f(a1), f(a2))
        # Finish the first column after the two seed elements.
        @simd for i = ifirst.i + 2 : K
            @inbounds ai = A[SCI(i,ifirst.j)]
            v = op(v, f(ai))
        end
        # Remaining columns
        for j = ifirst.j+1 : ilast.j
            @simd for i = 1:K
                @inbounds ai = A[SCI(i,j)]
                v = op(v, f(ai))
            end
        end
        return v
    else
        # pairwise portion
        jmid = ifirst.j + (ilast.j - ifirst.j) >> 1
        v1 = mapreduce_impl(f, op, A, ifirst, SCI(K,jmid), blksize)
        v2 = mapreduce_impl(f, op, A, SCI(1,jmid+1), ilast, blksize)
        return op(v1, v2)
    end
end
935

UNCOV
936
# Entry point without an explicit block size: use the standard pairwise
# blocking threshold for `f`/`op`.
function mapreduce_impl(f::F, op::OP, A::AbstractArrayOrBroadcasted, ifirst::SCartesianIndex2, ilast::SCartesianIndex2) where {F,OP}
    return mapreduce_impl(f, op, A, ifirst, ilast, pairwise_blocksize(f, op))
end
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc