• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

JuliaLang / julia / #37997

29 Jan 2025 02:08AM UTC coverage: 17.283% (-68.7%) from 85.981%
#37997

push

local

web-flow
bpart: Start enforcing min_world for global variable definitions (#57150)

This is the analog of #57102 for global variables. Unlike for constants,
there is no automatic global backdate mechanism. The reasoning for this
is that global variables can be declared at any time, unlike constants
which can only be declared once their value is available. As a result,
code patterns using `Core.eval` to declare globals are rarer and likely
incorrect.

1 of 22 new or added lines in 3 files covered. (4.55%)

31430 existing lines in 188 files now uncovered.

7903 of 45728 relevant lines covered (17.28%)

98663.7 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

16.27
/base/subarray.jl
1
# This file is a part of Julia. License is MIT: https://julialang.org/license
2

3
abstract type AbstractCartesianIndex{N} end # This is a hacky forward declaration for CartesianIndex
4
const ViewIndex = Union{Real, AbstractArray}
5
const ScalarIndex = Real
6

7
"""
8
    SubArray{T,N,P,I,L} <: AbstractArray{T,N}
9

10
`N`-dimensional view into a parent array (of type `P`) with an element type `T`, restricted by a tuple of indices (of type `I`). `L` is true for types that support fast linear indexing, and `false` otherwise.
11

12
Construct `SubArray`s using the [`view`](@ref) function.
13
"""
14
struct SubArray{T,N,P,I,L} <: AbstractArray{T,N}
15
    parent::P
16
    indices::I
17
    offset1::Int       # for linear indexing and pointer, only valid when L==true
18
    stride1::Int       # used only for linear indexing
19
    function SubArray{T,N,P,I,L}(parent, indices, offset1, stride1) where {T,N,P,I,L}
20
        @inline
2✔
21
        check_parent_index_match(parent, indices)
2✔
22
        new(parent, indices, offset1, stride1)
404,027✔
23
    end
24
end
25
# Compute the linear indexability of the indices, and combine it with the
# linear indexing style of the parent to select the SubArray specialization.
function SubArray(parent::AbstractArray, indices::Tuple)
    @inline
    style = IndexStyle(viewindexing(indices), IndexStyle(parent))
    return SubArray(style, parent, ensure_indexable(indices), index_dimsum(indices...))
end
30
# Cartesian (slow) case: no stride/offset precomputation applies, so both are 0
# and the L parameter is false.
function SubArray(::IndexCartesian, parent::P, indices::I, ::NTuple{N,Any}) where {P,I,N}
    @inline
    return SubArray{eltype(P), N, P, I, false}(parent, indices, 0, 0)
end
34
# Linear (fast) case: precompute the stride and offset used by linear indexing.
function SubArray(::IndexLinear, parent::P, indices::I, ::NTuple{N,Any}) where {P,I,N}
    @inline
    str1 = compute_stride1(parent, indices)
    off1 = compute_offset1(parent, str1, indices)
    return SubArray{eltype(P), N, P, I, true}(parent, indices, off1, str1)
end
40

41
# Verify that the number of effective indices matches the parent's dimensionality.
function check_parent_index_match(parent, indices)
    return check_parent_index_match(parent, index_ndims(indices...))
end
# Matching dimensionality: nothing to do.
check_parent_index_match(parent::AbstractArray{T,N}, ::NTuple{N, Bool}) where {T,N} = nothing
# Mismatch: report both counts.
check_parent_index_match(parent, ::NTuple{N, Bool}) where {N} =
    throw(ArgumentError("number of indices ($N) must match the parent dimensionality ($(ndims(parent)))"))
45

46
# Compute the linear-indexing compatibility of a tuple of indices:
# IndexLinear() only for combinations that traverse memory with a fixed stride.
viewindexing(I::Tuple{}) = IndexLinear()
# A leading scalar index merely shifts the starting offset
function viewindexing(I::Tuple{ScalarIndex, Vararg{Any}})
    @inline
    return viewindexing(tail(I))
end
# Slices may begin a contiguous section, possibly followed by further Slices
function viewindexing(I::Tuple{Slice, Slice, Vararg{Any}})
    @inline
    return viewindexing(tail(I))
end
# A UnitRange can follow Slices, but only if all remaining indices are scalar
viewindexing(I::Tuple{Slice, AbstractUnitRange, Vararg{ScalarIndex}}) = IndexLinear()
viewindexing(I::Tuple{Slice, Slice, Vararg{ScalarIndex}}) = IndexLinear() # disambiguate
# Scalar ranges are only fast when every other index is scalar; ranges of
# `CartesianIndex`es are not fast even when followed by `ScalarIndex`es
viewindexing(I::Tuple{AbstractRange{<:ScalarIndex}, Vararg{ScalarIndex}}) = IndexLinear()
# Every other index combination falls back to cartesian (slow) indexing ...
viewindexing(I::Tuple{Vararg{Any}}) = IndexCartesian()
# ... and so do all other array types
viewindexing(I::Tuple{AbstractArray, Vararg{Any}}) = IndexCartesian()
63

64
# Simple utilities
function size(V::SubArray)
    @inline
    return map(length, axes(V))
end

# Allocate from the parent so the result matches the parent's array family.
similar(V::SubArray, T::Type, dims::Dims) = similar(V.parent, T, dims)

sizeof(V::SubArray) = length(V) * sizeof(eltype(V))
sizeof(V::SubArray{<:Any,<:Any,<:Array}) = length(V) * elsize(V.parent)
71

UNCOV
72
function Base.copy(V::SubArray)
    # Materialize the view by indexing the parent with the stored indices.
    val = V.parent[V.indices...]
    # For N > 0, indexing already produced a new array; return it directly.
    ndims(V) == 0 || return val
    # A zero-dimensional view indexes down to a scalar; rewrap it in a
    # zero-dimensional container of the appropriate type.
    out = similar(V)
    out[] = val
    return out
end
79

UNCOV
80
# Accessors for the view's stored parent and index tuple.
parent(V::SubArray) = V.parent
parentindices(V::SubArray) = V.indices

"""
    parentindices(A)

Return the indices in the [`parent`](@ref) which correspond to the view `A`.

# Examples
```jldoctest
julia> A = [1 2; 3 4];

julia> V = view(A, 1, :)
2-element view(::Matrix{Int64}, 1, :) with eltype Int64:
 1
 2

julia> parentindices(V)
(1, Base.Slice(Base.OneTo(2)))
```
"""
function parentindices end

# Fallback: any array is a "view" of itself indexed by its full size.
parentindices(a::AbstractArray) = map(oneto, size(a))
104

105
## Aliasing detection
dataids(A::SubArray) = (dataids(A.parent)..., _splatmap(dataids, A.indices)...)
# Map `g` over a tuple, splatting every result into one flat tuple.
_splatmap(g, ::Tuple{}) = ()
_splatmap(g, t::Tuple) = (g(t[1])..., _splatmap(g, tail(t))...)
# Generic fallback: unalias the parent and every index, preserving offsets.
unaliascopy(A::SubArray) = typeof(A)(unaliascopy(A.parent), map(unaliascopy, A.indices), A.offset1, A.stride1)
110

111
# When the parent is an Array we can trim the size down a bit. In the future this
# could possibly be extended to any mutable array.
function unaliascopy(V::SubArray{T,N,A,I,LD}) where {T,N,A<:Array,I<:Tuple{Vararg{Union{ScalarIndex,AbstractRange{<:ScalarIndex},Array{<:Union{ScalarIndex,AbstractCartesianIndex}}}}},LD}
    # Allocate a destination just large enough to cover the extreme index used
    # along each dimension, rather than copying the whole parent.
    dest = Array{T}(undef, _trimmedshape(V.indices...))
    trimmedpind = _trimmedpind(V.indices...)
    # If the trimmed parent indices are all full slices/colons, write into
    # `dest` directly; otherwise go through a view of `dest`.
    vdest = trimmedpind isa Tuple{Vararg{Union{Slice,Colon}}} ? dest : view(dest, trimmedpind...)
    copyto!(vdest, view(V, _trimmedvind(V.indices...)...))
    # Re-express the original indices as "dense" indices into `dest`.
    indices = map(_trimmedindex, V.indices)
    # Recompute the linear-indexing cache only when L (LD) is true.
    stride1 = LD ? compute_stride1(dest, indices) : 0
    offset1 = LD ? compute_offset1(dest, stride1, indices) : 0
    SubArray{T,N,A,I,LD}(dest, indices, offset1, stride1)
end
123
# Compute the trimmed destination shape, one dimension per effective index
_trimmedshape(::ScalarIndex, tail_I...) = (1, _trimmedshape(tail_I...)...)
_trimmedshape(i::AbstractRange, tail_I...) = (isempty(i) ? zero(eltype(i)) : maximum(i), _trimmedshape(tail_I...)...)
_trimmedshape(i::Union{UnitRange,StepRange,OneTo}, tail_I...) = (length(i), _trimmedshape(tail_I...)...)
_trimmedshape(i::AbstractArray{<:ScalarIndex}, tail_I...) = (length(i), _trimmedshape(tail_I...)...)
_trimmedshape(i::AbstractArray{<:AbstractCartesianIndex{0}}, tail_I...) = _trimmedshape(tail_I...)
_trimmedshape(i::AbstractArray{<:AbstractCartesianIndex{N}}, tail_I...) where {N} = (length(i), ntuple(Returns(1), Val(N - 1))..., _trimmedshape(tail_I...)...)
_trimmedshape() = ()
# We can avoid the repetition from `AbstractArray{CartesianIndex{0}}`
_trimmedpind(i, tail_I...) = (map(Returns(:), axes(i))..., _trimmedpind(tail_I...)...)
_trimmedpind(i::AbstractRange, tail_I...) = (i, _trimmedpind(tail_I...)...)
_trimmedpind(i::Union{UnitRange,StepRange,OneTo}, tail_I...) = ((:), _trimmedpind(tail_I...)...)
_trimmedpind(i::AbstractArray{<:AbstractCartesianIndex{0}}, tail_I...) = _trimmedpind(tail_I...)
_trimmedpind() = ()
_trimmedvind(i, tail_I...) = (map(Returns(:), axes(i))..., _trimmedvind(tail_I...)...)
_trimmedvind(i::AbstractArray{<:AbstractCartesianIndex{0}}, tail_I...) = (map(first, axes(i))..., _trimmedvind(tail_I...)...)
_trimmedvind() = ()
# Transform indices to be "dense"
_trimmedindex(i::ScalarIndex) = oftype(i, 1)
_trimmedindex(i::AbstractRange) = i
_trimmedindex(i::Union{UnitRange,StepRange,OneTo}) = oftype(i, oneto(length(i)))
_trimmedindex(i::AbstractArray{<:ScalarIndex}) = oftype(i, reshape(eachindex(IndexLinear(), i), axes(i)))
_trimmedindex(i::AbstractArray{<:AbstractCartesianIndex{0}}) = oftype(i, copy(i))
function _trimmedindex(i::AbstractArray{<:AbstractCartesianIndex{N}}) where {N}
    # Densify multidimensional CartesianIndex arrays: linear positions in the
    # first dimension, singleton padding in the remaining N-1 dimensions.
    pad = ntuple(Returns(1), Val(N - 1))
    linear = eachindex(IndexLinear(), i)
    return oftype(i, reshape(CartesianIndices((linear, pad...)), axes(i)))
end
151
## SubArray creation
# We always assume that the dimensionality of the parent matches the number of
# indices that end up getting passed to it, so we store the parent as a
# ReshapedArray view if necessary. The trouble is that arrays of `CartesianIndex`
# can make the number of effective indices not equal to length(I).
_maybe_reshape_parent(A::AbstractArray, ::NTuple{1, Bool}) = reshape(A, Val(1))
_maybe_reshape_parent(A::AbstractArray{<:Any,1}, ::NTuple{1, Bool}) = reshape(A, Val(1))
_maybe_reshape_parent(A::AbstractArray{<:Any,N}, ::NTuple{N, Bool}) where {N} = A
_maybe_reshape_parent(A::AbstractArray, ::NTuple{N, Bool}) where {N} = reshape(A, Val(N))
# The trailing singleton indices could be eliminated after bounds checking.
# Keep J1 and consume as many parent dimensions as it spans:
rm_singleton_indices(ndims::Tuple, J1, Js...) = (J1, rm_singleton_indices(IteratorsMD._splitrest(ndims, index_ndims(J1)), Js...)...)
# Past the parent's dimensionality: drop trailing scalar (singleton) indices.
rm_singleton_indices(::Tuple{}, ::ScalarIndex, Js...) = rm_singleton_indices((), Js...)
# Out of indices: done.
rm_singleton_indices(::Tuple) = ()
164

165
"""
166
    view(A, inds...)
167

168
Like [`getindex`](@ref), but returns a lightweight array that lazily references
169
(or is effectively a _view_ into) the parent array `A` at the given index or indices
170
`inds` instead of eagerly extracting elements or constructing a copied subset.
171
Calling [`getindex`](@ref) or [`setindex!`](@ref) on the returned value
172
(often a [`SubArray`](@ref)) computes the indices to access or modify the
173
parent array on the fly.  The behavior is undefined if the shape of the parent array is
174
changed after `view` is called because there is no bound check for the parent array; e.g.,
175
it may cause a segmentation fault.
176

177
Some immutable parent arrays (like ranges) may choose to simply
178
recompute a new array in some circumstances instead of returning
179
a `SubArray` if doing so is efficient and provides compatible semantics.
180

181
!!! compat "Julia 1.6"
182
    In Julia 1.6 or later, `view` can be called on an `AbstractString`, returning a
183
    `SubString`.
184

185
# Examples
186
```jldoctest
187
julia> A = [1 2; 3 4]
188
2×2 Matrix{Int64}:
189
 1  2
190
 3  4
191

192
julia> b = view(A, :, 1)
193
2-element view(::Matrix{Int64}, :, 1) with eltype Int64:
194
 1
195
 3
196

197
julia> fill!(b, 0)
198
2-element view(::Matrix{Int64}, :, 1) with eltype Int64:
199
 0
200
 0
201

202
julia> A # Note A has changed even though we modified b
203
2×2 Matrix{Int64}:
204
 0  2
205
 0  4
206

207
julia> view(2:5, 2:3) # returns a range as type is immutable
208
3:4
209
```
210
"""
211
function view(A::AbstractArray, I::Vararg{Any,M}) where {M}
    @inline
    # Canonicalize the user-supplied indices and unalias each of them from A.
    idxs = map(i->unalias(A,i), to_indices(A, I))
    @boundscheck checkbounds(A, idxs...)
    # Trailing singleton indices can be dropped once bounds have been checked.
    trimmed = rm_singleton_indices(ntuple(Returns(true), Val(ndims(A))), idxs...)
    return unsafe_view(_maybe_reshape_parent(A, index_ndims(trimmed...)), trimmed...)
end
218

219
# Ranges implement getindex to return recomputed ranges; use that for views, too (when possible)
view(r1::AbstractUnitRange, r2::AbstractUnitRange{<:Integer}) = (@_propagate_inbounds_meta; getindex(r1, r2))
view(r1::AbstractUnitRange, r2::StepRange{<:Integer}) = (@_propagate_inbounds_meta; getindex(r1, r2))
view(r1::StepRange, r2::AbstractRange{<:Integer}) = (@_propagate_inbounds_meta; getindex(r1, r2))
view(r1::StepRangeLen, r2::OrdinalRange{<:Integer}) = (@_propagate_inbounds_meta; getindex(r1, r2))
view(r1::LinRange, r2::OrdinalRange{<:Integer}) = (@_propagate_inbounds_meta; getindex(r1, r2))

# getindex(r::AbstractRange, ::Colon) returns a copy of the range, and we may do the same for a view
view(r1::AbstractRange, c::Colon) = (@_propagate_inbounds_meta; getindex(r1, c))
246

247
function unsafe_view(A::AbstractArray, I::Vararg{ViewIndex,N}) where {N}
    @inline
    return SubArray(A, I)
end
# When we take the view of a view, it's often possible to "reindex" the parent
# view's indices such that we can "pop" the parent view and keep just one layer
# of indirection. But we can't always do this because arrays of `CartesianIndex`
# might span multiple parent indices, making the reindex calculation very hard.
# So we use _maybe_reindex to figure out if there are any arrays of
# `CartesianIndex`, and if so, we punt and keep two layers of indirection.
unsafe_view(V::SubArray, I::Vararg{ViewIndex,N}) where {N} =
    (@inline; _maybe_reindex(V, I))
_maybe_reindex(V, I) = (@inline; _maybe_reindex(V, I, I))
# Found an array of multidimensional CartesianIndex: give up and nest the views.
_maybe_reindex(V, I, ::Tuple{AbstractArray{<:AbstractCartesianIndex}, Vararg{Any}}) =
    (@inline; SubArray(V, I))
# But allow arrays of CartesianIndex{1}; they behave just like arrays of Ints
_maybe_reindex(V, I, A::Tuple{AbstractArray{<:AbstractCartesianIndex{1}}, Vararg{Any}}) =
    (@inline; _maybe_reindex(V, I, tail(A)))
_maybe_reindex(V, I, A::Tuple{Any, Vararg{Any}}) = (@inline; _maybe_reindex(V, I, tail(A)))
# Scanned every index without hitting a CartesianIndex array: flatten the view.
function _maybe_reindex(V, I, ::Tuple{})
    @inline
    # I was already bounds-checked against V, so the composed parent indices
    # are in-bounds by construction.
    @inbounds idxs = to_indices(V.parent, reindex(V.indices, I))
    return SubArray(V.parent, idxs)
end
271

272
## Re-indexing is the heart of a view, transforming A[i, j][x, y] to A[i[x], j[y]]
#
# Recursively look through the heads of the parent- and sub-indices, considering
# the following cases:
# * Parent index is array  -> re-index that with one or more sub-indices (one per dimension)
# * Parent index is Colon  -> just use the sub-index as provided
# * Parent index is scalar -> that dimension was dropped, so skip the sub-index and use the index as is

AbstractZeroDimArray{T} = AbstractArray{T, 0}

reindex(::Tuple{}, ::Tuple{}) = ()

# Skip dropped scalars, so simply peel them off the parent indices and continue
function reindex(idxs::Tuple{ScalarIndex, Vararg{Any}}, subidxs::Tuple{Vararg{Any}})
    @_propagate_inbounds_meta
    return (idxs[1], reindex(tail(idxs), subidxs)...)
end

# Slices simply pass their subindices straight through
function reindex(idxs::Tuple{Slice, Vararg{Any}}, subidxs::Tuple{Any, Vararg{Any}})
    @_propagate_inbounds_meta
    return (subidxs[1], reindex(tail(idxs), tail(subidxs))...)
end

# Re-index into parent vectors with one subindex
function reindex(idxs::Tuple{AbstractVector, Vararg{Any}}, subidxs::Tuple{Any, Vararg{Any}})
    @_propagate_inbounds_meta
    return (idxs[1][subidxs[1]], reindex(tail(idxs), tail(subidxs))...)
end

# Parent matrices are re-indexed with two sub-indices
function reindex(idxs::Tuple{AbstractMatrix, Vararg{Any}}, subidxs::Tuple{Any, Any, Vararg{Any}})
    @_propagate_inbounds_meta
    return (idxs[1][subidxs[1], subidxs[2]], reindex(tail(idxs), tail(tail(subidxs)))...)
end
299

300
# In general, we index N-dimensional parent arrays with N indices
@generated function reindex(idxs::Tuple{AbstractArray{T,N}, Vararg{Any}}, subidxs::Tuple{Vararg{Any}}) where {T,N}
    if length(subidxs.parameters) >= N
        # One sub-index per dimension of the leading parent index array...
        subs = [:(subidxs[$d]) for d in 1:N]
        # ...and the remainder is passed on to the recursive call.
        # NOTE: this local `tail` shadows Base.tail only inside the generator
        # body; `tail(idxs)` in the quoted expression below is resolved at
        # runtime in module scope and still refers to Base.tail.
        tail = [:(subidxs[$d]) for d in N+1:length(subidxs.parameters)]
        :(@_propagate_inbounds_meta; (idxs[1][$(subs...)], reindex(tail(idxs), ($(tail...),))...))
    else
        :(throw(ArgumentError("cannot re-index SubArray with fewer indices than dimensions\nThis should not occur; please submit a bug report.")))
    end
end
310

311
# In general, we simply re-index the parent indices by the provided ones
SlowSubArray{T,N,P,I} = SubArray{T,N,P,I,false}
function getindex(V::SubArray{T,N}, I::Vararg{Int,N}) where {T,N}
    @inline
    @boundscheck checkbounds(V, I...)
    # Bounds were checked against the view, so the composed parent access is safe.
    @inbounds result = V.parent[reindex(V.indices, I)...]
    return result
end
319

320
# But SubArrays with fast linear indexing pre-compute a stride and offset
FastSubArray{T,N,P,I} = SubArray{T,N,P,I,true}
# Convenience helpers that compute the shifted parent index; unlike `reindex`
# these take the view itself rather than its index tuple.
@inline _reindexlinear(V::FastSubArray, i::Int) = V.offset1 + V.stride1*i
@inline _reindexlinear(V::FastSubArray, i::AbstractUnitRange{Int}) = V.offset1 .+ V.stride1 .* i

function getindex(V::FastSubArray, i::Int)
    @inline
    @boundscheck checkbounds(V, i)
    @inbounds result = V.parent[_reindexlinear(V, i)]
    return result
end
333

334
# For vector views with linear indexing, we disambiguate to favor the stride/offset
# computation as that'll generally be faster than (or just as fast as) re-indexing into a range.
function getindex(V::FastSubArray{<:Any, 1}, i::Int)
    @inline
    @boundscheck checkbounds(V, i)
    @inbounds result = V.parent[_reindexlinear(V, i)]
    return result
end
342

343
# We can avoid a multiplication if the first parent index is a Colon or AbstractUnitRange,
# or if all the indices are scalars, i.e. the view is for a single value only
FastContiguousSubArray{T,N,P,I<:Union{Tuple{AbstractUnitRange, Vararg{Any}},
                                      Tuple{Vararg{ScalarIndex}}}} = SubArray{T,N,P,I,true}

# Contiguous case: the stride multiplication is skipped entirely; only the
# precomputed offset is applied.
@inline _reindexlinear(V::FastContiguousSubArray, i::Int) = V.offset1 + i
@inline _reindexlinear(V::FastContiguousSubArray, i::AbstractUnitRange{Int}) = V.offset1 .+ i

"""
An internal type representing arrays stored contiguously in memory.
"""
const DenseArrayType{T,N} = Union{
    DenseArray{T,N},
    <:FastContiguousSubArray{T,N,<:DenseArray},
}

"""
An internal type representing mutable arrays stored contiguously in memory.
"""
const MutableDenseArrayType{T,N} = Union{
    Array{T, N},
    Memory{T},
    FastContiguousSubArray{T,N,<:Array},
    FastContiguousSubArray{T,N,<:Memory}
}

# parents of FastContiguousSubArrays may support fast indexing with AbstractUnitRanges,
# so we may just forward the indexing to the parent
# This may only be done for non-offset ranges, as the result would otherwise have offset axes
const _OneBasedRanges = Union{OneTo{Int}, UnitRange{Int}, Slice{OneTo{Int}}, IdentityUnitRange{OneTo{Int}}}
function getindex(V::FastContiguousSubArray, i::_OneBasedRanges)
    @inline
    @boundscheck checkbounds(V, i)
    @inbounds r = V.parent[_reindexlinear(V, i)]
    r
end

# Colon is lowered to the canonical index form and re-dispatched.
@inline getindex(V::FastContiguousSubArray, i::Colon) = getindex(V, to_indices(V, (:,))...)
×
381

382
# Indexed assignment follows the same pattern as `getindex` above
function setindex!(V::SubArray{T,N}, x, I::Vararg{Int,N}) where {T,N}
    @inline
    @boundscheck checkbounds(V, I...)
    @inbounds V.parent[reindex(V.indices, I)...] = x
    return V
end
function setindex!(V::FastSubArray, x, i::Int)
    @inline
    @boundscheck checkbounds(V, i)
    @inbounds V.parent[_reindexlinear(V, i)] = x
    return V
end
# Disambiguation: favor the stride/offset computation for fast vector views
function setindex!(V::FastSubArray{<:Any, 1}, x, i::Int)
    @inline
    @boundscheck checkbounds(V, i)
    @inbounds V.parent[_reindexlinear(V, i)] = x
    return V
end

function setindex!(V::FastSubArray, x, i::AbstractUnitRange{Int})
    @inline
    @boundscheck checkbounds(V, i)
    @inbounds V.parent[_reindexlinear(V, i)] = x
    return V
end

@inline setindex!(V::FastSubArray, x, i::Colon) = setindex!(V, x, to_indices(V, (i,))...)
×
410

UNCOV
411
function isassigned(V::SubArray{T,N}, I::Vararg{Int,N}) where {T,N}
    @inline
    # Out-of-bounds positions are simply reported as not assigned.
    @boundscheck checkbounds(Bool, V, I...) || return false
    @inbounds result = isassigned(V.parent, reindex(V.indices, I)...)
    return result
end
function isassigned(V::FastSubArray, i::Int)
    @inline
    @boundscheck checkbounds(Bool, V, i) || return false
    @inbounds result = isassigned(V.parent, _reindexlinear(V, i))
    return result
end
# Disambiguation: favor the stride/offset computation for fast vector views
function isassigned(V::FastSubArray{<:Any, 1}, i::Int)
    @inline
    @boundscheck checkbounds(Bool, V, i) || return false
    @inbounds result = isassigned(V.parent, _reindexlinear(V, i))
    return result
end

IndexStyle(::Type{<:FastSubArray}) = IndexLinear()
×
431

432
# Strides are the distance in memory between adjacent elements in a given dimension
# which we determine from the strides of the parent
strides(V::SubArray) = substrides(strides(V.parent), V.indices)

substrides(strds::Tuple{}, ::Tuple{}) = ()
# A scalar index drops its dimension (and its stride)
substrides(strds::NTuple{N,Int}, I::Tuple{ScalarIndex, Vararg{Any}}) where N = (substrides(tail(strds), tail(I))...,)
# A Slice passes the parent's stride through unchanged
substrides(strds::NTuple{N,Int}, I::Tuple{Slice, Vararg{Any}}) where N = (first(strds), substrides(tail(strds), tail(I))...)
# A range scales the parent's stride by its step
substrides(strds::NTuple{N,Int}, I::Tuple{AbstractRange, Vararg{Any}}) where N = (first(strds)*step(I[1]), substrides(tail(strds), tail(I))...)
# Any other index type (e.g. an arbitrary array) has no stride
substrides(strds, I::Tuple{Any, Vararg{Any}}) = throw(ArgumentError(
    LazyString("strides is invalid for SubArrays with indices of type ", typeof(I[1]))))

stride(V::SubArray, d::Integer) = d <= ndims(V) ? strides(V)[d] : strides(V)[end] * size(V)[end]
×
444

445
# Compute the linear-indexing stride of the first extended dimension, walking
# the indices with a running stride product `s` over the parent axes `inds`.
compute_stride1(parent::AbstractArray, I::NTuple{N,Any}) where {N} =
    (@inline; compute_stride1(1, fill_to_length(axes(parent), OneTo(1), Val(N)), I))
compute_stride1(s, inds, I::Tuple{}) = s
# All remaining indices scalar: the accumulated stride stands.
compute_stride1(s, inds, I::Tuple{Vararg{ScalarIndex}}) = s
# A leading scalar consumes one parent axis, scaling the stride by its length.
compute_stride1(s, inds, I::Tuple{ScalarIndex, Vararg{Any}}) =
    (@inline; compute_stride1(s*length(inds[1]), tail(inds), tail(I)))
# First non-scalar index found: a range scales the stride by its step...
compute_stride1(s, inds, I::Tuple{AbstractRange, Vararg{Any}}) = s*step(I[1])
# ...while a Slice leaves it unchanged.
compute_stride1(s, inds, I::Tuple{Slice, Vararg{Any}}) = s
compute_stride1(s, inds, I::Tuple{Any, Vararg{Any}}) = throw(ArgumentError(LazyString("invalid strided index type ", typeof(I[1]))))

elsize(::Type{<:SubArray{<:Any,<:Any,P}}) where {P} = elsize(P)

iscontiguous(A::SubArray) = iscontiguous(typeof(A))
iscontiguous(::Type{<:SubArray}) = false
iscontiguous(::Type{<:FastContiguousSubArray}) = true
×
460

461
first_index(V::FastSubArray) = V.offset1 + V.stride1 * firstindex(V) # cached for fast linear SubArrays
first_index(V::SubArray) = compute_linindex(parent(V), V.indices)

# Computing the first index simply steps through the indices, accumulating the
# sum of index each multiplied by the parent's stride.
# The running sum is `f`; the cumulative stride product is `s`.
# If the parent is a vector, then we offset the parent's own indices with parameters of I
compute_offset1(parent::AbstractVector, stride1::Integer, I::Tuple{AbstractRange}) =
    (@inline; first(I[1]) - stride1*first(axes1(I[1])))
# If the result is one-dimensional and it's a Colon, then linear
# indexing uses the indices along the given dimension.
# If the result is one-dimensional and it's a range, then linear
# indexing might be offset if the index itself is offset
# Otherwise linear indexing always matches the parent.
compute_offset1(parent, stride1::Integer, I::Tuple) =
    (@inline; compute_offset1(parent, stride1, find_extended_dims(1, I...), find_extended_inds(I...), I))
compute_offset1(parent, stride1::Integer, dims::Tuple{Int}, inds::Tuple{Slice}, I::Tuple) =
    (@inline; compute_linindex(parent, I) - stride1*first(axes(parent, dims[1])))  # index-preserving case
compute_offset1(parent, stride1::Integer, dims, inds::Tuple{AbstractRange}, I::Tuple) =
    (@inline; compute_linindex(parent, I) - stride1*first(axes1(inds[1]))) # potentially index-offsetting case
compute_offset1(parent, stride1::Integer, dims, inds, I::Tuple) =
    (@inline; compute_linindex(parent, I) - stride1)
function compute_linindex(parent, I::NTuple{N,Any}) where N
    @inline
    # Pad the parent's axes with OneTo(1) so trailing indices are handled uniformly.
    IP = fill_to_length(axes(parent), OneTo(1), Val(N))
    compute_linindex(first(LinearIndices(parent)), 1, IP, I)
end
function compute_linindex(f, s, IP::Tuple, I::Tuple{Any, Vararg{Any}})
    @inline
    # Offset of this index from the start of its parent axis...
    Δi = first(I[1])-first(IP[1])
    # ...scaled by the stride accumulated so far.
    compute_linindex(f + Δi*s, s*length(IP[1]), tail(IP), tail(I))
end
compute_linindex(f, s, IP::Tuple, I::Tuple{}) = f

# Dimensions at which the non-scalar indices sit
find_extended_dims(dim, ::ScalarIndex, I...) = (@inline; find_extended_dims(dim + 1, I...))
find_extended_dims(dim, i1, I...) = (@inline; (dim, find_extended_dims(dim + 1, I...)...))
find_extended_dims(dim) = ()
# The non-scalar indices themselves
find_extended_inds(::ScalarIndex, I...) = (@inline; find_extended_inds(I...))
find_extended_inds(i1, I...) = (@inline; (i1, find_extended_inds(I...)...))
find_extended_inds() = ()
×
501

UNCOV
502
# Fast views translate a linear index into the parent's linear index space.
pointer(V::FastSubArray, i::Int) = pointer(V.parent, V.offset1 + V.stride1*i)
pointer(V::FastContiguousSubArray, i::Int) = pointer(V.parent, V.offset1 + i)

function pointer(V::SubArray{<:Any,<:Any,<:Array,<:Tuple{Vararg{RangeIndex}}}, is::AbstractCartesianIndex{N}) where {N}
    # Walk each dimension, advancing by the view's stride in that dimension.
    index = first_index(V)
    strds = strides(V)
    for d = 1:N
        index += (is[d]-1)*strds[d]
    end
    return pointer(V.parent, index)
end
513

514
# indices are taken from the range/vector
# Since bounds-checking is performance-critical and uses
# indices, it's worth optimizing these implementations thoroughly
axes(S::SubArray) = (@inline; _indices_sub(S.indices...))
# Scalar indices drop their dimension and contribute no axis.
_indices_sub(::Real, I...) = (@inline; _indices_sub(I...))
_indices_sub() = ()
# Array-valued indices contribute all of their own axes.
_indices_sub(i1::AbstractArray, I...) = (@inline; (axes(i1)..., _indices_sub(I...)...))
524

UNCOV
525
# A view has offset axes whenever any of its indices do.
has_offset_axes(S::SubArray) = has_offset_axes(S.indices...)

function replace_in_print_matrix(S::SubArray{<:Any,2,<:AbstractMatrix}, i::Integer, j::Integer, s::AbstractString)
    # Delegate to the parent at the re-indexed position.
    # NOTE(review): presumably so entries with special printing in the parent
    # (e.g. structural zeros) keep it inside views — confirm against callers.
    replace_in_print_matrix(S.parent, to_indices(S.parent, reindex(S.indices, (i,j)))..., s)
end
function replace_in_print_matrix(S::SubArray{<:Any,1,<:AbstractVector}, i::Integer, j::Integer, s::AbstractString)
    replace_in_print_matrix(S.parent, to_indices(S.parent, reindex(S.indices, (i,)))..., j, s)
end

# XXX: this is considerably more unsafe than the other similarly named methods
unsafe_wrap(::Type{Vector{UInt8}}, s::FastContiguousSubArray{UInt8,1,Vector{UInt8}}) = unsafe_wrap(Vector{UInt8}, pointer(s), size(s))
×
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc