JuliaLang / julia, build #37527 (push, via web-flow), pending completion
Commit: make `IRShow.method_name` inferrable (#49607)

18 of 18 new or added lines in 3 files covered (100.0%)
68710 of 81829 relevant lines covered (83.97%)
33068903.12 hits per line

Source file: /stdlib/LinearAlgebra/src/dense.jl (99.17% of relevant lines covered)
1
# This file is a part of Julia. License is MIT: https://julialang.org/license
2

3
# Linear algebra functions for dense matrices in column major format
4

5
## BLAS cutoff threshold constants
6

7
#TODO const DOT_CUTOFF = 128
8
const ASUM_CUTOFF = 32
9
const NRM2_CUTOFF = 32
10

11
# Generic cross-over constant based on benchmarking on a single thread with an i7 CPU @ 2.5GHz
12
# L1 cache: 32K, L2 cache: 256K, L3 cache: 6144K
13
# This constant should ideally be determined by the actual CPU cache size
14
const ISONE_CUTOFF = 2^21 # 2M
15

16
function isone(A::AbstractMatrix)
37✔
17
    m, n = size(A)
37✔
18
    m != n && return false # only square matrices can satisfy x == one(x)
37✔
19
    if sizeof(A) < ISONE_CUTOFF
36✔
20
        _isone_triacheck(A, m)
35✔
21
    else
22
        _isone_cachefriendly(A, m)
1✔
23
    end
24
end
25

26
@inline function _isone_triacheck(A::AbstractMatrix, m::Int)
35✔
27
    @inbounds for i in 1:m, j in i:m
93✔
28
        if i == j
91✔
29
            isone(A[i,i]) || return false
73✔
30
        else
31
            iszero(A[i,j]) && iszero(A[j,i]) || return false
33✔
32
        end
33
    end
97✔
34
    return true
18✔
35
end
36

37
# Inner loop over rows to be friendly to the CPU cache
38
@inline function _isone_cachefriendly(A::AbstractMatrix, m::Int)
1✔
39
    @inbounds for i in 1:m, j in 1:m
1,001✔
40
        if i == j
1,000,000✔
41
            isone(A[i,i]) || return false
1,000✔
42
        else
43
            iszero(A[j,i]) || return false
999,000✔
44
        end
45
    end
1,000,999✔
46
    return true
1✔
47
end
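
# Illustrative sketch, not from the original file: both code paths above agree on the
# result; ISONE_CUTOFF only selects the traversal order. Assuming `using LinearAlgebra`:
#     isone(Matrix(1.0I, 3, 3))        # true, small path (_isone_triacheck)
#     isone(Matrix(1.0I, 2048, 2048))  # true, sizeof(A) ≥ 2^21 bytes, cache-friendly path
#     isone([1.0 0.1; 0.0 1.0])        # false, nonzero off-diagonal entry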
48

49

50
"""
51
    isposdef!(A) -> Bool
52

53
Test whether a matrix is positive definite (and Hermitian) by trying to perform a
54
Cholesky factorization of `A`, overwriting `A` in the process.
55
See also [`isposdef`](@ref).
56

57
# Examples
58
```jldoctest
59
julia> A = [1. 2.; 2. 50.];
60

61
julia> isposdef!(A)
62
true
63

64
julia> A
65
2×2 Matrix{Float64}:
66
 1.0  2.0
67
 2.0  6.78233
68
```
69
"""
70
isposdef!(A::AbstractMatrix) =
28✔
71
    ishermitian(A) && isposdef(cholesky!(Hermitian(A); check = false))
72

73
"""
74
    isposdef(A) -> Bool
75

76
Test whether a matrix is positive definite (and Hermitian) by trying to perform a
77
Cholesky factorization of `A`.
78

79
See also [`isposdef!`](@ref), [`cholesky`](@ref).
80

81
# Examples
82
```jldoctest
83
julia> A = [1 2; 2 50]
84
2×2 Matrix{Int64}:
85
 1   2
86
 2  50
87

88
julia> isposdef(A)
89
true
90
```
91
"""
92
isposdef(A::AbstractMatrix) =
175✔
93
    ishermitian(A) && isposdef(cholesky(Hermitian(A); check = false))
94
isposdef(x::Number) = imag(x)==0 && real(x) > 0
14✔
95

96
function norm(x::StridedVector{T}, rx::Union{UnitRange{TI},AbstractRange{TI}}) where {T<:BlasFloat,TI<:Integer}
12✔
97
    if minimum(rx) < 1 || maximum(rx) > length(x)
20✔
98
        throw(BoundsError(x, rx))
8✔
99
    end
100
    GC.@preserve x BLAS.nrm2(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx))
4✔
101
end
102

103
norm1(x::Union{Array{T},StridedVector{T}}) where {T<:BlasReal} =
1,197✔
104
    length(x) < ASUM_CUTOFF ? generic_norm1(x) : BLAS.asum(x)
105

106
norm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} =
144,080✔
107
    length(x) < NRM2_CUTOFF ? generic_norm2(x) : BLAS.nrm2(x)
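
# Illustrative sketch, not from the original file: the range method above computes the
# 2-norm of a strided slice via BLAS.nrm2 without copying, and the ASUM/NRM2 cutoffs
# switch between the generic fallbacks and BLAS for short vectors. For example:
#     v = collect(1.0:100.0)
#     norm(v, 10:20) ≈ norm(v[10:20])   # true; the left side works on the slice in place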
108

109
"""
110
    triu!(M, k::Integer)
111

112
Return the upper triangle of `M` starting from the `k`th superdiagonal,
113
overwriting `M` in the process.
114

115
# Examples
116
```jldoctest
117
julia> M = [1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5]
118
5×5 Matrix{Int64}:
119
 1  2  3  4  5
120
 1  2  3  4  5
121
 1  2  3  4  5
122
 1  2  3  4  5
123
 1  2  3  4  5
124

125
julia> triu!(M, 1)
126
5×5 Matrix{Int64}:
127
 0  2  3  4  5
128
 0  0  3  4  5
129
 0  0  0  4  5
130
 0  0  0  0  5
131
 0  0  0  0  0
132
```
133
"""
134
function triu!(M::AbstractMatrix, k::Integer)
23,810✔
135
    require_one_based_indexing(M)
23,810✔
136
    m, n = size(M)
23,810✔
137
    for j in 1:min(n, m + k)
47,583✔
138
        for i in max(1, j - k + 1):m
352,724✔
139
            M[i,j] = zero(M[i,j])
756,565✔
140
        end
1,347,137✔
141
    end
351,453✔
142
    M
23,810✔
143
end
144

145
triu(M::Matrix, k::Integer) = triu!(copy(M), k)
1,226✔
146

147
"""
148
    tril!(M, k::Integer)
149

150
Return the lower triangle of `M` starting from the `k`th superdiagonal, overwriting `M` in
151
the process.
152

153
# Examples
154
```jldoctest
155
julia> M = [1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5]
156
5×5 Matrix{Int64}:
157
 1  2  3  4  5
158
 1  2  3  4  5
159
 1  2  3  4  5
160
 1  2  3  4  5
161
 1  2  3  4  5
162

163
julia> tril!(M, 2)
164
5×5 Matrix{Int64}:
165
 1  2  3  0  0
166
 1  2  3  4  0
167
 1  2  3  4  5
168
 1  2  3  4  5
169
 1  2  3  4  5
170
```
171
"""
172
function tril!(M::AbstractMatrix, k::Integer)
20,668✔
173
    require_one_based_indexing(M)
20,668✔
174
    m, n = size(M)
20,668✔
175
    for j in max(1, k + 1):n
41,299✔
176
        @inbounds for i in 1:min(j - k - 1, m)
325,949✔
177
            M[i,j] = zero(M[i,j])
710,882✔
178
        end
1,267,593✔
179
    end
324,689✔
180
    M
20,668✔
181
end
182
tril(M::Matrix, k::Integer) = tril!(copy(M), k)
1,212✔
183

184
"""
185
    fillband!(A::AbstractMatrix, x, l, u)
186

187
Fill the band between diagonals `l` and `u` with the value `x`.
188
"""
189
function fillband!(A::AbstractMatrix{T}, x, l, u) where T
8✔
190
    require_one_based_indexing(A)
8✔
191
    m, n = size(A)
8✔
192
    xT = convert(T, x)
8✔
193
    for j in 1:n
16✔
194
        for i in max(1,j-u):min(m,j-l)
53✔
195
            @inbounds A[i, j] = xT
91✔
196
        end
156✔
197
    end
46✔
198
    return A
8✔
199
end
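
# Illustrative sketch, not from the original file: `fillband!` (an unexported helper)
# writes `x` into every entry A[i,j] with l ≤ j - i ≤ u and leaves the rest untouched:
#     A = zeros(Int, 4, 4)
#     LinearAlgebra.fillband!(A, 7, 0, 1)   # fills the diagonal and first superdiagonal with 7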
200

201
diagind(m::Integer, n::Integer, k::Integer=0) =
40,647✔
202
    k <= 0 ? range(1-k, step=m+1, length=min(m+k, n)) : range(k*m+1, step=m+1, length=min(m, n-k))
203

204
"""
205
    diagind(M, k::Integer=0)
206

207
An `AbstractRange` giving the indices of the `k`th diagonal of the matrix `M`.
208

209
See also: [`diag`](@ref), [`diagm`](@ref), [`Diagonal`](@ref).
210

211
# Examples
212
```jldoctest
213
julia> A = [1 2 3; 4 5 6; 7 8 9]
214
3×3 Matrix{Int64}:
215
 1  2  3
216
 4  5  6
217
 7  8  9
218

219
julia> diagind(A,-1)
220
2:4:6
221
```
222
"""
223
function diagind(A::AbstractMatrix, k::Integer=0)
19,448✔
224
    require_one_based_indexing(A)
19,448✔
225
    diagind(size(A,1), size(A,2), k)
18,509✔
226
end
227

228
"""
229
    diag(M, k::Integer=0)
230

231
The `k`th diagonal of a matrix, as a vector.
232

233
See also [`diagm`](@ref), [`diagind`](@ref), [`Diagonal`](@ref), [`isdiag`](@ref).
234

235
# Examples
236
```jldoctest
237
julia> A = [1 2 3; 4 5 6; 7 8 9]
238
3×3 Matrix{Int64}:
239
 1  2  3
240
 4  5  6
241
 7  8  9
242

243
julia> diag(A,1)
244
2-element Vector{Int64}:
245
 2
246
 6
247
```
248
"""
249
diag(A::AbstractMatrix, k::Integer=0) = A[diagind(A,k)]
12,997✔
250

251
"""
252
    diagm(kv::Pair{<:Integer,<:AbstractVector}...)
253
    diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...)
254

255
Construct a matrix from `Pair`s of diagonals and vectors.
256
Vector `kv.second` will be placed on the `kv.first` diagonal.
257
By default the matrix is square and its size is inferred
258
from `kv`, but a non-square size `m`×`n` (padded with zeros as needed)
259
can be specified by passing `m,n` as the first arguments.
260
For repeated diagonal indices `kv.first` the values in the corresponding
261
vectors `kv.second` will be added.
262

263
`diagm` constructs a full matrix; if you want storage-efficient
264
versions with fast arithmetic, see [`Diagonal`](@ref), [`Bidiagonal`](@ref),
265
[`Tridiagonal`](@ref) and [`SymTridiagonal`](@ref).
266

267
# Examples
268
```jldoctest
269
julia> diagm(1 => [1,2,3])
270
4×4 Matrix{Int64}:
271
 0  1  0  0
272
 0  0  2  0
273
 0  0  0  3
274
 0  0  0  0
275

276
julia> diagm(1 => [1,2,3], -1 => [4,5])
277
4×4 Matrix{Int64}:
278
 0  1  0  0
279
 4  0  2  0
280
 0  5  0  3
281
 0  0  0  0
282

283
julia> diagm(1 => [1,2,3], 1 => [1,2,3])
284
4×4 Matrix{Int64}:
285
 0  2  0  0
286
 0  0  4  0
287
 0  0  0  6
288
 0  0  0  0
289
```
290
"""
291
diagm(kv::Pair{<:Integer,<:AbstractVector}...) = _diagm(nothing, kv...)
200✔
292
diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) = _diagm((Int(m),Int(n)), kv...)
27✔
293
function _diagm(size, kv::Pair{<:Integer,<:AbstractVector}...)
227✔
294
    A = diagm_container(size, kv...)
516,494✔
295
    for p in kv
215✔
296
        inds = diagind(A, p.first)
377✔
297
        for (i, val) in enumerate(p.second)
714✔
298
            A[inds[i]] += val
7,163✔
299
        end
9,798✔
300
    end
475✔
301
    return A
215✔
302
end
303
function diagm_size(size::Nothing, kv::Pair{<:Integer,<:AbstractVector}...)
200✔
304
    mnmax = mapreduce(x -> length(x.second) + abs(Int(x.first)), max, kv; init=0)
534✔
305
    return mnmax, mnmax
200✔
306
end
307
function diagm_size(size::Tuple{Int,Int}, kv::Pair{<:Integer,<:AbstractVector}...)
27✔
308
    mmax = mapreduce(x -> length(x.second) - min(0,Int(x.first)), max, kv; init=0)
78✔
309
    nmax = mapreduce(x -> length(x.second) + max(0,Int(x.first)), max, kv; init=0)
78✔
310
    m, n = size
27✔
311
    (m ≥ mmax && n ≥ nmax) || throw(DimensionMismatch("invalid size=$size"))
39✔
312
    return m, n
15✔
313
end
314
function diagm_container(size, kv::Pair{<:Integer,<:AbstractVector}...)
227✔
315
    T = promote_type(map(x -> eltype(x.second), kv)...)
612✔
316
    # For some type `T`, `zero(T)` is not a `T` and `zeros(T, ...)` fails.
317
    U = promote_type(T, typeof(zero(T)))
227✔
318
    return zeros(U, diagm_size(size, kv...)...)
516,494✔
319
end
320
diagm_container(size, kv::Pair{<:Integer,<:BitVector}...) =
×
321
    falses(diagm_size(size, kv...)...)
322

323
"""
324
    diagm(v::AbstractVector)
325
    diagm(m::Integer, n::Integer, v::AbstractVector)
326

327
Construct a matrix with elements of the vector as diagonal elements.
328
By default, the matrix is square and its size is given by
329
`length(v)`, but a non-square size `m`×`n` can be specified
330
by passing `m,n` as the first arguments.
331

332
# Examples
333
```jldoctest
334
julia> diagm([1,2,3])
335
3×3 Matrix{Int64}:
336
 1  0  0
337
 0  2  0
338
 0  0  3
339
```
340
"""
341
diagm(v::AbstractVector) = diagm(0 => v)
5✔
342
diagm(m::Integer, n::Integer, v::AbstractVector) = diagm(m, n, 0 => v)
2✔
343

344
function tr(A::Matrix{T}) where T
128✔
345
    n = checksquare(A)
128✔
346
    t = zero(T)
128✔
347
    @inbounds @simd for i in 1:n
128✔
348
        t += A[i,i]
1,202✔
349
    end
×
350
    t
128✔
351
end
352

353
_kronsize(A::AbstractMatrix, B::AbstractMatrix) = map(*, size(A), size(B))
2,404✔
354
_kronsize(A::AbstractMatrix, B::AbstractVector) = (size(A, 1)*length(B), size(A, 2))
352✔
355
_kronsize(A::AbstractVector, B::AbstractMatrix) = (length(A)*size(B, 1), size(B, 2))
365✔
356

357
"""
358
    kron!(C, A, B)
359

360
Computes the Kronecker product of `A` and `B` and stores the result in `C`,
361
overwriting the existing content of `C`. This is the in-place version of [`kron`](@ref).
362

363
!!! compat "Julia 1.6"
364
    This function requires Julia 1.6 or later.
365
"""
366
function kron!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat)
1,561✔
367
    size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!"))
1,561✔
368
    _kron!(C, A, B)
1,561✔
369
end
370
function kron!(c::AbstractVector, a::AbstractVector, b::AbstractVector)
17✔
371
    length(c) == length(a) * length(b) || throw(DimensionMismatch("kron!"))
17✔
372
    m = firstindex(c)
17✔
373
    @inbounds for i in eachindex(a)
34✔
374
        ai = a[i]
98✔
375
        for k in eachindex(b)
196✔
376
            c[m] = ai*b[k]
692✔
377
            m += 1
692✔
378
        end
1,286✔
379
    end
179✔
380
    return c
17✔
381
end
382
kron!(c::AbstractVecOrMat, a::AbstractVecOrMat, b::Number) = mul!(c, a, b)
1✔
383
kron!(c::AbstractVecOrMat, a::Number, b::AbstractVecOrMat) = mul!(c, a, b)
1✔
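
# Illustrative sketch, not from the original file: `kron!` fills a preallocated output
# whose size must match `_kronsize(A, B)`:
#     A = [1 2; 3 4]; B = [0 1; 1 0]
#     C = Matrix{Int}(undef, 4, 4)
#     kron!(C, A, B) == kron(A, B)   # true; C is overwritten in place and returned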
384

385
function _kron!(C, A::AbstractMatrix, B::AbstractMatrix)
1,202✔
386
    m = firstindex(C)
1,202✔
387
    @inbounds for j in axes(A,2), l in axes(B,2), i in axes(A,1)
66,865✔
388
        Aij = A[i,j]
455,267✔
389
        for k in axes(B,1)
910,534✔
390
            C[m] = Aij*B[k,l]
4,344,775✔
391
            m += 1
4,344,775✔
392
        end
8,234,283✔
393
    end
462,070✔
394
    return C
1,202✔
395
end
396
function _kron!(C, A::AbstractMatrix, b::AbstractVector)
176✔
397
    m = firstindex(C)
176✔
398
    @inbounds for j in axes(A,2), i in axes(A,1)
1,713✔
399
        Aij = A[i,j]
28,837✔
400
        for k in eachindex(b)
31,274✔
401
            C[m] = Aij*b[k]
130,541✔
402
            m += 1
129,411✔
403
        end
243,185✔
404
    end
17,000✔
405
    return C
176✔
406
end
407
function _kron!(C, a::AbstractVector, B::AbstractMatrix)
183✔
408
    m = firstindex(C)
183✔
409
    @inbounds for l in axes(B,2), i in eachindex(a)
1,770✔
410
        ai = a[i]
13,775✔
411
        for k in axes(B,1)
26,773✔
412
            C[m] = ai*B[k,l]
199,749✔
413
            m += 1
111,399✔
414
        end
209,251✔
415
    end
14,955✔
416
    return C
183✔
417
end
418

419
"""
420
    kron(A, B)
421

422
Computes the Kronecker product of two vectors, matrices or numbers.
423

424
For real vectors `v` and `w`, the Kronecker product is related to the outer product by
425
`kron(v,w) == vec(w * transpose(v))` or
426
`w * transpose(v) == reshape(kron(v,w), (length(w), length(v)))`.
427
Note how the ordering of `v` and `w` differs on the left and right
428
of these expressions (due to column-major storage).
429
For complex vectors, the outer product `w * v'` also differs by conjugation of `v`.
430

431
# Examples
432
```jldoctest
433
julia> A = [1 2; 3 4]
434
2×2 Matrix{Int64}:
435
 1  2
436
 3  4
437

438
julia> B = [im 1; 1 -im]
439
2×2 Matrix{Complex{Int64}}:
440
 0+1im  1+0im
441
 1+0im  0-1im
442

443
julia> kron(A, B)
444
4×4 Matrix{Complex{Int64}}:
445
 0+1im  1+0im  0+2im  2+0im
446
 1+0im  0-1im  2+0im  0-2im
447
 0+3im  3+0im  0+4im  4+0im
448
 3+0im  0-3im  4+0im  0-4im
449

450
julia> v = [1, 2]; w = [3, 4, 5];
451

452
julia> w*transpose(v)
453
3×2 Matrix{Int64}:
454
 3   6
455
 4   8
456
 5  10
457

458
julia> reshape(kron(v,w), (length(w), length(v)))
459
3×2 Matrix{Int64}:
460
 3   6
461
 4   8
462
 5  10
463
```
464
"""
465
function kron(A::AbstractVecOrMat{T}, B::AbstractVecOrMat{S}) where {T,S}
1,560✔
466
    R = Matrix{promote_op(*,T,S)}(undef, _kronsize(A, B))
3,120✔
467
    return kron!(R, A, B)
1,560✔
468
end
469
function kron(a::AbstractVector{T}, b::AbstractVector{S}) where {T,S}
16✔
470
    c = Vector{promote_op(*,T,S)}(undef, length(a)*length(b))
32✔
471
    return kron!(c, a, b)
16✔
472
end
473
kron(a::Number, b::Union{Number, AbstractVecOrMat}) = a * b
1✔
474
kron(a::AbstractVecOrMat, b::Number) = a * b
3✔
475
kron(a::AdjointAbsVec, b::AdjointAbsVec) = adjoint(kron(adjoint(a), adjoint(b)))
3✔
476
kron(a::AdjOrTransAbsVec, b::AdjOrTransAbsVec) = transpose(kron(transpose(a), transpose(b)))
4✔
477

478
# Matrix power
479
(^)(A::AbstractMatrix, p::Integer) = p < 0 ? power_by_squaring(inv(A), -p) : power_by_squaring(A, p)
1,067✔
480
function (^)(A::AbstractMatrix{T}, p::Integer) where T<:Integer
50✔
481
    # make sure that e.g. [1 1;1 0]^big(3)
482
    # is promoted in a similar way as 2^big(3)
483
    TT = promote_op(^, T, typeof(p))
100✔
484
    return power_by_squaring(convert(AbstractMatrix{TT}, A), p)
50✔
485
end
486
function integerpow(A::AbstractMatrix{T}, p) where T
152✔
487
    TT = promote_op(^, T, typeof(p))
304✔
488
    return (TT == T ? A : convert(AbstractMatrix{TT}, A))^Integer(p)
152✔
489
end
490
function schurpow(A::AbstractMatrix, p)
84✔
491
    if istriu(A)
84✔
492
        # Integer part
493
        retmat = A ^ floor(p)
18✔
494
        # Real part
495
        if p - floor(p) == 0.5
18✔
496
            # special case: A^0.5 === sqrt(A)
497
            retmat = retmat * sqrt(A)
4✔
498
        else
499
            retmat = retmat * powm!(UpperTriangular(float.(A)), real(p - floor(p)))
32✔
500
        end
501
    else
502
        S,Q,d = Schur{Complex}(schur(A))
84✔
503
        # Integer part
504
        R = S ^ floor(p)
66✔
505
        # Real part
506
        if p - floor(p) == 0.5
66✔
507
            # special case: A^0.5 === sqrt(A)
508
            R = R * sqrt(S)
24✔
509
        else
510
            R = R * powm!(UpperTriangular(float.(S)), real(p - floor(p)))
42✔
511
        end
512
        retmat = Q * R * Q'
66✔
513
    end
514

515
    # if A has nonpositive real eigenvalues, retmat is a nonprincipal matrix power.
516
    if isreal(retmat)
84✔
517
        return real(retmat)
31✔
518
    else
519
        return retmat
53✔
520
    end
521
end
522
function (^)(A::AbstractMatrix{T}, p::Real) where T
266✔
523
    n = checksquare(A)
266✔
524

525
    # Quicker return if A is diagonal
526
    if isdiag(A)
406✔
527
        TT = promote_op(^, T, typeof(p))
48✔
528
        retmat = copymutable_oftype(A, TT)
25✔
529
        for i in 1:n
48✔
530
            retmat[i, i] = retmat[i, i] ^ p
82✔
531
        end
140✔
532
        return retmat
24✔
533
    end
534

535
    # For integer powers, use power_by_squaring
536
    isinteger(p) && return integerpow(A, p)
242✔
537

538
    # If possible, use diagonalization
539
    if issymmetric(A)
120✔
540
        return (Symmetric(A)^p)
33✔
541
    end
542
    if ishermitian(A)
87✔
543
        return (Hermitian(A)^p)
15✔
544
    end
545

546
    # Otherwise, use Schur decomposition
547
    return schurpow(A, p)
72✔
548
end
549

550
"""
551
    ^(A::AbstractMatrix, p::Number)
552

553
Matrix power, equivalent to ``\\exp(p\\log(A))``
554

555
# Examples
556
```jldoctest
557
julia> [1 2; 0 3]^3
558
2×2 Matrix{Int64}:
559
 1  26
560
 0  27
561
```
562
"""
563
(^)(A::AbstractMatrix, p::Number) = exp(p*log(A))
24✔
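
# Illustrative sketch, not from the original file: non-integer real powers take the
# diagonal, symmetric/Hermitian, or Schur branch of `^(A, p::Real)` above, and the
# result is consistent with `sqrt`:
#     A = [2.0 1.0; 1.0 3.0]
#     A^0.5 ≈ sqrt(A)    # true (symmetric branch; A is positive definite)
#     (A^0.5)^2 ≈ A      # true up to roundoff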
564

565
# Matrix exponential
566

567
"""
568
    exp(A::AbstractMatrix)
569

570
Compute the matrix exponential of `A`, defined by
571

572
```math
573
e^A = \\sum_{n=0}^{\\infty} \\frac{A^n}{n!}.
574
```
575

576
For symmetric or Hermitian `A`, an eigendecomposition ([`eigen`](@ref)) is
577
used, otherwise the scaling and squaring algorithm (see [^H05]) is chosen.
578

579
[^H05]: Nicholas J. Higham, "The squaring and scaling method for the matrix exponential revisited", SIAM Journal on Matrix Analysis and Applications, 26(4), 2005, 1179-1193. [doi:10.1137/090768539](https://doi.org/10.1137/090768539)
580

581
# Examples
582
```jldoctest
583
julia> A = Matrix(1.0I, 2, 2)
584
2×2 Matrix{Float64}:
585
 1.0  0.0
586
 0.0  1.0
587

588
julia> exp(A)
589
2×2 Matrix{Float64}:
590
 2.71828  0.0
591
 0.0      2.71828
592
```
593
"""
594
exp(A::AbstractMatrix) = exp!(copy_similar(A, eigtype(eltype(A))))
631✔
595
exp(A::AdjointAbsMat) = adjoint(exp(parent(A)))
40✔
596
exp(A::TransposeAbsMat) = transpose(exp(parent(A)))
28✔
597

598
"""
599
    cis(A::AbstractMatrix)
600

601
More efficient method for `exp(im*A)` of square matrix `A`
602
(especially if `A` is `Hermitian` or real-`Symmetric`).
603

604
See also [`cispi`](@ref), [`sincos`](@ref), [`exp`](@ref).
605

606
!!! compat "Julia 1.7"
607
    Support for using `cis` with matrices was added in Julia 1.7.
608

609
# Examples
610
```jldoctest
611
julia> cis([π 0; 0 π]) ≈ -I
612
true
613
```
614
"""
615
cis(A::AbstractMatrix) = exp(im * A)  # fallback
×
616
cis(A::AbstractMatrix{<:Base.HWNumber}) = exp_maybe_inplace(float.(im .* A))
62✔
617

618
exp_maybe_inplace(A::StridedMatrix{<:Union{ComplexF32, ComplexF64}}) = exp!(A)
62✔
619
exp_maybe_inplace(A) = exp(A)
×
620

621
"""
622
    ^(b::Number, A::AbstractMatrix)
623

624
Matrix exponential, equivalent to ``\\exp(\\log(b)A)``.
625

626
!!! compat "Julia 1.1"
627
    Support for raising `Irrational` numbers (like `ℯ`)
628
    to a matrix was added in Julia 1.1.
629

630
# Examples
631
```jldoctest
632
julia> 2^[1 2; 0 3]
633
2×2 Matrix{Float64}:
634
 2.0  6.0
635
 0.0  8.0
636

637
julia> ℯ^[1 2; 0 3]
638
2×2 Matrix{Float64}:
639
 2.71828  17.3673
640
 0.0      20.0855
641
```
642
"""
643
Base.:^(b::Number, A::AbstractMatrix) = exp!(log(b)*A)
12✔
644
# method for ℯ to explicitly elide the log(b) multiplication
645
Base.:^(::Irrational{:ℯ}, A::AbstractMatrix) = exp(A)
6✔
646

647
## Destructive matrix exponential using algorithm from Higham, 2008,
648
## "Functions of Matrices: Theory and Computation", SIAM
649
function exp!(A::StridedMatrix{T}) where T<:BlasFloat
1,774✔
650
    n = checksquare(A)
1,777✔
651
    if ishermitian(A)
1,771✔
652
        return copytri!(parent(exp(Hermitian(A))), 'U', true)
69✔
653
    end
654
    ilo, ihi, scale = LAPACK.gebal!('B', A)    # modifies A
1,702✔
655
    nA   = opnorm(A, 1)
1,701✔
656
    ## For sufficiently small nA, use lower-order Padé approximations
657
    if (nA <= 2.1)
1,701✔
658
        if nA > 0.95
890✔
659
            C = T[17643225600.,8821612800.,2075673600.,302702400.,
1,980✔
660
                     30270240.,   2162160.,    110880.,     3960.,
661
                           90.,         1.]
662
        elseif nA > 0.25
692✔
663
            C = T[17297280.,8648640.,1995840.,277200.,
173✔
664
                     25200.,   1512.,     56.,     1.]
665
        elseif nA > 0.015
519✔
666
            C = T[30240.,15120.,3360.,
513✔
667
                    420.,   30.,   1.]
668
        else
669
            C = T[120.,60.,12.,1.]
6✔
670
        end
671
        A2 = A * A
890✔
672
        # Compute U and V: Even/odd terms in Padé numerator & denom
673
        # Expansion of k=1 in for loop
674
        P = A2
890✔
675
        U = mul!(C[4]*P, true, C[2]*I, true, true) #U = C[2]*I + C[4]*P
890✔
676
        V = mul!(C[3]*P, true, C[1]*I, true, true) #V = C[1]*I + C[3]*P
890✔
677
        for k in 2:(div(length(C), 2) - 1)
1,774✔
678
            P *= A2
1,453✔
679
            mul!(U, C[2k + 2], P, true, true) # U += C[2k+2]*P
2,906✔
680
            mul!(V, C[2k + 1], P, true, true) # V += C[2k+1]*P
2,906✔
681
        end
2,022✔
682

683
        U = A * U
890✔
684

685
        # Padé approximant:  (V-U)\(V+U)
686
        tmp1, tmp2 = A, A2 # Reuse already allocated arrays
890✔
687
        tmp1 .= V .- U
1,780✔
688
        tmp2 .= V .+ U
1,780✔
689
        X = LAPACK.gesv!(tmp1, tmp2)[1]
890✔
690
    else
691
        s  = log2(nA/5.4)               # power of 2 later reversed by squaring
811✔
692
        if s > 0
811✔
693
            si = ceil(Int,s)
494✔
694
            A ./= convert(T,2^si)
988✔
695
        end
696
        CC = T[64764752532480000.,32382376266240000.,7771770303897600.,
11,354✔
697
                1187353796428800.,  129060195264000.,  10559470521600.,
698
                    670442572800.,      33522128640.,      1323241920.,
699
                        40840800.,           960960.,           16380.,
700
                             182.,                1.]
701
        A2 = A * A
811✔
702
        A4 = A2 * A2
811✔
703
        A6 = A2 * A4
811✔
704
        tmp1, tmp2 = similar(A6), similar(A6)
811✔
705

706
        # Allocation economical version of:
707
        # U  = A * (A6 * (CC[14].*A6 .+ CC[12].*A4 .+ CC[10].*A2) .+
708
        #           CC[8].*A6 .+ CC[6].*A4 .+ CC[4]*A2+CC[2]*I)
709
        tmp1 .= CC[14].*A6 .+ CC[12].*A4 .+ CC[10].*A2
1,622✔
710
        tmp2 .= CC[8].*A6 .+ CC[6].*A4 .+ CC[4].*A2
1,622✔
711
        mul!(tmp2, true,CC[2]*I, true, true) # tmp2 .+= CC[2]*I
811✔
712
        U = mul!(tmp2, A6, tmp1, true, true)
811✔
713
        U, tmp1 = mul!(tmp1, A, U), A # U = A * U0
811✔
714

715
        # Allocation economical version of:
716
        # V  = A6 * (CC[13].*A6 .+ CC[11].*A4 .+ CC[9].*A2) .+
717
        #           CC[7].*A6 .+ CC[5].*A4 .+ CC[3]*A2 .+ CC[1]*I
718
        tmp1 .= CC[13].*A6 .+ CC[11].*A4 .+ CC[9].*A2
1,622✔
719
        tmp2 .= CC[7].*A6 .+ CC[5].*A4 .+ CC[3].*A2
1,622✔
720
        mul!(tmp2, true, CC[1]*I, true, true) # tmp2 .+= CC[1]*I
811✔
721
        V = mul!(tmp2, A6, tmp1, true, true)
811✔
722

723
        tmp1 .= V .+ U
1,622✔
724
        tmp2 .= V .- U # tmp2 already contained V but this seems more readable
1,622✔
725
        X = LAPACK.gesv!(tmp2, tmp1)[1] # X now contains r_13 in Higham 2008
811✔
726

727
        if s > 0
811✔
728
            # Repeated squaring to compute X = r_13^(2^si)
729
            for t=1:si
988✔
730
                mul!(tmp2, X, X)
596✔
731
                X, tmp2 = tmp2, X
596✔
732
            end
596✔
733
        end
734
    end
735

736
    # Undo the balancing
737
    for j = ilo:ihi
3,402✔
738
        scj = scale[j]
3,495✔
739
        for i = 1:n
6,990✔
740
            X[j,i] *= scj
9,521✔
741
        end
15,547✔
742
        for i = 1:n
6,990✔
743
            X[i,j] /= scj
9,521✔
744
        end
15,547✔
745
    end
5,289✔
746

747
    if ilo > 1       # apply lower permutations in reverse order
1,701✔
748
        for j in (ilo-1):-1:1; rcswap!(j, Int(scale[j]), X) end
4✔
749
    end
750
    if ihi < n       # apply upper permutations in forward order
1,701✔
751
        for j in (ihi+1):n;    rcswap!(j, Int(scale[j]), X) end
3,443✔
752
    end
753
    X
1,701✔
754
end
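
# Illustrative sketch, not from the original file: the squaring loop above uses the
# identity exp(A) = exp(A/2^s)^(2^s), so the Padé approximant only ever sees a matrix
# whose 1-norm is at most about 5.4. A quick sanity check of exp! via exp:
#     A = [0.0 1.0; -1.0 0.0]
#     exp(A) ≈ [cos(1) sin(1); -sin(1) cos(1)]   # true; A generates a plane rotation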
755

756
## Swap rows i and j and columns i and j in X
757
function rcswap!(i::Integer, j::Integer, X::AbstractMatrix{<:Number})
952✔
758
    for k = 1:size(X,1)
1,904✔
759
        X[k,i], X[k,j] = X[k,j], X[k,i]
4,566✔
760
    end
8,180✔
761
    for k = 1:size(X,2)
1,904✔
762
        X[i,k], X[j,k] = X[j,k], X[i,k]
4,566✔
763
    end
4,566✔
764
end
765

766
"""
767
    log(A::AbstractMatrix)
768

769
If `A` has no negative real eigenvalue, compute the principal matrix logarithm of `A`, i.e.
770
the unique matrix ``X`` such that ``e^X = A`` and ``-\\pi < Im(\\lambda) < \\pi`` for all
771
the eigenvalues ``\\lambda`` of ``X``. If `A` has nonpositive eigenvalues, a nonprincipal
772
matrix function is returned whenever possible.
773

774
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is
775
used, if `A` is triangular an improved version of the inverse scaling and squaring method is
776
employed (see [^AH12] and [^AHR13]). If `A` is real with no negative eigenvalues, then
777
the real Schur form is computed. Otherwise, the complex Schur form is computed. Then
778
the upper (quasi-)triangular algorithm in [^AHR13] is used on the upper (quasi-)triangular
779
factor.
780

781
[^AH12]: Awad H. Al-Mohy and Nicholas J. Higham, "Improved inverse  scaling and squaring algorithms for the matrix logarithm", SIAM Journal on Scientific Computing, 34(4), 2012, C153-C169. [doi:10.1137/110852553](https://doi.org/10.1137/110852553)
782

783
[^AHR13]: Awad H. Al-Mohy, Nicholas J. Higham and Samuel D. Relton, "Computing the Fréchet derivative of the matrix logarithm and estimating the condition number", SIAM Journal on Scientific Computing, 35(4), 2013, C394-C410. [doi:10.1137/120885991](https://doi.org/10.1137/120885991)
784

785
# Examples
786
```jldoctest
787
julia> A = Matrix(2.7182818*I, 2, 2)
788
2×2 Matrix{Float64}:
789
 2.71828  0.0
790
 0.0      2.71828
791

792
julia> log(A)
793
2×2 Matrix{Float64}:
794
 1.0  0.0
795
 0.0  1.0
796
```
797
"""
798
function log(A::AbstractMatrix)
100✔
799
    # If possible, use diagonalization
800
    if ishermitian(A)
100✔
801
        logHermA = log(Hermitian(A))
10✔
802
        return ishermitian(logHermA) ? copytri!(parent(logHermA), 'U', true) : parent(logHermA)
10✔
803
    elseif istriu(A)
90✔
804
        return triu!(parent(log(UpperTriangular(A))))
24✔
805
    elseif isreal(A)
89✔
806
        SchurF = schur(real(A))
81✔
807
        if istriu(SchurF.T)
81✔
808
            logA = SchurF.Z * log(UpperTriangular(SchurF.T)) * SchurF.Z'
48✔
809
        else
810
            # real log exists whenever all eigenvalues are positive
811
            is_log_real = !any(x -> isreal(x) && real(x) ≤ 0, SchurF.values)
243✔
812
            if is_log_real
25✔
813
                logA = SchurF.Z * log_quasitriu(SchurF.T) * SchurF.Z'
13✔
814
            else
815
                SchurS = Schur{Complex}(SchurF)
12✔
816
                logA = SchurS.Z * log(UpperTriangular(SchurS.T)) * SchurS.Z'
12✔
817
            end
818
        end
819
        return eltype(A) <: Complex ? complex(logA) : logA
53✔
820
    else
821
        SchurF = schur(A)
13✔
822
        return SchurF.vectors * log(UpperTriangular(SchurF.T)) * SchurF.vectors'
13✔
823
    end
824
end
825

826
log(A::AdjointAbsMat) = adjoint(log(parent(A)))
8✔
827
log(A::TransposeAbsMat) = transpose(log(parent(A)))
8✔
828

829
"""
830
    sqrt(A::AbstractMatrix)
831

832
If `A` has no negative real eigenvalues, compute the principal matrix square root of `A`,
833
that is the unique matrix ``X`` with eigenvalues having positive real part such that
834
``X^2 = A``. Otherwise, a nonprincipal square root is returned.
835

836
If `A` is real-symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is
837
used to compute the square root.   For such matrices, eigenvalues λ that
838
appear to be slightly negative due to roundoff errors are treated as if they were zero.
839
More precisely, matrices with all eigenvalues `≥ -rtol*(max |λ|)` are treated as semidefinite
840
(yielding a Hermitian square root), with negative eigenvalues taken to be zero.
841
`rtol` is a keyword argument to `sqrt` (in the Hermitian/real-symmetric case only) that
842
defaults to machine precision scaled by `size(A,1)`.
843

844
Otherwise, the square root is determined by means of the
845
Björck-Hammarling method [^BH83], which computes the complex Schur form ([`schur`](@ref))
846
and then the complex square root of the triangular factor.
847
If a real square root exists, then an extension of this method [^H87] that computes the real
848
Schur form and then the real square root of the quasi-triangular factor is instead used.
849

850
[^BH83]:
851

852
    Åke Björck and Sven Hammarling, "A Schur method for the square root of a matrix",
853
    Linear Algebra and its Applications, 52-53, 1983, 127-140.
854
    [doi:10.1016/0024-3795(83)80010-X](https://doi.org/10.1016/0024-3795(83)80010-X)
855

856
[^H87]:
857

858
    Nicholas J. Higham, "Computing real square roots of a real matrix",
859
    Linear Algebra and its Applications, 88-89, 1987, 405-430.
860
    [doi:10.1016/0024-3795(87)90118-2](https://doi.org/10.1016/0024-3795(87)90118-2)
861

862
# Examples
863
```jldoctest
864
julia> A = [4 0; 0 4]
865
2×2 Matrix{Int64}:
866
 4  0
867
 0  4
868

869
julia> sqrt(A)
870
2×2 Matrix{Float64}:
871
 2.0  0.0
872
 0.0  2.0
873
```
874
"""
875
sqrt(::AbstractMatrix)
876

877
function sqrt(A::AbstractMatrix{T}) where {T<:Union{Real,Complex}}
358✔
878
    if ishermitian(A)
358✔
879
        sqrtHermA = sqrt(Hermitian(A))
92✔
880
        return ishermitian(sqrtHermA) ? copytri!(parent(sqrtHermA), 'U', true) : parent(sqrtHermA)
92✔
881
    elseif istriu(A)
266✔
882
        return triu!(parent(sqrt(UpperTriangular(A))))
76✔
883
    elseif isreal(A)
246✔
884
        SchurF = schur(real(A))
204✔
885
        if istriu(SchurF.T)
204✔
886
            sqrtA = SchurF.Z * sqrt(UpperTriangular(SchurF.T)) * SchurF.Z'
83✔
887
        else
888
            # real sqrt exists whenever no eigenvalues are negative
889
            is_sqrt_real = !any(x -> isreal(x) && real(x) < 0, SchurF.values)
1,936✔
890
            # sqrt_quasitriu uses LAPACK functions for non-triu inputs
891
            if typeof(sqrt(zero(T))) <: BlasFloat && is_sqrt_real
106✔
892
                sqrtA = SchurF.Z * sqrt_quasitriu(SchurF.T) * SchurF.Z'
82✔
893
            else
894
                SchurS = Schur{Complex}(SchurF)
24✔
895
                sqrtA = SchurS.Z * sqrt(UpperTriangular(SchurS.T)) * SchurS.Z'
24✔
896
            end
897
        end
898
        return eltype(A) <: Complex ? complex(sqrtA) : sqrtA
155✔
899
    else
900
        SchurF = schur(A)
35✔
901
        return SchurF.vectors * sqrt(UpperTriangular(SchurF.T)) * SchurF.vectors'
35✔
902
    end
903
end
904

905
sqrt(A::AdjointAbsMat) = adjoint(sqrt(parent(A)))
40✔
906
sqrt(A::TransposeAbsMat) = transpose(sqrt(parent(A)))
40✔
907

908
function inv(A::StridedMatrix{T}) where T
802✔
909
    checksquare(A)
805✔
910
    S = typeof((oneunit(T)*zero(T) + oneunit(T)*zero(T))/oneunit(T))
799✔
911
    AA = convert(AbstractArray{S}, A)
799✔
912
    if istriu(AA)
799✔
913
        Ai = triu!(parent(inv(UpperTriangular(AA))))
117✔
914
    elseif istril(AA)
682✔
915
        Ai = tril!(parent(inv(LowerTriangular(AA))))
36✔
916
    else
917
        Ai = inv!(lu(AA))
646✔
918
        Ai = convert(typeof(parent(Ai)), Ai)
640✔
919
    end
920
    return Ai
785✔
921
end
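
# Illustrative sketch, not from the original file: `inv` uses the triangular solvers
# when the input is upper or lower triangular and otherwise inverts via LU; in all
# cases the result satisfies the defining identity:
#     A = [4.0 3.0; 6.0 3.0]
#     A * inv(A) ≈ I   # true (LU branch, since A is neither triu nor tril)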
922

923
"""
924
    cos(A::AbstractMatrix)
925

926
Compute the matrix cosine of a square matrix `A`.
927

928
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
929
compute the cosine. Otherwise, the cosine is determined by calling [`exp`](@ref).
930

931
# Examples
932
```jldoctest
933
julia> cos(fill(1.0, (2,2)))
934
2×2 Matrix{Float64}:
935
  0.291927  -0.708073
936
 -0.708073   0.291927
937
```
938
"""
939
function cos(A::AbstractMatrix{<:Real})
111✔
940
    if issymmetric(A)
111✔
941
        return copytri!(parent(cos(Symmetric(A))), 'U')
36✔
942
    end
943
    T = complex(float(eltype(A)))
75✔
944
    return real(exp!(T.(im .* A)))
75✔
945
end
946
function cos(A::AbstractMatrix{<:Complex})
148✔
947
    if ishermitian(A)
148✔
948
        return copytri!(parent(cos(Hermitian(A))), 'U', true)
34✔
949
    end
950
    T = complex(float(eltype(A)))
114✔
951
    X = exp!(T.(im .* A))
228✔
952
    @. X = (X + $exp!(T(-im*A))) / 2
228✔
953
    return X
114✔
954
end
955

956
"""
957
    sin(A::AbstractMatrix)
958

959
Compute the matrix sine of a square matrix `A`.
960

961
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
962
compute the sine. Otherwise, the sine is determined by calling [`exp`](@ref).
963

964
# Examples
965
```jldoctest
966
julia> sin(fill(1.0, (2,2)))
967
2×2 Matrix{Float64}:
968
 0.454649  0.454649
969
 0.454649  0.454649
970
```
971
"""
972
function sin(A::AbstractMatrix{<:Real})
111✔
973
    if issymmetric(A)
111✔
974
        return copytri!(parent(sin(Symmetric(A))), 'U')
37✔
975
    end
976
    T = complex(float(eltype(A)))
74✔
977
    return imag(exp!(T.(im .* A)))
74✔
978
end
979
function sin(A::AbstractMatrix{<:Complex})
148✔
980
    if ishermitian(A)
148✔
981
        return copytri!(parent(sin(Hermitian(A))), 'U', true)
35✔
982
    end
983
    T = complex(float(eltype(A)))
113✔
984
    X = exp!(T.(im .* A))
226✔
985
    Y = exp!(T.(.-im .* A))
226✔
986
    @inbounds for i in eachindex(X)
226✔
987
        x, y = X[i]/2, Y[i]/2
825✔
988
        X[i] = Complex(imag(x)-imag(y), real(y)-real(x))
825✔
989
    end
1,537✔
990
    return X
113✔
991
end
992

993
"""
994
    sincos(A::AbstractMatrix)
995

996
Compute the matrix sine and cosine of a square matrix `A`.
997

998
# Examples
999
```jldoctest
1000
julia> S, C = sincos(fill(1.0, (2,2)));
1001

1002
julia> S
1003
2×2 Matrix{Float64}:
1004
 0.454649  0.454649
1005
 0.454649  0.454649
1006

1007
julia> C
1008
2×2 Matrix{Float64}:
1009
  0.291927  -0.708073
1010
 -0.708073   0.291927
1011
```
1012
"""
1013
function sincos(A::AbstractMatrix{<:Real})
31✔
1014
    if issymmetric(A)
31✔
1015
        symsinA, symcosA = sincos(Symmetric(A))
2✔
1016
        sinA = copytri!(parent(symsinA), 'U')
6✔
1017
        cosA = copytri!(parent(symcosA), 'U')
6✔
1018
        return sinA, cosA
2✔
1019
    end
1020
    T = complex(float(eltype(A)))
29✔
1021
    c, s = reim(exp!(T.(im .* A)))
58✔
1022
    return s, c
28✔
1023
end
1024
function sincos(A::AbstractMatrix{<:Complex})
64✔
1025
    if ishermitian(A)
64✔
1026
        hermsinA, hermcosA = sincos(Hermitian(A))
2✔
1027
        sinA = copytri!(parent(hermsinA), 'U', true)
6✔
1028
        cosA = copytri!(parent(hermcosA), 'U', true)
6✔
1029
        return sinA, cosA
2✔
1030
    end
1031
    T = complex(float(eltype(A)))
62✔
1032
    X = exp!(T.(im .* A))
124✔
1033
    Y = exp!(T.(.-im .* A))
124✔
1034
    @inbounds for i in eachindex(X)
124✔
1035
        x, y = X[i]/2, Y[i]/2
431✔
1036
        X[i] = Complex(imag(x)-imag(y), real(y)-real(x))
431✔
1037
        Y[i] = x+y
431✔
1038
    end
800✔
1039
    return X, Y
62✔
1040
end
1041

1042
"""
1043
    tan(A::AbstractMatrix)
1044

1045
Compute the matrix tangent of a square matrix `A`.
1046

1047
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
1048
compute the tangent. Otherwise, the tangent is determined by calling [`exp`](@ref).
1049

1050
# Examples
1051
```jldoctest
1052
julia> tan(fill(1.0, (2,2)))
1053
2×2 Matrix{Float64}:
1054
 -1.09252  -1.09252
1055
 -1.09252  -1.09252
1056
```
1057
"""
1058
function tan(A::AbstractMatrix)
113✔
1059
    if ishermitian(A)
113✔
1060
        return copytri!(parent(tan(Hermitian(A))), 'U', true)
36✔
1061
    end
1062
    S, C = sincos(A)
77✔
1063
    S /= C
76✔
1064
    return S
76✔
1065
end
1066

1067
"""
1068
    cosh(A::AbstractMatrix)
1069

1070
Compute the matrix hyperbolic cosine of a square matrix `A`.
1071
"""
1072
function cosh(A::AbstractMatrix)
162✔
1073
    if ishermitian(A)
162✔
1074
        return copytri!(parent(cosh(Hermitian(A))), 'U', true)
41✔
1075
    end
1076
    X = exp(A)
121✔
1077
    @. X = (X + $exp!(float(-A))) / 2
242✔
1078
    return X
121✔
1079
end
1080

1081
"""
1082
    sinh(A::AbstractMatrix)
1083

1084
Compute the matrix hyperbolic sine of a square matrix `A`.
1085
"""
1086
function sinh(A::AbstractMatrix)
162✔
1087
    if ishermitian(A)
162✔
1088
        return copytri!(parent(sinh(Hermitian(A))), 'U', true)
42✔
1089
    end
1090
    X = exp(A)
120✔
1091
    @. X = (X - $exp!(float(-A))) / 2
240✔
1092
    return X
120✔
1093
end
1094

1095
"""
1096
    tanh(A::AbstractMatrix)
1097

1098
Compute the matrix hyperbolic tangent of a square matrix `A`.
1099
"""
1100
function tanh(A::AbstractMatrix)
102✔
1101
    if ishermitian(A)
102✔
1102
        return copytri!(parent(tanh(Hermitian(A))), 'U', true)
30✔
1103
    end
1104
    X = exp(A)
72✔
1105
    Y = exp!(float.(.-A))
144✔
1106
    @inbounds for i in eachindex(X)
144✔
1107
        x, y = X[i], Y[i]
502✔
1108
        X[i] = x - y
502✔
1109
        Y[i] = x + y
502✔
1110
    end
904✔
1111
    X /= Y
72✔
1112
    return X
72✔
1113
end
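
# Illustrative sketch, not from the original file: the three hyperbolic functions above
# satisfy the same identities as their scalar counterparts, up to roundoff:
#     A = [0.3 0.1; 0.2 0.4]
#     cosh(A) ≈ (exp(A) + exp(-A)) / 2   # true
#     sinh(A) ≈ (exp(A) - exp(-A)) / 2   # true
#     tanh(A) ≈ sinh(A) / cosh(A)        # true (matrix right-division)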
1114

1115
"""
1116
    acos(A::AbstractMatrix)
1117

1118
Compute the inverse matrix cosine of a square matrix `A`.
1119

1120
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
1121
compute the inverse cosine. Otherwise, the inverse cosine is determined by using
1122
[`log`](@ref) and [`sqrt`](@ref).  For the theory and logarithmic formulas used to compute
1123
this function, see [^AH16_1].
1124

1125
[^AH16_1]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577)
1126

1127
# Examples
1128
```julia-repl
1129
julia> acos(cos([0.5 0.1; -0.2 0.3]))
1130
2×2 Matrix{ComplexF64}:
1131
  0.5-8.32667e-17im  0.1+0.0im
1132
 -0.2+2.63678e-16im  0.3-3.46945e-16im
1133
```
1134
"""
1135
function acos(A::AbstractMatrix)
35✔
1136
    if ishermitian(A)
35✔
1137
        acosHermA = acos(Hermitian(A))
10✔
1138
        return isa(acosHermA, Hermitian) ? copytri!(parent(acosHermA), 'U', true) : parent(acosHermA)
10✔
1139
    end
1140
    SchurF = Schur{Complex}(schur(A))
29✔
1141
    U = UpperTriangular(SchurF.T)
24✔
1142
    R = triu!(parent(-im * log(U + im * sqrt(I - U^2))))
24✔
1143
    return SchurF.Z * R * SchurF.Z'
24✔
1144
end
1145

1146
"""
1147
    asin(A::AbstractMatrix)
1148

1149
Compute the inverse matrix sine of a square matrix `A`.
1150

1151
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
1152
compute the inverse sine. Otherwise, the inverse sine is determined by using [`log`](@ref)
1153
and [`sqrt`](@ref).  For the theory and logarithmic formulas used to compute this function,
1154
see [^AH16_2].
1155

1156
[^AH16_2]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577)
1157

1158
# Examples
1159
```julia-repl
1160
julia> asin(sin([0.5 0.1; -0.2 0.3]))
1161
2×2 Matrix{ComplexF64}:
1162
  0.5-4.16334e-17im  0.1-5.55112e-17im
1163
 -0.2+9.71445e-17im  0.3-1.249e-16im
1164
```
1165
"""
1166
function asin(A::AbstractMatrix)
35✔
1167
    if ishermitian(A)
35✔
1168
        asinHermA = asin(Hermitian(A))
11✔
1169
        return isa(asinHermA, Hermitian) ? copytri!(parent(asinHermA), 'U', true) : parent(asinHermA)
11✔
1170
    end
1171
    SchurF = Schur{Complex}(schur(A))
28✔
1172
    U = UpperTriangular(SchurF.T)
23✔
1173
    R = triu!(parent(-im * log(im * U + sqrt(I - U^2))))
23✔
1174
    return SchurF.Z * R * SchurF.Z'
23✔
1175
end
1176

1177
"""
1178
    atan(A::AbstractMatrix)
1179

1180
Compute the inverse matrix tangent of a square matrix `A`.
1181

1182
If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to
1183
compute the inverse tangent. Otherwise, the inverse tangent is determined by using
1184
[`log`](@ref).  For the theory and logarithmic formulas used to compute this function, see
1185
[^AH16_3].
1186

1187
[^AH16_3]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577)
1188

1189
# Examples
1190
```julia-repl
1191
julia> atan(tan([0.5 0.1; -0.2 0.3]))
1192
2×2 Matrix{ComplexF64}:
1193
  0.5+1.38778e-17im  0.1-2.77556e-17im
1194
 -0.2+6.93889e-17im  0.3-4.16334e-17im
1195
```
1196
"""
1197
function atan(A::AbstractMatrix)
29✔
1198
    if ishermitian(A)
29✔
1199
        return copytri!(parent(atan(Hermitian(A))), 'U', true)
8✔
1200
    end
1201
    SchurF = Schur{Complex}(schur(A))
25✔
1202
    U = im * UpperTriangular(SchurF.T)
20✔
1203
    R = triu!(parent(log((I + U) / (I - U)) / 2im))
20✔
1204
    return SchurF.Z * R * SchurF.Z'
20✔
1205
end
1206

1207
"""
1208
    acosh(A::AbstractMatrix)
1209

1210
Compute the inverse hyperbolic matrix cosine of a square matrix `A`.  For the theory and
1211
logarithmic formulas used to compute this function, see [^AH16_4].
1212

1213
[^AH16_4]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577)
1214
"""
1215
function acosh(A::AbstractMatrix)
36✔
1216
    if ishermitian(A)
36✔
1217
        acoshHermA = acosh(Hermitian(A))
11✔
1218
        return isa(acoshHermA, Hermitian) ? copytri!(parent(acoshHermA), 'U', true) : parent(acoshHermA)
11✔
1219
    end
1220
    SchurF = Schur{Complex}(schur(A))
27✔
1221
    U = UpperTriangular(SchurF.T)
25✔
1222
    R = triu!(parent(log(U + sqrt(U - I) * sqrt(U + I))))
25✔
1223
    return SchurF.Z * R * SchurF.Z'
25✔
1224
end
1225

1226
"""
1227
    asinh(A::AbstractMatrix)
1228

1229
Compute the inverse hyperbolic matrix sine of a square matrix `A`.  For the theory and
1230
logarithmic formulas used to compute this function, see [^AH16_5].
1231

1232
[^AH16_5]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577)
1233
"""
1234
function asinh(A::AbstractMatrix)
30✔
1235
    if ishermitian(A)
30✔
1236
        return copytri!(parent(asinh(Hermitian(A))), 'U', true)
8✔
1237
    end
1238
    SchurF = Schur{Complex}(schur(A))
24✔
1239
    U = UpperTriangular(SchurF.T)
22✔
1240
    R = triu!(parent(log(U + sqrt(I + U^2))))
22✔
1241
    return SchurF.Z * R * SchurF.Z'
22✔
1242
end
1243

1244
"""
1245
    atanh(A::AbstractMatrix)
1246

1247
Compute the inverse hyperbolic matrix tangent of a square matrix `A`.  For the theory and
1248
logarithmic formulas used to compute this function, see [^AH16_6].
1249

1250
[^AH16_6]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577)
1251
"""
1252
function atanh(A::AbstractMatrix)
24✔
1253
    if ishermitian(A)
24✔
1254
        return copytri!(parent(atanh(Hermitian(A))), 'U', true)
6✔
1255
    end
1256
    SchurF = Schur{Complex}(schur(A))
20✔
1257
    U = UpperTriangular(SchurF.T)
18✔
1258
    R = triu!(parent(log((I + U) / (I - U)) / 2))
18✔
1259
    return SchurF.Z * R * SchurF.Z'
18✔
1260
end
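
# Illustrative sketch, not from the original file: the inverse hyperbolic functions
# above invert their counterparts up to roundoff (for arguments away from the branch cuts):
#     A = [0.5 0.1; -0.2 0.3]
#     sinh(asinh(A)) ≈ A   # true
#     tanh(atanh(A)) ≈ A   # true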
1261

1262
for (finv, f, finvh, fh, fn) in ((:sec, :cos, :sech, :cosh, "secant"),
1263
                                 (:csc, :sin, :csch, :sinh, "cosecant"),
1264
                                 (:cot, :tan, :coth, :tanh, "cotangent"))
1265
    name = string(finv)
1266
    hname = string(finvh)
1267
    @eval begin
1268
        @doc """
1269
            $($name)(A::AbstractMatrix)
1270

1271
        Compute the matrix $($fn) of a square matrix `A`.
1272
        """ ($finv)(A::AbstractMatrix{T}) where {T} = inv(($f)(A))
126✔
1273
        @doc """
1274
            $($hname)(A::AbstractMatrix)
1275

1276
        Compute the matrix hyperbolic $($fn) of square matrix `A`.
1277
        """ ($finvh)(A::AbstractMatrix{T}) where {T} = inv(($fh)(A))
126✔
1278
    end
1279
end
1280

1281
for (tfa, tfainv, hfa, hfainv, fn) in ((:asec, :acos, :asech, :acosh, "secant"),
1282
                                       (:acsc, :asin, :acsch, :asinh, "cosecant"),
1283
                                       (:acot, :atan, :acoth, :atanh, "cotangent"))
1284
    tname = string(tfa)
1285
    hname = string(hfa)
1286
    @eval begin
1287
        @doc """
1288
            $($tname)(A::AbstractMatrix)
1289
        Compute the inverse matrix $($fn) of `A`. """ ($tfa)(A::AbstractMatrix{T}) where {T} = ($tfainv)(inv(A))
45✔
1290
        @doc """
1291
            $($hname)(A::AbstractMatrix)
1292
        Compute the inverse matrix hyperbolic $($fn) of `A`. """ ($hfa)(A::AbstractMatrix{T}) where {T} = ($hfainv)(inv(A))
36✔
1293
    end
1294
end
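
# Illustrative sketch, not from the original file: the two @eval loops above generate
# methods such as `sec`, `csch`, `acot`, ... purely in terms of functions already
# defined in this file, e.g.
#     A = [1.0 0.5; 0.2 2.0]
#     sec(A) ≈ inv(cos(A))     # true by construction of the generated method
#     acot(A) ≈ atan(inv(A))   # true by construction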
1295

1296
"""
1297
    factorize(A)
1298

1299
Compute a convenient factorization of `A`, based upon the type of the input matrix.
1300
`factorize` checks `A` to see if it is symmetric/triangular/etc. if `A` is passed
1301
as a generic matrix. `factorize` checks every element of `A` to verify/rule out
1302
each property. It will short-circuit as soon as it can rule out symmetry/triangular
1303
structure. The return value can be reused for efficient solving of multiple
1304
systems. For example: `A=factorize(A); x=A\\b; y=A\\C`.
1305

1306
| Properties of `A`          | type of factorization                          |
1307
|:---------------------------|:-----------------------------------------------|
1308
| Positive-definite          | Cholesky (see [`cholesky`](@ref))  |
1309
| Dense Symmetric/Hermitian  | Bunch-Kaufman (see [`bunchkaufman`](@ref)) |
1310
| Sparse Symmetric/Hermitian | LDLt (see [`ldlt`](@ref))      |
1311
| Triangular                 | Triangular                                     |
1312
| Diagonal                   | Diagonal                                       |
1313
| Bidiagonal                 | Bidiagonal                                     |
1314
| Tridiagonal                | LU (see [`lu`](@ref))            |
1315
| Symmetric real tridiagonal | LDLt (see [`ldlt`](@ref))      |
1316
| General square             | LU (see [`lu`](@ref))            |
1317
| General non-square         | QR (see [`qr`](@ref))            |
1318

1319
If `factorize` is called on a Hermitian positive-definite matrix, for instance, then `factorize`
1320
will return a Cholesky factorization.
1321

1322
# Examples
1323
```jldoctest
1324
julia> A = Array(Bidiagonal(fill(1.0, (5, 5)), :U))
1325
5×5 Matrix{Float64}:
1326
 1.0  1.0  0.0  0.0  0.0
1327
 0.0  1.0  1.0  0.0  0.0
1328
 0.0  0.0  1.0  1.0  0.0
1329
 0.0  0.0  0.0  1.0  1.0
1330
 0.0  0.0  0.0  0.0  1.0
1331

1332
julia> factorize(A) # factorize will check to see that A is already factorized
1333
5×5 Bidiagonal{Float64, Vector{Float64}}:
1334
 1.0  1.0   ⋅    ⋅    ⋅
1335
  ⋅   1.0  1.0   ⋅    ⋅
1336
  ⋅    ⋅   1.0  1.0   ⋅
1337
  ⋅    ⋅    ⋅   1.0  1.0
1338
  ⋅    ⋅    ⋅    ⋅   1.0
1339
```
1340
This returns a `5×5 Bidiagonal{Float64}`, which can now be passed to other linear algebra functions
1341
(e.g. eigensolvers) which will use specialized methods for `Bidiagonal` types.
1342
"""
1343
function factorize(A::AbstractMatrix{T}) where T
266✔
1344
    m, n = size(A)
266✔
1345
    if m == n
266✔
1346
        if m == 1 return A[1] end
121✔
1347
        utri    = true
121✔
1348
        utri1   = true
121✔
1349
        herm    = true
121✔
1350
        sym     = true
121✔
1351
        for j = 1:n-1, i = j+1:m
754✔
1352
            if utri1
2,921✔
1353
                if A[i,j] != 0
1,578✔
1354
                    utri1 = i == j + 1
302✔
1355
                    utri = false
302✔
1356
                end
1357
            end
1358
            if sym
2,921✔
1359
                sym &= A[i,j] == A[j,i]
1,666✔
1360
            end
1361
            if herm
2,921✔
1362
                herm &= A[i,j] == conj(A[j,i])
1,815✔
1363
            end
1364
            if !(utri1|herm|sym) break end
2,971✔
1365
        end
2,871✔
1366
        ltri = true
121✔
1367
        ltri1 = true
121✔
1368
        for j = 3:n, i = 1:j-2
447✔
1369
            ltri1 &= A[i,j] == 0
1,152✔
1370
            if !ltri1 break end
1,204✔
1371
        end
1,030✔
1372
        if ltri1
121✔
1373
            for i = 1:n-1
68✔
1374
                if A[i,i+1] != 0
158✔
1375
                    ltri &= false
19✔
1376
                    break
19✔
1377
                end
1378
            end
135✔
1379
            if ltri
34✔
1380
                if utri
15✔
1381
                    return Diagonal(A)
5✔
1382
                end
1383
                if utri1
10✔
1384
                    return Bidiagonal(diag(A), diag(A, -1), :L)
5✔
1385
                end
1386
                return LowerTriangular(A)
5✔
1387
            end
1388
            if utri
19✔
1389
                return Bidiagonal(diag(A), diag(A, 1), :U)
5✔
1390
            end
1391
            if utri1
14✔
1392
                # TODO: enable once a specialized, non-dense bunchkaufman method exists
1393
                # if (herm & (T <: Complex)) | sym
1394
                    # return bunchkaufman(SymTridiagonal(diag(A), diag(A, -1)))
1395
                # end
1396
                return lu(Tridiagonal(diag(A, -1), diag(A), diag(A, 1)))
14✔
1397
            end
1398
        end
1399
        if utri
87✔
1400
            return UpperTriangular(A)
5✔
1401
        end
1402
        if herm
82✔
1403
            cf = cholesky(A; check = false)
33✔
1404
            if cf.info == 0
33✔
1405
                return cf
12✔
1406
            else
1407
                return factorize(Hermitian(A))
21✔
1408
            end
1409
        end
1410
        if sym
49✔
1411
            return factorize(Symmetric(A))
4✔
1412
        end
1413
        return lu(A)
45✔
1414
    end
1415
    qr(A, ColumnNorm())
145✔
1416
end
1417
factorize(A::Adjoint)   =   adjoint(factorize(parent(A)))
2✔
1418
factorize(A::Transpose) = transpose(factorize(parent(A)))
2✔
1419
factorize(a::Number)    = a # same as how factorize behaves on Diagonal types
5✔
1420

1421
## Moore-Penrose pseudoinverse
1422

1423
"""
1424
    pinv(M; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ)
1425
    pinv(M, rtol::Real) = pinv(M; rtol=rtol) # to be deprecated in Julia 2.0
1426

1427
Computes the Moore-Penrose pseudoinverse.
1428

1429
For matrices `M` with floating point elements, it is convenient to compute
1430
the pseudoinverse by inverting only singular values greater than
1431
`max(atol, rtol*σ₁)` where `σ₁` is the largest singular value of `M`.
1432

1433
The optimal choice of absolute (`atol`) and relative tolerance (`rtol`) varies
1434
both with the value of `M` and the intended application of the pseudoinverse.
1435
The default relative tolerance is `n*ϵ`, where `n` is the size of the smallest
1436
dimension of `M`, and `ϵ` is the [`eps`](@ref) of the element type of `M`.
1437

1438
For inverting dense ill-conditioned matrices in a least-squares sense,
1439
`rtol = sqrt(eps(real(float(oneunit(eltype(M))))))` is recommended.
1440

1441
For more information, see [^issue8859], [^B96], [^S84], [^KY88].
1442

1443
# Examples
1444
```jldoctest
1445
julia> M = [1.5 1.3; 1.2 1.9]
1446
2×2 Matrix{Float64}:
1447
 1.5  1.3
1448
 1.2  1.9
1449

1450
julia> N = pinv(M)
1451
2×2 Matrix{Float64}:
1452
  1.47287   -1.00775
1453
 -0.930233   1.16279
1454

1455
julia> M * N
1456
2×2 Matrix{Float64}:
1457
 1.0          -2.22045e-16
1458
 4.44089e-16   1.0
1459
```
1460

1461
[^issue8859]: Issue 8859, "Fix least squares", [https://github.com/JuliaLang/julia/pull/8859](https://github.com/JuliaLang/julia/pull/8859)
1462

1463
[^B96]: Åke Björck, "Numerical Methods for Least Squares Problems",  SIAM Press, Philadelphia, 1996, "Other Titles in Applied Mathematics", Vol. 51. [doi:10.1137/1.9781611971484](http://epubs.siam.org/doi/book/10.1137/1.9781611971484)
1464

1465
[^S84]: G. W. Stewart, "Rank Degeneracy", SIAM Journal on Scientific and Statistical Computing, 5(2), 1984, 403-413. [doi:10.1137/0905030](http://epubs.siam.org/doi/abs/10.1137/0905030)
1466

1467
[^KY88]: Konstantinos Konstantinides and Kung Yao, "Statistical analysis of effective singular values in matrix rank determination", IEEE Transactions on Acoustics, Speech and Signal Processing, 36(5), 1988, 757-763. [doi:10.1109/29.1585](https://doi.org/10.1109/29.1585)
1468
"""
1469
function pinv(A::AbstractMatrix{T}; atol::Real = 0.0, rtol::Real = (eps(real(float(oneunit(T))))*min(size(A)...))*iszero(atol)) where T
552✔
1470
    m, n = size(A)
276✔
1471
    Tout = typeof(zero(T)/sqrt(oneunit(T) + oneunit(T)))
276✔
1472
    if m == 0 || n == 0
542✔
1473
        return similar(A, Tout, (n, m))
10✔
1474
    end
1475
    if isdiag(A)
429✔
1476
        indA = diagind(A)
35✔
1477
        dA = view(A, indA)
35✔
1478
        maxabsA = maximum(abs, dA)
35✔
1479
        tol = max(rtol * maxabsA, atol)
35✔
1480
        B = fill!(similar(A, Tout, (n, m)), 0)
1,680,070✔
1481
        indB = diagind(B)
35✔
1482
        B[indB] .= (x -> abs(x) > tol ? pinv(x) : zero(x)).(dA)
2,494✔
1483
        return B
35✔
1484
    end
1485
    SVD         = svd(A)
231✔
1486
    tol         = max(rtol*maximum(SVD.S), atol)
231✔
1487
    Stype       = eltype(SVD.S)
231✔
1488
    Sinv        = fill!(similar(A, Stype, length(SVD.S)), 0)
5,199✔
1489
    index       = SVD.S .> tol
231✔
1490
    Sinv[index] .= pinv.(view(SVD.S, index))
231✔
1491
    return SVD.Vt' * (Diagonal(Sinv) * SVD.U')
231✔
1492
end
1493
function pinv(x::Number)
2,765✔
1494
    xi = inv(x)
2,849✔
1495
    return ifelse(isfinite(xi), xi, zero(xi))
2,765✔
1496
end
1497

1498
## Basis for null space
1499

1500
"""
1501
    nullspace(M; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ)
1502
    nullspace(M, rtol::Real) = nullspace(M; rtol=rtol) # to be deprecated in Julia 2.0
1503

1504
Computes a basis for the nullspace of `M` by including the singular
1505
vectors of `M` whose singular values have magnitudes smaller than `max(atol, rtol*σ₁)`,
1506
where `σ₁` is `M`'s largest singular value.
1507

1508
By default, the relative tolerance `rtol` is `n*ϵ`, where `n`
1509
is the size of the smallest dimension of `M`, and `ϵ` is the [`eps`](@ref) of
1510
the element type of `M`.
1511

1512
# Examples
1513
```jldoctest
1514
julia> M = [1 0 0; 0 1 0; 0 0 0]
1515
3×3 Matrix{Int64}:
1516
 1  0  0
1517
 0  1  0
1518
 0  0  0
1519

1520
julia> nullspace(M)
1521
3×1 Matrix{Float64}:
1522
 0.0
1523
 0.0
1524
 1.0
1525

1526
julia> nullspace(M, rtol=3)
1527
3×3 Matrix{Float64}:
1528
 0.0  1.0  0.0
1529
 1.0  0.0  0.0
1530
 0.0  0.0  1.0
1531

1532
julia> nullspace(M, atol=0.95)
1533
3×1 Matrix{Float64}:
1534
 0.0
1535
 0.0
1536
 1.0
1537
```
1538
"""
1539
function nullspace(A::AbstractVecOrMat; atol::Real = 0.0, rtol::Real = (min(size(A, 1), size(A, 2))*eps(real(float(oneunit(eltype(A))))))*iszero(atol))
1,800✔
1540
    m, n = size(A, 1), size(A, 2)
900✔
1541
    (m == 0 || n == 0) && return Matrix{eigtype(eltype(A))}(I, n, n)
900✔
1542
    SVD = svd(A; full=true)
700✔
1543
    tol = max(atol, SVD.S[1]*rtol)
700✔
1544
    indstart = sum(s -> s .> tol, SVD.S) + 1
1,800✔
1545
    return copy((@view SVD.Vt[indstart:end,:])')
700✔
1546
end
1547

1548
"""
1549
    cond(M, p::Real=2)
1550

1551
Condition number of the matrix `M`, computed using the operator `p`-norm. Valid values for
1552
`p` are `1`, `2` (default), or `Inf`.
1553
"""
1554
function cond(A::AbstractMatrix, p::Real=2)
1,054✔
1555
    if p == 2
1,054✔
1556
        v = svdvals(A)
494✔
1557
        maxv = maximum(v)
494✔
1558
        return iszero(maxv) ? oftype(real(maxv), Inf) : maxv / minimum(v)
494✔
1559
    elseif p == 1 || p == Inf
124✔
1560
        checksquare(A)
95✔
1561
        try
95✔
1562
            Ainv = inv(A)
95✔
1563
            return opnorm(A, p)*opnorm(Ainv, p)
87✔
1564
        catch e
1565
            if isa(e, LAPACKException) || isa(e, SingularException)
8✔
1566
                return convert(float(real(eltype(A))), Inf)
8✔
1567
            else
1568
                rethrow()
×
1569
            end
1570
        end
1571
    end
1572
    throw(ArgumentError("p-norm must be 1, 2 or Inf, got $p"))
8✔
1573
end
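
# Illustrative sketch, not from the original file: for `p == 2` the condition number is
# the ratio of the extreme singular values, and for `p == 1` or `Inf` it is
# opnorm(A, p) * opnorm(inv(A), p):
#     cond([1.0 0.0; 0.0 1.0e-3])        # ≈ 1000.0 (singular values 1 and 1e-3)
#     cond([1.0 0.0; 0.0 1.0e-3], Inf)   # 1000.0 as well for this diagonal matrix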
1574

1575
## Lyapunov and Sylvester equation
1576

1577
# AX + XB + C = 0
1578

1579
"""
1580
    sylvester(A, B, C)
1581

1582
Computes the solution `X` to the Sylvester equation `AX + XB + C = 0`, where `A`, `B` and
1583
`C` have compatible dimensions and `A` and `-B` have no eigenvalues with equal real part.
1584

1585
# Examples
1586
```jldoctest
1587
julia> A = [3. 4.; 5. 6]
1588
2×2 Matrix{Float64}:
1589
 3.0  4.0
1590
 5.0  6.0
1591

1592
julia> B = [1. 1.; 1. 2.]
1593
2×2 Matrix{Float64}:
1594
 1.0  1.0
1595
 1.0  2.0
1596

1597
julia> C = [1. 2.; -2. 1]
1598
2×2 Matrix{Float64}:
1599
  1.0  2.0
1600
 -2.0  1.0
1601

1602
julia> X = sylvester(A, B, C)
1603
2×2 Matrix{Float64}:
1604
 -4.46667   1.93333
1605
  3.73333  -1.8
1606

1607
julia> A*X + X*B ≈ -C
1608
true
1609
```
1610
"""
1611
function sylvester(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix)
10✔
1612
    T = promote_type(float(eltype(A)), float(eltype(B)), float(eltype(C)))
10✔
1613
    return sylvester(copy_similar(A, T), copy_similar(B, T), copy_similar(C, T))
10✔
1614
end
1615
function sylvester(A::AbstractMatrix{T}, B::AbstractMatrix{T}, C::AbstractMatrix{T}) where {T<:BlasFloat}
50✔
1616
    RA, QA = schur(A)
74✔
1617
    RB, QB = schur(B)
62✔
1618
    D = QA' * C * QB
50✔
1619
    D .= .-D
100✔
1620
    Y, scale = LAPACK.trsyl!('N', 'N', RA, RB, D)
50✔
1621
    rmul!(QA * Y * QB', inv(scale))
50✔
1622
end
1623

1624
Base.@propagate_inbounds function _sylvester_2x1!(A, B, C)
417✔
1625
    b = B[1]
417✔
1626
    a21, a12 = A[2, 1], A[1, 2]
417✔
1627
    m11 = b + A[1, 1]
417✔
1628
    m22 = b + A[2, 2]
417✔
1629
    d = m11 * m22 - a12 * a21
417✔
1630
    c1, c2 = C
834✔
1631
    C[1] = (a12 * c2 - m22 * c1) / d
417✔
1632
    C[2] = (a21 * c1 - m11 * c2) / d
417✔
1633
    return C
417✔
1634
end
1635
Base.@propagate_inbounds function _sylvester_1x2!(A, B, C)
250✔
1636
    a = A[1]
250✔
1637
    b21, b12 = B[2, 1], B[1, 2]
250✔
1638
    m11 = a + B[1, 1]
250✔
1639
    m22 = a + B[2, 2]
250✔
1640
    d = m11 * m22 - b21 * b12
250✔
1641
    c1, c2 = C
500✔
1642
    C[1] = (b21 * c2 - m22 * c1) / d
250✔
1643
    C[2] = (b12 * c1 - m11 * c2) / d
250✔
1644
    return C
250✔
1645
end
1646
function _sylvester_2x2!(A, B, C)
411✔
1647
    _, scale = LAPACK.trsyl!('N', 'N', A, B, C)
411✔
1648
    rmul!(C, -inv(scale))
411✔
1649
    return C
411✔
1650
end
1651

1652
sylvester(a::Union{Real,Complex}, b::Union{Real,Complex}, c::Union{Real,Complex}) = -c / (a + b)
56,955✔
1653

1654
# AX + XA' + C = 0
1655

1656
"""
1657
    lyap(A, C)
1658

1659
Computes the solution `X` to the continuous Lyapunov equation `AX + XA' + C = 0`, where no
1660
eigenvalue of `A` has a zero real part and no two eigenvalues are negative complex
1661
conjugates of each other.
1662

1663
# Examples
1664
```jldoctest
1665
julia> A = [3. 4.; 5. 6]
1666
2×2 Matrix{Float64}:
1667
 3.0  4.0
1668
 5.0  6.0
1669

1670
julia> B = [1. 1.; 1. 2.]
1671
2×2 Matrix{Float64}:
1672
 1.0  1.0
1673
 1.0  2.0
1674

1675
julia> X = lyap(A, B)
1676
2×2 Matrix{Float64}:
1677
  0.5  -0.5
1678
 -0.5   0.25
1679

1680
julia> A*X + X*A' ≈ -B
1681
true
1682
```
1683
"""
1684
function lyap(A::AbstractMatrix, C::AbstractMatrix)
10✔
1685
    T = promote_type(float(eltype(A)), float(eltype(C)))
10✔
1686
    return lyap(copy_similar(A, T), copy_similar(C, T))
10✔
1687
end
1688
function lyap(A::AbstractMatrix{T}, C::AbstractMatrix{T}) where {T<:BlasFloat}
50✔
1689
    R, Q = schur(A)
54✔
1690
    D = Q' * C * Q
50✔
1691
    D .= .-D
100✔
1692
    Y, scale = LAPACK.trsyl!('N', T <: Complex ? 'C' : 'T', R, R, D)
50✔
1693
    rmul!(Q * Y * Q', inv(scale))
50✔
1694
end
1695
lyap(a::Union{Real,Complex}, c::Union{Real,Complex}) = -c/(2real(a))
5✔