QuantEcon / QuantEcon.py / build 17227928141
26 Aug 2025 04:28AM UTC. Coverage: 92.626% (remained the same)

Pull Request #787: Migrate np.dot and .dot() method calls to @ operator in library code only
Merge 2d53230c0 into 4a889ad1b (github / web-flow)

113 of 158 new or added lines in 14 files covered (71.52%)
1 existing line in 1 file now uncovered
7512 of 8110 relevant lines covered (92.63%)
2.78 hits per line

Source File: /quantecon/_lqcontrol.py (93.13% covered)
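
The rewrite pattern from the PR, shown on a representative expression from this file (an illustrative sketch, not an excerpt from the diff):

    # Before the migration: nested np.dot calls
    S1 = Q + self.beta * np.dot(B.T, np.dot(P, B))
    # After the migration: the @ matrix-multiplication operator
    S1 = Q + self.beta * (B.T @ P @ B)
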
"""
Provides a class called LQ for solving linear quadratic control
problems, and a class called LQMarkov for solving Markov jump
linear quadratic control problems.

"""
from textwrap import dedent
import numpy as np
from scipy.linalg import solve
from ._matrix_eqn import solve_discrete_riccati, solve_discrete_riccati_system
from .util import check_random_state
from .markov import MarkovChain


class LQ:
    r"""
    This class is for analyzing linear quadratic optimal control
    problems of either the infinite horizon form

    .. math::

        \min \mathbb{E}
        \Big[ \sum_{t=0}^{\infty} \beta^t r(x_t, u_t) \Big]

    with

    .. math::

         r(x_t, u_t) := x_t' R x_t + u_t' Q u_t + 2 u_t' N x_t

    or the finite horizon form

    .. math::

         \min \mathbb{E}
         \Big[
         \sum_{t=0}^{T-1} \beta^t r(x_t, u_t) + \beta^T x_T' R_f x_T
         \Big]

    Both are minimized subject to the law of motion

    .. math::

         x_{t+1} = A x_t + B u_t + C w_{t+1}

    Here :math:`x` is n x 1, :math:`u` is k x 1, :math:`w` is j x 1 and the
    matrices are conformable for these dimensions.  The sequence :math:`{w_t}`
    is assumed to be white noise, with zero mean and
    :math:`\mathbb{E} [ w_t w_t' ] = I`, the j x j identity.

    If :math:`C` is not supplied as a parameter, the model is assumed to be
    deterministic (and :math:`C` is set to a zero matrix of appropriate
    dimension).

    For this model, the time t value (i.e., cost-to-go) function :math:`V_t`
    takes the form

    .. math::

         x_t' P_t x_t + d_t

    and the optimal policy is of the form :math:`u_t = -F_t x_t`.  In the
    infinite horizon case, :math:`V, P, d` and :math:`F` are all stationary.

    Parameters
    ----------
    Q : array_like(float)
        Q is the payoff (or cost) matrix that corresponds with the
        control variable u and is k x k. Should be symmetric and
        non-negative definite
    R : array_like(float)
        R is the payoff (or cost) matrix that corresponds with the
        state variable x and is n x n. Should be symmetric and
        non-negative definite
    A : array_like(float)
        A is part of the state transition as described above. It should
        be n x n
    B : array_like(float)
        B is part of the state transition as described above. It should
        be n x k
    C : array_like(float), optional(default=None)
        C is part of the state transition as described above and
        corresponds to the random variable today.  If the model is
        deterministic then C should take default value of None
    N : array_like(float), optional(default=None)
        N is the cross product term in the payoff, as above. It should
        be k x n.
    beta : scalar(float), optional(default=1)
        beta is the discount parameter
    T : scalar(int), optional(default=None)
        T is the number of periods in a finite horizon problem.
    Rf : array_like(float), optional(default=None)
        Rf is the final (in a finite horizon model) payoff (or cost)
        matrix that corresponds with the state variable x and is n x
        n.  Should be symmetric and non-negative definite

    Attributes
    ----------
    Q, R, N, A, B, C, beta, T, Rf : see Parameters
    P : array_like(float)
        P is part of the value function representation of
        :math:`V(x) = x'Px + d`
    d : array_like(float)
        d is part of the value function representation of
        :math:`V(x) = x'Px + d`
    F : array_like(float)
        F is the policy rule that determines the choice of control in
        each period.
    k, n, j : scalar(int)
        The dimensions of the matrices as presented above

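    Examples
    --------
    A minimal scalar sketch, with all coefficients chosen purely for
    illustration (this assumes ``LQ`` is exported from the top-level
    ``quantecon`` namespace).  With Q = R = A = B = 1 and the default
    beta = 1, the stationary P solves P = 1 + P - P^2 / (1 + P), whose
    positive root is the golden ratio:

    >>> import numpy as np
    >>> from quantecon import LQ  # assumed top-level export
    >>> lq = LQ(Q=1., R=1., A=1., B=1.)
    >>> P, F, d = lq.stationary_values()
    >>> np.allclose(P, (1 + np.sqrt(5)) / 2)
    True
    >>> np.allclose(F, 2 / (1 + np.sqrt(5)))
    True
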
    """

    def __init__(self, Q, R, A, B, C=None, N=None, beta=1, T=None, Rf=None):
        # == Make sure all matrices can be treated as 2D arrays == #
        converter = lambda X: np.atleast_2d(np.asarray(X, dtype='float'))
        self.A, self.B, self.Q, self.R, self.N = list(map(converter,
                                                          (A, B, Q, R, N)))
        # == Record dimensions == #
        self.k, self.n = self.Q.shape[0], self.R.shape[0]

        self.beta = beta

        if C is None:
            # == If C not given, then model is deterministic. Set C=0. == #
            self.j = 1
            self.C = np.zeros((self.n, self.j))
        else:
            self.C = converter(C)
            self.j = self.C.shape[1]

        if N is None:
            # == No cross product term in payoff. Set N=0. == #
            self.N = np.zeros((self.k, self.n))

        if T:
            # == Model is finite horizon == #
            self.T = T
            self.Rf = converter(Rf)
            self.P = self.Rf
            self.d = 0
        else:
            self.P = None
            self.d = None
            self.T = None

            if (self.C != 0).any() and beta >= 1:
                raise ValueError('beta must be strictly smaller than 1 if '
                                 'T = None and C != 0.')

        self.F = None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        m = """\
        Linear Quadratic control system
          - beta (discount parameter)       : {b}
          - T (time horizon)                : {t}
          - n (number of state variables)   : {n}
          - k (number of control variables) : {k}
          - j (number of shocks)            : {j}
        """
        t = "infinite" if self.T is None else self.T
        return dedent(m.format(b=self.beta, n=self.n, k=self.k, j=self.j,
                               t=t))

    def update_values(self):
        """
        This method is for updating in the finite horizon case.  It
        shifts the current value function

        .. math::

             V_t(x) = x' P_t x + d_t

        and the optimal policy :math:`F_t` one step *back* in time,
        replacing the pair :math:`P_t` and :math:`d_t` with
        :math:`P_{t-1}` and :math:`d_{t-1}`, and :math:`F_t` with
        :math:`F_{t-1}`

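        Examples
        --------
        A one-step sketch with scalars chosen purely for illustration:
        with Q = R = A = B = Rf = 1 and beta = 1, one backward shift
        gives P = R + A'PA - (A'PB)(Q + B'PB)^{-1}(B'PA) = 1.5 and
        F = 0.5 (assumes ``LQ`` is exported from the top-level
        ``quantecon`` namespace):

        >>> import numpy as np
        >>> from quantecon import LQ  # assumed top-level export
        >>> lq = LQ(Q=1., R=1., A=1., B=1., T=1, Rf=1.)
        >>> lq.update_values()
        >>> np.allclose(lq.P, 1.5), np.allclose(lq.F, 0.5)
        (True, True)
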
        """
        # === Simplify notation === #
        Q, R, A, B, N, C = self.Q, self.R, self.A, self.B, self.N, self.C
        P, d = self.P, self.d
        # == Some useful matrices == #
        S1 = Q + self.beta * (B.T @ P @ B)
        S2 = self.beta * (B.T @ P @ A) + N
        S3 = self.beta * (A.T @ P @ A)
        # == Compute F as (Q + beta B'PB)^{-1} (beta B'PA + N) == #
        self.F = solve(S1, S2)
        # === Shift P back in time one step == #
        new_P = R - (S2.T @ self.F) + S3
        # == Recalling that trace(AB) = trace(BA) == #
        new_d = self.beta * (d + np.trace(P @ C @ C.T))
        # == Set new state == #
        self.P, self.d = new_P, new_d

    def stationary_values(self, method='doubling'):
        """
        Computes the matrix :math:`P` and scalar :math:`d` that represent
        the value function

        .. math::

             V(x) = x' P x + d

        in the infinite horizon case.  Also computes the control matrix
        :math:`F` from :math:`u = - Fx`.  Computation is via the solution
        algorithm specified by the `method` option (defaulting to the
        doubling algorithm; see the documentation in
        `matrix_eqn.solve_discrete_riccati`).

        Parameters
        ----------
        method : str, optional(default='doubling')
            Solution method used in solving the associated Riccati
            equation, str in {'doubling', 'qz'}.

        Returns
        -------
        P : array_like(float)
            P is part of the value function representation of
            :math:`V(x) = x'Px + d`
        F : array_like(float)
            F is the policy rule that determines the choice of control
            in each period.
        d : array_like(float)
            d is part of the value function representation of
            :math:`V(x) = x'Px + d`

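        Examples
        --------
        A minimal sketch with scalars chosen purely for illustration;
        the 'doubling' and 'qz' solvers should agree on a well-behaved
        problem (assumes ``LQ`` is exported from the top-level
        ``quantecon`` namespace):

        >>> import numpy as np
        >>> from quantecon import LQ  # assumed top-level export
        >>> lq = LQ(Q=1., R=1., A=1., B=1.)
        >>> P1, F1, d1 = lq.stationary_values(method='doubling')
        >>> P2, F2, d2 = lq.stationary_values(method='qz')
        >>> np.allclose(P1, P2) and np.allclose(F1, F2)
        True
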
        """
        # === Simplify notation === #
        Q, R, A, B, N, C = self.Q, self.R, self.A, self.B, self.N, self.C

        # === Solve Riccati equation, obtain P === #
        A0, B0 = np.sqrt(self.beta) * A, np.sqrt(self.beta) * B
        P = solve_discrete_riccati(A0, B0, R, Q, N, method=method)

        # == Compute F == #
        S1 = Q + self.beta * (B.T @ P @ B)
        S2 = self.beta * (B.T @ P @ A) + N
        F = solve(S1, S2)

        # == Compute d == #
        if self.beta == 1:
            d = 0
        else:
            d = self.beta * np.trace(P @ C @ C.T) / (1 - self.beta)

        # == Bind states and return values == #
        self.P, self.F, self.d = P, F, d

        return P, F, d

    def compute_sequence(self, x0, ts_length=None, method='doubling',
                         random_state=None):
        """
        Compute and return the optimal state and control sequences
        :math:`x_0, ..., x_T` and :math:`u_0, ..., u_T`, under the
        assumption that :math:`{w_t}` is iid and :math:`N(0, 1)`.

        Parameters
        ----------
        x0 : array_like(float)
            The initial state, a vector of length n

        ts_length : scalar(int), optional(default=None)
            Length of the simulation -- defaults to T in the finite
            horizon case

        method : str, optional(default='doubling')
            Solution method used in solving the associated Riccati
            equation, str in {'doubling', 'qz'}. Only relevant when the
            `T` attribute is `None` (i.e., the horizon is infinite).

        random_state : int or np.random.RandomState/Generator, optional
            Random seed (integer) or np.random.RandomState or Generator
            instance to set the initial state of the random number
            generator for reproducibility. If None, a randomly
            initialized RandomState is used.

        Returns
        -------
        x_path : array_like(float)
            An n x T+1 matrix, where the t-th column represents :math:`x_t`

        u_path : array_like(float)
            A k x T matrix, where the t-th column represents :math:`u_t`

        w_path : array_like(float)
            A j x T+1 matrix, where the t-th column represents :math:`w_t`

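        Examples
        --------
        A minimal sketch with scalars chosen purely for illustration,
        checking the documented shapes of the returned paths (assumes
        ``LQ`` is exported from the top-level ``quantecon`` namespace):

        >>> from quantecon import LQ  # assumed top-level export
        >>> lq = LQ(Q=1., R=1., A=1., B=1., C=1., beta=0.95)
        >>> x_path, u_path, w_path = lq.compute_sequence([1.0], ts_length=5,
        ...                                              random_state=1234)
        >>> x_path.shape, u_path.shape, w_path.shape
        ((1, 6), (1, 5), (1, 6))
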
        """

        # === Simplify notation === #
        A, B, C = self.A, self.B, self.C

        # == Preliminaries, finite horizon case == #
        if self.T:
            T = self.T if not ts_length else min(ts_length, self.T)
            self.P, self.d = self.Rf, 0

        # == Preliminaries, infinite horizon case == #
        else:
            T = ts_length if ts_length else 100
            if self.P is None:
                self.stationary_values(method=method)

        # == Set up initial condition and arrays to store paths == #
        random_state = check_random_state(random_state)
        x0 = np.asarray(x0)
        x0 = x0.reshape(self.n, 1)  # Make sure x0 is a column vector
        x_path = np.empty((self.n, T+1))
        u_path = np.empty((self.k, T))
        w_path = random_state.standard_normal((self.j, T+1))
        Cw_path = C @ w_path

        # == Compute and record the sequence of policies == #
        policies = []
        for t in range(T):
            if self.T:  # Finite horizon case
                self.update_values()
            policies.append(self.F)

        # == Use policy sequence to generate states and controls == #
        F = policies.pop()
        x_path[:, 0] = x0.flatten()
        u_path[:, 0] = -(F @ x0).flatten()
        for t in range(1, T):
            F = policies.pop()
            Ax, Bu = A @ x_path[:, t-1], B @ u_path[:, t-1]
            x_path[:, t] = Ax + Bu + Cw_path[:, t]
            u_path[:, t] = -(F @ x_path[:, t])
        Ax, Bu = A @ x_path[:, T-1], B @ u_path[:, T-1]
        x_path[:, T] = Ax + Bu + Cw_path[:, T]

        return x_path, u_path, w_path


class LQMarkov:
    r"""
    This class is for analyzing Markov jump linear quadratic optimal
    control problems of the infinite horizon form

    .. math::

        \min \mathbb{E}
        \Big[ \sum_{t=0}^{\infty} \beta^t r(x_t, s_t, u_t) \Big]

    with

    .. math::

         r(x_t, s_t, u_t) :=
            (x_t' R(s_t) x_t + u_t' Q(s_t) u_t + 2 u_t' N(s_t) x_t)

    subject to the law of motion

    .. math::

         x_{t+1} = A(s_t) x_t + B(s_t) u_t + C(s_t) w_{t+1}

    Here :math:`x` is n x 1, :math:`u` is k x 1, :math:`w` is j x 1 and the
    matrices are conformable for these dimensions.  The sequence :math:`{w_t}`
    is assumed to be white noise, with zero mean and
    :math:`\mathbb{E} [ w_t w_t' ] = I`, the j x j identity.

    If :math:`C` is not supplied as a parameter, the model is assumed to be
    deterministic (and :math:`C` is set to a zero matrix of appropriate
    dimension).

    The optimal value function :math:`V(x_t, s_t)` takes the form

    .. math::

         x_t' P(s_t) x_t + d(s_t)

    and the optimal policy is of the form :math:`u_t = -F(s_t) x_t`.

    Parameters
    ----------
    Π : array_like(float, ndim=2)
        The Markov chain transition matrix with dimension m x m.
    Qs : array_like(float)
        Consists of m symmetric and non-negative definite payoff
        matrices Q(s) with dimension k x k that correspond with
        the control variable u for each Markov state s
    Rs : array_like(float)
        Consists of m symmetric and non-negative definite payoff
        matrices R(s) with dimension n x n that correspond with
        the state variable x for each Markov state s
    As : array_like(float)
        Consists of m state transition matrices A(s) with dimension
        n x n for each Markov state s
    Bs : array_like(float)
        Consists of m state transition matrices B(s) with dimension
        n x k for each Markov state s
    Cs : array_like(float), optional(default=None)
        Consists of m state transition matrices C(s) with dimension
        n x j for each Markov state s. If the model is deterministic
        then Cs should take default value of None
    Ns : array_like(float), optional(default=None)
        Consists of m cross product term matrices N(s) with dimension
        k x n for each Markov state s.
    beta : scalar(float), optional(default=1)
        beta is the discount parameter

    Attributes
    ----------
    Π, Qs, Rs, Ns, As, Bs, Cs, beta : see Parameters
    Ps : array_like(float)
        Ps is part of the value function representation of
        :math:`V(x, s) = x' P(s) x + d(s)`
    ds : array_like(float)
        ds is part of the value function representation of
        :math:`V(x, s) = x' P(s) x + d(s)`
    Fs : array_like(float)
        Fs is the policy rule that determines the choice of control in
        each period at each Markov state
    m : scalar(int)
        The number of Markov states
    k, n, j : scalar(int)
        The dimensions of the matrices as presented above

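    Examples
    --------
    A minimal two-state sketch, with all inputs chosen purely for
    illustration (this assumes ``LQMarkov`` is exported from the
    top-level ``quantecon`` namespace):

    >>> import numpy as np
    >>> from quantecon import LQMarkov  # assumed top-level export
    >>> Π = np.array([[0.9, 0.1],
    ...               [0.1, 0.9]])
    >>> lqm = LQMarkov(Π, Qs=(1., 1.), Rs=(1., 1.), As=(1., 1.),
    ...                Bs=(1., 1.), beta=0.95)
    >>> lqm.m, lqm.n, lqm.k, lqm.j
    (2, 1, 1, 1)
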
    """

    def __init__(self, Π, Qs, Rs, As, Bs, Cs=None, Ns=None, beta=1):

        # == Make sure all matrices for each state are 2D arrays == #
        def converter(Xs):
            return np.array([np.atleast_2d(np.asarray(X, dtype='float'))
                             for X in Xs])
        self.As, self.Bs, self.Qs, self.Rs = list(map(converter,
                                                      (As, Bs, Qs, Rs)))

        # == Record number of states == #
        self.m = self.Qs.shape[0]
        # == Record dimensions == #
        self.k, self.n = self.Qs.shape[1], self.Rs.shape[1]

        if Ns is None:
            # == No cross product term in payoff. Set N=0. == #
            Ns = [np.zeros((self.k, self.n)) for i in range(self.m)]

        self.Ns = converter(Ns)

        if Cs is None:
            # == If C not given, then model is deterministic. Set C=0. == #
            self.j = 1
            Cs = [np.zeros((self.n, self.j)) for i in range(self.m)]

        self.Cs = converter(Cs)
        self.j = self.Cs.shape[2]

        self.beta = beta

        self.Π = np.asarray(Π, dtype='float')

        self.Ps = None
        self.ds = None
        self.Fs = None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        m = """\
        Markov Jump Linear Quadratic control system
          - beta (discount parameter)       : {b}
          - T (time horizon)                : {t}
          - m (number of Markov states)     : {m}
          - n (number of state variables)   : {n}
          - k (number of control variables) : {k}
          - j (number of shocks)            : {j}
        """
        t = "infinite"
        return dedent(m.format(b=self.beta, m=self.m, n=self.n, k=self.k,
                               j=self.j, t=t))

    def stationary_values(self, max_iter=1000):
        """
        Computes the matrix :math:`P(s)` and scalar :math:`d(s)` that
        represent the value function

        .. math::

             V(x, s) = x' P(s) x + d(s)

        in the infinite horizon case.  Also computes the control matrix
        :math:`F(s)` from :math:`u = - F(s) x`.

        Parameters
        ----------
        max_iter : scalar(int), optional(default=1000)
            The maximum number of iterations allowed

        Returns
        -------
        Ps : array_like(float)
            Ps is part of the value function representation of
            :math:`V(x, s) = x' P(s) x + d(s)`
        ds : array_like(float)
            ds is part of the value function representation of
            :math:`V(x, s) = x' P(s) x + d(s)`
        Fs : array_like(float)
            Fs is the policy rule that determines the choice of control in
            each period at each Markov state

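        Examples
        --------
        A minimal sketch with inputs chosen purely for illustration.
        When every Markov state shares the same matrices, the jump
        problem collapses to a standard LQ problem, so the solution
        is identical across states (assumes ``LQMarkov`` is exported
        from the top-level ``quantecon`` namespace):

        >>> import numpy as np
        >>> from quantecon import LQMarkov  # assumed top-level export
        >>> Π = np.array([[0.9, 0.1],
        ...               [0.1, 0.9]])
        >>> lqm = LQMarkov(Π, Qs=(1., 1.), Rs=(1., 1.), As=(1., 1.),
        ...                Bs=(1., 1.), beta=0.95)
        >>> Ps, ds, Fs = lqm.stationary_values()
        >>> np.allclose(Ps[0], Ps[1]) and np.allclose(Fs[0], Fs[1])
        True
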
        """

        # == Simplify notation == #
        beta, Π = self.beta, self.Π
        m, n, k = self.m, self.n, self.k
        As, Bs, Cs = self.As, self.Bs, self.Cs
        Qs, Rs, Ns = self.Qs, self.Rs, self.Ns

        # == Solve for P(s) by iterating the discrete Riccati system == #
        Ps = solve_discrete_riccati_system(Π, As, Bs, Cs, Qs, Rs, Ns, beta,
                                           max_iter=max_iter)

        # == Calculate F and d == #
        Fs = np.array([np.empty((k, n)) for i in range(m)])
        X = np.empty((m, m))
        sum1, sum2 = np.empty((k, k)), np.empty((k, n))
        for i in range(m):
            # CCi = C_i C_i'
            CCi = Cs[i] @ Cs[i].T
            sum1[:, :] = 0.
            sum2[:, :] = 0.
            for j in range(m):
                # for F
                sum1 += beta * Π[i, j] * Bs[i].T @ Ps[j] @ Bs[i]
                sum2 += beta * Π[i, j] * Bs[i].T @ Ps[j] @ As[i]

                # for d
                X[j, i] = np.trace(Ps[j] @ CCi)

            Fs[i][:, :] = solve(Qs[i] + sum1, sum2 + Ns[i])

        ds = solve(np.eye(m) - beta * Π,
                   np.diag(beta * Π @ X).reshape((m, 1))).flatten()

        self.Ps, self.ds, self.Fs = Ps, ds, Fs

        return Ps, ds, Fs

    def compute_sequence(self, x0, ts_length=None, random_state=None):
        """
        Compute and return the optimal state and control sequences
        :math:`x_0, ..., x_T` and :math:`u_0, ..., u_T`, under the
        assumption that :math:`{w_t}` is iid and :math:`N(0, 1)`,
        along with the simulated Markov state sequence
        :math:`s_0, ..., s_T`

        Parameters
        ----------
        x0 : array_like(float)
            The initial state, a vector of length n

        ts_length : scalar(int), optional(default=None)
            Length of the simulation. If None, T is set to be 100

        random_state : int or np.random.RandomState/Generator, optional
            Random seed (integer) or np.random.RandomState or Generator
            instance to set the initial state of the random number
            generator for reproducibility. If None, a randomly
            initialized RandomState is used.

        Returns
        -------
        x_path : array_like(float)
            An n x T+1 matrix, where the t-th column represents :math:`x_t`

        u_path : array_like(float)
            A k x T matrix, where the t-th column represents :math:`u_t`

        w_path : array_like(float)
            A j x T+1 matrix, where the t-th column represents :math:`w_t`

        state : array_like(int)
            Array containing the state values :math:`s_t` of the sample path

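        Examples
        --------
        A minimal sketch with inputs chosen purely for illustration,
        checking the documented shapes of the returned paths (assumes
        ``LQMarkov`` is exported from the top-level ``quantecon``
        namespace):

        >>> import numpy as np
        >>> from quantecon import LQMarkov  # assumed top-level export
        >>> Π = np.array([[0.9, 0.1],
        ...               [0.1, 0.9]])
        >>> lqm = LQMarkov(Π, Qs=(1., 1.), Rs=(1., 1.), As=(1., 1.),
        ...                Bs=(1., 1.), beta=0.95)
        >>> x_path, u_path, w_path, state = lqm.compute_sequence(
        ...     [1.0], ts_length=5, random_state=1234)
        >>> x_path.shape, u_path.shape, w_path.shape, state.shape
        ((1, 6), (1, 5), (1, 6), (6,))
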
        """

        # === Solve for optimal policies === #
        if self.Ps is None:
            self.stationary_values()

        # === Simplify notation === #
        As, Bs, Cs = self.As, self.Bs, self.Cs
        Fs = self.Fs

        random_state = check_random_state(random_state)
        x0 = np.asarray(x0)
        x0 = x0.reshape(self.n, 1)

        T = ts_length if ts_length else 100

        # == Simulate Markov states == #
        chain = MarkovChain(self.Π)
        state = chain.simulate_indices(ts_length=T+1,
                                       random_state=random_state)

        # == Prepare storage arrays == #
        x_path = np.empty((self.n, T+1))
        u_path = np.empty((self.k, T))
        w_path = random_state.standard_normal((self.j, T+1))
        Cw_path = np.empty((self.n, T+1))
        for i in range(T+1):
            Cw_path[:, i] = Cs[state[i]] @ w_path[:, i]

        # == Use policy sequence to generate states and controls == #
        x_path[:, 0] = x0.flatten()
        u_path[:, 0] = - (Fs[state[0]] @ x0).flatten()
        for t in range(1, T):
            Ax = As[state[t]] @ x_path[:, t-1]
            Bu = Bs[state[t]] @ u_path[:, t-1]
            x_path[:, t] = Ax + Bu + Cw_path[:, t]
            u_path[:, t] = - (Fs[state[t]] @ x_path[:, t])
        Ax = As[state[T]] @ x_path[:, T-1]
        Bu = Bs[state[T]] @ u_path[:, T-1]
        x_path[:, T] = Ax + Bu + Cw_path[:, T]

        return x_path, u_path, w_path, state