#
#     This file is part of CasADi.
#
#     CasADi -- A symbolic framework for dynamic optimization.
#     Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
#                             K.U. Leuven. All rights reserved.
#     Copyright (C) 2011-2014 Greg Horn
#
#     CasADi is free software; you can redistribute it and/or
#     modify it under the terms of the GNU Lesser General Public
#     License as published by the Free Software Foundation; either
#     version 3 of the License, or (at your option) any later version.
#
#     CasADi is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#     Lesser General Public License for more details.
#
#     You should have received a copy of the GNU Lesser General Public
#     License along with CasADi; if not, write to the Free Software
#     Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
#
#
import casadi as ca
import numpy as np
import matplotlib.pyplot as plt

# Degree of interpolating polynomial
d = 3
# Get collocation points
tau_root = np.append(0, ca.collocation_points(d, 'legendre'))
# Coefficients of the collocation equation
C = np.zeros((d+1, d+1))
# Coefficients of the continuity equation
D = np.zeros(d+1)
# Coefficients of the quadrature function
B = np.zeros(d+1)
# Construct polynomial basis
for j in range(d+1):
    # Construct Lagrange polynomials to get the polynomial basis at the collocation point
    p = np.poly1d([1])
    for r in range(d+1):
        if r != j:
            p *= np.poly1d([1, -tau_root[r]]) / (tau_root[j]-tau_root[r])
    # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
    D[j] = p(1.0)
    # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the collocation equation
    pder = np.polyder(p)
    for r in range(d+1):
        C[j, r] = pder(tau_root[r])

    # Evaluate the integral of the polynomial to get the coefficients of the quadrature function
    pint = np.polyint(p)
    B[j] = pint(1.0)

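# The rows of C hold the time derivatives of the Lagrange basis polynomials at the
# collocation points, D holds their values at tau = 1 (end of the interval), and B
# holds their integrals over [0, 1], i.e. the quadrature weights for the cost.
# Optional sanity check (not part of the original example): the Lagrange basis is a
# partition of unity, so D and B should each sum to one.
assert abs(np.sum(D) - 1.0) < 1e-9 and abs(np.sum(B) - 1.0) < 1e-9
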
# Time horizon
T = 10.

# Declare model variables
x1 = ca.SX.sym('x1')
x2 = ca.SX.sym('x2')
x = ca.vertcat(x1, x2)
u = ca.SX.sym('u')

# Model equations
xdot = ca.vertcat((1-x2**2)*x1 - x2 + u, x1)

# Objective term
L = x1**2 + x2**2 + u**2

# Continuous time dynamics
f = ca.Function('f', [x, u], [xdot, L], ['x', 'u'], ['xdot', 'L'])
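# Note that f returns both the state derivative and the cost integrand, so the
# objective can be integrated with the same collocation scheme as the dynamics.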

# Control discretization
N = 20  # number of control intervals
h = T/N

# Start with an empty NLP
w = []
w0 = []
lbw = []
ubw = []
J = 0
g = []
lbg = []
ubg = []

# For plotting x and u given w
x_plot = []
u_plot = []

# "Lift" initial conditions
Xk = ca.MX.sym('X0', 2)
w.append(Xk)
lbw.append([0, 1])
ubw.append([0, 1])
w0.append([0, 1])
x_plot.append(Xk)

# Perturb with P
P = ca.MX.sym('P', 2)
Xk = Xk + P

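# P has nominal value zero (p=0 is passed to the solver below), so it does not
# change the solution itself; it only provides a handle for computing sensitivities
# with respect to perturbations of the lifted initial state.
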
# Formulate the NLP
for k in range(N):

    # New NLP variable for the control
    Uk = ca.MX.sym('U_' + str(k))
    w.append(Uk)
    lbw.append([-1])
    ubw.append([.85])
    w0.append([0])
    u_plot.append(Uk)

    # State at collocation points
    Xc = []
    for j in range(d):
        Xkj = ca.MX.sym('X_' + str(k) + '_' + str(j), 2)
        Xc.append(Xkj)
        w.append(Xkj)
        lbw.append([-0.25, -np.inf])
        ubw.append([np.inf,  np.inf])
        w0.append([0, 0])

    # Loop over collocation points
    Xk_end = D[0]*Xk
    for j in range(1, d+1):
        # Expression for the state derivative at the collocation point
        xp = C[0, j]*Xk
        for r in range(d):
            xp = xp + C[r+1, j]*Xc[r]

        # Append collocation equations
        fj, qj = f(Xc[j-1], Uk)
        g.append(h*fj - xp)
        lbg.append([0, 0])
        ubg.append([0, 0])

        # Add contribution to the end state
        Xk_end = Xk_end + D[j]*Xc[j-1]

        # Add contribution to quadrature function
        J = J + B[j]*qj*h

    # New NLP variable for state at end of interval
    Xk = ca.MX.sym('X_' + str(k+1), 2)
    w.append(Xk)
    lbw.append([-0.25, -np.inf])
    ubw.append([np.inf,  np.inf])
    w0.append([0, 0])
    x_plot.append(Xk)

    # Add equality constraint
    g.append(Xk_end-Xk)
    lbg.append([0, 0])
    ubg.append([0, 0])

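# At this point w collects the lifted initial state plus, per interval, the control,
# the collocation states and the end state; g collects the collocation equations
# and the continuity constraints, all of which are equality constraints.
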
# Concatenate vectors
w = ca.vertcat(*w)
g = ca.vertcat(*g)
x_plot = ca.horzcat(*x_plot)
u_plot = ca.horzcat(*u_plot)
w0 = np.concatenate(w0)
lbw = np.concatenate(lbw)
ubw = np.concatenate(ubw)
lbg = np.concatenate(lbg)
ubg = np.concatenate(ubg)

# NLP problem definition
prob = {'f': J, 'x': w, 'g': g, 'p': P}

# Function to get x and u trajectories from w
trajectories = ca.Function('trajectories', [w], [x_plot, u_plot], ['w'], ['x', 'u'])

# Create an NLP solver, using SQP and an active-set QP solver for accurate multipliers
opts = dict(qpsol='qrqp', qpsol_options=dict(print_iter=False, error_on_fail=False), print_time=False)
solver = ca.nlpsol('solver', 'sqpmethod', prob, opts)
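# The SQP method with an active-set QP solver is used because parametric sensitivity
# analysis relies on accurate Lagrange multipliers and a correctly identified active
# set at the solution.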

# Solve the NLP
sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg, p=0)

# Extract trajectories
x_opt, u_opt = trajectories(sol['x'])
x_opt = x_opt.full()  # to numpy array
u_opt = u_opt.full()  # to numpy array

# Plot the result
tgrid = np.linspace(0, T, N+1)
plt.figure(1)
plt.clf()
plt.plot(tgrid, x_opt[0], '--')
plt.plot(tgrid, x_opt[1], '-')
plt.step(tgrid, np.append(np.nan, u_opt[0]), '-.')
plt.xlabel('t')
plt.legend(['x1', 'x2', 'u'])
plt.grid()
plt.show()

# High-level approach:
# Use the factory to calculate, e.g., the Hessian of the optimal f w.r.t. p
hsolver = solver.factory('h', solver.name_in(), ['sym:hess:f:p:p'])
print('hsolver generated')
hsol = hsolver(x0=sol['x'], lam_x0=sol['lam_x'], lam_g0=sol['lam_g'],
               lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg, p=0)
print('Hessian of f w.r.t. p:')
print(hsol['sym_hess_f_p_p'])
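# The result is the 2-by-2 Hessian of the optimal objective with respect to the two
# entries of P, i.e. the second-order sensitivity of the cost to perturbations of
# the initial state.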

# Low-level approach: Calculate directional derivatives.
# We will calculate two directions simultaneously
nfwd = 2

# Forward mode AD for the NLP solver object
fwd_solver = solver.forward(nfwd)
print('fwd_solver generated')

# Seeds, initialized to zero
fwd_lbx = [ca.DM.zeros(sol['x'].sparsity()) for i in range(nfwd)]
fwd_ubx = [ca.DM.zeros(sol['x'].sparsity()) for i in range(nfwd)]
fwd_p = [ca.DM.zeros(P.sparsity()) for i in range(nfwd)]
fwd_lbg = [ca.DM.zeros(sol['g'].sparsity()) for i in range(nfwd)]
fwd_ubg = [ca.DM.zeros(sol['g'].sparsity()) for i in range(nfwd)]

# Perturb P
fwd_p[0][0] = 1  # first nonzero of P
fwd_p[1][1] = 1  # second nonzero of P
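# Each column of the horzcat'ed seed matrices below is one forward direction; with
# these two unit seeds, fwd_f contains the directional derivatives of the optimal
# cost along P[0] and P[1], i.e. the gradient of f w.r.t. P.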

# Calculate sensitivities using AD
sol_fwd = fwd_solver(out_x=sol['x'], out_lam_g=sol['lam_g'], out_lam_x=sol['lam_x'],
                     out_f=sol['f'], out_g=sol['g'], lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg,
                     fwd_lbx=ca.horzcat(*fwd_lbx), fwd_ubx=ca.horzcat(*fwd_ubx),
                     fwd_lbg=ca.horzcat(*fwd_lbg), fwd_ubg=ca.horzcat(*fwd_ubg),
                     p=0, fwd_p=ca.horzcat(*fwd_p))
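# Note: the out_* arguments above supply the previously computed primal-dual solution;
# the sensitivities are obtained by linearizing the optimality conditions at that
# point, not by solving the NLP again.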

# Calculate the same thing using finite differences
h = 1e-3  # finite-difference step (the interval length h is no longer needed)
pert = []
for d in range(nfwd):
    pert.append(solver(x0=sol['x'], lam_g0=sol['lam_g'], lam_x0=sol['lam_x'],
                       lbx=lbw + h*(fwd_lbx[d]+fwd_ubx[d]),
                       ubx=ubw + h*(fwd_lbx[d]+fwd_ubx[d]),
                       lbg=lbg + h*(fwd_lbg[d]+fwd_ubg[d]),
                       ubg=ubg + h*(fwd_lbg[d]+fwd_ubg[d]),
                       p=0 + h*fwd_p[d]))

# Print the result
for s in ['f']:
    print('==========')
    print('Checking ' + s)
    print('finite differences')
    for d in range(nfwd):
        print((pert[d][s]-sol[s])/h)
    print('AD fwd')
    M = sol_fwd['fwd_' + s].full()
    for d in range(nfwd):
        print(M[:, d].flatten())
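# With h = 1e-3, the one-sided finite differences should agree with the forward-mode
# AD results to roughly O(h).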

# Perturb again, in the opposite direction for second order derivatives
pert2 = []
for d in range(nfwd):
    pert2.append(solver(x0=sol['x'], lam_g0=sol['lam_g'], lam_x0=sol['lam_x'],
                        lbx=lbw - h*(fwd_lbx[d]+fwd_ubx[d]),
                        ubx=ubw - h*(fwd_lbx[d]+fwd_ubx[d]),
                        lbg=lbg - h*(fwd_lbg[d]+fwd_ubg[d]),
                        ubg=ubg - h*(fwd_lbg[d]+fwd_ubg[d]),
                        p=0 - h*fwd_p[d]))

# Print the result
for s in ['f']:
    print('finite differences, second order: ' + s)
    for d in range(nfwd):
        print((pert[d][s] - 2*sol[s] + pert2[d][s])/(h*h))
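# The central second differences (pert - 2*sol + pert2)/h^2 approximate the diagonal
# entries of the Hessian of f w.r.t. p and should be comparable to the diagonal of
# the factory Hessian printed above.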

# Reverse mode AD for the NLP solver object
nadj = 1
adj_solver = solver.reverse(nadj)
print('adj_solver generated')

# Seeds, initialized to zero
adj_f = [ca.DM.zeros(sol['f'].sparsity()) for i in range(nadj)]
adj_g = [ca.DM.zeros(sol['g'].sparsity()) for i in range(nadj)]
adj_x = [ca.DM.zeros(sol['x'].sparsity()) for i in range(nadj)]

# Study which inputs influence f
adj_f[0][0] = 1
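# A unit reverse seed on the objective means that the adjoint outputs give the
# gradient of the optimal cost with respect to the corresponding solver inputs;
# below, only adj_p (the gradient w.r.t. the parameter P) is printed.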

# Calculate sensitivities using AD
sol_adj = adj_solver(out_x=sol['x'], out_lam_g=sol['lam_g'], out_lam_x=sol['lam_x'],
                     out_f=sol['f'], out_g=sol['g'], lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg,
                     adj_f=ca.horzcat(*adj_f), adj_g=ca.horzcat(*adj_g),
                     p=0, adj_x=ca.horzcat(*adj_x))

# Print the result
for s in ['p']:
    print('==========')
    print('Checking ' + s)
    print('Reverse mode AD')
    print(sol_adj['adj_' + s])
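# This reverse-mode result is the gradient of the optimal cost w.r.t. P and should
# match the first-order forward-mode and finite-difference values computed above.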