import Pkg; Pkg.activate(@__DIR__); Pkg.instantiate()
https://github.com/Optimal-Control-16-745/lecture-notebooks/blob/main/Lecture%203/minimization.ipynb
using LinearAlgebra
using ForwardDiff
using PyPlot
"""
f(x)
Scalar objective used to illustrate Newton's method on a nonconvex quartic.
f(x) = x^4 + x^3 - x^2 - x
This function has both minima and maxima, so a naive Newton step can ascend if
`∇²f(x) < 0` at the current iterate (see lecture notes on sufficient conditions).
"""
function f(x)
    return x.^4 + x.^3 - x.^2 - x
end
f (generic function with 1 method)
"""
∇f(x)
Analytic gradient (first derivative) of `f`.
"""
function ∇f(x)
    return 4.0*x.^3 + 3.0*x.^2 - 2.0*x - 1.0
end
∇f (generic function with 1 method)
"""
Analytic Hessian (second derivative) of `f`.
"""
function ∇2f(x)
    return 12.0*x.^2 + 6.0*x - 2.0
end
∇2f (generic function with 1 method)
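# Sanity check (not in the original notebook): ForwardDiff is already imported above, so
# we can compare the hand-written derivatives against automatic differentiation at a
# test point; both differences should be ≈ 0 up to floating-point error.
xtest = 0.7
(∇f(xtest) - ForwardDiff.derivative(f, xtest), ∇2f(xtest) - ForwardDiff.derivative(∇f, xtest))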
x = LinRange(-1.75,1.25,1000)
1000-element LinRange{Float64, Int64}: -1.75, -1.747, -1.74399, -1.74099, …, 1.24099, 1.24399, 1.247, 1.25
p = plot(x,f(x))
1-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x16801bfa0>
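# Not in the original notebook: the stationary points of f can be read off by factoring
# ∇f(x) = 4x^3 + 3x^2 - 2x - 1 = (x + 1)(4x^2 - x - 1), i.e. x = -1 and x = (1 ± √17)/8.
# Evaluating ∇2f at each one classifies it: positive at x = -1 and x ≈ 0.64 (local minima),
# negative at x ≈ -0.39 (the local maximum a naive Newton step can be attracted to).
xcrit = [-1.0, (1 - sqrt(17))/8, (1 + sqrt(17))/8]
[∇f.(xcrit) ∇2f.(xcrit)]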
function newton_step(x0)
    # One (unguarded) Newton step on the stationarity condition ∇f(x) = 0
    xn = x0 - ∇2f(x0)\∇f(x0)
end
newton_step (generic function with 1 method)
xguess = -1.5
plot(x, f(x))
plot(xguess, f(xguess), "rx")
1-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x1685ec460>
xnew = newton_step(xguess[end])
xguess = [xguess xnew]
plot(x, f(x))
plot(xguess, f(xguess), "rx")
2-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x1685ec730> PyObject <matplotlib.lines.Line2D object at 0x1685ec820>
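# A sketch (not in the original notebook): a small driver, here called newton_solve, that
# repeats a given step function until the gradient is small. Starting from -1.5, plain
# Newton steps converge to the local minimum at x = -1 (where ∇2f(-1) = 4 > 0).
function newton_solve(step, x0; tol=1e-8, maxiter=50)
    xk = x0
    for _ = 1:maxiter
        abs(∇f(xk)) < tol && return xk
        xk = step(xk)
    end
    return xk
end
newton_solve(newton_step, -1.5)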
xguess = 0.0
plot(x, f(x))
plot(xguess, f(xguess), "rx")
1-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x1693ba920>
xnew = newton_step(xguess[end])
xguess = [xguess xnew]
plot(x, f(x))
plot(xguess, f(xguess), "rx")
2-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x1694506d0> PyObject <matplotlib.lines.Line2D object at 0x1694507c0>
∇2f(0.0)
-2.0
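# The Hessian is negative at this iterate, so the Newton step above moves uphill toward a
# maximum rather than downhill toward a minimum. Reusing the newton_solve sketch from
# above (not in the original notebook): iterating the unregularized step from 0.0
# converges to x = (1 - √17)/8 ≈ -0.39, which is the local maximum of f.
newton_solve(newton_step, 0.0)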
function regularized_newton_step(x0)
    β = 1.0
    H = ∇2f(x0)
    # Shift the Hessian until it is positive definite so the step is a descent direction.
    # one(H) is 1 for the scalar Hessian used in this notebook (and the identity for a
    # matrix Hessian), so the same line works in both cases.
    while !isposdef(H)
        H = H + β*one(H)
    end
    xn = x0 - H\∇f(x0)
end
regularized_newton_step (generic function with 1 method)
xguess = 0.0
plot(x, f(x))
plot(xguess, f(xguess), "rx")
1-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x170479de0>
xnew = regularized_newton_step(xguess[end])
xguess = [xguess xnew]
plot(x, f(x))
plot(xguess, f(xguess), "rx")
2-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x1704eba00> PyObject <matplotlib.lines.Line2D object at 0x1704ebaf0>
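# Not in the original notebook: the full regularized step from 0.0 lands at x = 1.0
# (H is shifted from -2 up to 1, so Δx = 1), and f(1.0) == f(0.0) == 0.0, i.e. the full
# step gives no decrease even though Δx is a descent direction. That is the motivation
# for the backtracking line search added next.
(f(0.0), f(regularized_newton_step(0.0)))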
function backtracking_regularized_newton_step(x0)
    b = 0.1     # Armijo tolerance: fraction of the predicted decrease we require
    c = 0.5     # backtracking factor: shrink α by half whenever the test fails
    β = 1.0     # regularization increment
    H = ∇2f(x0)
    while !isposdef(H)
        H = H + β*one(H)    # shift until positive definite, as in the regularized step
    end
    Δx = -H\∇f(x0)
    # Armijo backtracking line search: reduce the step length α until f decreases enough.
    α = 1.0
    while f(x0 + α*Δx) > f(x0) + b*α*∇f(x0)*Δx
        α = c*α
    end
    print(α)
    xn = x0 + α*Δx
end
backtracking_regularized_newton_step (generic function with 1 method)
xguess = 0.0
plot(x, f(x))
plot(xguess, f(xguess), "rx")
1-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x17058d6c0>
xnew = backtracking_regularized_newton_step(xguess[end])
xguess = [xguess xnew]
plot(x, f(x))
plot(xguess, f(xguess), "rx")
0.5
2-element Vector{PyCall.PyObject}: PyObject <matplotlib.lines.Line2D object at 0x1705df850> PyObject <matplotlib.lines.Line2D object at 0x1705df940>
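# A sketch (not in the original notebook): running the same newton_solve driver with the
# damped, regularized step from 0.0. Each iteration now decreases f, and the iterates
# converge to the local minimum at x = (1 + √17)/8 ≈ 0.64 instead of the maximum.
newton_solve(backtracking_regularized_newton_step, 0.0)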