CGO 06-1: Non-Quadratic Problem in R^2
Download the Jupyter Notebook file from here.
CGO 06-1: Non-Quadratic Problem in $\mathbb{R}^2$
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
- Consider the non-quadratic example \(f(x) = e^{x_1 + 3 x_2 - 0.1 } + e^{ x_1 - 3 x_2 - 0.1 } + e^{ -x_1 - 0.1 } = [1,1,1] \exp(A x + b)\)
- Lines connecting successive iterates show the scaled steps, \(x^{(k+1)}-x^{(k)} = -t^{(k)} \nabla f(x^{(k)})\)
# problem definition: f(x) = 1^T exp(A x + b)
A = np.array([[1, 3], [1, -3], [-1, 0]], dtype=np.float64)
b = np.full((3, 1), -0.1)  # constant offset vector, column shape (3, 1)
def f(x):
    """Objective value 1^T exp(A x + b) at the column point x."""
    z = A @ x + b
    return np.sum(np.exp(z), axis=0)
def g(x):
    """Gradient of f at x, i.e. A^T exp(A x + b), as a column vector."""
    w = np.exp(A @ x + b)
    return A.T @ w
# backtracking line-search parameters:
# alpha — sufficient-decrease fraction, beta — step shrink factor
alpha, beta = 0.1, 0.7
# starting point (column vector) and its objective value
x0 = np.array([[-1.0], [1.0]])
f0 = f(x0)  # initial function value
# calculate the minimum with a few full Newton steps
# (the problem is smooth and strongly convex near x0, so 10 steps suffice)
x = x0.copy()
for _ in range(10):
    w = np.exp(A @ x + b)          # elementwise exp(a_i^T x + b_i)
    grad = A.T @ w                 # gradient A^T w
    hess = (A.T * w.ravel()) @ A   # Hessian A^T diag(w) A via broadcasting
    x = x - np.linalg.solve(hess, grad)
xmin = x
fmin = f(xmin)
Show five contour lines $f_0 + k \Delta,\quad k\in\{-3,-2,-1,0,1\},\quad \Delta = (f_0-f_\text{min})/4$
# Trace each contour level by marching along rays from the origin:
# on each ray the crossing of a level is bracketed by two samples,
# and the contour point is taken as their midpoint.
nangles = 1000
nopts = 1000
thetas = np.linspace(0, 2*np.pi, nangles)
radii = np.linspace(0, 4, nopts)   # hoisted: same radial grid for every ray
cont = np.zeros((5, 2, nangles))
Delta = (f0 - fmin)/4
# levels in the same order as the rows of `cont`
levels = [f0, f0 - 3*Delta, f0 - 2*Delta, f0 - Delta, f0 + Delta]

def _crossing(ray, below):
    "Midpoint of the two ray samples bracketing the outermost level crossing."
    ind = np.nonzero(below)[0].max()
    return (ray[:, ind] + ray[:, ind+1]) / 2

for i in range(nangles):
    ray = np.vstack([radii*np.cos(thetas[i]),
                     radii*np.sin(thetas[i])])
    # b is (3,1) and broadcasts over the nopts columns — no repeat needed
    fray = np.exp(A @ ray + b).sum(0)
    for j, level in enumerate(levels):
        cont[j, :, i] = _crossing(ray, fray < level)
def plt_contour():
    "Plot the five precomputed contour curves as dashed lines on hidden, equal-aspect axes."
    fig, ax = plt.subplots()
    for k in range(cont.shape[0]):
        ax.plot(cont[k, 0, :], cont[k, 1, :], '--')
    ax.axis('equal')
    ax.axis('off')
    return ax
Run the Gradient Method
# Gradient method with backtracking line search
maxiters = 100
xs = np.zeros((2, maxiters))   # iterate history, one column per iterate
fs = np.zeros((1, maxiters))   # objective history
x = x0.copy()
for i in range(maxiters):
    fval = f(x)
    xs[:, i:i+1] = x
    fs[:, i] = fval
    if fval - fmin < 1e-10:    # stopping criterion: objective near optimum
        break
    # Backtracking line search (Armijo sufficient-decrease test)
    gval = g(x)                # gradient
    v = -gval                  # descent direction: negative gradient
    s = 1
    for k in range(10):        # shrink the step at most 10 times
        xnew = x + s * v
        fxnew = f(xnew)
        if fxnew < fval + s * alpha * gval.T @ v:
            break
        s = beta * s
    x = x + s * v
# Plot path — slice to i+1 so the final iterate (stored at column i) is included
ax = plt_contour()
ax.plot(xs[0, 0:i+1], xs[1, 0:i+1], 'o')
ax.plot(xs[0, 0:i+1], xs[1, 0:i+1], '-')
Steepest Descent Method
# Steepest descent in the norm induced by a fixed metric H
H = np.diag([2, 8])            # fixed positive-definite metric
maxiters = 100
xs = np.zeros((2, maxiters))   # iterate history, one column per iterate
fs = np.zeros((1, maxiters))   # objective history
x = x0.copy()
for i in range(maxiters):
    fval = f(x)
    xs[:, i:i+1] = x
    fs[:, i] = fval
    if fval - fmin < 1e-10:    # stopping criterion: objective near optimum
        break
    gval = g(x)                # gradient
    v = np.linalg.solve(-H, gval)  # steepest-descent direction -H^{-1} g
    s = 1
    for k in range(10):        # backtracking: shrink step at most 10 times
        xnew = x + s * v
        fxnew = f(xnew)
        if fxnew < fval + s * alpha * gval.T @ v:
            break
        s = beta * s
    x = x + s * v
# Plot path — slice to i+1 so the final iterate (stored at column i) is included
ax = plt_contour()
ax.plot(xs[0, 0:i+1], xs[1, 0:i+1], 'o')
ax.plot(xs[0, 0:i+1], xs[1, 0:i+1], '-')
Newton Method
# Damped Newton method with backtracking line search
maxiters = 100
xs = np.zeros((2, maxiters))   # iterate history, one column per iterate
fs = np.zeros((1, maxiters))   # objective history
x = x0.copy()
for i in range(maxiters):
    fval = f(x)
    xs[:, i:i+1] = x
    fs[:, i] = fval
    if fval - fmin < 1e-10:    # stopping criterion: objective near optimum
        break
    # Newton step: solve H v = -g with H = A^T diag(exp(Ax+b)) A
    gval = g(x)
    H = A.T @ np.diag(np.exp(A @ x + b).flatten()) @ A
    v = np.linalg.solve(-H, gval)
    s = 1
    for k in range(10):        # backtracking: shrink step at most 10 times
        xnew = x + s * v
        fxnew = f(xnew)
        if fxnew < fval + s * alpha * gval.T @ v:
            break
        s = beta * s
    x = x + s * v
# Plot path — slice to i+1 so the final iterate (stored at column i) is included
ax = plt_contour()
ax.plot(xs[0, 0:i+1], xs[1, 0:i+1], 'o')
ax.plot(xs[0, 0:i+1], xs[1, 0:i+1], '-')