Bierlaire, M. (2015). Optimization: Principles and Algorithms. EPFL Press
import numpy as np
from scipy import linalg
def quadraticDirect(Q, b):
    """Solve the quadratic problem min_x 0.5 x'Qx + b'x by a direct method.

    The unique minimizer solves the linear system Qx = -b, which is
    factorized here with Cholesky (Q = LL') and solved by forward and
    backward substitution.

    :param Q: symmetric positive definite matrix (n x n).
    :param b: column vector (n x 1).
    :return: the solution x of Qx = -b.
    :raises numpy.linalg.LinAlgError: if Q is not positive definite
        (raised by the Cholesky factorization).
    """
    # Ask scipy for the lower factor directly instead of transposing
    # the (default) upper factor.
    L = linalg.cholesky(Q, lower=True)
    # Forward substitution: L y = -b.
    y = linalg.solve_triangular(L, -b, lower=True)
    # Backward substitution: L' x = y.
    solution = linalg.solve_triangular(L.T, y, lower=False)
    return solution
Example 9.8: $Q=\left(\begin{array}{cccc} 1& 1 & 1 & 1 \\ 1 & 2 & 2 & 2 \\ 1 & 2 & 3 & 3 \\ 1 & 2 & 3 & 4\end{array}\right)$, $b=\left(\begin{array}{c}-4 \\ -7 \\ -9 \\ -10\end{array}\right)$
# Data of Example 9.8: solve Qx = -b with the Cholesky-based solver.
Q = np.array([[1, 1, 1, 1],
              [1, 2, 2, 2],
              [1, 2, 3, 3],
              [1, 2, 3, 4]])
b = np.array([[-4], [-7], [-9], [-10]])
s = quadraticDirect(Q, b)
print(s)
import optimizationExceptions
# Define a function simple to call for the inner product
def scalarInner(a, b):
    """Return the inner product a'b of two column vectors as a Python scalar.

    ``np.asscalar`` was deprecated in NumPy 1.16 and removed in 1.23;
    ``ndarray.item()`` is the supported replacement.
    """
    return (a.T.dot(b)).item()
def conjugateGradient(Q, b, x0):
    """Minimize 0.5 x'Qx + b'x with the conjugate gradient method.

    In exact arithmetic the method converges in at most n iterations for
    an n x n symmetric positive definite Q (Bierlaire 2015, Chapter 9).

    :param Q: symmetric positive definite matrix (n x n).
    :param b: column vector (n x 1).
    :param x0: starting point, column vector (n x 1).
    :return: tuple ``(xk, iters)`` where ``xk`` solves Qx = -b and
        ``iters`` is the list of ``[xk, gk, dk, alphak, betak]`` records,
        one per iteration plus the final state.
    :raises optimizationExceptions.OptimizationLinAlgError: if Q is
        detected not to be positive definite.
    """
    def inner(u, v):
        # Scalar inner product u'v. ndarray.item() replaces np.asscalar,
        # which was removed in NumPy 1.23.
        return (u.T.dot(v)).item()

    n = len(x0)
    xk = x0
    gk = Q.dot(xk) + b          # gradient of the objective at xk
    iters = list()
    dk = -gk                    # first direction: steepest descent
    betak = 0
    for k in range(n):
        denom = inner(dk, Q.dot(dk))
        if denom <= 0:
            # d'Qd <= 0 is impossible for a positive definite matrix.
            raise optimizationExceptions.OptimizationLinAlgError(
                'The matrix must be positive definite')
        alphak = -inner(dk, gk) / denom          # exact line search step
        iters.append([xk, gk, dk, alphak, betak])
        xk = xk + alphak * dk
        gkp1 = Q.dot(xk) + b
        # Fletcher-Reeves style update of the conjugate direction.
        betak = inner(gkp1, gkp1) / inner(gk, gk)
        dk = -gkp1 + betak * dk
        gk = gkp1
    iters.append([xk, gk, dk, alphak, betak])
    return (xk, iters)
Run the algorithm from $x_0=\left(\begin{array}{c}5 \\ 5 \\ 5 \\ 5 \end{array}\right)$.
# Starting point x0 = (5, 5, 5, 5)'.
x0 = np.array([[5.0], [5.0], [5.0], [5.0]])
x, iters = conjugateGradient(Q, b, x0)
print(x)
Calculate the gradient, to check that it is indeed numerically close to 0.
print(Q.dot(x)+b)
Table 9.1, page 231.
# Print one row per iteration; each row spans four lines, one per coordinate.
# Note: the header previously read "ttbetak" — fixed to "betak".
print("k\txk\t\tGrad(xk)\t\tdk\t\talphak\t\tbetak")
for k in range(len(iters)):
    xk, gk, dk, alphak, betak = iters[k]
    # First coordinate, together with the scalars alphak and betak.
    print("{}\t{:+E}\t{:+E}\t{:+E}\t{:+E}\t{:+E}".format(
        k, xk.item(0), gk.item(0), dk.item(0), alphak, betak))
    # Remaining coordinates: only the vector columns.
    for i in (1, 2, 3):
        print(" \t{:+E}\t{:+E}\t{:+E}".format(
            xk.item(i), gk.item(i), dk.item(i)))
An error is triggered if the matrix is not positive definite. Here, $Q=\left(\begin{array}{cccc} 1& 2 & 3 & 4 \\ 5 & 6 & 7 & 8 \\ 9 & 10 & 11 & 12 \\ 13 & 14 & 15 & 16\end{array}\right)$
# This matrix is singular (hence not positive definite): the algorithm
# is expected to raise an error.
Q = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12],
              [13, 14, 15, 16]])
b = np.array([[-4], [-7], [-9], [-10]])
x0 = np.array([[5.0], [5.0], [5.0], [5.0]])
(x, iters) = conjugateGradient(Q, b, x0)