The conjugate gradient method is an iterative algorithm for the numerical solution of particular systems of linear equations, namely those whose matrix is symmetric and positive-definite [wikipedia].
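Concretely, CG solves Ax = b by minimizing the quadratic form f(x) = (1/2) x^T A x - b^T x. Writing r_k for the residual and d_k for the search direction at step k, each iteration applies the standard recurrences, which map directly onto the variables a, q, g_new and beta in the code below:

    alpha_k = (r_k^T r_k) / (d_k^T A d_k)
    x_{k+1} = x_k + alpha_k * d_k
    r_{k+1} = r_k - alpha_k * A d_k
    beta_k  = (r_{k+1}^T r_{k+1}) / (r_k^T r_k)
    d_{k+1} = r_{k+1} + beta_k * d_k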
import numpy as np

MAX_ITERATIONS = 10**4
MAX_ERROR = 10**-8

n = 100
x = np.random.randint(1, 10, size=(n, 1))  # initial guess
t = np.random.rand(n, n)
A = np.dot(t, t.T)  # symmetric by construction; positive-definite almost surely
b = np.random.randint(1, 10, size=(n, 1))  # right-hand side
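# Optional sanity check (illustrative, not part of the original setup):
# eigvalsh returns the eigenvalues of a symmetric matrix; they are all
# positive exactly when A is positive-definite, which holds here whenever
# the random matrix t is invertible (almost surely the case).
print('Smallest eigenvalue of A:', np.linalg.eigvalsh(A).min())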
def conjugate_gradients(A, x, b):
    residual = b - A.dot(x)
    d = residual  # first search direction is the steepest-descent direction
    g_new = np.dot(residual.T, residual).item()  # squared norm of the residual
    iteration = 0
    while iteration < MAX_ITERATIONS and g_new > MAX_ERROR**2:
        q = A.dot(d)
        a = g_new / d.T.dot(q).item()  # optimal step size along d
        x = x + a * d
        residual = residual - a * q  # cheap residual update, no extra matrix-vector product
        g_old = g_new
        g_new = np.dot(residual.T, residual).item()
        beta = g_new / g_old
        d = residual + beta * d  # next direction, A-conjugate to the previous ones
        iteration += 1
    if iteration < MAX_ITERATIONS:
        print('Precision achieved. Iterations:', iteration)
    else:
        print('Convergence failed.')
    return x
x_cg = conjugate_gradients(A, x, b)
error = b - np.dot(A, x_cg)
print('Residual norm:', np.dot(error.T, error).item()**0.5)