Math
- machine epsilon: the gap between 1.0 and the next representable float (about 2.22e-16 for float64)
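A quick way to see machine epsilon in NumPy (a small sketch; `np.finfo` reports it directly):
```
import numpy as np

# float64 machine epsilon: spacing between 1.0 and the next representable float
eps = np.finfo(np.float64).eps
print(eps)                    # 2.220446049250313e-16

# anything smaller than eps/2 added to 1.0 is lost to rounding
print(1.0 + eps / 2 == 1.0)   # True
print(1.0 + eps == 1.0)       # False
```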
- the relative residual is always small for a stable solver, even when the relative error is large; a small residual does not imply an accurate solution for ill-conditioned systems
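A sketch of that distinction, using a Hilbert matrix as a standard ill-conditioned example (the matrix choice is my own, not from the notes):
```
import numpy as np
from scipy.linalg import hilbert

A = hilbert(12)              # condition number around 1e16
x_true = np.ones(12)
b = A @ x_true

x_hat = np.linalg.solve(A, b)

# the relative residual stays tiny ...
print(np.linalg.norm(b - A @ x_hat) / np.linalg.norm(b))
# ... while the relative error can be large
print(np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))
```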
- PageRank: solved with power iteration on the (damped) link matrix
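A minimal PageRank sketch via power iteration (the damping factor 0.85 and the toy 3-page link structure are illustrative assumptions):
```
import numpy as np

# column-stochastic link matrix: column j holds the outlinks of page j
A = np.array([[0.0, 0.0, 1.0],
              [0.5, 0.0, 0.0],
              [0.5, 1.0, 0.0]])
d = 0.85                                    # damping factor
n = A.shape[0]
G = d * A + (1 - d) / n * np.ones((n, n))   # "Google matrix"

r = np.ones(n) / n
for _ in range(100):
    r = G @ r    # G is column-stochastic, so r stays a probability vector
print(r)         # PageRank scores
```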
- Newton’s Method.
- Newton’s method usually converges faster than the bisection method: quadratic vs. linear convergence near a simple root
```
# assumes H(r), df(r), r_init, and stop are defined (see the worked example below)
r_newton = r_init
r_arr = [r_newton]
iteration_count_newton = -1
while True:
    iteration_count_newton = iteration_count_newton + 1
    H_ = H(r_newton)
    df_ = -1 * df(r_newton)
    _H = np.linalg.inv(H_)
    s = _H.dot(df_)
    r_newton = r_newton + s
    r_arr.append(r_newton)
    if np.linalg.norm(s) <= stop:
        break
print(iteration_count_newton)
```
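A sketch comparing iteration counts on a simple 1D root-finding problem (f(x) = x^2 - 2, the bracket [1, 2], and the starting point are my own choices):
```
import numpy as np

f = lambda x: x**2 - 2
fp = lambda x: 2 * x           # derivative, needed by Newton

# bisection on [1, 2]
a, b = 1.0, 2.0
n_bis = 0
while b - a > 1e-10:
    m = (a + b) / 2
    if f(a) * f(m) <= 0:
        b = m
    else:
        a = m
    n_bis += 1

# Newton from x = 2
x = 2.0
n_newt = 0
while abs(f(x)) > 1e-10:
    x = x - f(x) / fp(x)
    n_newt += 1

print(n_bis, n_newt)   # roughly 34 bisection steps vs. ~4 Newton steps
```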
- Golden Section Search
```
import numpy as np

f = lambda x: (x - 1.5)**2 + 1   # example unimodal function (not in the notes)
a, b = 0.0, 3.0                  # example starting bracket (not in the notes)
gs = (np.sqrt(5) - 1) / 2        # golden ratio constant, ~0.618

m1 = a + (1 - gs) * (b - a)
m2 = a + gs * (b - a)
dict_ = {}                       # memoize f evaluations
brackets = []
while True:
    brackets.append([a, m1, m2, b])
    if m1 not in dict_:
        dict_[m1] = f(m1)
    if m2 not in dict_:
        dict_[m2] = f(m2)
    if dict_[m1] < dict_[m2]:
        b = m2
    else:
        a = m1
    if abs(a - b) < 0.00001:
        break
    m1 = a + (1 - gs) * (b - a)
    m2 = a + gs * (b - a)
```
- Steepest Descent: `x(k+1) = x(k) - ak * f'(x(k))`, where the step size `ak` is chosen by a line search along the negative gradient
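A tiny sketch of the update rule with a fixed step size (the function and step size are illustrative; the worked example below uses a line search instead):
```
fp = lambda x: 2 * x        # gradient of f(x) = x**2
x, ak = 5.0, 0.1            # example start and fixed step size
for k in range(50):
    x = x - ak * fp(x)      # x(k+1) = x(k) - ak * f'(x(k))
print(x)                    # approaches the minimizer x = 0
```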
- COO format: three parallel arrays `[data, row, col]`, one entry per nonzero
- CSR format: `[data, row_pointers, col_indices]`; the row pointer array marks where each row's entries start in `data`
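Both layouts can be inspected directly with `scipy.sparse` (the small matrix is an arbitrary example):
```
import numpy as np
from scipy.sparse import coo_matrix

A = np.array([[1, 0, 2],
              [0, 0, 3],
              [4, 5, 0]])

coo = coo_matrix(A)
print(coo.data, coo.row, coo.col)          # [1 2 3 4 5] [0 0 1 2 2] [0 2 2 0 1]

csr = coo.tocsr()
print(csr.data, csr.indptr, csr.indices)   # [1 2 3 4 5] [0 2 3 5] [0 2 2 0 1]
```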
- condition number bounds the worst-case relative error: `||dx||/||x|| <= cond(A) * ||db||/||b||`
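A sketch checking that bound numerically (the matrix and the perturbation are arbitrary choices):
```
import numpy as np

A = np.array([[1.0, 2.0],
              [2.0, 4.001]])    # nearly singular, so cond(A) is large
b = np.array([3.0, 6.001])
x = np.linalg.solve(A, b)       # exact solution is [1, 1]

db = np.array([1e-6, -1e-6])    # small perturbation of the right-hand side
x_pert = np.linalg.solve(A, b + db)

rel_err = np.linalg.norm(x_pert - x) / np.linalg.norm(x)
bound = np.linalg.cond(A) * np.linalg.norm(db) / np.linalg.norm(b)
print(rel_err, bound)           # rel_err <= bound
```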
- Power Iteration convergence: the error shrinks by a factor of about `|lambda_2 / lambda_1|` per iteration
```
import numpy as np

def power_iteration(A, num_simulations: int):
    # Ideally choose a random vector to decrease the chance
    # that our vector is orthogonal to the eigenvector
    b_k = np.random.rand(A.shape[1])

    for _ in range(num_simulations):
        # calculate the matrix-by-vector product Ab
        b_k1 = np.dot(A, b_k)

        # calculate the norm
        b_k1_norm = np.linalg.norm(b_k1)

        # re-normalize the vector
        b_k = b_k1 / b_k1_norm

    return b_k

power_iteration(np.array([[0.5, 0.5], [0.2, 0.8]]), 10)
```
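The matrix above has eigenvalues 1 and 0.3, so the error should shrink by about 0.3 per step. A sketch that measures this, using `np.linalg.eig` for the exact eigenvector:
```
import numpy as np

A = np.array([[0.5, 0.5], [0.2, 0.8]])
w, V = np.linalg.eig(A)
v1 = V[:, np.argmax(np.abs(w))]   # dominant eigenvector (lambda_1 = 1)

b = np.random.rand(2)
errs = []
for _ in range(15):
    b = A @ b
    b = b / np.linalg.norm(b)
    # distance to the dominant eigenvector, up to sign
    errs.append(min(np.linalg.norm(b - v1), np.linalg.norm(b + v1)))

# consecutive error ratios approach |lambda_2 / lambda_1| = 0.3
print([errs[i + 1] / errs[i] for i in range(len(errs) - 1)])
```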
Worked example: minimizing f(x, y) = 3 + x^2/8 + y^2/8 - sin(x)cos(y/sqrt(2)) with Newton's method and steepest descent.
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize

def f(r):
    x, y = r
    return 3 + (x**2)/8 + (y**2)/8 - np.sin(x)*np.cos((2**-0.5)*y)

def df(r):
    # gradient of f
    x, y = r
    return np.array([x/4 - np.cos(x)*np.cos((2**-0.5)*y),
                     y/4 + (2**-0.5)*np.sin(x)*np.sin((2**-0.5)*y)])

def H(r):
    # Hessian of f
    x, y = r
    H = np.zeros((2, 2))
    H[0][0] = 1/4 + np.sin(x)*np.cos((2**-0.5)*y)
    H[0][1] = (2**-0.5)*np.cos(x)*np.sin((2**-0.5)*y)
    H[1][0] = (2**-0.5)*np.cos(x)*np.sin((2**-0.5)*y)
    H[1][1] = 1/4 + (2**-1)*np.sin(x)*np.cos((2**-0.5)*y)
    return H
```
Newton iteration:
```
r_init = np.array([2.0, 2.0])   # example starting point (not given in the notes)
stop = 1e-8                     # example stopping tolerance (not given in the notes)

r_newton = r_init
r_arr = [r_newton]
iteration_count_newton = -1
while True:
    iteration_count_newton = iteration_count_newton + 1
    H_ = H(r_newton)
    df_ = -1 * df(r_newton)
    s = np.linalg.inv(H_).dot(df_)   # np.linalg.solve(H_, df_) is preferable to forming the inverse
    r_newton = r_newton + s
    r_arr.append(r_newton)
    if np.linalg.norm(s) <= stop:
        break
print(iteration_count_newton)
Steepest descent with an exact line search along the negative gradient:

r_sd = r_init
r_arr_sd = [r_sd]                # track iterates for the error plot below
iteration_count_sd = -1
while True:
    iteration_count_sd = iteration_count_sd + 1
    s = -1 * df(r_sd)

    def fk(a):
        # f restricted to the ray r_sd + a * s
        x, y = r_sd
        dx, dy = s
        x = x + a*dx
        y = y + a*dy
        return 3 + (x**2)/8 + (y**2)/8 - np.sin(x)*np.cos((2**-0.5)*y)

    a = scipy.optimize.minimize_scalar(fk)
    s = a.x * s
    r_sd = r_sd + s
    r_arr_sd.append(r_sd)
    if np.linalg.norm(s) <= stop:
        break
# error of each Newton iterate, measured against the final iterate
list1 = list(range(iteration_count_newton + 1))
vals = [np.linalg.norm(f(r) - f(r_newton)) for r in r_arr[:-1]]
print(vals)
print(list1)
# same for steepest descent, then plot both
list2 = list(range(iteration_count_sd + 1))
vals_sd = [np.linalg.norm(f(r) - f(r_sd)) for r in r_arr_sd[:-1]]

plt.xlabel("iteration")
plt.ylabel("error")
plt.title("iteration vs. error")
plt.plot(list1, vals, label="newton")
plt.plot(list2, vals_sd, label="sd")
plt.legend()
```