import numpy as np
def fx01(x):
    """Gaussian-shaped test objective: exp(-||x||^2).

    Accepts a scalar or a numpy array; returns a float.
    """
    sq_norm = np.linalg.norm(x) ** 2
    return np.exp(-1. * sq_norm)
def grad_fx01(x):
    """Gradient of fx01: exp(-||x||^2) * (-2x).

    Same shape as the input ``x``.
    """
    value = np.exp(-1. * np.linalg.norm(x) ** 2)
    return value * -2. * x
def fx02(x):
    """Scalar test objective: x^2 * exp(-x) - x."""
    decay = np.exp(-x)
    return np.power(x, 2) * decay - x
def grad_fx02(x):
    """Derivative of fx02: 2x*exp(-x) - x^2*exp(-x) - 1.

    Term order matches fx02's derivative by the product rule; the shared
    exp(-x) factor is computed once.
    """
    decay = np.exp(-x)
    return 2. * x * decay - np.power(x, 2) * decay - 1
def ArmijoGD(fx, grad_fx, x0, alpha=0.2, beta=0.8, epsilon=2e-4, max_iter=3000):
    """Gradient descent with Armijo backtracking line search.

    Parameters
    ----------
    fx : callable
        Objective function; accepts a scalar or numpy array point.
    grad_fx : callable
        Gradient of ``fx``; same input type as ``fx``.
    x0 : float or numpy.ndarray
        Starting point.
    alpha : float
        Armijo sufficient-decrease constant; must lie in [0, 1].
    beta : float
        Backtracking shrink factor for the step size; must lie in [0, 1].
    epsilon : float
        Stop once the gradient norm drops below this threshold.
    max_iter : int
        Upper bound on the number of descent steps.

    Returns
    -------
    tuple
        ``(x, fx(x))`` at the final iterate.

    Raises
    ------
    ValueError
        If ``alpha`` or ``beta`` falls outside [0, 1].
    """
    if not (0 <= alpha <= 1) or not (0 <= beta <= 1):
        # Raise with the message attached instead of print + bare raise,
        # so callers see why the call was rejected.
        raise ValueError("Some of alpha or beta, out of range")

    step = 1
    x = x0
    # Debug echo of the two demo starting points.  Guarded with ndim == 0
    # because ``x == 0.5 or x == 2`` on a numpy array of size > 1 raises
    # "truth value of an array is ambiguous".
    if np.ndim(x) == 0 and (x == 0.5 or x == 2):
        print("x = {0}".format(x))

    while step < max_iter:
        beta_k = 1
        grad = grad_fx(x)

        # Backtrack until the Armijo sufficient-decrease condition holds:
        # f(x - t*g) <= f(x) - t*alpha*||g||^2, shrinking t by ``beta``.
        while fx(x - beta_k*grad) > fx(x) - beta_k*alpha*(np.linalg.norm(grad)**2):
            beta_k = beta_k * beta
        x = x - beta_k*grad

        # Convergence test uses the gradient at the pre-step point, so the
        # returned x is one accepted step past it (original behavior kept).
        if np.linalg.norm(grad) < epsilon:
            print("Almost Converged")
            break
        step += 1
        print("{0} step -> x : {1}, f(x) : {2}".format(step, x, fx(x)))

    return x, fx(x)
|
45
|
|
46
|
|
47
|
|
48
|
|
49
|
# Demo runs.  The original called ArmijoGD(fx01, grad_fx01, x) with an
# undefined name ``x`` (NameError on import); a concrete scalar starting
# point is supplied instead, and the demos are guarded so importing this
# module has no side effects.
if __name__ == "__main__":
    ArmijoGD(fx01, grad_fx01, x0=1.0)
    ArmijoGD(fx02, grad_fx02, x0=0.5)
    ArmijoGD(fx02, grad_fx02, x0=2)