Gradient Descent with PyTorch
The function to optimize

The target is a scaled Rosenbrock function. Both of its terms are non-negative, so the global minimum is 0, attained only at (x0, x1) = (1, 1).

def rosenbrock(x0, x1):
    # Scaled Rosenbrock function; global minimum 0 at (1, 1)
    y = 10 * (x1 - x0 ** 2) ** 2 + (x0 - 1) ** 2
    return y
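As a quick sanity check (not in the original article), autograd's gradient can be compared against the hand-derived partials dy/dx0 = -40*x0*(x1 - x0**2) + 2*(x0 - 1) and dy/dx1 = 20*(x1 - x0**2); the test point below is arbitrary:

import torch

x0 = torch.tensor(0.5, requires_grad=True)
x1 = torch.tensor(1.5, requires_grad=True)
rosenbrock(x0, x1).backward()

g0 = -40 * 0.5 * (1.5 - 0.5 ** 2) + 2 * (0.5 - 1)  # hand-derived: -26.0
g1 = 20 * (1.5 - 0.5 ** 2)                          # hand-derived:  25.0
print(x0.grad.item(), g0)  # both -26.0
print(x1.grad.item(), g1)  # both 25.0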
Setting up a grid for visualization

import numpy as np

h = 0.01
x_min, x_max = -2, 2
y_min, y_max = -3, 5

X = np.arange(x_min, x_max, h)
Y = np.arange(y_min, y_max, h)
xx, yy = np.meshgrid(X, Y)
Finding the minimum by brute force

Because rosenbrock uses only arithmetic operators, it works on NumPy arrays and PyTorch tensors alike, so it can be evaluated on the whole grid at once and scanned for the smallest value.

matrix = rosenbrock(xx, yy)

minimum = None
min_x = None
min_y = None
for i in range(matrix.shape[0]):
    for j in range(matrix.shape[1]):
        if minimum is None or minimum > matrix[i][j]:
            minimum = matrix[i][j]
            min_y = Y[i]
            min_x = X[j]
print(min_x, min_y, minimum)
1.0000000000000027 0.9999999999999147 8.208018832734106e-26
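The same lookup can be written more concisely with NumPy (an equivalent alternative to the double loop above, not in the original article):

i, j = np.unravel_index(np.argmin(matrix), matrix.shape)
print(X[j], Y[i], matrix[i][j])  # same triple as the loop version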
Plotting the function, with the minimum marked as a black dot

import matplotlib.pyplot as plt

plt.contourf(xx, yy, np.sqrt(rosenbrock(xx, yy)), alpha=0.5)  # sqrt compresses the range so the contours stay readable
plt.scatter(min_x, min_y, c="k")
plt.colorbar()
plt.grid()
plt.show()
Gradient descent

import numpy as np
import torch

x0 = torch.tensor(0.0, requires_grad=True)
x1 = torch.tensor(4.0, requires_grad=True)

lr = 0.001
iters = 10000

history = []
for i in range(iters):
    # Record the current position as plain floats, detached from the graph
    history.append(np.array([x0.item(), x1.item()]))
    y = rosenbrock(x0, x1)
    y.backward()  # populates x0.grad and x1.grad
    with torch.no_grad():
        x0 -= lr * x0.grad  # update steps stay outside the autograd graph
        x1 -= lr * x1.grad
        x0.grad.zero_()  # gradients accumulate, so clear them every iteration
        x1.grad.zero_()
Displaying the result

import matplotlib.pyplot as plt

plt.contourf(xx, yy, np.sqrt(rosenbrock(xx, yy)), alpha=0.5)
plt.scatter([p[0] for p in history], [p[1] for p in history])  # descent trajectory
plt.scatter(min_x, min_y, c="k")  # true minimum
plt.colorbar()
plt.grid()
plt.show()
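For reference (not in the original article), the same update rule can be delegated to torch.optim.SGD; a minimal sketch that should reproduce the manual loop above:

import torch

x0 = torch.tensor(0.0, requires_grad=True)
x1 = torch.tensor(4.0, requires_grad=True)
optimizer = torch.optim.SGD([x0, x1], lr=0.001)

for i in range(10000):
    optimizer.zero_grad()  # replaces the manual grad.zero_() calls
    rosenbrock(x0, x1).backward()
    optimizer.step()       # performs x <- x - lr * grad
print(x0.item(), x1.item())  # should end up close to (1, 1)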
From multiple starting points

Making x0 and x1 vectors runs four descents in parallel. backward() needs a scalar, so the four losses are summed first; each loss depends only on its own starting point, so the gradients stay independent.

import numpy as np
import torch

x0 = torch.tensor([0.0, -0.5, -1.5, 1.5], requires_grad=True)
x1 = torch.tensor([4.0, 4.0, -1.0, -2.0], requires_grad=True)

lr = 0.001
iters = 10000

history = []
for i in range(iters):
    history.append([x0.detach().clone(), x1.detach().clone()])
    y = rosenbrock(x0, x1)  # y is now a length-4 vector
    # y.backward() would fail on a non-scalar, so reduce with a sum
    s = torch.sum(y)
    s.backward()
    with torch.no_grad():
        x0 -= lr * x0.grad
        x1 -= lr * x1.grad
        x0.grad.zero_()
        x1.grad.zero_()
Displaying the results

import matplotlib.pyplot as plt

plt.contourf(xx, yy, np.sqrt(rosenbrock(xx, yy)), alpha=0.5)
for i in range(4):
    # one trajectory per starting point
    plt.scatter([p[0][i] for p in history], [p[1][i] for p in history])
plt.colorbar()
plt.scatter(min_x, min_y, c="k")
plt.grid()
plt.show()
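The end points can also be read off directly; all four starts should approach (1, 1), though plain gradient descent moves slowly along the curved Rosenbrock valley:

for k in range(4):
    print(f"start {k}: x0 = {x0[k].item():.4f}, x1 = {x1[k].item():.4f}")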
Reference

Original article: https://qiita.com/maskot1977/items/036c01dcddd6484feff8