Self-study notes
Course instructor: 刘二大人, teacher at Hebei University of Technology, https://liuii.github.io  Course source: https://www.bilibili.com/video/BV1Y7411d7Ys
一、Gradient Descent (no autograd feedback)
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0  # initial guess for the weight

def forward(x):
    return x * w

def cost(xs, ys):
    # mean squared error over the whole training set
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y_pred - y) ** 2
    return cost / len(xs)

def gradient(xs, ys):
    # d(cost)/dw, averaged over the whole training set
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)  # '+=': accumulate over all samples
    return grad / len(xs)

for epoch in range(100):
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val  # learning rate 0.01
    print("epoch:", epoch, "w:", w)

print("pred: w =", w, "forward(4) =", forward(4))
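For reference, the quantities the code computes, written out (a standard derivation for the linear model $\hat{y} = x \cdot w$, matching the loop above):

$$\mathrm{cost}(w) = \frac{1}{N}\sum_{n=1}^{N} (x_n w - y_n)^2, \qquad \frac{\partial\,\mathrm{cost}}{\partial w} = \frac{1}{N}\sum_{n=1}^{N} 2 x_n (x_n w - y_n)$$

Each epoch applies the update $w \leftarrow w - \alpha \cdot \partial\,\mathrm{cost}/\partial w$ with learning rate $\alpha = 0.01$.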
二、Stochastic Gradient Descent (no autograd feedback)
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0

def forward(x):
    return x * w

def loss(x, y):
    # squared error of a single sample
    y_pred = forward(x)
    return (y_pred - y) ** 2

def gradient(x, y):
    # d(loss)/dw for a single sample
    return 2 * x * (x * w - y)

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        grad_val = gradient(x, y)
        w -= 0.01 * grad_val  # update w after every single sample
        print("epoch:", epoch, "w:", w)
        l = loss(x, y)

print("pred: w =", w, "forward(4) =", forward(4), "last loss =", l)
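The samples are swept in a fixed order here; "stochastic" gradient descent usually visits them in random order. A minimal sketch of that variation, reusing forward/gradient from above (the random.shuffle step is my addition, not part of the course code):

import random

for epoch in range(100):
    samples = list(zip(x_data, y_data))
    random.shuffle(samples)  # assumption: randomize the sample order each epoch
    for x, y in samples:
        w -= 0.01 * gradient(x, y)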
三、Using PyTorch (with autograd feedback)
import torch

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = torch.Tensor([1.0])
w.requires_grad = True  # tell autograd to track gradients for w

def forward(x):
    return w * x

def loss(x, y):
    y_pred = forward(x)
    return (y - y_pred) ** 2

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)                 # forward pass builds the computational graph
        l.backward()                   # backward pass fills w.grad and frees the graph
        w.data -= 0.01 * w.grad.data   # update the raw value, outside the graph
        w.grad.data.zero_()            # reset the gradient, otherwise it accumulates
    print("progress:", epoch, l.item())

print("predict (after training)", 4, forward(4).item())
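Updating through .data works but sidesteps autograd's bookkeeping; a more current idiom (my variation, not the course's code) performs the same update inside torch.no_grad():

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()
        with torch.no_grad():   # suspend graph tracking for the in-place update
            w -= 0.01 * w.grad
        w.grad.zero_()          # clear the accumulated gradient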
四、Linear regression with the torch.nn module
import torch

x_data = torch.Tensor([[1.0], [2.0], [3.0]])  # shape (3, 1): three samples, one feature
y_data = torch.Tensor([[2.0], [4.0], [6.0]])

class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # computes y = w*x + b

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

model = LinearModel()
criterion = torch.nn.MSELoss(reduction='sum')  # 'sum' is the current form of the deprecated size_average=False
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1000):
    y_pred = model(x_data)         # forward pass on the whole batch
    loss = criterion(y_pred, y_data)
    optimizer.zero_grad()          # clear gradients from the previous step
    loss.backward()
    optimizer.step()               # apply the SGD update to all parameters

print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())

x_test = torch.Tensor([[4.0]])
y_test = model(x_test)
print(y_test.data)
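The optimizer is a drop-in component: only its construction changes, while the zero_grad()/backward()/step() loop stays the same. A hedged sketch with Adam in place of SGD (my variation, not from the course):

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  # assumption: Adam instead of SGD
# training loop unchanged: optimizer.zero_grad(); loss.backward(); optimizer.step()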