Building a Multivariate Linear Regression Model with a Neural Network in PyTorch
1. The model's linear equation: y = W·X + b
from torch import nn
import torch

# Manually set weight parameters W (to be learned by the model); 12 values, chosen arbitrarily
weight_set = torch.tensor([[1.5, 2.38, 4.22, 6.5, 7.2, 3.21, 4.44, 6.55, 2.48, -1.75, -3.26, 4.78]])
# Manually set bias b
bias = torch.tensor([7.25])
# Generate 100 random data points, each with 12 features
torch.random.manual_seed(100)
x = torch.randint(1, 10, (100, 12))
x = x.float()
# Transpose the weights: (1, 12) -> (12, 1)
weight_set_trans = weight_set.transpose(0, 1)
# y = x @ W^T + b
y_true = torch.matmul(x, weight_set_trans) + bias
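It's worth a quick sanity check of the shapes involved: x is (100, 12) and the transposed weights are (12, 1), so the matmul yields a (100, 1) target, with the scalar bias broadcast onto every row. A minimal check (the asserts are just illustrative):
# Sanity-check the shapes in y = x @ W^T + b
assert x.shape == (100, 12)
assert weight_set_trans.shape == (12, 1)
assert y_true.shape == (100, 1)  # bias broadcasts across all 100 rows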
2. Define a single-layer network
# Define the model: a single linear layer mapping 12 input features to 1 output
class linear_model(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(12, 1)

    def forward(self, x):
        y = self.layer(x)
        return y

model = linear_model()
h = model(x)
for name, param in model.named_parameters():
    print(f"{name}:{param}")
output:
layer.weight:Parameter containing:
tensor([[ 0.2429, 0.0523, -0.2873, 0.2485, 0.1396, -0.0960, 0.2534, 0.2423,
0.0123, -0.2309, -0.2212, 0.0499]], requires_grad=True)
layer.bias:Parameter containing:
tensor([0.0503], requires_grad=True)
You can see that the model's randomly initialized parameters are still far from the values we set.
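To quantify that gap, you can evaluate the untrained model's error before training begins (a minimal check; it uses the same MSE loss that the training step below defines):
# MSE of the untrained model against the true targets
with torch.no_grad():
    initial_loss = nn.MSELoss()(model(x), y_true)
print(f'initial loss: {initial_loss.item():.3f}')  # large, since the weights start near zero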
3. Train the model
# Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
# Define the loss function
criterion = nn.MSELoss()
# Set the number of training iterations
for epoch in range(5000):
    h = model(x)
    loss = criterion(h, y_true)
    # Compute gradients with backward()
    loss.backward()
    # Update the parameters
    optimizer.step()
    optimizer.zero_grad()  # zero out the gradients
    if epoch % 100 == 0:
        print(f'after {epoch+1} iterations, train_loss: {loss.item():.3f}')
output:
after 1 iterations, train_loss: 36681.406
after 101 iterations, train_loss: 488.356
after 201 iterations, train_loss: 297.450
after 301 iterations, train_loss: 158.137
after 401 iterations, train_loss: 75.587
after 501 iterations, train_loss: 33.060
after 601 iterations, train_loss: 13.407
after 701 iterations, train_loss: 5.149
after 801 iterations, train_loss: 1.968
after 901 iterations, train_loss: 0.831
after 1001 iterations, train_loss: 0.445
after 1101 iterations, train_loss: 0.315
after 1201 iterations, train_loss: 0.268
after 1301 iterations, train_loss: 0.248
after 1401 iterations, train_loss: 0.236
after 1501 iterations, train_loss: 0.226
after 1601 iterations, train_loss: 0.218
after 1701 iterations, train_loss: 0.209
after 1801 iterations, train_loss: 0.200
after 1901 iterations, train_loss: 0.191
print(model.state_dict())
output:
OrderedDict({'layer.weight': tensor([[ 1.5698, 2.4342, 4.2689, 6.5287, 7.2732, 3.2440, 4.4685, 6.6003,
2.5380, -1.7194, -3.2042, 4.8492]]), 'layer.bias': tensor([4.2708])})
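Since both the learned and the ground-truth tensors are in scope, comparing them directly takes one line each:
# Largest element-wise gap between learned and true parameters
with torch.no_grad():
    print((model.layer.weight - weight_set).abs().max())  # weights land close to the targets
    print((model.layer.bias - bias).abs())                # the bias is still a few units off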
# Save the trained model parameters to a file
torch.save(model.state_dict(), 'lr.pth')
# Model prediction: build a fresh model and load the saved parameters into it
model_predict = linear_model()
model_predict.load_state_dict(torch.load('lr.pth'))
predict = model_predict(x)
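To confirm the save/load round trip worked, check that the reloaded model's error matches the final training loss (a minimal check):
# The reloaded model should reproduce the trained fit
with torch.no_grad():
    reload_loss = nn.MSELoss()(model_predict(x), y_true)
print(f'loss after reload: {reload_loss.item():.3f}')  # should match the final training loss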
As the log shows, the loss is already small after about 1000 iterations, and the trained weights end up close to the values we set at the start, though the bias (4.27 versus the 7.25 we set) still has further to converge.
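For larger datasets, the same training loop is usually run over mini-batches rather than the full tensor at once. A sketch using TensorDataset and DataLoader (the batch size and the model_mb / loader names are illustrative, not from the code above):
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(x, y_true)
loader = DataLoader(dataset, batch_size=20, shuffle=True)  # illustrative batch size

model_mb = linear_model()
optimizer = torch.optim.Adam(model_mb.parameters(), lr=0.1)
criterion = nn.MSELoss()
for epoch in range(500):
    for batch_x, batch_y in loader:
        loss = criterion(model_mb(batch_x), batch_y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()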