Machine Learning Day 4
Custom dataset: use the PyTorch framework to implement logistic regression and save the trained model, then load the saved model back and use it for prediction.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
# Hand-crafted 2-D training points: class 1 (label 0) and class 2 (label 1)
class1_points = np.array([[2.1, 1.8],
                          [1.9, 2.4],
                          [2.2, 1.2],
                          [1.8, 1.5],
                          [1.3, 1.7],
                          [1.6, 2.1],
                          [1.7, 1.4]])
class2_points = np.array([[3.5, 3.4],
                          [3.8, 2.7],
                          [3.4, 2.9],
                          [3.1, 3.6],
                          [3.9, 2.4],
                          [4.0, 2.8],
                          [3.3, 2.5]])
# Stack the two clusters into one training set; labels are 0 for class 1 and 1 for class 2
x_train = np.concatenate((class1_points, class2_points), axis=0)
y_train = np.concatenate((np.zeros(len(class1_points)), np.ones(len(class2_points))))

x_train_tensor = torch.tensor(x_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32)

# Fix the random seed so the initial weights are reproducible
seed = 42
torch.manual_seed(seed)
# Logistic regression = a single linear layer followed by a sigmoid
class LogisticRegreModel(nn.Module):
    def __init__(self):
        super(LogisticRegreModel, self).__init__()
        self.fc = nn.Linear(2, 1)  # 2 input features -> 1 output logit

    def forward(self, x):
        x = self.fc(x)
        x = torch.sigmoid(x)  # squash the logit into a probability in (0, 1)
        return x
model = LogisticRegreModel()
cri = nn.BCELoss()  # BCELoss expects probabilities, which matches the sigmoid in forward()
lr = 0.05
optimizer = optim.SGD(model.parameters(), lr=lr)

# Left subplot: data points and decision boundary; right subplot: loss curve
fig, (ax1, ax2) = plt.subplots(1, 2)
epoch_list = []
epoch_loss = []

epochs = 1000
for epoch in range(1, epochs + 1):
    # Forward pass on the full training set, then one gradient step
    y_pre = model(x_train_tensor)
    loss = cri(y_pre, y_train_tensor.unsqueeze(1))  # targets need shape (N, 1) to match y_pre

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Every 50 epochs (and at epoch 1), log the loss and refresh both plots
    if epoch % 50 == 0 or epoch == 1:
        print(f"epoch:{epoch},loss:{loss.item()}")

        # Decision boundary: w1*x1 + w2*x2 + b = 0  ->  x2 = -(w1/w2)*x1 - b/w2
        w1, w2 = model.fc.weight.data[0]
        b = model.fc.bias.data[0]
        slope = -w1.item() / w2.item()
        intercept = -b.item() / w2.item()

        x_min, x_max = 0, 5
        x = np.array([x_min, x_max])
        y = slope * x + intercept

        ax1.clear()
        ax1.plot(x, y, 'r')
        ax1.scatter(x_train[:len(class1_points), 0], x_train[:len(class1_points), 1])
        ax1.scatter(x_train[len(class1_points):, 0], x_train[len(class1_points):, 1])

        ax2.clear()
        epoch_list.append(epoch)
        epoch_loss.append(loss.item())
        ax2.plot(epoch_list, epoch_loss, 'b')

        plt.pause(1)

plt.show()  # keep the final figure open after training finishes
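A quick note on the red line drawn on ax1: the model predicts class 2 (label 1) whenever sigmoid(w1*x1 + w2*x2 + b) > 0.5, which is equivalent to w1*x1 + w2*x2 + b > 0. The decision boundary is therefore the line w1*x1 + w2*x2 + b = 0; solving for x2 gives x2 = -(w1/w2)*x1 - b/w2, which is exactly the slope and intercept computed inside the logging branch.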
The output of running the script (printed loss values plus the two live plots) is shown below.
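The description at the top also calls for saving the trained model and then loading it back to make predictions, which the listing above does not show. Below is a minimal sketch of that step using the model's state_dict; the file name logistic_model.pth and the two test points are placeholder examples, not part of the original script:

# Save only the learned parameters (the state_dict); the path is an arbitrary example
torch.save(model.state_dict(), "logistic_model.pth")

# Rebuild the same architecture and load the saved weights back in
loaded_model = LogisticRegreModel()
loaded_model.load_state_dict(torch.load("logistic_model.pth"))
loaded_model.eval()  # switch to inference mode

# Predict on a couple of new points; the output is the probability of class 2 (label 1)
new_points = torch.tensor([[1.5, 2.0], [3.6, 3.0]], dtype=torch.float32)
with torch.no_grad():
    probs = loaded_model(new_points)
    preds = (probs > 0.5).int()
print(probs.squeeze().tolist(), preds.squeeze().tolist())

Saving the state_dict rather than the whole model object is the usual PyTorch convention; it only requires the LogisticRegreModel class definition to be available when loading.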