import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import precision_score, recall_score, f1_score
# Data preparation
class1_points = np.array([[1.9, 1.2],
                          [1.5, 2.1],
                          [1.9, 0.5],
                          [1.5, 0.9],
                          [0.9, 1.2],
                          [1.1, 1.7],
                          [1.4, 1.1]])
class2_points = np.array([[3.2, 3.2],
                          [3.7, 2.9],
                          [3.2, 2.6],
                          [1.7, 3.3],
                          [3.4, 2.6],
                          [4.1, 2.3],
                          [3.0, 2.9]])
x_train = np.concatenate((class1_points, class2_points), axis=0)
y_train = np.concatenate((np.zeros(len(class1_points)), np.ones(len(class2_points))))
x_train_tensor = torch.tensor(x_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32)
# Set the random seed for reproducibility
seed = 42
torch.manual_seed(seed)
# Define the model
class LogisticRegreModel(nn.Module):
    def __init__(self):
        super(LogisticRegreModel, self).__init__()
        self.fc = nn.Linear(2, 1)

    def forward(self, x):
        x = self.fc(x)
        x = torch.sigmoid(x)
        return x
model = LogisticRegreModel()
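# Illustrative sanity check (added here, not part of the original script):
# nn.Linear(2, 1) holds a length-2 weight vector plus a scalar bias, so the
# model should report 3 trainable parameters before any training.
n_params = sum(p.numel() for p in model.parameters())
print(f"trainable parameters: {n_params}")  # expected: 3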
# Define the loss function and optimizer
criterion = nn.BCELoss()
optimizer = optim.SGD(model.parameters(), lr=0.05)
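# Note (optional alternative, not used below): nn.BCEWithLogitsLoss fuses the
# sigmoid and the binary cross-entropy into a single, more numerically stable
# call; switching to it would mean removing torch.sigmoid from forward().
# criterion = nn.BCEWithLogitsLoss()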
# Train the model
epochs = 1000
for epoch in range(1, epochs + 1):
    y_pred = model(x_train_tensor)
    loss = criterion(y_pred, y_train_tensor.unsqueeze(1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 50 == 0 or epoch == 1:
        print(f"epoch: {epoch}, loss: {loss.item()}")
# Save the trained model weights
torch.save(model.state_dict(), 'model.pth')
# Load the model from the saved weights
model = LogisticRegreModel()
model.load_state_dict(torch.load('model.pth'))
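# Note: on recent PyTorch versions, torch.load('model.pth', weights_only=True)
# is the safer way to load a plain state_dict (an optional tweak, not in the
# original script).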
# Set the model to evaluation mode
model.eval()
# Make predictions on the training data
with torch.no_grad():
    y_pred = model(x_train_tensor)
    y_pred_class = (y_pred > 0.5).float().squeeze()
# Compute precision, recall, and F1 score
precision = precision_score(y_train_tensor.numpy(), y_pred_class.numpy())
recall = recall_score(y_train_tensor.numpy(), y_pred_class.numpy())
f1 = f1_score(y_train_tensor.numpy(), y_pred_class.numpy())
print(f"Precision: {precision:.4f}")
print(f"Recall: {recall:.4f}")
print(f"F1 Score: {f1:.4f}")