002. Implementing softmax regression (concise PyTorch version)
- Related steps can reuse the code from 002. Implementing softmax regression from scratch (PyTorch)
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append("..")
import d2lzh_pytorch as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size,root='/Users/wydi/PycharmProjects/DeepLearning_with_LiMu/datasets/FashionMnist')
num_input = 784
num_output = 10
class LinearNet(nn.Module):
    def __init__(self, num_input, num_output):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_input, num_output)

    def forward(self, X):
        # Each batch returned by the data iterator has shape (batch_size, 1, 28, 28),
        # so we first use view() to reshape X into (batch_size, 784) before the fully connected layer.
        y = self.linear(X.view(X.shape[0], -1))
        return y

net = LinearNet(num_input, num_output)
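As a quick sanity check (not part of the original notes), a dummy batch can be passed through the network to confirm that view() flattens each 1x28x28 image into a 784-dimensional vector and that the linear layer emits one score per class:
# Illustrative shape check with a fake batch of 2 images
X = torch.rand(2, 1, 28, 28)
print(net(X).shape)  # torch.Size([2, 10])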
# Wrap the shape transformation into a reusable custom layer
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):  # x shape: (batch, *, *, ...)
        return x.view(x.shape[0], -1)
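FlattenLayer keeps the batch dimension and collapses all remaining dimensions into one. A minimal illustrative check (the dummy tensor is an assumption, not from the original notes):
x = torch.zeros(4, 1, 28, 28)
print(FlattenLayer()(x).shape)  # torch.Size([4, 784])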
from collections import OrderedDict

net = nn.Sequential(
    # Equivalent shorthand: nn.Sequential(FlattenLayer(), nn.Linear(num_input, num_output))
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_input, num_output))
    ])
)
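Because the layers are registered by name through the OrderedDict, they can be accessed as attributes of the Sequential container, which is what the initialization calls below rely on. A small illustrative check (not from the original notes):
print(net)         # shows the named 'flatten' and 'linear' submodules
print(net.linear)  # Linear(in_features=784, out_features=10, bias=True)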
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
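Internally, d2l.train_ch3 runs a standard minibatch SGD loop; a rough sketch of an equivalent loop is shown below (an approximation for illustration, test-set evaluation omitted; the actual d2l helper may differ in details):
for epoch in range(num_epochs):
    train_loss_sum, train_acc_sum, n = 0.0, 0.0, 0
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)  # CrossEntropyLoss combines log-softmax and negative log-likelihood
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        train_loss_sum += l.item()
        train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
        n += y.shape[0]
    # summed per-batch mean losses divided by the sample count, hence the small printed values
    print('epoch %d, loss %.4f, train acc %.3f' % (epoch + 1, train_loss_sum / n, train_acc_sum / n))
Running the training produces a log like the following: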
epoch 1, loss 0.0031, train acc 0.748, test acc 0.781
epoch 2, loss 0.0022, train acc 0.813, test acc 0.793
epoch 3, loss 0.0021, train acc 0.825, test acc 0.819
epoch 4, loss 0.0020, train acc 0.833, test acc 0.823
epoch 5, loss 0.0019, train acc 0.837, test acc 0.821
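After training, the model can be used for prediction on a test batch. This assumes d2lzh_pytorch also provides get_fashion_mnist_labels (as in the book's earlier sections) to map class indices to text labels:
X, y = next(iter(test_iter))
true_labels = d2l.get_fashion_mnist_labels(y.numpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
print([t + ' / ' + p for t, p in zip(true_labels, pred_labels)][:5])  # true / predicted for the first 5 images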