
Implementing a Feedforward Neural Network with torch.nn for Multi-Class Classification


1 Import the required packages

import torch
import numpy as np
from torch import nn
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader

2 Download the MNIST dataset

mnist_train = MNIST(root='./datasets/MNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = MNIST(root='./datasets/MNIST', train=False, download=True, transform=transforms.ToTensor())
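As a quick sanity check (a small sketch added here, not part of the original post), you can confirm the split sizes and the shape of a single sample; after ToTensor() each image is a 1x28x28 float tensor with values in [0, 1]:

print(len(mnist_train), len(mnist_test))   # expect 60000 and 10000 samples
img, label = mnist_train[0]                # one (image, label) pair
print(img.shape, label)                    # expect torch.Size([1, 28, 28]) and an integer class 0-9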

3 Load the data

batch_size = 64
train_iter = DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True)
test_iter = DataLoader(dataset=mnist_test, batch_size=batch_size, shuffle=True)
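To verify that the loaders produce what the model expects, you can pull a single mini-batch (an illustrative check, not from the original tutorial): images arrive as a (batch_size, 1, 28, 28) tensor and labels as integer class indices.

x, y = next(iter(train_iter))   # one mini-batch from the training loader
print(x.shape, y.shape)         # expect torch.Size([64, 1, 28, 28]) and torch.Size([64])
print(y[:10])                   # integer labels in the range 0-9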

4 Define the model

num_input, num_hidden1, num_hidden2, num_output = 28 * 28, 512, 256, 10

class DNN(nn.Module):
    def __init__(self, num_input, num_hidden1, num_hidden2, num_output):
        super(DNN, self).__init__()
        self.linear1 = nn.Linear(num_input, num_hidden1)
        self.linear2 = nn.Linear(num_hidden1, num_hidden2)
        self.linear3 = nn.Linear(num_hidden2, num_output)
        self.relu = nn.ReLU()   # nonlinearity; without it the stacked Linear layers collapse into one linear map

    def forward(self, input):
        input = input.view(-1, 784)           # flatten 1x28x28 images into 784-dim vectors
        out = self.relu(self.linear1(input))
        out = self.relu(self.linear2(out))
        out = self.linear3(out)               # raw logits; CrossEntropyLoss applies log-softmax internally
        return out
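A quick way to verify the forward pass (an illustrative check, not part of the original post) is to feed a dummy batch through a throwaway instance of the network and confirm that the output has one logit per class:

tmp_net = DNN(num_input, num_hidden1, num_hidden2, num_output)   # temporary instance; the real model is created in step 5
dummy = torch.randn(2, 1, 28, 28)                                # two fake grayscale 28x28 images
print(tmp_net(dummy).shape)                                      # expect torch.Size([2, 10])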

5 Initialize the model parameters

net = DNN(num_input, num_hidden1, num_hidden2, num_output)
for param in net.parameters():
    nn.init.normal_(param, mean=0, std=0.001)
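Note that the loop above draws every parameter, including the biases, from a normal distribution with a very small standard deviation. A common alternative (shown here as a suggestion, not the author's choice) is to randomize only the weights and zero the biases:

# Alternative initialization sketch: random weights, zero biases
for name, param in net.named_parameters():
    if 'weight' in name:
        nn.init.normal_(param, mean=0, std=0.01)
    else:
        nn.init.zeros_(param)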

6 Define the training function

def train(net, train_iter, test_iter, loss, num_epochs):
    train_ls, test_ls, train_acc, test_acc = [], [], [], []
    for epoch in range(num_epochs):
        # training pass
        train_ls_sum, train_acc_sum, n = 0, 0, 0
        for x, y in train_iter:
            y_pred = net(x)
            l = loss(y_pred, y)
            optimizer.zero_grad()   # optimizer is created as a global in step 7
            l.backward()
            optimizer.step()
            train_ls_sum += l.item()
            train_acc_sum += (y_pred.argmax(dim=1) == y).sum().item()
            n += y_pred.shape[0]
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum / n)

        # evaluation pass on the test set (no gradients needed)
        test_ls_sum, test_acc_sum, n = 0, 0, 0
        with torch.no_grad():
            for x, y in test_iter:
                y_pred = net(x)
                l = loss(y_pred, y)
                test_ls_sum += l.item()
                test_acc_sum += (y_pred.argmax(dim=1) == y).sum().item()
                n += y_pred.shape[0]
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum / n)

        print('epoch %d, train_loss %.6f, test_loss %f, train_acc %.6f, test_acc %f'
              % (epoch + 1, train_ls[epoch], test_ls[epoch], train_acc[epoch], test_acc[epoch]))
    return train_ls, test_ls, train_acc, test_acc
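Note that train_ls_sum records the sum of per-batch losses rather than an average, so its magnitude depends on the number of batches per epoch. The accuracy bookkeeping can also be factored into a standalone helper, handy for checking the model outside the training loop; this is a hypothetical addition, not part of the original post:

def evaluate_accuracy(net, data_iter):
    # Hypothetical helper: fraction of correctly classified samples in data_iter
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in data_iter:
            correct += (net(x).argmax(dim=1) == y).sum().item()
            total += y.shape[0]
    return correct / total

# example usage after training: print(evaluate_accuracy(net, test_iter))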

7 Define the optimizer and loss function

# number of epochs and learning rate
num_epochs = 20
lr = 0.01
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
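nn.CrossEntropyLoss applies log-softmax internally and then computes the negative log-likelihood, which is why the model's forward pass returns raw logits without a softmax layer. The tiny check below (illustrative only, with made-up numbers) shows the equivalence using torch.nn.functional:

import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, -1.0]])                 # raw scores for 3 classes
target = torch.tensor([0])                                # true class index
print(F.cross_entropy(logits, target))                    # same value as the line below
print(F.nll_loss(F.log_softmax(logits, dim=1), target))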

8 Train the model

train_loss, test_loss, train_acc, test_acc = train(net, train_iter, test_iter, loss, num_epochs)
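After training you may want to keep the learned weights for later reuse; a minimal sketch (the file name is an arbitrary choice):

# Save and reload the trained parameters
torch.save(net.state_dict(), 'dnn_mnist.pt')
net.load_state_dict(torch.load('dnn_mnist.pt'))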

9 Visualize the results

x = np.linspace(0, len(train_loss), len(train_loss))
plt.plot(x, train_loss, label="train_loss", linewidth=1.5)
plt.plot(x, test_loss, label="test_loss", linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
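The training function also returns the accuracy curves, which the code above does not plot; the same pattern works for them (a straightforward extension of the loss plot):

# Plot the accuracy curves returned by train()
plt.plot(x, train_acc, label="train_acc", linewidth=1.5)
plt.plot(x, test_acc, label="test_acc", linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.legend()
plt.show()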
