1.使用RNN做MNIST分类

时间:2023-03-09 18:12:53
1.使用RNN做MNIST分类

第一次用LSTM,从简单做起吧~~

注意事项:

  • batch_first=True 意味着输入的格式为(batch_size,time_step,input_size),False 意味着输入的格式为(time_step,batch_size,input_size)
  • 取r_out[:,-1,:],即取时间步最后一步的结果,相当于LSTM把一张图片全部扫描完后的返回的状态向量(此时的维度变为(64,64),前面的64是batch_size,后面的64是隐藏层的神经元个数)
 import torch
from torch.autograd import Variable
from torchvision import datasets,transforms
#超参数
EPOCH=1
BATCH_SIZE=64
TIME_STEP=28#run time step/image height
INPUT_SIZE=28#run input size/image width
LR=0.01
DOWNLOAD_MNIST=True train_data=datasets.MNIST(root='./mnist',train=True,transform=transforms.ToTensor(),download=DOWNLOAD_MNIST)
train_loader=torch.utils.data.DataLoader(dataset=train_data,batch_size=BATCH_SIZE,shuffle=True) test_data=datasets.MNIST(root='./mnist',train=False,transform=transforms.ToTensor(),download=DOWNLOAD_MNIST)
test_loader = torch.utils.data.DataLoader(
    dataset=test_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
)


class RNN(torch.nn.Module):
    """Single-layer LSTM classifier for MNIST.

    Each 28x28 image is read row by row: 28 time steps of a
    28-dimensional input vector. The LSTM output at the final time
    step (the state after the whole image has been scanned) is fed
    to a linear layer that produces 10 class logits.
    """

    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = torch.nn.LSTM(
            input_size=INPUT_SIZE,  # 28 pixels per row
            hidden_size=64,         # hidden units per time step
            num_layers=1,
            batch_first=True,       # input shape is (batch, time_step, input_size)
        )
        self.out = torch.nn.Linear(64, 10)  # last hidden state -> 10 class logits

    def forward(self, x):
        # r_out holds the output at every time step: (batch, 28, 64).
        # (h_n, h_c) are the final hidden/cell states; not needed here.
        # Initial state None means "start from zeros".
        r_out, (h_n, h_c) = self.rnn(x, None)
        # Keep only the last time step's output: (batch, 64) -> (batch, 10).
        return self.out(r_out[:, -1, :])
rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = torch.nn.CrossEntropyLoss()  # expects raw logits + integer class labels

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        # Reshape (batch, 1, 28, 28) images to (batch, time_step, input_size).
        b_x = Variable(x.view(-1, 28, 28))
        b_y = Variable(y).squeeze()

        output = rnn(b_x)              # (batch, 10) class logits
        loss = loss_func(output, b_y)
        optimizer.zero_grad()          # clear gradients from the previous step
        loss.backward()
        optimizer.step()

        # Every 50 steps, report accuracy aggregated over the whole test set
        # (a single figure, rather than one print per test batch).
        if step % 50 == 0:
            correct = 0
            total = 0
            with torch.no_grad():  # no gradients needed for evaluation
                for test_x, test_y in test_loader:
                    test_output = rnn(test_x.view(-1, 28, 28))
                    pred_y = torch.max(test_output, 1)[1].data.numpy()
                    correct += (pred_y == test_y.numpy()).sum()
                    total += test_y.size(0)
            acc = correct / total
            print(acc)