n_epochs = 5  # number of full passes over the training set

for epoch in range(n_epochs):
    running_loss = 0.0   # cumulative training loss for this epoch
    running_correct = 0  # count of correct training predictions this epoch
    print("Epoch {}/{}".format(epoch, n_epochs))
    print("-"*20)

    # ---- training phase: iterate mini-batches from the train loader ----
    for data in data_loader_train:
        X_train, y_train = data
        # NOTE: Variable is a no-op wrapper since PyTorch 0.4; kept because
        # the rest of this file imports and uses it.
        X_train, y_train = Variable(X_train), Variable(y_train)
        outputs = model(X_train)                 # forward pass (class scores)
        _, pred = torch.max(outputs.data, 1)     # predicted class index per sample
        optimizer.zero_grad()                    # clear gradients from previous step
        loss = cost(outputs, y_train)            # cross-entropy between scores and targets
        loss.backward()                          # backpropagate
        optimizer.step()                         # update parameters
        running_loss += loss.data.item()
        # .item() converts the 0-dim tensor returned by torch.sum to a plain
        # Python int: without it, the accuracy division below either fails to
        # format with {:.4f} (newer PyTorch) or truncates to 0 under integer
        # tensor division (older PyTorch).
        running_correct += torch.sum(pred == y_train.data).item()

    # ---- evaluation phase: no gradients needed, so disable autograd ----
    testing_correct = 0
    with torch.no_grad():
        for data in data_loader_test:
            X_test, y_test = data
            X_test, y_test = Variable(X_test), Variable(y_test)
            outputs = model(X_test)
            _, pred = torch.max(outputs.data, 1)
            testing_correct += torch.sum(pred == y_test.data).item()

    # Epoch summary: mean loss per training sample and accuracies in percent.
    print("Loss is : {:.4f} , Train Accuracy is : {:.4f}% , Test Accuracy is :{:.4f}".format(
        running_loss/len(data_train),
        100*running_correct/len(data_train),
        100*testing_correct/len(data_test)))
6、最終結果
Epoch 0/5
--------------------
Loss is : 0.0021 , Train Accuracy is : 95.0000% , Test Accuracy is :98.0000
Epoch 1/5
--------------------
Loss is : 0.0007 , Train Accuracy is : 98.0000% , Test Accuracy is :99.0000
Epoch 2/5
--------------------
Loss is : 0.0004 , Train Accuracy is : 99.0000% , Test Accuracy is :99.0000
Epoch 3/5
--------------------
Loss is : 0.0003 , Train Accuracy is : 99.0000% , Test Accuracy is :99.0000
Epoch 4/5
--------------------
Loss is : 0.0002 , Train Accuracy is : 99.0000% , Test Accuracy is :99.0000