
A Hands-On Guide to MNIST Classification in PyTorch

熱門(mén)標(biāo)簽:服務(wù)器配置 網(wǎng)站文章發(fā)布 智能手機(jī) 鐵路電話(huà)系統(tǒng) 檢查注冊(cè)表項(xiàng) 美圖手機(jī) 銀行業(yè)務(wù) 呼叫中心市場(chǎng)需求

Overview

MNIST contains handwritten digits 0-9, with 60,000 training images and 10,000 test images. Each sample is a single-channel 28*28 grayscale image.
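
As a quick sanity check of those numbers, the short sketch below (not part of the original script; it reuses the same ./data root as the rest of this article) loads one raw training sample and prints its shape:

import torchvision

# Download (if needed) and load the raw training split.
mnist = torchvision.datasets.MNIST(root="./data", train=True, download=True,
                                   transform=torchvision.transforms.ToTensor())
image, label = mnist[0]
print(len(mnist))    # 60000 training samples
print(image.shape)   # torch.Size([1, 28, 28]) -- one channel, 28x28 pixels
print(label)         # the integer digit class of this sample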

Getting the Data

def get_data():
    """Build the training and test DataLoaders."""

    # Get the training set
    train = torchvision.datasets.MNIST(root="./data", train=True, download=True,
                                       transform=torchvision.transforms.Compose([
                                           torchvision.transforms.ToTensor(),  # convert to tensor
                                           torchvision.transforms.Normalize((0.1307,), (0.3081,))  # normalize
                                       ]))
    train_loader = DataLoader(train, batch_size=batch_size)  # batch the training set

    # Get the test set
    test = torchvision.datasets.MNIST(root="./data", train=False, download=True,
                                      transform=torchvision.transforms.Compose([
                                          torchvision.transforms.ToTensor(),  # convert to tensor
                                          torchvision.transforms.Normalize((0.1307,), (0.3081,))  # normalize
                                      ]))
    test_loader = DataLoader(test, batch_size=batch_size)  # batch the test set

    # Return the batched training and test loaders
    return train_loader, test_loader
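
The constants 0.1307 and 0.3081 are the mean and standard deviation of the MNIST training pixels, so Normalize shifts the inputs to roughly zero mean and unit variance. A quick look at what the loaders yield (a minimal sketch, assuming batch_size = 64 as set in the complete script below):

train_loader, test_loader = get_data()
x, y = next(iter(train_loader))
print(x.shape)  # torch.Size([64, 1, 28, 28]) -- a batch of normalized images
print(y.shape)  # torch.Size([64]) -- one integer label per image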

Network Model

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()

        # Convolutional layers
        self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1))
        self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))

        # Dropout layers
        self.dropout1 = torch.nn.Dropout(0.25)
        self.dropout2 = torch.nn.Dropout(0.5)

        # Fully connected layers
        self.fc1 = torch.nn.Linear(9216, 128)
        self.fc2 = torch.nn.Linear(128, 10)

    def forward(self, x):
        """Forward pass"""
        
        # [b, 1, 28, 28] => [b, 32, 26, 26]
        out = self.conv1(x)
        out = F.relu(out)
        
        # [b, 32, 26, 26] => [b, 64, 24, 24]
        out = self.conv2(out)
        out = F.relu(out)

        # [b, 64, 24, 24] => [b, 64, 12, 12]
        out = F.max_pool2d(out, 2)
        out = self.dropout1(out)
        
        # [b, 64, 12, 12] => [b, 64 * 12 * 12] => [b, 9216]
        out = torch.flatten(out, 1)
        
        # [b, 9216] => [b, 128]
        out = self.fc1(out)
        out = F.relu(out)

        # [b, 128] => [b, 10]
        out = self.dropout2(out)
        out = self.fc2(out)

        output = F.log_softmax(out, dim=1)

        return output
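
The first fully connected layer hard-codes 9216 input features (64 channels * 12 * 12 after the two valid convolutions and the pooling step), so it is worth confirming the shapes with a dummy batch (a minimal sketch, not part of the original script):

model = Model()
dummy = torch.randn(8, 1, 28, 28)  # a batch of 8 fake MNIST-sized images
out = model(dummy)
print(out.shape)  # torch.Size([8, 10]) -- log-probabilities over the 10 digits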

The train function

def train(model, epoch, train_loader):
    """Train for one epoch."""

    # Training mode (enables dropout)
    model.train()

    # Iterate over batches
    for step, (x, y) in enumerate(train_loader):
        # Move model and data to the GPU if available
        # (moving the model once before the loop would also suffice)
        if use_cuda:
            model = model.cuda()
            x, y = x.cuda(), y.cuda()

        # Zero the gradients
        optimizer.zero_grad()

        output = model(x)

        # Compute the loss
        loss = F.nll_loss(output, y)

        # Backpropagate
        loss.backward()

        # Update the parameters
        optimizer.step()

        # Print the loss
        if step % 50 == 0:
            print('Epoch: {}, Step {}, Loss: {}'.format(epoch, step, loss))
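
Because the model already ends in F.log_softmax, pairing it with F.nll_loss is equivalent to applying F.cross_entropy directly to raw logits; the sketch below (illustrative only) confirms the two compute the same value:

logits = torch.randn(4, 10)          # raw, un-normalized scores
target = torch.tensor([1, 0, 4, 9])  # ground-truth classes
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), target)
loss_b = F.cross_entropy(logits, target)
print(torch.allclose(loss_a, loss_b))  # True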

The test function

def test(model, test_loader):
    """Evaluate on the test set."""

    # Evaluation mode (disables dropout)
    model.eval()

    # Count of correct predictions
    correct = 0

    with torch.no_grad():
        for x, y in test_loader:

            # Move model and data to the GPU if available
            if use_cuda:
                model = model.cuda()
                x, y = x.cuda(), y.cuda()

            # Forward pass
            output = model(x)

            # Predicted class = index of the largest log-probability
            pred = output.argmax(dim=1, keepdim=True)

            # Accumulate the number of correct predictions
            correct += pred.eq(y.view_as(pred)).sum().item()

    # Compute the accuracy in percent
    accuracy = correct / len(test_loader.dataset) * 100

    # Print the accuracy
    print("Test Accuracy: {}%".format(accuracy))

The main function

def main():
    # Get the data
    train_loader, test_loader = get_data()

    # Train and evaluate once per epoch
    for epoch in range(iteration_num):
        print("\n================ epoch: {} ================".format(epoch))
        train(network, epoch, train_loader)
        test(network, test_loader)

Complete code:

import torch
import torchvision
import torch.nn.functional as F
from torch.utils.data import DataLoader


class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()

        # Convolutional layers
        self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1))
        self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))

        # Dropout layers
        self.dropout1 = torch.nn.Dropout(0.25)
        self.dropout2 = torch.nn.Dropout(0.5)

        # Fully connected layers
        self.fc1 = torch.nn.Linear(9216, 128)
        self.fc2 = torch.nn.Linear(128, 10)

    def forward(self, x):
        """Forward pass"""

        # [b, 1, 28, 28] => [b, 32, 26, 26]
        out = self.conv1(x)
        out = F.relu(out)

        # [b, 32, 26, 26] => [b, 64, 24, 24]
        out = self.conv2(out)
        out = F.relu(out)

        # [b, 64, 24, 24] => [b, 64, 12, 12]
        out = F.max_pool2d(out, 2)
        out = self.dropout1(out)

        # [b, 64, 12, 12] => [b, 64 * 12 * 12] => [b, 9216]
        out = torch.flatten(out, 1)

        # [b, 9216] => [b, 128]
        out = self.fc1(out)
        out = F.relu(out)

        # [b, 128] => [b, 10]
        out = self.dropout2(out)
        out = self.fc2(out)

        output = F.log_softmax(out, dim=1)

        return output


# Hyperparameters
batch_size = 64  # samples per batch
learning_rate = 0.0001  # learning rate
iteration_num = 5  # number of epochs
network = Model()  # instantiate the network
print(network)  # print the network structure for debugging
optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)  # optimizer

# GPU acceleration
use_cuda = torch.cuda.is_available()
print("Using GPU acceleration:", use_cuda)


def get_data():
    """Build the training and test DataLoaders."""

    # Get the training set
    train = torchvision.datasets.MNIST(root="./data", train=True, download=True,
                                       transform=torchvision.transforms.Compose([
                                           torchvision.transforms.ToTensor(),  # convert to tensor
                                           torchvision.transforms.Normalize((0.1307,), (0.3081,))  # normalize
                                       ]))
    train_loader = DataLoader(train, batch_size=batch_size)  # batch the training set

    # Get the test set
    test = torchvision.datasets.MNIST(root="./data", train=False, download=True,
                                      transform=torchvision.transforms.Compose([
                                          torchvision.transforms.ToTensor(),  # convert to tensor
                                          torchvision.transforms.Normalize((0.1307,), (0.3081,))  # normalize
                                      ]))
    test_loader = DataLoader(test, batch_size=batch_size)  # batch the test set

    # Return the batched training and test loaders
    return train_loader, test_loader


def train(model, epoch, train_loader):
    """Train for one epoch."""

    # Training mode (enables dropout)
    model.train()

    # Iterate over batches
    for step, (x, y) in enumerate(train_loader):
        # Move model and data to the GPU if available
        # (moving the model once before the loop would also suffice)
        if use_cuda:
            model = model.cuda()
            x, y = x.cuda(), y.cuda()

        # Zero the gradients
        optimizer.zero_grad()

        output = model(x)

        # Compute the loss
        loss = F.nll_loss(output, y)

        # Backpropagate
        loss.backward()

        # Update the parameters
        optimizer.step()

        # Print the loss
        if step % 50 == 0:
            print('Epoch: {}, Step {}, Loss: {}'.format(epoch, step, loss))


def test(model, test_loader):
    """Evaluate on the test set."""

    # Evaluation mode (disables dropout)
    model.eval()

    # Count of correct predictions
    correct = 0

    with torch.no_grad():
        for x, y in test_loader:

            # Move model and data to the GPU if available
            if use_cuda:
                model = model.cuda()
                x, y = x.cuda(), y.cuda()

            # Forward pass
            output = model(x)

            # Predicted class = index of the largest log-probability
            pred = output.argmax(dim=1, keepdim=True)

            # Accumulate the number of correct predictions
            correct += pred.eq(y.view_as(pred)).sum().item()

    # Compute the accuracy in percent
    accuracy = correct / len(test_loader.dataset) * 100

    # Print the accuracy
    print("Test Accuracy: {}%".format(accuracy))


def main():
    # Get the data
    train_loader, test_loader = get_data()

    # Train and evaluate once per epoch
    for epoch in range(iteration_num):
        print("\n================ epoch: {} ================".format(epoch))
        train(network, epoch, train_loader)
        test(network, test_loader)


if __name__ == "__main__":
    main()
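
After the run finishes you will usually want to persist the learned weights. A minimal sketch using torch.save (the filename mnist_cnn.pt is an arbitrary choice, not part of the original article):

torch.save(network.state_dict(), "mnist_cnn.pt")  # save the learned weights

# To reuse the model later:
restored = Model()
restored.load_state_dict(torch.load("mnist_cnn.pt"))
restored.eval()  # switch to evaluation mode before inference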

Output:

Model(
  (conv1): Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1))
  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
  (dropout1): Dropout(p=0.25, inplace=False)
  (dropout2): Dropout(p=0.5, inplace=False)
  (fc1): Linear(in_features=9216, out_features=128, bias=True)
  (fc2): Linear(in_features=128, out_features=10, bias=True)
)
Using GPU acceleration: True

================ epoch: 0 ================
Epoch: 0, Step 0, Loss: 2.3131277561187744
Epoch: 0, Step 50, Loss: 1.0419045686721802
Epoch: 0, Step 100, Loss: 0.6259541511535645
Epoch: 0, Step 150, Loss: 0.7194482684135437
Epoch: 0, Step 200, Loss: 0.4020516574382782
Epoch: 0, Step 250, Loss: 0.6890509128570557
Epoch: 0, Step 300, Loss: 0.28660136461257935
Epoch: 0, Step 350, Loss: 0.3277580738067627
Epoch: 0, Step 400, Loss: 0.2750288248062134
Epoch: 0, Step 450, Loss: 0.28428223729133606
Epoch: 0, Step 500, Loss: 0.3514065444469452
Epoch: 0, Step 550, Loss: 0.23386947810649872
Epoch: 0, Step 600, Loss: 0.25338059663772583
Epoch: 0, Step 650, Loss: 0.1743898093700409
Epoch: 0, Step 700, Loss: 0.35752204060554504
Epoch: 0, Step 750, Loss: 0.17575909197330475
Epoch: 0, Step 800, Loss: 0.20604261755943298
Epoch: 0, Step 850, Loss: 0.17389622330665588
Epoch: 0, Step 900, Loss: 0.3188241124153137
Test Accuracy: 96.56%

================ epoch: 1 ================
Epoch: 1, Step 0, Loss: 0.23558208346366882
Epoch: 1, Step 50, Loss: 0.13511177897453308
Epoch: 1, Step 100, Loss: 0.18823786079883575
Epoch: 1, Step 150, Loss: 0.2644936144351959
Epoch: 1, Step 200, Loss: 0.145077645778656
Epoch: 1, Step 250, Loss: 0.30574971437454224
Epoch: 1, Step 300, Loss: 0.2386859953403473
Epoch: 1, Step 350, Loss: 0.08346735686063766
Epoch: 1, Step 400, Loss: 0.10480977594852448
Epoch: 1, Step 450, Loss: 0.07280707359313965
Epoch: 1, Step 500, Loss: 0.20928426086902618
Epoch: 1, Step 550, Loss: 0.20455852150917053
Epoch: 1, Step 600, Loss: 0.10085935145616531
Epoch: 1, Step 650, Loss: 0.13476189970970154
Epoch: 1, Step 700, Loss: 0.19087043404579163
Epoch: 1, Step 750, Loss: 0.0981522724032402
Epoch: 1, Step 800, Loss: 0.1961515098810196
Epoch: 1, Step 850, Loss: 0.041140712797641754
Epoch: 1, Step 900, Loss: 0.250461220741272
Test Accuracy: 98.03%

================ epoch: 2 ================
Epoch: 2, Step 0, Loss: 0.09572553634643555
Epoch: 2, Step 50, Loss: 0.10370486229658127
Epoch: 2, Step 100, Loss: 0.17737184464931488
Epoch: 2, Step 150, Loss: 0.1570713371038437
Epoch: 2, Step 200, Loss: 0.07462178170681
Epoch: 2, Step 250, Loss: 0.18744900822639465
Epoch: 2, Step 300, Loss: 0.09910508990287781
Epoch: 2, Step 350, Loss: 0.08929706364870071
Epoch: 2, Step 400, Loss: 0.07703761011362076
Epoch: 2, Step 450, Loss: 0.10133732110261917
Epoch: 2, Step 500, Loss: 0.1314031481742859
Epoch: 2, Step 550, Loss: 0.10394387692213058
Epoch: 2, Step 600, Loss: 0.11612939089536667
Epoch: 2, Step 650, Loss: 0.17494803667068481
Epoch: 2, Step 700, Loss: 0.11065669357776642
Epoch: 2, Step 750, Loss: 0.061209067702293396
Epoch: 2, Step 800, Loss: 0.14715790748596191
Epoch: 2, Step 850, Loss: 0.03930797800421715
Epoch: 2, Step 900, Loss: 0.18030673265457153
Test Accuracy: 98.46000000000001%

================ epoch: 3 ================
Epoch: 3, Step 0, Loss: 0.09266342222690582
Epoch: 3, Step 50, Loss: 0.0414913073182106
Epoch: 3, Step 100, Loss: 0.2152961939573288
Epoch: 3, Step 150, Loss: 0.12287424504756927
Epoch: 3, Step 200, Loss: 0.13468700647354126
Epoch: 3, Step 250, Loss: 0.11967387050390244
Epoch: 3, Step 300, Loss: 0.11301510035991669
Epoch: 3, Step 350, Loss: 0.037447575479745865
Epoch: 3, Step 400, Loss: 0.04699449613690376
Epoch: 3, Step 450, Loss: 0.05472381412982941
Epoch: 3, Step 500, Loss: 0.09839300811290741
Epoch: 3, Step 550, Loss: 0.07964356243610382
Epoch: 3, Step 600, Loss: 0.08182843774557114
Epoch: 3, Step 650, Loss: 0.05514759197831154
Epoch: 3, Step 700, Loss: 0.13785190880298615
Epoch: 3, Step 750, Loss: 0.062480345368385315
Epoch: 3, Step 800, Loss: 0.120387002825737
Epoch: 3, Step 850, Loss: 0.04458726942539215
Epoch: 3, Step 900, Loss: 0.17119190096855164
Test Accuracy: 98.55000000000001%

================ epoch: 4 ================
Epoch: 4, Step 0, Loss: 0.08094145357608795
Epoch: 4, Step 50, Loss: 0.05615215748548508
Epoch: 4, Step 100, Loss: 0.07766406238079071
Epoch: 4, Step 150, Loss: 0.07915271818637848
Epoch: 4, Step 200, Loss: 0.1301635503768921
Epoch: 4, Step 250, Loss: 0.12118984013795853
Epoch: 4, Step 300, Loss: 0.073218435049057
Epoch: 4, Step 350, Loss: 0.04517696052789688
Epoch: 4, Step 400, Loss: 0.08493026345968246
Epoch: 4, Step 450, Loss: 0.03904269263148308
Epoch: 4, Step 500, Loss: 0.09386837482452393
Epoch: 4, Step 550, Loss: 0.12583576142787933
Epoch: 4, Step 600, Loss: 0.09053893387317657
Epoch: 4, Step 650, Loss: 0.06912104040384293
Epoch: 4, Step 700, Loss: 0.1502612829208374
Epoch: 4, Step 750, Loss: 0.07162325084209442
Epoch: 4, Step 800, Loss: 0.10512275993824005
Epoch: 4, Step 850, Loss: 0.028180215507745743
Epoch: 4, Step 900, Loss: 0.08492615073919296
Test Accuracy: 98.69%

This concludes the hands-on walkthrough of implementing MNIST classification in PyTorch.
