다층 퍼셉트론으로 MNIST 분류

zoya·2024년 4월 30일

인공지능 공부

목록 보기
12/19
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import optim
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_openml

# Download MNIST ('mnist_784': 70,000 flattened 28x28 grayscale digits) as
# numpy arrays. NOTE(review): network I/O on first call; cached afterwards.
# Fix: the original called `fetch_open_ml`, a NameError — the imported
# sklearn function is `fetch_openml`.
mnist = fetch_openml('mnist_784', version=1, cache=True, as_frame=False)

mnist.target = mnist.target.astype(np.int8)
X = mnist.data/255  # scale pixel values from [0, 255] down to [0, 1]
Y = mnist.target

# Hold out 1/7 of the 70,000 samples (10,000) for evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1/7, random_state=0)

# float32 features; int64 labels as required by nn.CrossEntropyLoss.
X_train = torch.Tensor(X_train)
X_test = torch.Tensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)

train_set = TensorDataset(X_train, Y_train)
test_set = TensorDataset(X_test, Y_test)

loader_train = DataLoader(train_set, batch_size=64, shuffle=True)
# Fix: shuffling the evaluation set is pointless — order does not affect
# accuracy, and a fixed order makes evaluation reproducible.
loader_test = DataLoader(test_set, batch_size=64, shuffle=False)

# Three-layer MLP: 784 inputs -> 100 -> 100 -> 10 raw logits (one per digit).
# Fix: the original nn.Sequential call was missing its closing parenthesis,
# which made the rest of the file a SyntaxError.
model = nn.Sequential(
  nn.Linear(28*28*1, 100),
  nn.ReLU(),
  nn.Linear(100, 100),
  nn.ReLU(),
  nn.Linear(100, 10),
)

# CrossEntropyLoss applies log-softmax internally, so the model emits logits.
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

def train(epoch):
    """Run one training epoch over ``loader_train``.

    Args:
        epoch: epoch index, used only in the completion message.

    Fix: the original body mixed tabs and spaces, which is a TabError
    under Python 3; re-indented consistently with 4 spaces.
    """
    model.train()  # switch to training mode

    for data, targets in loader_train:
        optimizer.zero_grad()             # clear gradients from the previous step
        outputs = model(data)             # forward pass: (batch, 784) -> (batch, 10)
        loss = loss_fn(outputs, targets)  # cross-entropy against integer labels
        loss.backward()                   # backpropagate
        optimizer.step()                  # apply the Adam update
    print("{} epoch : 완료".format(epoch))

def test():
    """Evaluate the model on ``loader_test`` and print the accuracy.

    Fix: the original ``def test()`` line was missing its colon
    (SyntaxError), and the body mixed tabs and spaces (TabError);
    re-indented consistently with 4 spaces.
    """
    model.eval()  # switch to evaluation mode
    correct = 0

    with torch.no_grad():  # gradients are not needed for evaluation
        for data, targets in loader_test:
            outputs = model(data)
            # argmax over the 10 class logits gives the predicted digit
            _, predicted = torch.max(outputs.data, 1)
            correct += predicted.eq(targets.data.view_as(predicted)).sum()

    data_num = len(loader_test.dataset)
    print("정확도 : {}/{} ({:.0f}%)\n".format(correct, data_num, 100.*correct/data_num))
profile
동물을 좋아하는 개발자(희망)의 저장소

0개의 댓글