Convolutional Neural Networks (CNN)

hottogi · November 13, 2022

Convolutional Neural Networks

A CNN is an artificial neural network well suited to image processing.
A CNN model typically consists of convolution layers, pooling layers, and ordinary fully connected layers that aggregate the extracted features for the final classification.
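
As a concrete illustration of this layer structure, here is a minimal sketch (the layer sizes are illustrative only, not those of the model implemented below) tracing how a 28x28 grayscale image flows through a convolution, a pooling step, and a fully connected classifier:

import torch
import torch.nn as nn

x = torch.randn(1, 1, 28, 28)           # (batch, channels, height, width)
conv = nn.Conv2d(1, 10, kernel_size=5)  # 28x28 -> 24x24 (5x5 kernel, no padding)
pool = nn.MaxPool2d(2)                  # 24x24 -> 12x12
fc = nn.Linear(10 * 12 * 12, 10)        # flatten the feature maps, classify into 10 classes

h = pool(conv(x))
print(h.shape)                          # torch.Size([1, 10, 12, 12])
logits = fc(h.view(h.size(0), -1))
print(logits.shape)                     # torch.Size([1, 10])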

Implementing a CNN Model

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms, datasets
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda' if USE_CUDA else 'cpu')

Set the number of epochs and the batch size.

EPOCHS = 40
BATCH_SIZE = 64

Load the MNIST dataset (datasets.MNIST). The constants 0.1307 and 0.3081 passed to Normalize are the mean and standard deviation of the MNIST training pixels.

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./.data',
                   train=True,
                   download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       # mean/std of the MNIST training set
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./.data',
                   train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=BATCH_SIZE, shuffle=True)
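
A quick sanity check of the loader (a hypothetical snippet, not part of the original run): each training batch should contain 64 single-channel 28x28 images.

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])
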
class Net(nn.Module):
  def __init__(self):
    super(Net, self).__init__()
    self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
    self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
    self.conv2_drop = nn.Dropout2d()
    self.fc1 = nn.Linear(320, 50)  # 320 = 20 channels * 4 * 4 (see forward)
    self.fc2 = nn.Linear(50, 10)

  def forward(self, x):
    x = F.relu(F.max_pool2d(self.conv1(x), 2))                   # 28x28 -> 24x24 -> 12x12
    x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))  # 12x12 -> 8x8 -> 4x4
    x = x.view(-1, 320)                                          # flatten: 20 * 4 * 4 = 320
    x = F.relu(self.fc1(x))
    x = F.dropout(x, training=self.training)  # dropout is active only in training mode
    x = self.fc2(x)
    return x
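
The 320 in x.view(-1, 320) follows from the shapes traced in the comments: two 5x5 convolutions and two 2x2 poolings turn a 28x28 input into 20 feature maps of size 4x4, and 20 * 4 * 4 = 320. A quick check with a dummy batch (hypothetical snippet):

model = Net()
dummy = torch.randn(2, 1, 28, 28)  # a fake batch of two MNIST-sized images
print(model(dummy).shape)          # torch.Size([2, 10]) -- one logit per class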

Model and optimizer setup
optim.SGD -> the optimization algorithm (stochastic gradient descent with momentum)

model = Net().to(DEVICE)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

The training code is the same as for a plain DNN.

def train(model, train_loader, optimizer, epoch):
  model.train()
  for batch_idx, (data, target) in enumerate(train_loader):
    data, target = data.to(DEVICE), target.to(DEVICE)
    optimizer.zero_grad()
    output = model(data)
    loss = F.cross_entropy(output, target)
    loss.backward()
    optimizer.step()

    if batch_idx % 200 == 0:
      print('Train Epoch:{}[{}/{}({:.0f}%)]\tLoss:{:.6f}'.format(
          epoch, batch_idx * len(data), len(train_loader.dataset),
          100.*batch_idx / len(train_loader), loss.item()))

def evaluate(model, test_loader):
  model.eval()
  test_loss = 0
  correct = 0
  with torch.no_grad():
    for data, target in test_loader:
      data, target = data.to(DEVICE), target.to(DEVICE)
      output = model(data)

      test_loss += F.cross_entropy(output, target,
                                   reduction='sum').item()
      # count correct predictions -- this accumulation was missing in the
      # original run, which is why the log below reports 0.000000% accuracy
      pred = output.max(1, keepdim=True)[1]
      correct += pred.eq(target.view_as(pred)).sum().item()

  test_loss /= len(test_loader.dataset)
  test_accuracy = 100. * correct / len(test_loader.dataset)
  return test_loss, test_accuracy
for epoch in range(1, EPOCHS + 1):
  train(model, train_loader, optimizer, epoch)
  test_loss, test_accuracy = evaluate(model, test_loader)

  print('[{}] Test Loss: {:.4f}, Accuracy: {:.2f}%'.format(
      epoch, test_loss, test_accuracy))

Train Epoch:1[0/60000(0%)] Loss:2.357857
Train Epoch:1[12800/60000(21%)] Loss:1.059924
Train Epoch:1[25600/60000(43%)] Loss:0.807543
Train Epoch:1[38400/60000(64%)] Loss:0.631742
Train Epoch:1[51200/60000(85%)] Loss:0.573869
[1] Test Loss:0.190044,Accuracy:0.000000%
Train Epoch:2[0/60000(0%)] Loss:0.373116
Train Epoch:2[12800/60000(21%)] Loss:0.197062
Train Epoch:2[25600/60000(43%)] Loss:0.409881
Train Epoch:2[38400/60000(64%)] Loss:0.226902
Train Epoch:2[51200/60000(85%)] Loss:0.565896
[2] Test Loss:0.122348,Accuracy:0.000000%
Train Epoch:3[0/60000(0%)] Loss:0.314205
Train Epoch:3[12800/60000(21%)] Loss:0.189394
Train Epoch:3[25600/60000(43%)] Loss:0.294737
Train Epoch:3[38400/60000(64%)] Loss:0.479467
Train Epoch:3[51200/60000(85%)] Loss:0.337846
[3] Test Loss:0.093101,Accuracy:0.000000%
Train Epoch:4[0/60000(0%)] Loss:0.421773
Train Epoch:4[12800/60000(21%)] Loss:0.430934
Train Epoch:4[25600/60000(43%)] Loss:0.316962
Train Epoch:4[38400/60000(64%)] Loss:0.366147
Train Epoch:4[51200/60000(85%)] Loss:0.312483
[4] Test Loss:0.082502,Accuracy:0.000000%
Train Epoch:5[0/60000(0%)] Loss:0.359786
Train Epoch:5[12800/60000(21%)] Loss:0.436588
Train Epoch:5[25600/60000(43%)] Loss:0.246425
Train Epoch:5[38400/60000(64%)] Loss:0.212675
Train Epoch:5[51200/60000(85%)] Loss:0.349040
[5] Test Loss:0.078541,Accuracy:0.000000%
Train Epoch:6[0/60000(0%)] Loss:0.189783
Train Epoch:6[12800/60000(21%)] Loss:0.275350
Train Epoch:6[25600/60000(43%)] Loss:0.237854
Train Epoch:6[38400/60000(64%)] Loss:0.164088
Train Epoch:6[51200/60000(85%)] Loss:0.085523
[6] Test Loss:0.071968,Accuracy:0.000000%
Train Epoch:7[0/60000(0%)] Loss:0.291057
Train Epoch:7[12800/60000(21%)] Loss:0.117925
Train Epoch:7[25600/60000(43%)] Loss:0.421658
Train Epoch:7[38400/60000(64%)] Loss:0.284089
Train Epoch:7[51200/60000(85%)] Loss:0.349889
[7] Test Loss:0.065760,Accuracy:0.000000%
Train Epoch:8[0/60000(0%)] Loss:0.126013
Train Epoch:8[12800/60000(21%)] Loss:0.127163
Train Epoch:8[25600/60000(43%)] Loss:0.110703
Train Epoch:8[38400/60000(64%)] Loss:0.387093
Train Epoch:8[51200/60000(85%)] Loss:0.141555
[8] Test Loss:0.060505,Accuracy:0.000000%
Train Epoch:9[0/60000(0%)] Loss:0.394795
Train Epoch:9[12800/60000(21%)] Loss:0.139964
Train Epoch:9[25600/60000(43%)] Loss:0.160763
Train Epoch:9[38400/60000(64%)] Loss:0.176797
Train Epoch:9[51200/60000(85%)] Loss:0.115855
[9] Test Loss:0.057605,Accuracy:0.000000%
Train Epoch:10[0/60000(0%)] Loss:0.154873
Train Epoch:10[12800/60000(21%)] Loss:0.263257
Train Epoch:10[25600/60000(43%)] Loss:0.131425
Train Epoch:10[38400/60000(64%)] Loss:0.087306
Train Epoch:10[51200/60000(85%)] Loss:0.078015
[10] Test Loss:0.057229,Accuracy:0.000000%
Train Epoch:11[0/60000(0%)] Loss:0.141776
Train Epoch:11[12800/60000(21%)] Loss:0.176566
Train Epoch:11[25600/60000(43%)] Loss:0.080277
Train Epoch:11[38400/60000(64%)] Loss:0.228003
Train Epoch:11[51200/60000(85%)] Loss:0.100153
[11] Test Loss:0.052958,Accuracy:0.000000%
Train Epoch:12[0/60000(0%)] Loss:0.109465
Train Epoch:12[12800/60000(21%)] Loss:0.098430
Train Epoch:12[25600/60000(43%)] Loss:0.103493
Train Epoch:12[38400/60000(64%)] Loss:0.136345
Train Epoch:12[51200/60000(85%)] Loss:0.184960
[12] Test Loss:0.048533,Accuracy:0.000000%
Train Epoch:13[0/60000(0%)] Loss:0.285230
Train Epoch:13[12800/60000(21%)] Loss:0.106502
Train Epoch:13[25600/60000(43%)] Loss:0.320486
Train Epoch:13[38400/60000(64%)] Loss:0.088069
Train Epoch:13[51200/60000(85%)] Loss:0.138731
[13] Test Loss:0.049099,Accuracy:0.000000%
Train Epoch:14[0/60000(0%)] Loss:0.101178
Train Epoch:14[12800/60000(21%)] Loss:0.055023
Train Epoch:14[25600/60000(43%)] Loss:0.116318
Train Epoch:14[38400/60000(64%)] Loss:0.106148
Train Epoch:14[51200/60000(85%)] Loss:0.222102
[14] Test Loss:0.047543,Accuracy:0.000000%
Train Epoch:15[0/60000(0%)] Loss:0.126484
Train Epoch:15[12800/60000(21%)] Loss:0.102467
Train Epoch:15[25600/60000(43%)] Loss:0.274431
Train Epoch:15[38400/60000(64%)] Loss:0.102106
Train Epoch:15[51200/60000(85%)] Loss:0.102310
[15] Test Loss:0.043690,Accuracy:0.000000%
Train Epoch:16[0/60000(0%)] Loss:0.203734
Train Epoch:16[12800/60000(21%)] Loss:0.049144
Train Epoch:16[25600/60000(43%)] Loss:0.071441
Train Epoch:16[38400/60000(64%)] Loss:0.121045
Train Epoch:16[51200/60000(85%)] Loss:0.051802
[16] Test Loss:0.046138,Accuracy:0.000000%
Train Epoch:17[0/60000(0%)] Loss:0.151890
Train Epoch:17[12800/60000(21%)] Loss:0.249376
Train Epoch:17[25600/60000(43%)] Loss:0.094193
Train Epoch:17[38400/60000(64%)] Loss:0.093083
Train Epoch:17[51200/60000(85%)] Loss:0.186045
[17] Test Loss:0.044256,Accuracy:0.000000%
Train Epoch:18[0/60000(0%)] Loss:0.111681
Train Epoch:18[12800/60000(21%)] Loss:0.141488
Train Epoch:18[25600/60000(43%)] Loss:0.062598
Train Epoch:18[38400/60000(64%)] Loss:0.164350
Train Epoch:18[51200/60000(85%)] Loss:0.172219
[18] Test Loss:0.044843,Accuracy:0.000000%
Train Epoch:19[0/60000(0%)] Loss:0.132799
Train Epoch:19[12800/60000(21%)] Loss:0.156223
Train Epoch:19[25600/60000(43%)] Loss:0.093626
Train Epoch:19[38400/60000(64%)] Loss:0.460045
Train Epoch:19[51200/60000(85%)] Loss:0.151337
[19] Test Loss:0.043969,Accuracy:0.000000%
Train Epoch:20[0/60000(0%)] Loss:0.154976
Train Epoch:20[12800/60000(21%)] Loss:0.157059
Train Epoch:20[25600/60000(43%)] Loss:0.170432
Train Epoch:20[38400/60000(64%)] Loss:0.102625
Train Epoch:20[51200/60000(85%)] Loss:0.182307
[20] Test Loss:0.042776,Accuracy:0.000000%
Train Epoch:21[0/60000(0%)] Loss:0.094635
Train Epoch:21[12800/60000(21%)] Loss:0.164163
Train Epoch:21[25600/60000(43%)] Loss:0.219547
Train Epoch:21[38400/60000(64%)] Loss:0.036221
Train Epoch:21[51200/60000(85%)] Loss:0.119320
[21] Test Loss:0.040902,Accuracy:0.000000%
Train Epoch:22[0/60000(0%)] Loss:0.044222
Train Epoch:22[12800/60000(21%)] Loss:0.216186
Train Epoch:22[25600/60000(43%)] Loss:0.142717
Train Epoch:22[38400/60000(64%)] Loss:0.108182
Train Epoch:22[51200/60000(85%)] Loss:0.302198
[22] Test Loss:0.039190,Accuracy:0.000000%
Train Epoch:23[0/60000(0%)] Loss:0.093056
Train Epoch:23[12800/60000(21%)] Loss:0.149647
Train Epoch:23[25600/60000(43%)] Loss:0.061690
Train Epoch:23[38400/60000(64%)] Loss:0.084015
Train Epoch:23[51200/60000(85%)] Loss:0.131337
[23] Test Loss:0.041260,Accuracy:0.000000%
Train Epoch:24[0/60000(0%)] Loss:0.199704
Train Epoch:24[12800/60000(21%)] Loss:0.052211
Train Epoch:24[25600/60000(43%)] Loss:0.064280
Train Epoch:24[38400/60000(64%)] Loss:0.184073
Train Epoch:24[51200/60000(85%)] Loss:0.155948
[24] Test Loss:0.039777,Accuracy:0.000000%
Train Epoch:25[0/60000(0%)] Loss:0.113475
Train Epoch:25[12800/60000(21%)] Loss:0.077337
Train Epoch:25[25600/60000(43%)] Loss:0.243608
Train Epoch:25[38400/60000(64%)] Loss:0.088869
Train Epoch:25[51200/60000(85%)] Loss:0.036658
[25] Test Loss:0.040551,Accuracy:0.000000%
Train Epoch:26[0/60000(0%)] Loss:0.181940
Train Epoch:26[12800/60000(21%)] Loss:0.144679
Train Epoch:26[25600/60000(43%)] Loss:0.055117
Train Epoch:26[38400/60000(64%)] Loss:0.220691
Train Epoch:26[51200/60000(85%)] Loss:0.260967
[26] Test Loss:0.038796,Accuracy:0.000000%
Train Epoch:27[0/60000(0%)] Loss:0.171033
Train Epoch:27[12800/60000(21%)] Loss:0.043242
Train Epoch:27[25600/60000(43%)] Loss:0.092857
Train Epoch:27[38400/60000(64%)] Loss:0.158558
Train Epoch:27[51200/60000(85%)] Loss:0.107486
[27] Test Loss:0.039239,Accuracy:0.000000%
Train Epoch:28[0/60000(0%)] Loss:0.049218
Train Epoch:28[12800/60000(21%)] Loss:0.155139
Train Epoch:28[25600/60000(43%)] Loss:0.066454
Train Epoch:28[38400/60000(64%)] Loss:0.156582
Train Epoch:28[51200/60000(85%)] Loss:0.035966
[28] Test Loss:0.039149,Accuracy:0.000000%
Train Epoch:29[0/60000(0%)] Loss:0.117255
Train Epoch:29[12800/60000(21%)] Loss:0.074740
Train Epoch:29[25600/60000(43%)] Loss:0.103689
Train Epoch:29[38400/60000(64%)] Loss:0.046001
Train Epoch:29[51200/60000(85%)] Loss:0.117149
[29] Test Loss:0.035833,Accuracy:0.000000%
Train Epoch:30[0/60000(0%)] Loss:0.036455
Train Epoch:30[12800/60000(21%)] Loss:0.126105
Train Epoch:30[25600/60000(43%)] Loss:0.115692
Train Epoch:30[38400/60000(64%)] Loss:0.018417
Train Epoch:30[51200/60000(85%)] Loss:0.066026
[30] Test Loss:0.035685,Accuracy:0.000000%
Train Epoch:31[0/60000(0%)] Loss:0.124710
Train Epoch:31[12800/60000(21%)] Loss:0.158781
Train Epoch:31[25600/60000(43%)] Loss:0.080228
Train Epoch:31[38400/60000(64%)] Loss:0.119942
Train Epoch:31[51200/60000(85%)] Loss:0.290246
[31] Test Loss:0.036855,Accuracy:0.000000%
Train Epoch:32[0/60000(0%)] Loss:0.085644
Train Epoch:32[12800/60000(21%)] Loss:0.113613
Train Epoch:32[25600/60000(43%)] Loss:0.087788
Train Epoch:32[38400/60000(64%)] Loss:0.076330
Train Epoch:32[51200/60000(85%)] Loss:0.112151
[32] Test Loss:0.036337,Accuracy:0.000000%
Train Epoch:33[0/60000(0%)] Loss:0.189592
Train Epoch:33[12800/60000(21%)] Loss:0.046543
Train Epoch:33[25600/60000(43%)] Loss:0.026070
Train Epoch:33[38400/60000(64%)] Loss:0.058383
Train Epoch:33[51200/60000(85%)] Loss:0.310753
[33] Test Loss:0.037069,Accuracy:0.000000%
Train Epoch:34[0/60000(0%)] Loss:0.014610
Train Epoch:34[12800/60000(21%)] Loss:0.190224
Train Epoch:34[25600/60000(43%)] Loss:0.121834
Train Epoch:34[38400/60000(64%)] Loss:0.115401
Train Epoch:34[51200/60000(85%)] Loss:0.064396
[34] Test Loss:0.036501,Accuracy:0.000000%
Train Epoch:35[0/60000(0%)] Loss:0.180124
Train Epoch:35[12800/60000(21%)] Loss:0.130293
Train Epoch:35[25600/60000(43%)] Loss:0.179425
Train Epoch:35[38400/60000(64%)] Loss:0.028951
Train Epoch:35[51200/60000(85%)] Loss:0.065353
[35] Test Loss:0.035594,Accuracy:0.000000%
Train Epoch:36[0/60000(0%)] Loss:0.307430
Train Epoch:36[12800/60000(21%)] Loss:0.109721
Train Epoch:36[25600/60000(43%)] Loss:0.076229
Train Epoch:36[38400/60000(64%)] Loss:0.041029
Train Epoch:36[51200/60000(85%)] Loss:0.037678
[36] Test Loss:0.034982,Accuracy:0.000000%
Train Epoch:37[0/60000(0%)] Loss:0.063103
Train Epoch:37[12800/60000(21%)] Loss:0.162756
Train Epoch:37[25600/60000(43%)] Loss:0.093424
Train Epoch:37[38400/60000(64%)] Loss:0.070055
Train Epoch:37[51200/60000(85%)] Loss:0.061057
[37] Test Loss:0.033242,Accuracy:0.000000%
Train Epoch:38[0/60000(0%)] Loss:0.030475
Train Epoch:38[12800/60000(21%)] Loss:0.020539
Train Epoch:38[25600/60000(43%)] Loss:0.068967
Train Epoch:38[38400/60000(64%)] Loss:0.022671
Train Epoch:38[51200/60000(85%)] Loss:0.128011
[38] Test Loss:0.035033,Accuracy:0.000000%
Train Epoch:39[0/60000(0%)] Loss:0.011313
Train Epoch:39[12800/60000(21%)] Loss:0.102900
Train Epoch:39[25600/60000(43%)] Loss:0.083013
Train Epoch:39[38400/60000(64%)] Loss:0.040392
Train Epoch:39[51200/60000(85%)] Loss:0.146874
[39] Test Loss:0.032370,Accuracy:0.000000%
Train Epoch:40[0/60000(0%)] Loss:0.041181
Train Epoch:40[12800/60000(21%)] Loss:0.154289
Train Epoch:40[25600/60000(43%)] Loss:0.015487
Train Epoch:40[38400/60000(64%)] Loss:0.129316
Train Epoch:40[51200/60000(85%)] Loss:0.022184
[40] Test Loss:0.034573,Accuracy:0.000000%

Applying ResNet to a Color Dataset

ResNet addresses the problem that stacking a network deeper can actually make performance worse.
It is based on the hypothesis that, instead of learning the input-to-output mapping H(x) directly, it works better to learn the residual F(x) = H(x) - x and output F(x) + x.
CIFAR-10, like Fashion MNIST, is a 10-class classification dataset, but it consists of 32x32 three-channel color images rather than grayscale ones.

train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('./.data',
                   train=True,
                   download=True,
                   transform=transforms.Compose([
                       # data augmentation: random crops and horizontal flips,
                       # applied to the training set only
                       transforms.RandomCrop(32, padding=4),
                       transforms.RandomHorizontalFlip(),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))])),
    batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('./.data',
                   train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))])),
    batch_size=BATCH_SIZE, shuffle=True)

Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./.data/cifar-10-python.tar.gz
100% 170498071/170498071 [00:02<00:00, 72953299.54it/s]
Extracting ./.data/cifar-10-python.tar.gz to ./.data
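
A quick check that the new loader returns three-channel color batches (hypothetical snippet):

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 3, 32, 32]) -- RGB, 32x32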

class BasicBlock(nn.Module): 
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()  # identity shortcut by default
        if stride != 1 or in_planes != planes:
            # 1x1 projection so the shortcut matches the main path's
            # spatial size and channel count
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

    def forward(self, x): 
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # residual connection: add the input back
        out = F.relu(out)
        return out
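
When the stride or channel count changes, the projection shortcut keeps the two paths addable. A quick shape check (hypothetical snippet, assuming the BasicBlock class above):

block = BasicBlock(16, 32, stride=2)
x = torch.randn(4, 16, 32, 32)
print(block(x).shape)  # torch.Size([4, 32, 16, 16]) -- halved resolution, doubled channels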


class ResNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16 

        self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(16, 2, stride=1) 
        self.layer2 = self._make_layer(32, 2, stride=2)
        self.layer3 = self._make_layer(64, 2, stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, planes, num_blocks, stride):
        # only the first block may downsample; the rest keep stride 1
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(BasicBlock(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))  # 3x32x32 -> 16x32x32
        out = self.layer1(out)                 # -> 16x32x32
        out = self.layer2(out)                 # -> 32x16x16
        out = self.layer3(out)                 # -> 64x8x8
        out = F.avg_pool2d(out, 8)             # global average pool -> 64x1x1
        out = out.view(out.size(0), -1)        # flatten to (batch, 64)
        out = self.linear(out)
        return out
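
The full network can be shape-checked the same way (hypothetical snippet):

model = ResNet()
x = torch.randn(2, 3, 32, 32)  # a fake CIFAR-10 batch
print(model(x).shape)          # torch.Size([2, 10])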

Learning rate decay lowers the optimizer's learning rate step by step as training progresses, so that the later stages of optimization become more fine-grained. In PyTorch it can be applied with the built-in optim.lr_scheduler.StepLR.
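
To see what StepLR with step_size=50 and gamma=0.1 actually does, here is a standalone sketch (hypothetical; the dummy parameter exists only so the optimizer has something to hold). Note that with EPOCHS = 40 as set earlier, the 40-epoch run below ends before the first decay at epoch 50 would take effect.

import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=50, gamma=0.1)
for epoch in range(1, 101):
    opt.step()    # one (dummy) optimization step per epoch
    sched.step()  # the learning rate drops by a factor of 10 every 50 epochs
    if epoch in (49, 50, 100):
        print(epoch, sched.get_last_lr())  # 0.1, then ~0.01, then ~0.001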

model = ResNet().to(DEVICE)
optimizer = optim.SGD(model.parameters(), lr=0.1,
                      momentum=0.9, weight_decay=0.0005) 
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
for epoch in range(1, EPOCHS + 1):
    train(model, train_loader, optimizer, epoch)
    scheduler.step()  # step the scheduler once per epoch, after the optimizer updates
    test_loss, test_accuracy = evaluate(model, test_loader)

    print('[{}] Test Loss: {:.4f}, Accuracy: {:.2f}%'.format(
          epoch, test_loss, test_accuracy))