Deep Learning Day 8: Cat and Dog (flow_from_directory)

bbkyoo · September 14, 2021
!pwd
/content
from google.colab import drive
drive.mount("/content/drive")
Mounted at /content/drive
!unzip -q -d "cat-and-dog" "/content/drive/MyDrive/archive.zip"
!ls -la
total 24
drwxr-xr-x 1 root root 4096 Sep 14 02:12 .
drwxr-xr-x 1 root root 4096 Sep 14 01:56 ..
drwxr-xr-x 4 root root 4096 Sep 14 02:12 cat-and-dog
drwxr-xr-x 4 root root 4096 Sep  1 19:26 .config
drwx------ 6 root root 4096 Sep 14 02:10 drive
drwxr-xr-x 1 root root 4096 Sep  1 19:26 sample_data

Cat and Dog Dataset

Imports

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import glob
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from tensorflow.keras.layers import Input, Conv2D, Dropout, Flatten, Activation, MaxPooling2D, Dense
from tensorflow.keras.layers import GlobalAveragePooling2D, BatchNormalization

from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
np.random.seed(42)
tf.random.set_seed(42)

Load Dataset

## image files in the training_set directory
training_cats = glob.glob("./cat-and-dog/training_set/training_set/cats/*.jpg")
training_dogs = glob.glob("./cat-and-dog/training_set/training_set/dogs/*.jpg")

print(len(training_cats), len(training_dogs))
4000 4005
## image files in the test_set directory
test_cats = glob.glob("./cat-and-dog/test_set/test_set/cats/*.jpg")
test_dogs = glob.glob("./cat-and-dog/test_set/test_set/dogs/*.jpg")

print(len(test_cats), len(test_dogs))
1011 1012
test_cats[:3]
['./cat-and-dog/test_set/test_set/cats/cat.4714.jpg',
 './cat-and-dog/test_set/test_set/cats/cat.4558.jpg',
 './cat-and-dog/test_set/test_set/cats/cat.4336.jpg']

Visualize Data

## Display the first four dog and cat training images (OpenCV loads BGR, so convert to RGB).
figure, axes = plt.subplots(figsize=(22, 6), nrows=1, ncols=4)
dog_images = training_dogs[:4]
for i in range(4):
    image = cv2.cvtColor(cv2.imread(dog_images[i]), cv2.COLOR_BGR2RGB)
    axes[i].imshow(image)

figure, axes = plt.subplots(figsize=(22, 6), nrows=1, ncols=4)
cat_images = training_cats[:4]
for i in range(4):
    image = cv2.cvtColor(cv2.imread(cat_images[i]), cv2.COLOR_BGR2RGB)
    axes[i].imshow(image)

Preprocess Data

BATCH_SIZE = 64
train_generator = ImageDataGenerator(horizontal_flip=True, rescale=1/255.0)
train_generator_iterator = train_generator.flow_from_directory(directory="./cat-and-dog/training_set/training_set",
                                                               target_size=(224, 224), batch_size=BATCH_SIZE, class_mode="binary")
Found 8005 images belonging to 2 classes.
test_generator = ImageDataGenerator(rescale=1/255.0)
test_generator_iterator = test_generator.flow_from_directory(directory="./cat-and-dog/test_set/test_set",
                                                             target_size=(224, 224), batch_size=BATCH_SIZE, class_mode="binary")
Found 2023 images belonging to 2 classes.
  • Fetch a sample batch from the training iterator
image_array, label_array = next(train_generator_iterator)
print(image_array.shape, label_array.shape)
(64, 224, 224, 3) (64,)
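
As a quick sanity check (not in the original notebook), the label mapping that flow_from_directory derives from the subdirectory names and one image from the fetched batch can be inspected; a minimal sketch:

## Sketch: confirm the class-to-index mapping and look at one batch sample.
print(train_generator_iterator.class_indices)   # expected: {'cats': 0, 'dogs': 1}

plt.figure(figsize=(4, 4))
plt.imshow(image_array[0])                      # already rescaled to [0, 1]
plt.title("label = %.0f" % label_array[0])
plt.axis("off")
plt.show()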

Create Model

def build_extended_gap_model():
  tf.keras.backend.clear_session()

  input_tensor = Input(shape=(224, 224, 3))

  # Block 1: two conv layers (32 -> 64 filters), each followed by BN + ReLU, then downsample.
  x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same")(input_tensor)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same")(x)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  x = MaxPooling2D(pool_size=(2, 2))(x)

  # Block 2: two 128-filter conv layers, then downsample.
  x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same")(x)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  x = MaxPooling2D(pool_size=(2, 2))(x)

  # Block 3: two 256-filter conv layers; pooling intentionally left out here.
  x = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same")(x)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  x = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same")(x)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  #x = MaxPooling2D(pool_size=(2, 2))(x)

  # Block 4: single 512-filter conv layer, then downsample to 28x28.
  x = Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same")(x)
  x = BatchNormalization()(x)
  x = Activation("relu")(x)
  x = MaxPooling2D(pool_size=(2, 2))(x)

  # Classification head: GlobalAveragePooling2D instead of Flatten keeps the
  # parameter count small, followed by dropout-regularized dense layers.
  # x = Flatten()(x)
  x = GlobalAveragePooling2D()(x)
  x = Dropout(rate=0.5)(x)
  x = Dense(300, activation="relu")(x)
  x = Dropout(rate=0.3)(x)
  x = Dense(100, activation="relu")(x)
  x = Dropout(rate=0.3)(x)
  output = Dense(1, activation="sigmoid")(x)

  model = Model(inputs=input_tensor, outputs=output)
  return model

model = build_extended_gap_model()
model.summary()
Model: "model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         [(None, 224, 224, 3)]     0         
_________________________________________________________________
conv2d (Conv2D)              (None, 224, 224, 32)      896       
_________________________________________________________________
batch_normalization (BatchNo (None, 224, 224, 32)      128       
_________________________________________________________________
activation (Activation)      (None, 224, 224, 32)      0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 224, 224, 64)      18496     
_________________________________________________________________
batch_normalization_1 (Batch (None, 224, 224, 64)      256       
_________________________________________________________________
activation_1 (Activation)    (None, 224, 224, 64)      0         
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 112, 112, 64)      0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 112, 112, 128)     73856     
_________________________________________________________________
batch_normalization_2 (Batch (None, 112, 112, 128)     512       
_________________________________________________________________
activation_2 (Activation)    (None, 112, 112, 128)     0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 112, 112, 128)     147584    
_________________________________________________________________
batch_normalization_3 (Batch (None, 112, 112, 128)     512       
_________________________________________________________________
activation_3 (Activation)    (None, 112, 112, 128)     0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 56, 56, 128)       0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 56, 56, 256)       295168    
_________________________________________________________________
batch_normalization_4 (Batch (None, 56, 56, 256)       1024      
_________________________________________________________________
activation_4 (Activation)    (None, 56, 56, 256)       0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 56, 56, 256)       590080    
_________________________________________________________________
batch_normalization_5 (Batch (None, 56, 56, 256)       1024      
_________________________________________________________________
activation_5 (Activation)    (None, 56, 56, 256)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 56, 56, 512)       1180160   
_________________________________________________________________
batch_normalization_6 (Batch (None, 56, 56, 512)       2048      
_________________________________________________________________
activation_6 (Activation)    (None, 56, 56, 512)       0         
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 28, 28, 512)       0         
_________________________________________________________________
global_average_pooling2d (Gl (None, 512)               0         
_________________________________________________________________
dropout (Dropout)            (None, 512)               0         
_________________________________________________________________
dense (Dense)                (None, 300)               153900    
_________________________________________________________________
dropout_1 (Dropout)          (None, 300)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               30100     
_________________________________________________________________
dropout_2 (Dropout)          (None, 100)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 101       
=================================================================
Total params: 2,495,845
Trainable params: 2,493,093
Non-trainable params: 2,752
_________________________________________________________________
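
The benefit of GlobalAveragePooling2D over Flatten shows up directly in the parameter count: the final feature map is (28, 28, 512), so flattening it would feed 28 × 28 × 512 = 401,408 values into Dense(300), and that single layer alone would cost about 120.4M parameters. Pooling reduces the feature map to 512 values, so the same layer needs only 512 × 300 + 300 = 153,900 parameters, matching the summary above. A quick check:

## Rough parameter count for the first Dense layer under each head design.
flatten_dense_params = 28 * 28 * 512 * 300 + 300   # Flatten -> Dense(300): ~120.4M
gap_dense_params     = 512 * 300 + 300             # GAP     -> Dense(300): 153,900
print(flatten_dense_params, gap_dense_params)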

Compile and Train Model

model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(train_generator_iterator, epochs=30)
Epoch 1/30
126/126 [==============================] - 216s 1s/step - loss: 0.6830 - accuracy: 0.5906
Epoch 2/30
126/126 [==============================] - 175s 1s/step - loss: 0.6434 - accuracy: 0.6206
Epoch 3/30
126/126 [==============================] - 174s 1s/step - loss: 0.6326 - accuracy: 0.6452
Epoch 4/30
126/126 [==============================] - 174s 1s/step - loss: 0.5961 - accuracy: 0.6832
Epoch 5/30
126/126 [==============================] - 175s 1s/step - loss: 0.5777 - accuracy: 0.7001
Epoch 6/30
126/126 [==============================] - 175s 1s/step - loss: 0.5544 - accuracy: 0.7212
Epoch 7/30
126/126 [==============================] - 175s 1s/step - loss: 0.5424 - accuracy: 0.7322
Epoch 8/30
126/126 [==============================] - 174s 1s/step - loss: 0.5277 - accuracy: 0.7467
Epoch 9/30
126/126 [==============================] - 174s 1s/step - loss: 0.5167 - accuracy: 0.7484
Epoch 10/30
126/126 [==============================] - 175s 1s/step - loss: 0.4959 - accuracy: 0.7654
Epoch 11/30
126/126 [==============================] - 175s 1s/step - loss: 0.4832 - accuracy: 0.7761
Epoch 12/30
126/126 [==============================] - 175s 1s/step - loss: 0.4616 - accuracy: 0.7834
Epoch 13/30
126/126 [==============================] - 175s 1s/step - loss: 0.4526 - accuracy: 0.7955
Epoch 14/30
126/126 [==============================] - 175s 1s/step - loss: 0.4410 - accuracy: 0.7968
Epoch 15/30
126/126 [==============================] - 175s 1s/step - loss: 0.4157 - accuracy: 0.8200
Epoch 16/30
126/126 [==============================] - 175s 1s/step - loss: 0.4105 - accuracy: 0.8207
Epoch 17/30
126/126 [==============================] - 175s 1s/step - loss: 0.3744 - accuracy: 0.8400
Epoch 18/30
126/126 [==============================] - 175s 1s/step - loss: 0.3705 - accuracy: 0.8408
Epoch 19/30
126/126 [==============================] - 175s 1s/step - loss: 0.3665 - accuracy: 0.8422
Epoch 20/30
126/126 [==============================] - 175s 1s/step - loss: 0.3360 - accuracy: 0.8602
Epoch 21/30
126/126 [==============================] - 175s 1s/step - loss: 0.3092 - accuracy: 0.8712
Epoch 22/30
126/126 [==============================] - 175s 1s/step - loss: 0.3040 - accuracy: 0.8701
Epoch 23/30
126/126 [==============================] - 175s 1s/step - loss: 0.2909 - accuracy: 0.8782
Epoch 24/30
126/126 [==============================] - 175s 1s/step - loss: 0.2723 - accuracy: 0.8882
Epoch 25/30
126/126 [==============================] - 175s 1s/step - loss: 0.2590 - accuracy: 0.8954
Epoch 26/30
126/126 [==============================] - 175s 1s/step - loss: 0.2564 - accuracy: 0.8966
Epoch 27/30
126/126 [==============================] - 175s 1s/step - loss: 0.2334 - accuracy: 0.9053
Epoch 28/30
126/126 [==============================] - 175s 1s/step - loss: 0.2230 - accuracy: 0.9114
Epoch 29/30
126/126 [==============================] - 175s 1s/step - loss: 0.2193 - accuracy: 0.9127
Epoch 30/30
126/126 [==============================] - 175s 1s/step - loss: 0.1996 - accuracy: 0.9207
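
The run above trains on the full training set with no validation data, so overfitting only becomes visible at evaluation time. Below is a minimal sketch of how a validation split and early stopping could be wired in with the same ImageDataGenerator API; this was not used in the original run, and the split ratio and patience values are assumptions:

from tensorflow.keras.callbacks import EarlyStopping

## Hypothetical setup: carve 20% of the training directory out as a validation subset.
split_generator = ImageDataGenerator(horizontal_flip=True, rescale=1/255.0, validation_split=0.2)
train_it = split_generator.flow_from_directory(directory="./cat-and-dog/training_set/training_set",
                                               target_size=(224, 224), batch_size=BATCH_SIZE,
                                               class_mode="binary", subset="training")
val_it = split_generator.flow_from_directory(directory="./cat-and-dog/training_set/training_set",
                                             target_size=(224, 224), batch_size=BATCH_SIZE,
                                             class_mode="binary", subset="validation")

## Stop when the validation loss has not improved for 3 epochs and keep the best weights.
early_stop = EarlyStopping(monitor="val_loss", patience=3, restore_best_weights=True)
# history = model.fit(train_it, validation_data=val_it, epochs=30, callbacks=[early_stop])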

Evaluate

model.evaluate(test_generator_iterator)
32/32 [==============================] - 17s 512ms/step - loss: 0.2798 - accuracy: 0.8878
[0.2798061668872833, 0.8877903819084167]
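
The history object returned by fit stores the per-epoch loss and accuracy, so the training curves can be plotted directly; a short sketch (not part of the original notebook):

## Plot the training loss and accuracy recorded in history.history.
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history["loss"], label="train loss")
plt.xlabel("epoch")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history["accuracy"], label="train accuracy")
plt.xlabel("epoch")
plt.legend()
plt.show()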