Deep Learning day 8 cat-and-dog-vgg16

bbkyoo·2021년 9월 14일
0

Deep learning

목록 보기
13/15

Cat and Dog DataSet

Imports

%pwd
'/kaggle/working'
!ls -la /kaggle/input
total 8
drwxr-xr-x 3 root   root    4096 Sep 14 06:01 .
drwxr-xr-x 5 root   root    4096 Sep 14 06:01 ..
drwxr-xr-x 4 nobody nogroup    0 Dec  4  2020 cat-and-dog
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import glob
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from tensorflow.keras.layers import Input, Conv2D, Dropout, Flatten, Activation, MaxPooling2D, Dense
from tensorflow.keras.layers import GlobalAveragePooling2D, BatchNormalization

from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from tensorflow.keras.applications.vgg16 import VGG16
# Fix the NumPy and TensorFlow RNG seeds so shuffling, augmentation and
# weight initialization are reproducible across notebook runs.
np.random.seed(42)
tf.random.set_seed(42)

Load Dataset

## Gather the training-set image paths, one list per class.
_train_root = "/kaggle/input/cat-and-dog/training_set/training_set"
training_cats = glob.glob(_train_root + "/cats/*.jpg")
training_dogs = glob.glob(_train_root + "/dogs/*.jpg")

print(len(training_cats), len(training_dogs))
4000 4005
## Gather the test-set image paths, one list per class.
_test_root = "/kaggle/input/cat-and-dog/test_set/test_set"
test_cats = glob.glob(_test_root + "/cats/*.jpg")
test_dogs = glob.glob(_test_root + "/dogs/*.jpg")

print(len(test_cats), len(test_dogs))
1011 1012
# Peek at a few test-set cat paths to sanity-check the glob pattern.
test_cats[:3]
['/kaggle/input/cat-and-dog/test_set/test_set/cats/cat.4414.jpg',
 '/kaggle/input/cat-and-dog/test_set/test_set/cats/cat.4420.jpg',
 '/kaggle/input/cat-and-dog/test_set/test_set/cats/cat.4880.jpg']

Visualize Data

# Eyeball the first four images of each class (dogs first, then cats).
# cv2 loads images as BGR, so convert to RGB before matplotlib draws them.
for sample_paths in (training_dogs[:4], training_cats[:4]):
    figure, axes = plt.subplots(figsize=(22, 6), nrows=1, ncols=4)
    for axis, path in zip(axes, sample_paths):
        bgr = cv2.imread(path)
        axis.imshow(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))

Preprocess data (from dataframe)

# Widen the column display so the long /kaggle/... paths are readable.
pd.set_option("display.max_colwidth", 200)

# Build (path, label) DataFrames in the shape flow_from_dataframe expects:
# all cats first, then all dogs, labelled "CAT" / "DOG".
train_df = pd.DataFrame({
    "path": training_cats + training_dogs,
    "label": ["CAT"] * len(training_cats) + ["DOG"] * len(training_dogs),
})

test_df = pd.DataFrame({
    "path": test_cats + test_dogs,
    "label": ["CAT"] * len(test_cats) + ["DOG"] * len(test_dogs),
})

print(train_df["label"].value_counts())
print(test_df["label"].value_counts())
DOG    4005
CAT    4000
Name: label, dtype: int64
DOG    1012
CAT    1011
Name: label, dtype: int64
# Hold out 20% of the training data for validation, stratified on the label
# so both splits keep the ~50/50 cat/dog balance.  An explicit random_state
# pins the split regardless of global NumPy seeding or cell execution order.
train_df, valid_df = train_test_split(train_df, test_size=0.2,
                                      stratify=train_df["label"], random_state=42)

print(train_df["label"].value_counts())
print(valid_df["label"].value_counts())
DOG    3204
CAT    3200
Name: label, dtype: int64
DOG    801
CAT    800
Name: label, dtype: int64
# Input resolution expected by VGG16, and the generator mini-batch size.
IMAGE_SIZE = 224
BATCH_SIZE = 64
# Sanity check: 6404 rows remain for training after the 80/20 split.
train_df.shape
(6404, 2)
# Training pipeline: scale pixels to [0, 1] and add random horizontal
# flips as light augmentation; binary labels come from the "label" column.
train_generator = ImageDataGenerator(rescale=1 / 255.0, horizontal_flip=True)
train_generator_iterator = train_generator.flow_from_dataframe(
    dataframe=train_df,
    x_col="path",
    y_col="label",
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode="binary",
)
Found 6404 validated image filenames belonging to 2 classes.
# Validation pipeline: rescaling only — no augmentation on held-out data.
valid_generator = ImageDataGenerator(rescale=1 / 255.0)
valid_generator_iterator = valid_generator.flow_from_dataframe(
    dataframe=valid_df,
    x_col="path",
    y_col="label",
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode="binary",
)
Found 1601 validated image filenames belonging to 2 classes.
# Test pipeline: rescaling only, mirroring the validation setup.
test_generator = ImageDataGenerator(rescale=1 / 255.0)
test_generator_iterator = test_generator.flow_from_dataframe(
    dataframe=test_df,
    x_col="path",
    y_col="label",
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode="binary",
)
Found 2023 validated image filenames belonging to 2 classes.
  • Fetch one batch from the training iterator to sanity-check array shapes
# Pull one batch to confirm shapes: (64, 224, 224, 3) images, (64,) labels.
image_array, label_array = next(train_generator_iterator)
print(image_array.shape, label_array.shape)
(64, 224, 224, 3) (64,)

Create Model

def _vgg_conv_block(x, filters, num_convs, block_id):
    """Stack `num_convs` 3x3 same-padded ReLU convolutions with `filters`
    channels, then halve the spatial dims with 2x2 max pooling.

    Layer names follow the canonical VGG16 scheme (blockN_convM / blockN_pool)
    so model.summary() output matches the original hand-written layout.
    """
    for conv_id in range(1, num_convs + 1):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same',
                   name=f'block{block_id}_conv{conv_id}')(x)
    return MaxPooling2D((2, 2), strides=(2, 2), name=f'block{block_id}_pool')(x)


def build_vgg16():
    """Build a VGG16-style binary classifier from scratch.

    Returns an uncompiled Keras Model: the five VGG16 convolutional blocks,
    global average pooling (instead of VGG's huge 4096-unit FC head, which
    keeps the parameter count at ~14.7M), a 50-unit ReLU layer, and a single
    sigmoid output for cat-vs-dog classification.
    """
    tf.keras.backend.clear_session()

    input_tensor = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

    # Canonical VGG16 layout: (filters, number of conv layers) per block.
    x = input_tensor
    for block_id, (filters, num_convs) in enumerate(
            [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)], start=1):
        x = _vgg_conv_block(x, filters, num_convs, block_id)

    x = GlobalAveragePooling2D()(x)
    x = Dense(50, activation='relu', name='fc1')(x)
    output = Dense(1, activation="sigmoid")(x)

    model = Model(inputs=input_tensor, outputs=output)
    return model

model = build_vgg16()

# ~14.7M parameters, all trainable (see the summary table below).
model.summary()
Model: "model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         [(None, 224, 224, 3)]     0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0         
_________________________________________________________________
global_average_pooling2d (Gl (None, 512)               0         
_________________________________________________________________
fc1 (Dense)                  (None, 50)                25650     
_________________________________________________________________
dense (Dense)                (None, 1)                 51        
=================================================================
Total params: 14,740,389
Trainable params: 14,740,389
Non-trainable params: 0
_________________________________________________________________

Compile Model, Train

# Training setup: checkpoint the best model by val_loss, stop after 12
# stagnant epochs (restoring the best weights), and multiply the learning
# rate by 0.2 once val_loss plateaus for 5 epochs.
callbacks = [
    ModelCheckpoint("my_keras_model.h5", save_best_only=True, verbose=1),
    EarlyStopping(patience=12, restore_best_weights=True),
    ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5,
                      mode="min", verbose=1),
]
model.compile(optimizer=Adam(0.0001), loss="binary_crossentropy",
              metrics=["accuracy"])
history = model.fit(train_generator_iterator, epochs=40,
                    validation_data=valid_generator_iterator,
                    callbacks=callbacks)
Epoch 1/40
101/101 [==============================] - 53s 508ms/step - loss: 0.6931 - accuracy: 0.4828 - val_loss: 0.6929 - val_accuracy: 0.5109

Epoch 00001: val_loss improved from inf to 0.69294, saving model to my_keras_model.h5
Epoch 2/40
101/101 [==============================] - 51s 500ms/step - loss: 0.6899 - accuracy: 0.5620 - val_loss: 0.6972 - val_accuracy: 0.5116

Epoch 00002: val_loss did not improve from 0.69294
Epoch 3/40
101/101 [==============================] - 50s 496ms/step - loss: 0.6619 - accuracy: 0.5904 - val_loss: 0.6344 - val_accuracy: 0.6221

Epoch 00003: val_loss improved from 0.69294 to 0.63439, saving model to my_keras_model.h5
Epoch 4/40
101/101 [==============================] - 50s 498ms/step - loss: 0.6299 - accuracy: 0.6319 - val_loss: 0.6414 - val_accuracy: 0.5996

Epoch 00004: val_loss did not improve from 0.63439
Epoch 5/40
101/101 [==============================] - 50s 497ms/step - loss: 0.6093 - accuracy: 0.6692 - val_loss: 0.6455 - val_accuracy: 0.6077

Epoch 00005: val_loss did not improve from 0.63439
Epoch 6/40
101/101 [==============================] - 50s 494ms/step - loss: 0.5964 - accuracy: 0.6778 - val_loss: 0.5958 - val_accuracy: 0.6765

Epoch 00006: val_loss improved from 0.63439 to 0.59580, saving model to my_keras_model.h5
Epoch 7/40
101/101 [==============================] - 50s 494ms/step - loss: 0.5800 - accuracy: 0.6883 - val_loss: 0.5812 - val_accuracy: 0.7033

Epoch 00007: val_loss improved from 0.59580 to 0.58125, saving model to my_keras_model.h5
Epoch 8/40
101/101 [==============================] - 49s 480ms/step - loss: 0.5767 - accuracy: 0.6955 - val_loss: 0.5550 - val_accuracy: 0.7158

Epoch 00008: val_loss improved from 0.58125 to 0.55502, saving model to my_keras_model.h5
Epoch 9/40
101/101 [==============================] - 48s 469ms/step - loss: 0.5509 - accuracy: 0.7175 - val_loss: 0.5579 - val_accuracy: 0.7071

Epoch 00009: val_loss did not improve from 0.55502
Epoch 10/40
101/101 [==============================] - 47s 468ms/step - loss: 0.5450 - accuracy: 0.7306 - val_loss: 0.5525 - val_accuracy: 0.7339

Epoch 00010: val_loss improved from 0.55502 to 0.55245, saving model to my_keras_model.h5
Epoch 11/40
101/101 [==============================] - 47s 465ms/step - loss: 0.5468 - accuracy: 0.7275 - val_loss: 0.5391 - val_accuracy: 0.7395

Epoch 00011: val_loss improved from 0.55245 to 0.53910, saving model to my_keras_model.h5
Epoch 12/40
101/101 [==============================] - 48s 469ms/step - loss: 0.5156 - accuracy: 0.7581 - val_loss: 0.5044 - val_accuracy: 0.7564

Epoch 00012: val_loss improved from 0.53910 to 0.50436, saving model to my_keras_model.h5
Epoch 13/40
101/101 [==============================] - 47s 466ms/step - loss: 0.5171 - accuracy: 0.7591 - val_loss: 0.5093 - val_accuracy: 0.7514

Epoch 00013: val_loss did not improve from 0.50436
Epoch 14/40
101/101 [==============================] - 48s 470ms/step - loss: 0.5055 - accuracy: 0.7625 - val_loss: 0.4751 - val_accuracy: 0.7820

Epoch 00014: val_loss improved from 0.50436 to 0.47507, saving model to my_keras_model.h5
Epoch 15/40
101/101 [==============================] - 47s 468ms/step - loss: 0.5123 - accuracy: 0.7578 - val_loss: 0.4633 - val_accuracy: 0.7914

Epoch 00015: val_loss improved from 0.47507 to 0.46335, saving model to my_keras_model.h5
Epoch 16/40
101/101 [==============================] - 48s 469ms/step - loss: 0.4980 - accuracy: 0.7537 - val_loss: 0.4532 - val_accuracy: 0.7983

Epoch 00016: val_loss improved from 0.46335 to 0.45324, saving model to my_keras_model.h5
Epoch 17/40
101/101 [==============================] - 48s 475ms/step - loss: 0.4515 - accuracy: 0.7987 - val_loss: 0.5073 - val_accuracy: 0.7558

Epoch 00017: val_loss did not improve from 0.45324
Epoch 18/40
101/101 [==============================] - 47s 467ms/step - loss: 0.4863 - accuracy: 0.7754 - val_loss: 0.4296 - val_accuracy: 0.8114

Epoch 00018: val_loss improved from 0.45324 to 0.42960, saving model to my_keras_model.h5
Epoch 19/40
101/101 [==============================] - 48s 470ms/step - loss: 0.3985 - accuracy: 0.8227 - val_loss: 0.4244 - val_accuracy: 0.8157

Epoch 00019: val_loss improved from 0.42960 to 0.42441, saving model to my_keras_model.h5
Epoch 20/40
101/101 [==============================] - 47s 466ms/step - loss: 0.3902 - accuracy: 0.8291 - val_loss: 0.3781 - val_accuracy: 0.8339

Epoch 00020: val_loss improved from 0.42441 to 0.37809, saving model to my_keras_model.h5
Epoch 21/40
101/101 [==============================] - 48s 471ms/step - loss: 0.3468 - accuracy: 0.8503 - val_loss: 0.3806 - val_accuracy: 0.8351

Epoch 00021: val_loss did not improve from 0.37809
Epoch 22/40
101/101 [==============================] - 47s 466ms/step - loss: 0.3566 - accuracy: 0.8459 - val_loss: 0.3554 - val_accuracy: 0.8532

Epoch 00022: val_loss improved from 0.37809 to 0.35544, saving model to my_keras_model.h5
Epoch 23/40
101/101 [==============================] - 48s 469ms/step - loss: 0.3338 - accuracy: 0.8557 - val_loss: 0.3499 - val_accuracy: 0.8532

Epoch 00023: val_loss improved from 0.35544 to 0.34992, saving model to my_keras_model.h5
Epoch 24/40
101/101 [==============================] - 47s 467ms/step - loss: 0.3160 - accuracy: 0.8635 - val_loss: 0.3200 - val_accuracy: 0.8701

Epoch 00024: val_loss improved from 0.34992 to 0.32001, saving model to my_keras_model.h5
Epoch 25/40
101/101 [==============================] - 48s 474ms/step - loss: 0.2887 - accuracy: 0.8794 - val_loss: 0.3276 - val_accuracy: 0.8707

Epoch 00025: val_loss did not improve from 0.32001
Epoch 26/40
101/101 [==============================] - 47s 465ms/step - loss: 0.2805 - accuracy: 0.8845 - val_loss: 0.3586 - val_accuracy: 0.8457

Epoch 00026: val_loss did not improve from 0.32001
Epoch 27/40
101/101 [==============================] - 48s 469ms/step - loss: 0.2705 - accuracy: 0.8832 - val_loss: 0.3123 - val_accuracy: 0.8595

Epoch 00027: val_loss improved from 0.32001 to 0.31232, saving model to my_keras_model.h5
Epoch 28/40
101/101 [==============================] - 47s 465ms/step - loss: 0.2866 - accuracy: 0.8815 - val_loss: 0.3240 - val_accuracy: 0.8620

Epoch 00028: val_loss did not improve from 0.31232
Epoch 29/40
101/101 [==============================] - 47s 466ms/step - loss: 0.2201 - accuracy: 0.9173 - val_loss: 0.2848 - val_accuracy: 0.8795

Epoch 00029: val_loss improved from 0.31232 to 0.28477, saving model to my_keras_model.h5
Epoch 30/40
101/101 [==============================] - 48s 468ms/step - loss: 0.1982 - accuracy: 0.9237 - val_loss: 0.3535 - val_accuracy: 0.8588

Epoch 00030: val_loss did not improve from 0.28477
Epoch 31/40
101/101 [==============================] - 47s 464ms/step - loss: 0.2332 - accuracy: 0.9097 - val_loss: 0.3125 - val_accuracy: 0.8763

Epoch 00031: val_loss did not improve from 0.28477
Epoch 32/40
101/101 [==============================] - 48s 469ms/step - loss: 0.1937 - accuracy: 0.9231 - val_loss: 0.3024 - val_accuracy: 0.8801

Epoch 00032: val_loss did not improve from 0.28477
Epoch 33/40
101/101 [==============================] - 47s 466ms/step - loss: 0.1869 - accuracy: 0.9288 - val_loss: 0.2906 - val_accuracy: 0.8932

Epoch 00033: val_loss did not improve from 0.28477
Epoch 34/40
101/101 [==============================] - 47s 469ms/step - loss: 0.1852 - accuracy: 0.9268 - val_loss: 0.3111 - val_accuracy: 0.8770

Epoch 00034: val_loss did not improve from 0.28477

Epoch 00034: ReduceLROnPlateau reducing learning rate to 1.9999999494757503e-05.
Epoch 35/40
101/101 [==============================] - 47s 467ms/step - loss: 0.1509 - accuracy: 0.9441 - val_loss: 0.3238 - val_accuracy: 0.8876

Epoch 00035: val_loss did not improve from 0.28477
Epoch 36/40
101/101 [==============================] - 48s 471ms/step - loss: 0.1148 - accuracy: 0.9582 - val_loss: 0.3308 - val_accuracy: 0.8832

Epoch 00036: val_loss did not improve from 0.28477
Epoch 37/40
101/101 [==============================] - 47s 467ms/step - loss: 0.1057 - accuracy: 0.9608 - val_loss: 0.3288 - val_accuracy: 0.8913

Epoch 00037: val_loss did not improve from 0.28477
Epoch 38/40
101/101 [==============================] - 47s 468ms/step - loss: 0.0990 - accuracy: 0.9631 - val_loss: 0.3746 - val_accuracy: 0.8819

Epoch 00038: val_loss did not improve from 0.28477
Epoch 39/40
101/101 [==============================] - 47s 468ms/step - loss: 0.1068 - accuracy: 0.9563 - val_loss: 0.3278 - val_accuracy: 0.8888

Epoch 00039: val_loss did not improve from 0.28477

Epoch 00039: ReduceLROnPlateau reducing learning rate to 3.999999898951501e-06.
Epoch 40/40
101/101 [==============================] - 48s 470ms/step - loss: 0.0868 - accuracy: 0.9704 - val_loss: 0.3219 - val_accuracy: 0.8901

Epoch 00040: val_loss did not improve from 0.28477

Evaluate

# Evaluate the scratch-trained model on the test set (~89% accuracy below).
model.evaluate(test_generator_iterator)
32/32 [==============================] - 21s 665ms/step - loss: 0.3447 - accuracy: 0.8903





[0.3446810245513916, 0.8902620077133179]

VGG16 Pre-trained Model 이용하기

IMAGE_SIZE = 224

# Reuse the ImageNet-pretrained VGG16 convolutional stack (include_top=False
# drops its FC head) and attach the same small classifier used for the
# scratch model.  All layers, pretrained ones included, stay trainable here,
# so this run fine-tunes the whole network.
input_tensor = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
base_model = VGG16(input_tensor=input_tensor, include_top=False, weights="imagenet")

pooled = GlobalAveragePooling2D()(base_model.output)
hidden = Dense(50, activation='relu', name='fc1')(pooled)
output = Dense(1, activation="sigmoid")(hidden)

model = Model(inputs=input_tensor, outputs=output)
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_4 (InputLayer)         [(None, 224, 224, 3)]     0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0         
_________________________________________________________________
global_average_pooling2d_2 ( (None, 512)               0         
_________________________________________________________________
fc1 (Dense)                  (None, 50)                25650     
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 51        
=================================================================
Total params: 14,740,389
Trainable params: 14,740,389
Non-trainable params: 0
_________________________________________________________________
# Same training recipe as the scratch run: best-val_loss checkpointing,
# early stopping after 12 flat epochs, and a x0.2 LR cut after 5.
checkpoint_cb = ModelCheckpoint("my_keras_model.h5", save_best_only=True, verbose=1)
early_stopping_cb = EarlyStopping(patience=12, restore_best_weights=True)
reducelr_cb = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5,
                                mode="min", verbose=1)
model.compile(optimizer=Adam(0.0001), loss="binary_crossentropy",
              metrics=["accuracy"])
history = model.fit(
    train_generator_iterator,
    epochs=40,
    validation_data=valid_generator_iterator,
    callbacks=[checkpoint_cb, early_stopping_cb, reducelr_cb],
)
Epoch 1/40
101/101 [==============================] - 49s 475ms/step - loss: 0.6934 - accuracy: 0.5734 - val_loss: 0.4318 - val_accuracy: 0.8082

Epoch 00001: val_loss improved from inf to 0.43175, saving model to my_keras_model.h5
Epoch 2/40
101/101 [==============================] - 47s 467ms/step - loss: 0.3882 - accuracy: 0.8380 - val_loss: 0.1763 - val_accuracy: 0.9269

Epoch 00002: val_loss improved from 0.43175 to 0.17635, saving model to my_keras_model.h5
Epoch 3/40
101/101 [==============================] - 48s 471ms/step - loss: 0.1630 - accuracy: 0.9354 - val_loss: 0.1667 - val_accuracy: 0.9282

Epoch 00003: val_loss improved from 0.17635 to 0.16675, saving model to my_keras_model.h5
Epoch 4/40
101/101 [==============================] - 47s 467ms/step - loss: 0.1304 - accuracy: 0.9477 - val_loss: 0.1022 - val_accuracy: 0.9588

Epoch 00004: val_loss improved from 0.16675 to 0.10215, saving model to my_keras_model.h5
Epoch 5/40
101/101 [==============================] - 48s 469ms/step - loss: 0.0825 - accuracy: 0.9697 - val_loss: 0.1405 - val_accuracy: 0.9413

Epoch 00005: val_loss did not improve from 0.10215
Epoch 6/40
101/101 [==============================] - 48s 469ms/step - loss: 0.0605 - accuracy: 0.9770 - val_loss: 0.0801 - val_accuracy: 0.9669

Epoch 00006: val_loss improved from 0.10215 to 0.08014, saving model to my_keras_model.h5
Epoch 7/40
101/101 [==============================] - 47s 467ms/step - loss: 0.0607 - accuracy: 0.9752 - val_loss: 0.0700 - val_accuracy: 0.9719

Epoch 00007: val_loss improved from 0.08014 to 0.07003, saving model to my_keras_model.h5
Epoch 8/40
101/101 [==============================] - 48s 472ms/step - loss: 0.0298 - accuracy: 0.9911 - val_loss: 0.0943 - val_accuracy: 0.9644

Epoch 00008: val_loss did not improve from 0.07003
Epoch 9/40
101/101 [==============================] - 47s 465ms/step - loss: 0.0269 - accuracy: 0.9907 - val_loss: 0.0930 - val_accuracy: 0.9638

Epoch 00009: val_loss did not improve from 0.07003
Epoch 10/40
101/101 [==============================] - 47s 468ms/step - loss: 0.0296 - accuracy: 0.9906 - val_loss: 0.1275 - val_accuracy: 0.9606

Epoch 00010: val_loss did not improve from 0.07003
Epoch 11/40
101/101 [==============================] - 47s 464ms/step - loss: 0.0277 - accuracy: 0.9891 - val_loss: 0.1187 - val_accuracy: 0.9675

Epoch 00011: val_loss did not improve from 0.07003
Epoch 12/40
101/101 [==============================] - 48s 471ms/step - loss: 0.0195 - accuracy: 0.9936 - val_loss: 0.1102 - val_accuracy: 0.9750

Epoch 00012: val_loss did not improve from 0.07003

Epoch 00012: ReduceLROnPlateau reducing learning rate to 1.9999999494757503e-05.
Epoch 13/40
101/101 [==============================] - 47s 466ms/step - loss: 0.0057 - accuracy: 0.9983 - val_loss: 0.0829 - val_accuracy: 0.9788

Epoch 00013: val_loss did not improve from 0.07003
Epoch 14/40
101/101 [==============================] - 48s 470ms/step - loss: 0.0022 - accuracy: 0.9994 - val_loss: 0.0855 - val_accuracy: 0.9781

Epoch 00014: val_loss did not improve from 0.07003
Epoch 15/40
101/101 [==============================] - 48s 469ms/step - loss: 0.0017 - accuracy: 0.9991 - val_loss: 0.0901 - val_accuracy: 0.9775

Epoch 00015: val_loss did not improve from 0.07003
Epoch 16/40
101/101 [==============================] - 48s 470ms/step - loss: 5.0941e-04 - accuracy: 1.0000 - val_loss: 0.1008 - val_accuracy: 0.9756

Epoch 00016: val_loss did not improve from 0.07003
Epoch 17/40
101/101 [==============================] - 47s 467ms/step - loss: 0.0011 - accuracy: 0.9996 - val_loss: 0.1034 - val_accuracy: 0.9788

Epoch 00017: val_loss did not improve from 0.07003

Epoch 00017: ReduceLROnPlateau reducing learning rate to 3.999999898951501e-06.
Epoch 18/40
101/101 [==============================] - 48s 472ms/step - loss: 2.4544e-04 - accuracy: 1.0000 - val_loss: 0.1041 - val_accuracy: 0.9794

Epoch 00018: val_loss did not improve from 0.07003
Epoch 19/40
101/101 [==============================] - 47s 467ms/step - loss: 3.4601e-04 - accuracy: 1.0000 - val_loss: 0.1049 - val_accuracy: 0.9794

Epoch 00019: val_loss did not improve from 0.07003
# Fine-tuned pretrained model: ~97% test accuracy vs ~89% from scratch.
model.evaluate(test_generator_iterator)
32/32 [==============================] - 9s 294ms/step - loss: 0.0796 - accuracy: 0.9698





[0.07955637574195862, 0.969846785068512]

Transfer Learning

IMAGE_SIZE = 224
input_tensor = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
base_model = VGG16(input_tensor=input_tensor, include_top=False, weights="imagenet")

# Transfer learning proper: freeze the pretrained convolutional base so
# only the new classification head is trained.  Without this line the cell
# is identical to the full fine-tuning run above, defeating this section's
# purpose (and the summary would again report every parameter trainable).
base_model.trainable = False

x = GlobalAveragePooling2D()(base_model.output)
x = Dense(50, activation='relu', name='fc1')(x)
output = Dense(1, activation="sigmoid")(x)

model = Model(inputs=input_tensor, outputs=output)
model.summary()
2021-09-14 08:10:44.451219: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set
2021-09-14 08:10:44.454643: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1
2021-09-14 08:10:44.493301: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:44.493981: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: 
pciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2021-09-14 08:10:44.494046: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0
2021-09-14 08:10:44.537245: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11
2021-09-14 08:10:44.537399: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11
2021-09-14 08:10:44.553821: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10
2021-09-14 08:10:44.599746: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10
2021-09-14 08:10:44.636602: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10
2021-09-14 08:10:44.644139: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11
2021-09-14 08:10:44.646671: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8
2021-09-14 08:10:44.646891: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:44.647630: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:44.649234: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0
2021-09-14 08:10:44.649780: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-09-14 08:10:44.649975: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set
2021-09-14 08:10:44.650140: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:44.650747: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: 
pciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2021-09-14 08:10:44.650799: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0
2021-09-14 08:10:44.650838: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11
2021-09-14 08:10:44.650860: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11
2021-09-14 08:10:44.650881: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10
2021-09-14 08:10:44.650901: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10
2021-09-14 08:10:44.650922: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10
2021-09-14 08:10:44.650944: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11
2021-09-14 08:10:44.650965: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8
2021-09-14 08:10:44.651061: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:44.651700: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:44.652232: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0
2021-09-14 08:10:44.653097: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0
2021-09-14 08:10:46.013924: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1261] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-09-14 08:10:46.013993: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1267]      0 
2021-09-14 08:10:46.014004: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1280] 0:   N 
2021-09-14 08:10:46.016050: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:46.016861: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:46.017604: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-14 08:10:46.018250: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1406] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 14957 MB memory) -> physical GPU (device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0, compute capability: 6.0)


Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58892288/58889256 [==============================] - 0s 0us/step
Model: "model"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         [(None, 224, 224, 3)]     0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0         
_________________________________________________________________
global_average_pooling2d (Gl (None, 512)               0         
_________________________________________________________________
fc1 (Dense)                  (None, 50)                25650     
_________________________________________________________________
dense (Dense)                (None, 1)                 51        
=================================================================
Total params: 14,740,389
Trainable params: 14,740,389
Non-trainable params: 0
_________________________________________________________________
model.layers  # inspect the layer objects of the VGG16-based model (repr echoed below)
[<tensorflow.python.keras.engine.input_layer.InputLayer at 0x7f1006c38190>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006a94410>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006c38dd0>,
 <tensorflow.python.keras.layers.pooling.MaxPooling2D at 0x7f1006bc0c90>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b4cf90>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b4c850>,
 <tensorflow.python.keras.layers.pooling.MaxPooling2D at 0x7f1006b57e50>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b5fc10>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b64e10>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b57f50>,
 <tensorflow.python.keras.layers.pooling.MaxPooling2D at 0x7f1006b57550>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b74f90>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b79d10>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b68910>,
 <tensorflow.python.keras.layers.pooling.MaxPooling2D at 0x7f1006b04c90>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b08e50>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b04650>,
 <tensorflow.python.keras.layers.convolutional.Conv2D at 0x7f1006b10e50>,
 <tensorflow.python.keras.layers.pooling.MaxPooling2D at 0x7f1006b15ed0>,
 <tensorflow.python.keras.layers.pooling.GlobalAveragePooling2D at 0x7f105fa4f390>,
 <tensorflow.python.keras.layers.core.Dense at 0x7f1006b28b50>,
 <tensorflow.python.keras.layers.core.Dense at 0x7f1006b10b50>]
# Report the trainable flag of every layer before any freezing is applied.
for vgg_layer in model.layers:
    print(vgg_layer.name, vgg_layer.trainable)
input_2 True
block1_conv1 True
block1_conv2 True
block1_pool True
block2_conv1 True
block2_conv2 True
block2_pool True
block3_conv1 True
block3_conv2 True
block3_conv3 True
block3_pool True
block4_conv1 True
block4_conv2 True
block4_conv3 True
block4_pool True
block5_conv1 True
block5_conv2 True
block5_conv3 True
block5_pool True
global_average_pooling2d True
fc1 True
dense True
type(model.layers)  # confirm that .layers is a plain Python list (so it supports slicing)
list
# Freeze every layer except the classification head (the last 3 layers:
# global-average-pool, fc1, output dense), then print each layer's
# trainable flag in order so the split can be verified.
head_start = len(model.layers) - 3
for idx, lyr in enumerate(model.layers):
    if idx < head_start:
        lyr.trainable = False
    print(lyr.name, lyr.trainable)
input_2 False
block1_conv1 False
block1_conv2 False
block1_pool False
block2_conv1 False
block2_conv2 False
block2_pool False
block3_conv1 False
block3_conv2 False
block3_conv3 False
block3_pool False
block4_conv1 False
block4_conv2 False
block4_conv3 False
block4_pool False
block5_conv1 False
block5_conv2 False
block5_conv3 False
block5_pool False
global_average_pooling2d True
fc1 True
dense True
# Training callbacks: keep only the best checkpoint on disk, stop early
# when val_loss stalls (restoring the best weights), and shrink the
# learning rate by 5x after 5 plateaued epochs.
checkpoint_cb = ModelCheckpoint(
    "my_keras_model.h5", save_best_only=True, verbose=1
)
early_stopping_cb = EarlyStopping(patience=12, restore_best_weights=True)
reducelr_cb = ReduceLROnPlateau(
    monitor="val_loss", mode="min", factor=0.2, patience=5, verbose=1
)

# Binary cat-vs-dog head -> sigmoid output with binary cross-entropy;
# small Adam LR since we are fine-tuning a pretrained backbone.
model.compile(optimizer=Adam(0.0001), loss="binary_crossentropy", metrics=["accuracy"])

training_callbacks = [checkpoint_cb, early_stopping_cb, reducelr_cb]
history = model.fit(
    train_generator_iterator,
    validation_data=valid_generator_iterator,
    epochs=40,
    callbacks=training_callbacks,
)
2021-09-14 08:16:07.035827: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
2021-09-14 08:16:07.039887: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2000144999 Hz


Epoch 1/40


2021-09-14 08:16:07.721224: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11
2021-09-14 08:16:08.542620: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11
2021-09-14 08:16:08.592307: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8


101/101 [==============================] - 84s 720ms/step - loss: 0.6684 - accuracy: 0.6078 - val_loss: 0.6303 - val_accuracy: 0.7695

Epoch 00001: val_loss improved from inf to 0.63027, saving model to my_keras_model.h5
Epoch 2/40
101/101 [==============================] - 38s 379ms/step - loss: 0.6131 - accuracy: 0.7990 - val_loss: 0.5786 - val_accuracy: 0.8145

Epoch 00002: val_loss improved from 0.63027 to 0.57865, saving model to my_keras_model.h5
Epoch 3/40
101/101 [==============================] - 38s 379ms/step - loss: 0.5587 - accuracy: 0.8233 - val_loss: 0.5328 - val_accuracy: 0.8251

Epoch 00003: val_loss improved from 0.57865 to 0.53277, saving model to my_keras_model.h5
Epoch 4/40
101/101 [==============================] - 38s 373ms/step - loss: 0.5127 - accuracy: 0.8395 - val_loss: 0.4952 - val_accuracy: 0.8326

Epoch 00004: val_loss improved from 0.53277 to 0.49522, saving model to my_keras_model.h5
Epoch 5/40
101/101 [==============================] - 38s 381ms/step - loss: 0.4724 - accuracy: 0.8492 - val_loss: 0.4620 - val_accuracy: 0.8407

Epoch 00005: val_loss improved from 0.49522 to 0.46202, saving model to my_keras_model.h5
Epoch 6/40
101/101 [==============================] - 38s 374ms/step - loss: 0.4384 - accuracy: 0.8646 - val_loss: 0.4346 - val_accuracy: 0.8457

Epoch 00006: val_loss improved from 0.46202 to 0.43463, saving model to my_keras_model.h5
Epoch 7/40
101/101 [==============================] - 38s 379ms/step - loss: 0.4111 - accuracy: 0.8570 - val_loss: 0.4083 - val_accuracy: 0.8551

Epoch 00007: val_loss improved from 0.43463 to 0.40831, saving model to my_keras_model.h5
Epoch 8/40
101/101 [==============================] - 38s 378ms/step - loss: 0.3806 - accuracy: 0.8787 - val_loss: 0.3865 - val_accuracy: 0.8588

Epoch 00008: val_loss improved from 0.40831 to 0.38650, saving model to my_keras_model.h5
Epoch 9/40
101/101 [==============================] - 39s 382ms/step - loss: 0.3677 - accuracy: 0.8724 - val_loss: 0.3700 - val_accuracy: 0.8645

Epoch 00009: val_loss improved from 0.38650 to 0.37000, saving model to my_keras_model.h5
Epoch 10/40
101/101 [==============================] - 38s 378ms/step - loss: 0.3449 - accuracy: 0.8782 - val_loss: 0.3546 - val_accuracy: 0.8657

Epoch 00010: val_loss improved from 0.37000 to 0.35464, saving model to my_keras_model.h5
Epoch 11/40
101/101 [==============================] - 38s 381ms/step - loss: 0.3296 - accuracy: 0.8845 - val_loss: 0.3411 - val_accuracy: 0.8707

Epoch 00011: val_loss improved from 0.35464 to 0.34106, saving model to my_keras_model.h5
Epoch 12/40
101/101 [==============================] - 38s 377ms/step - loss: 0.3207 - accuracy: 0.8903 - val_loss: 0.3289 - val_accuracy: 0.8751

Epoch 00012: val_loss improved from 0.34106 to 0.32885, saving model to my_keras_model.h5
Epoch 13/40
101/101 [==============================] - 38s 378ms/step - loss: 0.3047 - accuracy: 0.8893 - val_loss: 0.3187 - val_accuracy: 0.8770

Epoch 00013: val_loss improved from 0.32885 to 0.31868, saving model to my_keras_model.h5
Epoch 14/40
101/101 [==============================] - 38s 375ms/step - loss: 0.2969 - accuracy: 0.8896 - val_loss: 0.3132 - val_accuracy: 0.8738

Epoch 00014: val_loss improved from 0.31868 to 0.31318, saving model to my_keras_model.h5
Epoch 15/40
101/101 [==============================] - 38s 381ms/step - loss: 0.2799 - accuracy: 0.8978 - val_loss: 0.3019 - val_accuracy: 0.8819

Epoch 00015: val_loss improved from 0.31318 to 0.30192, saving model to my_keras_model.h5
Epoch 16/40
101/101 [==============================] - 38s 377ms/step - loss: 0.2806 - accuracy: 0.8949 - val_loss: 0.2948 - val_accuracy: 0.8876

Epoch 00016: val_loss improved from 0.30192 to 0.29476, saving model to my_keras_model.h5
Epoch 17/40
101/101 [==============================] - 38s 379ms/step - loss: 0.2697 - accuracy: 0.8982 - val_loss: 0.2895 - val_accuracy: 0.8844

Epoch 00017: val_loss improved from 0.29476 to 0.28950, saving model to my_keras_model.h5
Epoch 18/40
101/101 [==============================] - 38s 381ms/step - loss: 0.2675 - accuracy: 0.8968 - val_loss: 0.2848 - val_accuracy: 0.8851

Epoch 00018: val_loss improved from 0.28950 to 0.28484, saving model to my_keras_model.h5
Epoch 19/40
101/101 [==============================] - 38s 380ms/step - loss: 0.2581 - accuracy: 0.9035 - val_loss: 0.2779 - val_accuracy: 0.8901

Epoch 00019: val_loss improved from 0.28484 to 0.27786, saving model to my_keras_model.h5
Epoch 20/40
101/101 [==============================] - 38s 376ms/step - loss: 0.2458 - accuracy: 0.9095 - val_loss: 0.2729 - val_accuracy: 0.8907

Epoch 00020: val_loss improved from 0.27786 to 0.27290, saving model to my_keras_model.h5
Epoch 21/40
101/101 [==============================] - 38s 379ms/step - loss: 0.2441 - accuracy: 0.9104 - val_loss: 0.2684 - val_accuracy: 0.8938

Epoch 00021: val_loss improved from 0.27290 to 0.26836, saving model to my_keras_model.h5
Epoch 22/40
101/101 [==============================] - 38s 379ms/step - loss: 0.2446 - accuracy: 0.9033 - val_loss: 0.2667 - val_accuracy: 0.8919

Epoch 00022: val_loss improved from 0.26836 to 0.26668, saving model to my_keras_model.h5
Epoch 23/40
101/101 [==============================] - 38s 379ms/step - loss: 0.2401 - accuracy: 0.9084 - val_loss: 0.2625 - val_accuracy: 0.8982

Epoch 00023: val_loss improved from 0.26668 to 0.26247, saving model to my_keras_model.h5
Epoch 24/40
101/101 [==============================] - 39s 381ms/step - loss: 0.2421 - accuracy: 0.9075 - val_loss: 0.2579 - val_accuracy: 0.8963

Epoch 00024: val_loss improved from 0.26247 to 0.25792, saving model to my_keras_model.h5
Epoch 25/40
101/101 [==============================] - 38s 379ms/step - loss: 0.2243 - accuracy: 0.9129 - val_loss: 0.2547 - val_accuracy: 0.8982

Epoch 00025: val_loss improved from 0.25792 to 0.25467, saving model to my_keras_model.h5
Epoch 26/40
101/101 [==============================] - 38s 374ms/step - loss: 0.2294 - accuracy: 0.9109 - val_loss: 0.2520 - val_accuracy: 0.8969

Epoch 00026: val_loss improved from 0.25467 to 0.25201, saving model to my_keras_model.h5
Epoch 27/40
101/101 [==============================] - 38s 377ms/step - loss: 0.2188 - accuracy: 0.9180 - val_loss: 0.2492 - val_accuracy: 0.9013

Epoch 00027: val_loss improved from 0.25201 to 0.24915, saving model to my_keras_model.h5
Epoch 28/40
101/101 [==============================] - 38s 378ms/step - loss: 0.2347 - accuracy: 0.9036 - val_loss: 0.2483 - val_accuracy: 0.9001

Epoch 00028: val_loss improved from 0.24915 to 0.24832, saving model to my_keras_model.h5
Epoch 29/40
101/101 [==============================] - 38s 377ms/step - loss: 0.2185 - accuracy: 0.9164 - val_loss: 0.2450 - val_accuracy: 0.9001

Epoch 00029: val_loss improved from 0.24832 to 0.24503, saving model to my_keras_model.h5
Epoch 30/40
101/101 [==============================] - 39s 380ms/step - loss: 0.2183 - accuracy: 0.9147 - val_loss: 0.2439 - val_accuracy: 0.9026

Epoch 00030: val_loss improved from 0.24503 to 0.24394, saving model to my_keras_model.h5
Epoch 31/40
101/101 [==============================] - 38s 380ms/step - loss: 0.2196 - accuracy: 0.9173 - val_loss: 0.2419 - val_accuracy: 0.9038

Epoch 00031: val_loss improved from 0.24394 to 0.24188, saving model to my_keras_model.h5
Epoch 32/40
101/101 [==============================] - 38s 376ms/step - loss: 0.2176 - accuracy: 0.9118 - val_loss: 0.2382 - val_accuracy: 0.9032

Epoch 00032: val_loss improved from 0.24188 to 0.23825, saving model to my_keras_model.h5
Epoch 33/40
101/101 [==============================] - 38s 380ms/step - loss: 0.2083 - accuracy: 0.9153 - val_loss: 0.2365 - val_accuracy: 0.9038

Epoch 00033: val_loss improved from 0.23825 to 0.23647, saving model to my_keras_model.h5
Epoch 34/40
101/101 [==============================] - 38s 380ms/step - loss: 0.2120 - accuracy: 0.9172 - val_loss: 0.2356 - val_accuracy: 0.9057

Epoch 00034: val_loss improved from 0.23647 to 0.23557, saving model to my_keras_model.h5
Epoch 35/40
101/101 [==============================] - 39s 387ms/step - loss: 0.2040 - accuracy: 0.9233 - val_loss: 0.2336 - val_accuracy: 0.9051

Epoch 00035: val_loss improved from 0.23557 to 0.23360, saving model to my_keras_model.h5
Epoch 36/40
101/101 [==============================] - 38s 377ms/step - loss: 0.2035 - accuracy: 0.9204 - val_loss: 0.2345 - val_accuracy: 0.9057

Epoch 00036: val_loss did not improve from 0.23360
Epoch 37/40
101/101 [==============================] - 38s 381ms/step - loss: 0.2065 - accuracy: 0.9164 - val_loss: 0.2303 - val_accuracy: 0.9076

Epoch 00037: val_loss improved from 0.23360 to 0.23027, saving model to my_keras_model.h5
Epoch 38/40
101/101 [==============================] - 38s 374ms/step - loss: 0.2026 - accuracy: 0.9254 - val_loss: 0.2286 - val_accuracy: 0.9076

Epoch 00038: val_loss improved from 0.23027 to 0.22859, saving model to my_keras_model.h5
Epoch 39/40
101/101 [==============================] - 38s 379ms/step - loss: 0.1957 - accuracy: 0.9231 - val_loss: 0.2282 - val_accuracy: 0.9088

Epoch 00039: val_loss improved from 0.22859 to 0.22825, saving model to my_keras_model.h5
Epoch 40/40
101/101 [==============================] - 38s 376ms/step - loss: 0.2047 - accuracy: 0.9123 - val_loss: 0.2273 - val_accuracy: 0.9082

Epoch 00040: val_loss improved from 0.22825 to 0.22725, saving model to my_keras_model.h5
model.evaluate(test_generator_iterator)  # final [loss, accuracy] on the held-out test set
32/32 [==============================] - 21s 661ms/step - loss: 0.2214 - accuracy: 0.9081





[0.22137922048568726, 0.9080573320388794]
profile
어제보다 오늘 더 성장하는 개발자!

0개의 댓글