Naver Project (Image Classification)

Jacob Kim · January 31, 2024

Naver Project Week 1

import tensorflow as tf
from tensorflow.keras import layers

from sklearn.model_selection import train_test_split

import numpy as np
import matplotlib.pyplot as plt

tf.__version__

# Load training and eval data from tf.keras
(train_data, train_labels), (test_data, test_labels) = \
    tf.keras.datasets.cifar10.load_data()

train_data, valid_data, train_labels, valid_labels = \
    train_test_split(train_data, train_labels, test_size=0.1, shuffle=True)

train_data = train_data / 255.
train_data = train_data.reshape([-1, 32, 32, 3])
train_data = train_data.astype(np.float32)
train_labels = train_labels.reshape([-1])
train_labels = train_labels.astype(np.int32)

valid_data = valid_data / 255.
valid_data = valid_data.reshape([-1, 32, 32, 3])
valid_data = valid_data.astype(np.float32)
valid_labels = valid_labels.reshape([-1])
valid_labels = valid_labels.astype(np.int32)

test_data = test_data / 255.
test_data = test_data.reshape([-1, 32, 32, 3])
test_data = test_data.astype(np.float32)
test_labels = test_labels.reshape([-1])
test_labels = test_labels.astype(np.int32)
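
After the split and preprocessing, a quick shape check is a useful sanity test (optional; the counts follow from CIFAR-10's 50,000/10,000 train/test split and the 10% validation cut above):

print(train_data.shape, train_labels.shape)  # expected: (45000, 32, 32, 3) (45000,)
print(valid_data.shape, valid_labels.shape)  # expected: (5000, 32, 32, 3) (5000,)
print(test_data.shape, test_labels.shape)    # expected: (10000, 32, 32, 3) (10000,)
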
def one_hot_label(image, label):
  label = tf.one_hot(label, depth=10)
  return image, label
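
one_hot_label turns each integer label into a 10-way one-hot vector so it matches the CategoricalCrossentropy loss used when compiling the model below. An equivalent alternative would keep the integer labels and skip this map entirely; a sketch:

# alternative: drop the one_hot_label map and compile with the sparse loss instead
# loss=tf.keras.losses.SparseCategoricalCrossentropy()
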
batch_size = 32
max_epochs = 30

# train pipeline: shuffle, one-hot encode, repeat, and batch
N = len(train_data)
train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
train_dataset = train_dataset.shuffle(buffer_size=10000)  # buffer is smaller than the 45,000 examples, so this is only a partial shuffle
train_dataset = train_dataset.map(one_hot_label)
train_dataset = train_dataset.repeat().batch(batch_size=batch_size)
print(train_dataset)

# validation pipeline: one-hot encode and batch
valid_dataset = tf.data.Dataset.from_tensor_slices((valid_data, valid_labels))
valid_dataset = valid_dataset.map(one_hot_label)
valid_dataset = valid_dataset.batch(batch_size=batch_size)
print(valid_dataset)

# test pipeline: one-hot encode and batch
test_dataset = tf.data.Dataset.from_tensor_slices((test_data, test_labels))
test_dataset = test_dataset.map(one_hot_label)
test_dataset = test_dataset.batch(batch_size=batch_size)
print(test_dataset)
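
One optional tweak, not in the original pipeline: prefetch lets tf.data prepare the next batch while the current one is being trained on, which usually hides input latency (assumes TF 2.4+ for tf.data.AUTOTUNE):

train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
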
index = 655
print("label = {}".format(train_labels[index]))
plt.imshow(train_data[index].reshape(32, 32, 3))
plt.colorbar()
#plt.gca().grid(False)
plt.show()
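
The commented-out block below is an alternative kept for reference: the same network idea written with the Keras functional API, using tf.concat for concatenation-based skip connections. The subclassed model further down is the one actually trained.
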
# inputs = layers.Input(shape=(32, 32, 3))

# x1 = layers.Conv2D(32, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(inputs)
# x1 = layers.BatchNormalization()(x1)
# x1 = layers.Activation('relu')(x1)
# x1 = layers.Conv2D(32, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x1)
# x1 = layers.BatchNormalization()(x1)
# x1 = layers.Activation('relu')(x1)
# x1_skip = layers.MaxPooling2D()(x1) # 16, 16

# x2 = layers.Conv2D(64, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x1_skip)
# x2 = layers.BatchNormalization()(x2)
# x2 = layers.Activation('relu')(x2)
# x2 = layers.Conv2D(64, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x2)
# x2 = layers.BatchNormalization()(x2)
# x2 = tf.concat([x2, x1_skip], -1)
# x2 = layers.Activation('relu')(x2)
# x2_skip = layers.MaxPooling2D()(x2) # 8, 8

# x3 = layers.Conv2D(128, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x2_skip)
# x3 = layers.BatchNormalization()(x3)
# x3 = layers.Activation('relu')(x3)
# x3 = layers.Conv2D(128, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x3)
# x3 = layers.BatchNormalization()(x3)
# x3 = layers.Activation('relu')(x3)
# x3 = layers.Conv2D(128, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x3)
# x3 = layers.BatchNormalization()(x3)
# x3 = tf.concat([x3, x2_skip], -1)
# x3 = layers.Activation('relu')(x3)
# x3_skip = layers.MaxPooling2D()(x3) # 4, 4

# x4 = layers.Conv2D(256, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x3_skip)
# x4 = layers.BatchNormalization()(x4)
# x4 = layers.Activation('relu')(x4)
# x4 = layers.Conv2D(256, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x4)
# x4 = layers.BatchNormalization()(x4)
# x4 = layers.Activation('relu')(x4)
# x4 = layers.Conv2D(256, 3, padding='same',
#                    kernel_initializer=tf.keras.initializers.HeNormal())(x4)
# x4 = layers.BatchNormalization()(x4)
# x4 = tf.concat([x4, x3_skip], -1)
# x4_skip = layers.Activation('relu')(x4)

# x = layers.Flatten()(x4_skip)
# x = layers.Dense(256, kernel_initializer=tf.keras.initializers.HeNormal())(x)
# x = layers.BatchNormalization()(x)
# x = layers.Activation('relu')(x)
# x = layers.Dropout(0.5)(x)
# x = layers.Dense(256, kernel_initializer=tf.keras.initializers.HeNormal())(x)
# x = layers.BatchNormalization()(x)
# x = layers.Activation('relu')(x)
# x = layers.Dropout(0.5)(x)
# x = layers.Dense(10, activation='softmax')(x)

# model = tf.keras.Model(inputs=inputs, outputs=x)

# Conv block: two Conv-BN-ReLU stages followed by 2x2 max pooling
class MyModel(tf.keras.Model):
  def __init__(self, filter_num):
    super(MyModel, self).__init__()
    self.conv1 = tf.keras.Sequential([
        layers.Conv2D(filter_num, 3, padding='same'),
        layers.BatchNormalization(),
        layers.Activation('relu'),
    ])
    self.conv2 = tf.keras.Sequential([
        layers.Conv2D(filter_num, 3, padding='same'),
        layers.BatchNormalization(),
        layers.Activation('relu'),
    ])
    self.pooling = layers.MaxPool2D()

  def call(self, x):
    x = self.conv1(x)
    x = self.conv2(x)
    x = self.pooling(x)

    return x
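
Each MyModel instance is one convolutional block: two Conv-BN-ReLU stages at a fixed filter count, then 2x2 max pooling, so every block halves the spatial resolution; the three blocks below take 32x32 down to 4x4. A quick shape check, as a sketch:

block = MyModel(32)
print(block(tf.zeros([1, 32, 32, 3])).shape)  # (1, 16, 16, 32): H and W halved by the pooling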

conv_block_1 = MyModel(32)
conv_block_2 = MyModel(64)
conv_block_3 = MyModel(128)

model = tf.keras.Sequential()
model.add(conv_block_1)
model.add(conv_block_2)
model.add(conv_block_3)
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Dense(256))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Dense(10, activation='softmax'))

# Before training, run a single forward pass in eager execution; this also builds the model.
predictions = model(train_data[0:1], training=False)
print("Predictions: ", predictions.numpy())

model.compile(optimizer=tf.keras.optimizers.Adam(3e-5),
              loss=tf.keras.losses.CategoricalCrossentropy(), 
              metrics=['accuracy'])
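
For reference, Keras's Adam optimizer defaults to learning_rate=1e-3; the 3e-5 used here is much smaller, which tends to make training slower but smoother. If the loss barely moves, the default is a reasonable thing to try; a sketch:

# alternative: the Keras default learning rate
# optimizer=tf.keras.optimizers.Adam()  # learning_rate defaults to 1e-3
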
model.summary()

# Train using the `tf.data.Dataset` pipelines built above.
history = model.fit(train_dataset,                     
                    steps_per_epoch=len(train_data) // batch_size,
                    epochs=max_epochs, 
                    validation_data=valid_dataset,
                    validation_steps=len(valid_data) // batch_size)
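
The run is fixed at max_epochs = 30. If the validation curves below flatten out early, Keras callbacks could stop training sooner and keep the best weights; a sketch (the patience value is an arbitrary choice):

early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=5, restore_best_weights=True)
# then: model.fit(..., callbacks=[early_stop])
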
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(max_epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

results = model.evaluate(test_dataset, steps=len(test_data) // batch_size)
# loss
print("loss value: {:.3f}".format(results[0]))
# accuracy
print("accuracy value: {:.4f}%".format(results[1]*100))

test_batch_size = 16
batch_index = np.random.choice(len(test_data), size=test_batch_size, replace=False)

batch_xs = test_data[batch_index]
batch_ys = test_labels[batch_index]
y_pred_ = model(batch_xs, training=False)

fig = plt.figure(figsize=(16, 10))
for i, (px, py) in enumerate(zip(batch_xs, y_pred_)):
  p = fig.add_subplot(4, 8, i+1)
  if np.argmax(py) == batch_ys[i]:
    p.set_title("y_pred: {}".format(np.argmax(py)), color='blue')
  else:
    p.set_title("y_pred: {}".format(np.argmax(py)), color='red')
  p.imshow(px.reshape(32, 32, 3))
  p.axis('off')
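
The titles above show raw class indices. CIFAR-10's classes in index order are airplane, automobile, bird, cat, deer, dog, frog, horse, ship, and truck, so the titles could be made human-readable; a sketch:

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# e.g.: p.set_title(class_names[np.argmax(py)], color=...)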