Convolution Layer
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import Adam
# Load the MNIST dataset and convert it to the form the network expects
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 28, 28, 1) # reshape to (samples, height, width, channels)
x_test = x_test.reshape(10000, 28, 28, 1)
x_train = x_train.astype(np.float32)/255.0 # scale pixel values to [0, 1] as float32
x_test = x_test.astype(np.float32)/255.0
y_train = tf.keras.utils.to_categorical(y_train, 10) # convert labels to one-hot vectors
y_test = tf.keras.utils.to_categorical(y_test, 10)
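# Illustrative sanity check (not in the original listing): to_categorical
# turns an integer label into a length-10 one-hot vector, e.g. label 3:
print(tf.keras.utils.to_categorical([3], 10)) # [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]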
# Design a LeNet-5-style CNN (a simplified variant; classic LeNet-5 uses 6/16/120 filters)
cnn = Sequential()
cnn.add(Conv2D(6, (5, 5), padding='same', activation='relu', input_shape=(28, 28, 1)))
cnn.add(MaxPooling2D(pool_size=(2, 2)))
cnn.add(Conv2D(6, (5, 5), padding='same', activation='relu')) # convolution
cnn.add(MaxPooling2D(pool_size=(2, 2))) # pooling
cnn.add(Conv2D(6, (5, 5), padding='same', activation='relu'))
cnn.add(Flatten())
cnn.add(Dense(84, activation='relu')) # fully connected
cnn.add(Dense(10, activation='softmax')) # classify the ten digits 0-9
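# Optional check (added for illustration): summary() prints each layer's
# output shape and parameter count, useful for verifying the design.
cnn.summary()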
# Train the model
cnn.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
hist = cnn.fit(x_train, y_train, batch_size=128, epochs=30, validation_data=(x_test, y_test), verbose=2)
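# Hedged sketch (assumption; the logged run below did not use it): early
# stopping on val_loss would halt training once validation performance
# stops improving, instead of always running all 30 epochs.
# from tensorflow.keras.callbacks import EarlyStopping
# es = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
# hist = cnn.fit(x_train, y_train, batch_size=128, epochs=30,
#                validation_data=(x_test, y_test), verbose=2, callbacks=[es])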
# Evaluate the model on the test set
res = cnn.evaluate(x_test, y_test, verbose=0)
print("Accuracy:", res[1]*100)
# Accuracy curves
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.grid()
plt.show()
# Loss curves
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='best')
plt.grid()
plt.show()
Epoch 15/30
469/469 - 8s - loss: 0.0201 - accuracy: 0.9931 - val_loss: 0.0370 - val_accuracy: 0.9881 - 8s/epoch - 17ms/step
Epoch 16/30
469/469 - 8s - loss: 0.0184 - accuracy: 0.9944 - val_loss: 0.0449 - val_accuracy: 0.9872 - 8s/epoch - 17ms/step
Epoch 17/30
469/469 - 8s - loss: 0.0177 - accuracy: 0.9938 - val_loss: 0.0410 - val_accuracy: 0.9882 - 8s/epoch - 17ms/step
Epoch 18/30
469/469 - 8s - loss: 0.0173 - accuracy: 0.9937 - val_loss: 0.0427 - val_accuracy: 0.9886 - 8s/epoch - 17ms/step
Epoch 19/30
…
Epoch 26/30
469/469 - 8s - loss: 0.0095 - accuracy: 0.9966 - val_loss: 0.0467 - val_accuracy: 0.9875 - 8s/epoch - 17ms/step
Epoch 27/30
469/469 - 8s - loss: 0.0090 - accuracy: 0.9969 - val_loss: 0.0515 - val_accuracy: 0.9878 - 8s/epoch - 17ms/step
Epoch 28/30
469/469 - 8s - loss: 0.0080 - accuracy: 0.9975 - val_loss: 0.0509 - val_accuracy: 0.9865 - 8s/epoch - 17ms/step
Epoch 29/30
469/469 - 8s - loss: 0.0087 - accuracy: 0.9971 - val_loss: 0.0563 - val_accuracy: 0.9868 - 8s/epoch - 17ms/step
Epoch 30/30
469/469 - 8s - loss: 0.0077 - accuracy: 0.9976 - val_loss: 0.0578 - val_accuracy: 0.9871 - 8s/epoch - 17ms/step
Accuracy: 98.71000051498413
# Example: applying weight decay (L1/L2 regularization) to a model.
# In the log above, val_loss climbs in later epochs while the training loss
# keeps falling, a sign of overfitting that weight decay helps counter.
from tensorflow.keras import regularizers
# Build a small fully connected model with regularized weights.
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(64, activation='relu',
          kernel_regularizer=regularizers.l2(0.01),    # L2 penalty on the weights
          activity_regularizer=regularizers.l1(0.01)), # L1 penalty on the activations
    Dense(10, activation='softmax')
])
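# Sketch (assumption, not from the original): the same idea applied to every
# weighted layer of the LeNet-5-style CNN above; cnn_wd is a hypothetical name.
cnn_wd = Sequential([
    Conv2D(6, (5, 5), padding='same', activation='relu',
           kernel_regularizer=regularizers.l2(0.01), input_shape=(28, 28, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(6, (5, 5), padding='same', activation='relu',
           kernel_regularizer=regularizers.l2(0.01)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(6, (5, 5), padding='same', activation='relu',
           kernel_regularizer=regularizers.l2(0.01)),
    Flatten(),
    Dense(84, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dense(10, activation='softmax')
])
cnn_wd.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])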