# Load the MNIST handwritten-digit dataset and scale pixels into [0, 1].
import tensorflow as tf

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Pixel values are 0..255, so dividing by 255 normalizes them to [0, 1].
x_train = x_train / 255
x_test = x_test / 255
# Reshape the data from 2-D 28x28 images to 3-D 28x28x1 images (add a channel axis).
# BUG FIX: the scaled arrays created above are the lowercase x_train/x_test;
# the original lines read from undefined X_train/X_test and raised NameError.
# Add a trailing channel axis so the arrays match the Conv2D input_shape
# of (28, 28, 1). Using -1 lets NumPy infer the sample count (60000/10000)
# instead of hard-coding it.
X_train = x_train.reshape((-1, 28, 28, 1))
X_test = x_test.reshape((-1, 28, 28, 1))
from tensorflow.keras import layers, models

# CNN for MNIST digit classification: two conv/pool stages, dropout for
# regularization, then a fully-connected head with a 10-way softmax.
model = models.Sequential()
model.add(layers.Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
                        padding='same', activation='relu',
                        input_shape=(28, 28, 1)))
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(layers.Conv2D(64, (2, 2), activation='relu', padding='same'))
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(1000, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Model summary (output of model.summary()):
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 28, 28, 32) 832
max_pooling2d (MaxPooling2D (None, 14, 14, 32) 0
)
conv2d_1 (Conv2D) (None, 14, 14, 64) 8256
max_pooling2d_1 (MaxPooling (None, 7, 7, 64) 0
2D)
dropout (Dropout) (None, 7, 7, 64) 0
flatten (Flatten) (None, 3136) 0
dense (Dense) (None, 1000) 3137000
dense_1 (Dense) (None, 10) 10010
=================================================================
Total params: 3,156,098
Trainable params: 3,156,098
Non-trainable params: 0
# Compile with Adam and sparse categorical cross-entropy (labels are
# integer class ids, not one-hot vectors), then train for 5 epochs while
# validating against the test split after each epoch.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)
hist = model.fit(
    X_train, y_train,
    validation_data=(X_test, y_test),
    epochs=5,
    verbose=1,
)

# Final evaluation on the held-out test set; returns [loss, accuracy].
score = model.evaluate(X_test, y_test)
score
# Example output — final-epoch metrics and the [loss, accuracy] score:
# loss: 0.0297 - accuracy: 0.9906
# [0.02974139340221882, 0.9905999898910522]
# Persist the trained model (architecture + weights + optimizer state)
# to disk in HDF5 format, implied by the .h5 extension.
model.save('MNIST_CNN_model.h5')