--19.인공신경망.ipynb--
MNIST 는 손글씨 숫자(0~9) 이미지로 구성된 대표적인 데이터셋,
FashionMNIST 는 숫자 대신 패션 아이템 이미지로 구성

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import tensorflow as tf
# Reproducibility: fix every TF/NumPy/Python seed and force deterministic ops.
tf.keras.utils.set_random_seed(42)
tf.config.experimental.enable_op_determinism()
from tensorflow import keras
# Fashion-MNIST: 28x28 grayscale clothing images, 10 classes.
(train_input, train_target), (test_input,test_target) = keras.datasets.fashion_mnist.load_data()
train_input.shape, train_target.shape  # expected (60000, 28, 28), (60000,)
test_input.shape, test_target.shape    # expected (10000, 28, 28), (10000,)
# Preview the first 10 training images in a single row.
# Fix: the loop body lost its indentation in the notebook export, which
# would raise an IndentationError — re-indented under the for statement.
fig, axs = plt.subplots(1, 10 ,figsize=(10,10))
for i in range(10) :
    axs[i].imshow(train_input[i], cmap='gray')  # each sample is a 28x28 grayscale image
    axs[i].axis('off')                          # hide ticks/frames for a clean strip
plt.show()
# First 10 labels: integer class ids (0-9), one per fashion category.
[train_target[i] for i in range(10)]
# Class distribution — expected to be balanced across the 10 classes.
np.unique(train_target, return_counts=True)
# Scale pixel values from [0, 255] down to [0, 1].
train_scaled = train_input / 255.0
train_scaled = train_scaled.reshape(-1, 28*28)  # flatten each 28x28 image into a 784-vector
train_scaled.shape
np.min(train_scaled), np.max(train_scaled)  # sanity check: values lie within [0.0, 1.0]
# Baseline before the neural network: logistic regression trained with SGD.
from sklearn.model_selection import cross_validate
from sklearn.linear_model import SGDClassifier
# loss='log_loss' => logistic regression; max_iter=5 keeps training short (may underfit).
sc = SGDClassifier(loss='log_loss', max_iter=5, random_state=42)
scores = cross_validate(sc, train_scaled, train_target, n_jobs=-1)
np.mean(scores['test_score'])  # mean cross-validated accuracy of the baseline
"""
z = a x (Weight) + b x (Length) + c x (Diagonal) + d x (Height) + e x (Width) + f
위 식을 FashionMNIST 데이터에 맞게 변형하면 다음과 같을거다.
▶ 첫번째 레이블 '티셔츠'에 대한 선형방정식
z_티셔츠 = w1 x (픽셀1) + w2 x (픽셀2) + .... + w784 x (픽셀784) + b
▶ 두번째 레이블 '바지'에 대한 선형방정식
z_바지 = w1' x (픽셀1) + w2' x (픽셀2) + .... + w784' x (픽셀784) + b'
"""
None
Artificial Neural Network (ANN)
# Hold out 20% of the (already scaled) training data as a validation set.
from sklearn.model_selection import train_test_split
train_scaled, val_scaled, train_target, val_target = \
train_test_split(train_scaled, train_target, test_size=0.2, random_state=42)
train_scaled.shape, train_target.shape  # 80% kept for training
val_scaled.shape, val_target.shape      # 20% held out for validation
밀집층 (Dense layer) — 모든 입력이 각 뉴런에 완전히 연결된 층
# Output layer only: 10 neurons (one per class); softmax turns the scores
# into class probabilities.
dense = keras.layers.Dense(10, activation='softmax', input_shape=(784,))
# Dense(number of neurons, activation applied to the outputs, input size)
model = keras.Sequential(dense) # build a network model holding the dense layer
# Targets are integer class ids (not one-hot), hence sparse_categorical_crossentropy.
model.compile(loss='sparse_categorical_crossentropy', metrics = 'accuracy')
train_target[:10]
model.fit(train_scaled, train_target, epochs=5)
model.evaluate(val_scaled, val_target)
# Rebuild and retrain the identical model from fresh weights to repeat the run.
dense = keras.layers.Dense(10, activation='softmax', input_shape=(784,))
model = keras.Sequential(dense)
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
model.evaluate(val_scaled, val_target)
여러 개의 layer 추가해보기
# Hidden layer: 100 sigmoid neurons; output layer: 10 softmax neurons.
dense1 = keras.layers.Dense(100, activation='sigmoid', input_shape=(784,))
dense2 = keras.layers.Dense(10, activation='softmax')
model = keras.Sequential([dense1, dense2]) # note: the output layer must come last
model.summary()
"""
↑ 모델에 들어있는 layer의 순서대로 나열
각 layer 마다 이름, 클래스, 출력크기, 모델 파라미터 개수가 출력.
"""
None
# Same architecture, but the layers and the model carry explicit names
# so they are recognizable in summary().
model = keras.Sequential([
keras.layers.Dense(100, activation='sigmoid', input_shape=(784,), name='hidden'),
keras.layers.Dense(10, activation='softmax', name='output')
], name='Fashion MNIST 모델')
# NOTE(review): the model name contains a space and non-ASCII characters; fine
# for summary(), but verify before exporting/serving where names become identifiers.
model.summary()
# Equivalent model built incrementally with add() instead of a layer list.
model = keras.Sequential()
model.add(keras.layers.Dense(100, activation='sigmoid', input_shape=(784,), name='hidden'))
model.add(keras.layers.Dense(10, activation='softmax', name='output'))
model.summary()
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
"""
Epoch 1/5
1500/1500 [==============================] - 4s 2ms/step - loss: 0.6069 - accuracy: 0.7947
Epoch 2/5
1500/1500 [==============================] - 3s 2ms/step - loss: 0.4742 - accuracy: 0.8382
Epoch 3/5
1500/1500 [==============================] - 3s 2ms/step - loss: 0.4508 - accuracy: 0.8474
Epoch 4/5
1500/1500 [==============================] - 3s 2ms/step - loss: 0.4367 - accuracy: 0.8527
Epoch 5/5
1500/1500 [==============================] - 3s 2ms/step - loss: 0.4280 - accuracy: 0.8555
<keras.src.callbacks.History at 0x798055148760>
Epoch 1/5
1500/1500 [==============================] - 4s 3ms/step - loss: 0.5711 - accuracy: 0.8067
Epoch 2/5
1500/1500 [==============================] - 5s 4ms/step - loss: 0.4134 - accuracy: 0.8506
Epoch 3/5
1500/1500 [==============================] - 4s 3ms/step - loss: 0.3792 - accuracy: 0.8641
Epoch 4/5
1500/1500 [==============================] - 4s 2ms/step - loss: 0.3548 - accuracy: 0.8719
Epoch 5/5
1500/1500 [==============================] - 5s 3ms/step - loss: 0.3364 - accuracy: 0.8771
<keras.src.callbacks.History at 0x7980544cb670>
"""
None

# Move the reshape into the model: Flatten unrolls 28x28 -> 784 and has
# no learnable parameters, so raw image-shaped input can be fed directly.
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28,28)))
model.add(keras.layers.Dense(100, activation='relu'))  # ReLU hidden layer instead of sigmoid
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()
"""
Flatten을 사용하는 장점
# Reload the raw data and skip the manual reshape — Flatten handles it in-model.
(train_input, train_target), (test_input,test_target) = \
keras.datasets.fashion_mnist.load_data()
train_scaled = train_input /255.0  # scale only; keep the (N, 28, 28) image shape
train_scaled, val_scaled, train_target, val_target =\
train_test_split(train_scaled, train_target, test_size=0.2, random_state=42)
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
model.evaluate(val_scaled, val_target)
(이미지 누락: 원본 노트북의 그림)

(이미지 누락)
(이미지 누락)
momentum — 이전 업데이트 방향을 관성처럼 반영해 진동을 줄이는 기법
(이미지 누락)
learning rate — 한 번의 파라미터 업데이트에서 이동하는 보폭(학습률)
# An optimizer can be passed to compile() by string name...
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics='accuracy')
# ...or as an instance, which allows setting hyperparameters explicitly.
sgd = keras.optimizers.SGD()
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics='accuracy')
# NOTE(review): created with learning_rate=0.1 but never passed to compile() below.
sgd = keras.optimizers.SGD(learning_rate=0.1)
adagrad = keras.optimizers.Adagrad() # hyperparameters may be given to the constructor
model.compile(optimizer=adagrad, loss='sparse_categorical_crossentropy', metrics='accuracy')
rmsprop = keras.optimizers.RMSprop()
model.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics='accuracy')
# Final model: Flatten + 100-unit ReLU hidden layer + 10-unit softmax output,
# trained with the Adam optimizer.
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28,28)))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
model.evaluate(val_scaled, val_target)  # returns [loss, accuracy] on the validation set