face
┣ mysite
┃ ┣ config
┃ ┃ ┣ __pycache__
┃ ┃ ┃ ┣ __init__.cpython-310.pyc
┃ ┃ ┃ ┣ __init__.cpython-39.pyc
┃ ┃ ┃ ┣ settings.cpython-310.pyc
┃ ┃ ┃ ┣ settings.cpython-39.pyc
┃ ┃ ┃ ┣ urls.cpython-310.pyc
┃ ┃ ┃ ┣ urls.cpython-39.pyc
┃ ┃ ┃ ┣ wsgi.cpython-310.pyc
┃ ┃ ┃ ┗ wsgi.cpython-39.pyc
┃ ┃ ┣ __init__.py
┃ ┃ ┣ asgi.py
┃ ┃ ┣ settings.py
┃ ┃ ┣ urls.py
┃ ┃ ┗ wsgi.py
┃ ┣ star
┃ ┃ ┣ __pycache__
┃ ┃ ┃ ┣ __init__.cpython-310.pyc
┃ ┃ ┃ ┣ __init__.cpython-39.pyc
┃ ┃ ┃ ┣ admin.cpython-310.pyc
┃ ┃ ┃ ┣ admin.cpython-39.pyc
┃ ┃ ┃ ┣ apps.cpython-310.pyc
┃ ┃ ┃ ┣ apps.cpython-39.pyc
┃ ┃ ┃ ┣ forms.cpython-310.pyc
┃ ┃ ┃ ┣ models.cpython-310.pyc
┃ ┃ ┃ ┣ models.cpython-39.pyc
┃ ┃ ┃ ┣ urls.cpython-310.pyc
┃ ┃ ┃ ┣ urls.cpython-39.pyc
┃ ┃ ┃ ┣ views.cpython-310.pyc
┃ ┃ ┃ ┗ views.cpython-39.pyc
┃ ┃ ┣ migrations
┃ ┃ ┃ ┣ __pycache__
┃ ┃ ┃ ┃ ┣ __init__.cpython-310.pyc
┃ ┃ ┃ ┃ ┗ __init__.cpython-39.pyc
┃ ┃ ┃ ┗ __init__.py
┃ ┃ ┣ .DS_Store
┃ ┃ ┣ __init__.py
┃ ┃ ┣ admin.py
┃ ┃ ┣ apps.py
┃ ┃ ┣ forms.py
┃ ┃ ┣ models.py
┃ ┃ ┣ team3_new.h5
┃ ┃ ┣ tests.py
┃ ┃ ┣ urls.py
┃ ┃ ┗ views.py
┃ ┣ static
┃ ┃ ┣ assets
┃ ┃ ┃ ┣ img
┃ ┃ ┃ ┃ ┣ .DS_Store
┃ ┃ ┃ ┃ ┣ bg-callout.jpg
┃ ┃ ┃ ┃ ┣ bg-masthead.jpg
┃ ┃ ┃ ┃ ┣ portfolio-1.jpg
┃ ┃ ┃ ┃ ┣ portfolio-2.jpg
┃ ┃ ┃ ┃ ┣ portfolio-3.jpg
┃ ┃ ┃ ┃ ┗ portfolio-4.jpg
┃ ┃ ┃ ┣ .DS_Store
┃ ┃ ┃ ┗ favicon.ico
┃ ┃ ┣ css
┃ ┃ ┃ ┣ style.css
┃ ┃ ┃ ┗ styles.css
┃ ┃ ┣ js
┃ ┃ ┃ ┗ scripts.js
┃ ┃ ┗ .DS_Store
┃ ┣ templates
┃ ┃ ┣ index.html
┃ ┃ ┗ result.html
┃ ┣ .DS_Store
┃ ┣ db.sqlite3
┃ ┣ manage.py
┃ ┗ requirements.txt
┗ .DS_Store
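The tree above is a Django project (a config package plus a star app) that bundles the trained Keras model team3_new.h5 together with the index.html and result.html templates. A minimal sketch of how star/views.py might wire these together; the form field name, preprocessing, and response context are assumptions, and only the file names come from the tree:
import numpy as np
from PIL import Image
from django.shortcuts import render
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.vgg16 import preprocess_input

model = load_model('star/team3_new.h5')  # load the trained weights once at import time

def predict(request):
    # On upload, preprocess the photo the same way the model was trained, then render the result
    if request.method == 'POST' and request.FILES.get('photo'):
        img = Image.open(request.FILES['photo']).convert('RGB').resize((224, 224))
        batch = preprocess_input(np.expand_dims(np.asarray(img, dtype='float32'), axis=0))
        probs = model.predict(batch)[0]
        return render(request, 'result.html', {'probs': probs.tolist()})
    return render(request, 'index.html')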
# Import libraries
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.image import pad_to_bounding_box
from tensorflow.image import central_crop
from tensorflow.image import resize
# Load the image
bgd = image.load_img('./train/train_1/4.jpg')
bgd_vector = np.asarray(image.img_to_array(bgd))
bgd_vector = bgd_vector/255
# Check the array shape
bgd_vector.shape
# Display the image
plt.imshow(bgd_vector)
plt.show()
The image file is converted into a NumPy array with np.asarray(), and every element of bgd_vector is then divided by 255 to normalize the pixel values to the 0-1 range.
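A quick sanity check that the scaling landed in the expected range (purely illustrative, not part of the original notebook):
print(bgd_vector.min(), bgd_vector.max())  # both values should lie between 0.0 and 1.0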
from tensorflow.keras.applications.vgg16 import VGG16
# Set the weights and include_top parameters
model = VGG16(weights='imagenet', include_top=True)
model.summary()
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.keras.applications.imagenet_utils import decode_predictions
from tensorflow.keras.applications.imagenet_utils import preprocess_input
import numpy as np
#from google.colab import drive
#drive.mount('/content/gdrive')
img = Image.open('./train/train_1/4.jpg')
img.size
plt.imshow(np.asarray(img))
# Check the input image size VGG16 expects
model.layers[0].input_shape
# Resize the image
target_size = 224
img = img.resize((target_size, target_size)) # resize from 280x280 to 224x224
plt.imshow(np.asarray(img))
img.size # confirm the resized dimensions
# Convert to a NumPy array
np_img = image.img_to_array(img)
np_img.shape #(224, 224, 3)
# Add a batch dimension (4-D input)
img_batch = np.expand_dims(np_img, axis=0)
img_batch.shape # (1, 224, 224, 3)
#feature normalization
pre_processed = preprocess_input(img_batch)
pre_processed
y_preds = model.predict(pre_processed)
y_preds.shape # 1000 possible output classes
np.set_printoptions(suppress=True, precision=10)
y_preds
# Highest predicted probability
np.max(y_preds)
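The maximum alone does not show which of the 1000 ImageNet classes it belongs to; decode_predictions, already imported above, maps the output vector to readable labels. A minimal sketch:
# Print the top-3 ImageNet labels and their probabilities
for _, label, prob in decode_predictions(y_preds, top=3)[0]:
    print(label, float(prob))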
TRAIN_DATA_DIR = './train'
VALIDATION_DATA_DIR = './validation'
TEST_DATA_DIR = './test'
TRAIN_SAMPLES = 800*2
VALIDATION_SAMPLES = 400*2
NUM_CLASSES = 2
IMG_WIDTH, IMG_HEIGHT = 224, 224
BATCH_SIZE = 64
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   zoom_range=0.2)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(TRAIN_DATA_DIR,
                                                    target_size=(IMG_WIDTH, IMG_HEIGHT),
                                                    batch_size=BATCH_SIZE,
                                                    shuffle=True,
                                                    seed=12345,
                                                    class_mode='categorical')
validation_generator = val_datagen.flow_from_directory(
    VALIDATION_DATA_DIR,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BATCH_SIZE,
    shuffle=False,
    class_mode='categorical')
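Since class_mode='categorical' derives one class per subfolder, it is worth confirming the label mapping the generators inferred; the folder names printed here are whatever exists under ./train and ./validation in this project:
# Folder-name -> index mapping used for the one-hot labels
print(train_generator.class_indices)
print(validation_generator.class_indices)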
from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model

def model_maker():
    # Frozen VGG16 backbone (ImageNet weights, no classifier head) + a small trainable head
    base_model = VGG16(include_top=False, input_shape=(IMG_WIDTH, IMG_HEIGHT, 3))
    print(len(base_model.layers))
    for layer in base_model.layers[:]:
        layer.trainable = False
    input = Input(shape=(IMG_WIDTH, IMG_HEIGHT, 3))
    custom_model = base_model(input)
    custom_model = GlobalAveragePooling2D()(custom_model)
    custom_model = Dense(32, activation='relu')(custom_model)
    predictions = Dense(NUM_CLASSES, activation='softmax')(custom_model)
    return Model(inputs=input, outputs=predictions)
model_final = model_maker()
model_final.summary()
model_final.compile(loss='categorical_crossentropy',
                    optimizer=tf.keras.optimizers.Adam(0.001),
                    metrics=['acc'])
history = model_final.fit(
    train_generator,
    steps_per_epoch=TRAIN_SAMPLES // BATCH_SIZE,  # number of weight updates per epoch
    epochs=10,
    validation_data=validation_generator,
    validation_steps=VALIDATION_SAMPLES // BATCH_SIZE)
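matplotlib is already imported, so the recorded history can be plotted to spot over- or under-fitting; a minimal sketch (the keys may be 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc' depending on the TensorFlow version):
# Learning curves from the fit() call above
plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.show()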
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(  # use test_datagen, not val_datagen
    TEST_DATA_DIR,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    batch_size=BATCH_SIZE,
    shuffle=False,
    class_mode='categorical')
model_final.evaluate(test_generator, steps=800 // BATCH_SIZE)
# preprocessed_img: a (1, 224, 224, 3) batch prepared with preprocess_input, as above
prediction = model_final.predict(preprocessed_img)
print(np.array(prediction[0]))
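The raw softmax vector is hard to read on its own; the sketch below maps the highest score back to a folder name via train_generator.class_indices and then saves the network. The save call itself is an assumption, with the file name taken from team3_new.h5 in the tree above:
# Map the highest softmax score back to the class (folder) name
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}
print(idx_to_class[int(np.argmax(prediction[0]))])
# Save the fine-tuned model (file name matches star/team3_new.h5 in the tree)
model_final.save('team3_new.h5')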
- Photo data was collected with a custom crawling function, but the shortage of celebrity photos with clearly visible faces left the dataset too small and the model under-trained.
- This should be replaced by using an existing face-image library or by improving the collection function.
- Training ran on CPU and was too slow; the work should move to a GPU-backed environment such as an AWS machine learning instance.