from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784', version=1, parser='auto', as_frame=False)
mnist.keys()
X, y = mnist["data"], mnist["target"]
print(X.shape) # 70,000 images with 784 (28x28) features; each feature is a pixel intensity from 0 (white) to 255 (black)
print(y.shape)
import matplotlib as mpl
import matplotlib.pyplot as plt
some_digit = X[0]
some_digit_image = some_digit.reshape(28,28)
plt.imshow(some_digit_image, cmap="binary")
plt.axis("off")
plt.show() # display the image X[0]
y[0] # its actual label
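# A quick wider look at the data (a minimal sketch, not in the original):
# plot the first ten training images with their labels.
fig, axes = plt.subplots(1, 10, figsize=(10, 1.5))
for ax, image, label in zip(axes, X[:10], y[:10]):
    ax.imshow(image.reshape(28, 28), cmap="binary")
    ax.set_title(label)
    ax.axis("off")
plt.show()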
import numpy as np
y = y.astype(np.uint8) # the labels are strings, so convert y to integers
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:] # split into training and test sets
# Binary Classifier
y_train_5 = (y_train == 5) # True for all 5s, False for every other digit
y_test_5 = (y_test == 5)
# SGD (Stochastic Gradient Descent) Classifier
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X_train, y_train_5)
sgd_clf.predict([some_digit]) # correctly predicts True (the image is a 5)
# 1. Measuring Accuracy Using Cross-Validation
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
skfolds = StratifiedKFold(n_splits=3, random_state=42, shuffle=True) # 3-fold cross-validation with stratified sampling
for train_index, test_index in skfolds.split(X_train, y_train_5):
    clone_clf = clone(sgd_clf) # fresh, untrained copy of the classifier for each fold
    X_train_folds = X_train[train_index]
    y_train_folds = y_train_5[train_index]
    X_test_fold = X_train[test_index]
    y_test_fold = y_train_5[test_index]
    clone_clf.fit(X_train_folds, y_train_folds)
    y_pred = clone_clf.predict(X_test_fold)
    n_correct = sum(y_pred == y_test_fold)
    print(n_correct / len(y_pred)) # accuracy on this fold
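# Sanity check (a sketch, not in the original): stratified splitting keeps the
# class ratio roughly constant, so every fold contains about the same ~9% of 5s
# as the full training set.
print(y_train_5.mean()) # overall positive ratio
for train_index, test_index in skfolds.split(X_train, y_train_5):
    print(y_train_5[test_index].mean()) # per-fold positive ratio, nearly identical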
# The same per-fold accuracies, computed by cross_val_score
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
    def fit(self, X, y=None):
        return self
    def predict(self, X):
        return np.zeros((len(X), 1), dtype=bool) # always predict "not 5"
never_5_clf = Never5Classifier()
cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy") # accuracy comes out above 90% -> accuracy is misleading on skewed datasets
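# Why ~90%: only about 10% of the images are 5s, so always predicting "not 5"
# is right about 90% of the time. A one-line check, plus scikit-learn's
# built-in equivalent (a sketch, not in the original):
print(1 - y_train_5.mean()) # fraction of non-5 images, ~0.91
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent") # always predicts the majority class (False)
cross_val_score(dummy_clf, X_train, y_train_5, cv=3, scoring="accuracy")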
# 2. Using the Confusion Matrix
from sklearn.model_selection import cross_val_predict
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train_5, y_train_pred)
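# Layout reminder (a sketch, not in the original): scikit-learn orders classes
# negative-first, so rows are [actual non-5, actual 5] and columns are
# [predicted non-5, predicted 5].
tn, fp, fn, tp = confusion_matrix(y_train_5, y_train_pred).ravel()
print(tn, fp, fn, tp)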
y_train_perfect_predictions = y_train_5 # pretend we had a perfect classifier
confusion_matrix(y_train_5, y_train_perfect_predictions)
# 3. Precision, Recall, and F1 Score
from sklearn.metrics import precision_score, recall_score
print(precision_score(y_train_5, y_train_pred)) # = TP / (TP + FP)
print(recall_score(y_train_5, y_train_pred)) # = TP / (TP + FN)
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred) # F1 = 2 × Precision × Recall / (Precision + Recall)
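# Verifying the formula by hand (a sketch, not in the original):
p = precision_score(y_train_5, y_train_pred)
r = recall_score(y_train_5, y_train_pred)
print(2 * p * r / (p + r)) # matches f1_score above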
# Precision/Recall Trade-off
y_scores = sgd_clf.decision_function([some_digit])
print(y_scores)
threshold = 0
y_some_digit_pred = (y_scores > threshold)
print(y_some_digit_pred)
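# Note: for a binary SGDClassifier, predict() is exactly decision_function() > 0,
# so the comparison above reproduces sgd_clf.predict([some_digit]).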
# Raising the threshold decreases recall
threshold = 8000
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred # now False: the same 5 is missed at the higher threshold
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function") # decision scores for every training instance
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
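# precision_recall_curve returns one more precision/recall value than thresholds
# (the final point is precision=1, recall=0), which is why the plot function
# below slices with [:-1].
print(len(precisions), len(recalls), len(thresholds))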
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    plt.plot(thresholds, precisions[:-1], "b--", label="precision")
    plt.plot(thresholds, recalls[:-1], "g--", label="recall")
    plt.legend() # without this call the labels above are never displayed
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
threshold_90_precision = thresholds[np.argmax(precisions >= 0.9)] # argmax on a boolean array returns the first True: the lowest threshold with >= 90% precision
y_train_pred_90 = (y_scores >= threshold_90_precision)
print(precision_score(y_train_5, y_train_pred_90))
print(recall_score(y_train_5, y_train_pred_90))
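# Plotting precision directly against recall shows the trade-off in one curve
# (a sketch in the spirit of the book's PR-curve figure, not in the original):
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()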
# ROC Curve
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
def plot_roc_curve(fpr, tpr, label=None):
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--') # diagonal = a purely random classifier
plot_roc_curve(fpr, tpr)
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
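# Sanity check (a sketch, not in the original): AUC is literally the area under
# the ROC curve, so trapezoidal integration over (fpr, tpr) reproduces it.
from sklearn.metrics import auc
print(auc(fpr, tpr)) # same value as roc_auc_score(y_train_5, y_scores)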
Reference
Hands-On Machine Learning with Scikit-Learn, Keras & TensorFlow, 2nd Edition, Ch. 3: Classification