Ensemble: a method that trains multiple models, collects a prediction from each, and combines those predictions into one final prediction
Bagging (bootstrap aggregating)
: in spirit, divide and conquer and combine
: draw several bootstrap samples (random sampling with replacement) from the training set, train a model on each sample, then combine the predictions (a mechanics sketch follows this list)
-> Random Forest, Extra Trees
Boosting
: build one strong model by sequentially adding weak learners, each new learner correcting the errors of the ensemble so far (see the residual-fitting sketch in the gradient boosting part below)
-> Gradient Boosting
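A minimal sketch of the bagging mechanics; the make_classification toy data and the 10-tree loop are illustrative assumptions, not part of the wine example below. Each tree trains on its own bootstrap sample (sampling with replacement), and the trees' predictions are combined by majority vote.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=500, random_state=42)
rng = np.random.default_rng(42)

trees = []
for _ in range(10):
    idx = rng.integers(0, len(X), size=len(X))   # bootstrap sample: draw WITH replacement
    trees.append(DecisionTreeClassifier(random_state=42).fit(X[idx], y[idx]))

votes = np.stack([t.predict(X) for t in trees])  # shape (10, 500): one vote per tree
ensemble_pred = (votes.mean(axis=0) > 0.5).astype(int)
print((ensemble_pred == y).mean())               # training accuracy of the majority vote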
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
wine = pd.read_csv('https://bit.ly/wine_csv_data')   # red/white wine dataset
data = wine[['alcohol', 'sugar', 'pH']].to_numpy()   # three input features
target = wine['class'].to_numpy()                    # binary target: 0 = red, 1 = white
train_input, test_input, train_target, test_target = train_test_split(
    data, target, random_state=42, test_size=0.2    # hold out 20% as the test set
)
from sklearn.model_selection import cross_validate
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_jobs=-1, random_state=42)   # 100 trees by default
scores = cross_validate(rf, train_input, train_target,
                        return_train_score=True, n_jobs=-1)  # 5-fold CV by default
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
rf.fit(train_input, train_target)
print(rf.feature_importances_)
0.9973541965122431 // training set score
0.8905151032797809 // validation set score
[0.23167441 0.50039841 0.26792718] // feature importances: [alcohol, sugar, pH]
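A quick look under the hood, assuming the rf fitted above: a random forest is just a collection of decision trees, each trained on its own bootstrap sample, with a random subset of features considered at each split (sqrt of the feature count for classification).

print(len(rf.estimators_))             # 100 trees by default
print(rf.estimators_[0].get_depth())   # individual trees grow deep and overfit; the vote averages that out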
rf = RandomForestClassifier(oob_score=True, n_jobs=-1, random_state=42)  # score each tree on its out-of-bag samples
rf.fit(train_input, train_target)
print(rf.oob_score_)
0.8934000384837406 // OOB score, close to the validation score above
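Why OOB scoring works: with sampling with replacement, each bootstrap sample misses roughly (1 - 1/n)^n ≈ 1/e ≈ 36.8% of the training rows, and those out-of-bag rows act as a free validation set for the tree that never saw them. A back-of-the-envelope check:

n = len(train_input)
print((1 - 1/n) ** n)   # ≈ 0.3679, the expected out-of-bag fraction per tree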
from sklearn.ensemble import ExtraTreesClassifier
et = ExtraTreesClassifier(n_jobs=-1, random_state=42)
scores = cross_validate(et, train_input, train_target,
return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
et.fit(train_input, train_target)
print(et.feature_importances_)
0.9974503966084433 // training set score
0.8887848893166506 // validation set score
[0.20183568 0.52242907 0.27573525] // feature importances: [alcohol, sugar, pH]
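Extra trees differ from random forest in two ways: each tree is trained on the whole training set (no bootstrap sampling), and split thresholds are drawn at random instead of searched for the best one. The random-split half can be reproduced on a single tree via splitter='random'; a minimal sketch using the data above:

from sklearn.tree import DecisionTreeClassifier

dt = DecisionTreeClassifier(splitter='random', random_state=42)
dt.fit(train_input, train_target)
print(dt.score(train_input, train_target))   # one randomized tree; the ensemble averages many of these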
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier(random_state=42)
scores = cross_validate(gb, train_input, train_target,
return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
0.8881086892152563 // training set score
0.8720430147331015 // validation set score
gb = GradientBoostingClassifier(n_estimators=500, learning_rate=0.2, random_state=42)
scores = cross_validate(gb, train_input, train_target,
                        return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
0.9464595437171814 // training set score
0.8780082549788999 // validation set score
-> even with 5x more trees and a higher learning rate, overfitting is kept in check
gb.fit(train_input, train_target)
print(gb.feature_importances_)
[0.15872278 0.68010884 0.16116839] // sugar dominates, even more than in the random forest
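The mechanism behind gradient boosting, sketched for the squared-loss regression case; the sine toy data and the 100-tree loop here are illustrative assumptions. Each new shallow tree is fit to the current residuals (the negative gradient of the squared loss), and its prediction is added with a learning-rate shrinkage.

import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(42)
X = rng.uniform(-3, 3, size=(200, 1))
y = np.sin(X[:, 0]) + rng.normal(scale=0.1, size=200)

pred = np.zeros_like(y)
lr = 0.2                                      # learning rate = shrinkage per tree
for _ in range(100):
    residual = y - pred                       # negative gradient of the squared loss
    tree = DecisionTreeRegressor(max_depth=3).fit(X, residual)
    pred += lr * tree.predict(X)              # additive, shrunken update

print(np.mean((y - pred) ** 2))               # training MSE drops as trees accumulate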
# this experimental import is needed only on scikit-learn < 1.0;
# HistGradientBoostingClassifier is stable in later versions
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
hgb = HistGradientBoostingClassifier(random_state=42)
scores = cross_validate(hgb, train_input, train_target,
return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
0.9321723946453317 0.8801241948619236 // training / validation set scores
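HistGradientBoostingClassifier first bins each feature into at most 256 buckets, which makes split finding fast on large datasets. To get feature importances comparable to the models above, one option is permutation importance from sklearn.inspection; a sketch, assuming the hgb defined above:

from sklearn.inspection import permutation_importance

hgb.fit(train_input, train_target)
result = permutation_importance(hgb, train_input, train_target,
                                n_repeats=10, random_state=42, n_jobs=-1)
print(result.importances_mean)   # [alcohol, sugar, pH]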