Machine Learning & Deep Learning practice code 5

AI Engineering Course Log · July 12, 2023

  • load the data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.read_csv('data_v1_save.csv')
  • analyze the data
df.info()
df.tail()
df['Churn'].value_counts().plot(kind='bar')
  • data preprocessing
df[['MultipleLines']].head()
df['MultipleLines'].value_counts()
  • One-Hot Encoding: convert object columns to numeric using the pandas get_dummies() function
pd.get_dummies(data=df, columns=['MultipleLines'])
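  • note: get_dummies() returns a new DataFrame rather than modifying df in place; a minimal sketch of capturing and inspecting the encoded columns (the df_ml name is illustrative):
df_ml = pd.get_dummies(data=df, columns=['MultipleLines'])
df_ml.filter(like='MultipleLines').head()  # the new dummy columns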
  • check 'object' columns
df.select_dtypes('object').head(3)
  • collect names of columns
cal_cols = df.select_dtypes('object').columns.values
cal_cols
  • apply One-Hot Encoding to all object columns and save the result as df1
df1 = pd.get_dummies(data=df, columns=cal_cols)
df1.info()
df1.head(3)
  • split into train and test sets
X = df1.drop('Churn', axis=1).values
y = df1['Churn'].values
X.shape, y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=42)
X_train.shape
y_train.shape
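  • stratify=y keeps the Churn class ratio the same in both splits; a quick check (a sketch, assuming y holds 0/1 labels):
pd.Series(y_train).value_counts(normalize=True)
pd.Series(y_test).value_counts(normalize=True)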
  • Data Normalizing/Scaling
df1.tail()
from sklearn.preprocessing import MinMaxScaler
  • instantiate the scaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train[:2], y_train[:2]
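  • the scaler is fit on the training split only, and the same statistics are reused on the test split to avoid data leakage; the fitted feature ranges can be inspected (a sketch):
scaler.data_min_[:5], scaler.data_max_[:5]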
  • Develop the model
from sklearn.metrics import accuracy_score, recall_score  # recall_score is used by recall_eval() below
my_predictions = {}
colors = ['r', 'c', 'm', 'y', 'k', 'khaki', 'teal', 'orchid', 'sandybrown',
          'greenyellow', 'dodgerblue', 'deepskyblue', 'rosybrown', 'firebrick',
          'deeppink', 'crimson', 'salmon', 'darkred', 'olivedrab', 'olive', 
          'forestgreen', 'royalblue', 'indigo', 'navy', 'mediumpurple', 'chocolate',
          'gold', 'darkorange', 'seagreen', 'turquoise', 'steelblue', 'slategray', 
          'peru', 'midnightblue', 'slateblue', 'dimgray', 'cadetblue', 'tomato'
         ]

def recall_eval(name_, pred, actual):
    global my_predictions
    global colors

    # acc = accuracy_score(actual, pred)
    acc = recall_score(actual, pred)   # score each model by recall
    my_predictions[name_] = acc * 100  # stored as a percentage

    y_value = sorted(my_predictions.items(), key=lambda x: x[1], reverse=True)
    
    df = pd.DataFrame(y_value, columns=['model', 'recall'])
    print(df)
   
    length = len(df)
    
    plt.figure(figsize=(10, length))
    ax = plt.subplot()
    ax.set_yticks(np.arange(len(df)))
    ax.set_yticklabels(df['model'], fontsize=15)
    bars = ax.barh(np.arange(len(df)), df['recall'])
    
    for i, v in enumerate(df['recall']):
        idx = np.random.choice(len(colors))
        bars[i].set_color(colors[idx])
        ax.text(v + 2, i, str(round(v, 3)), color='k', fontsize=15, fontweight='bold')
        
    plt.title('recall', fontsize=18)
    plt.xlim(0, 100)
    
    plt.show()
  • Logistic Regression classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix 
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
  • define the model and train it
lg = LogisticRegression()
lg.fit(X_train, y_train)
  • evaluate classification performance
lg.score(X_test, y_test)
lg_pred = lg.predict(X_test)
confusion_matrix(y_test, lg_pred)
accuracy_score(y_test, lg_pred)
precision_score(y_test, lg_pred) 
recall_score(y_test, lg_pred) 
f1_score(y_test, lg_pred)
print(classification_report(y_test, lg_pred))
recall_eval('LogisticRegression', lg_pred, y_test)
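  • for binary labels, confusion_matrix() returns [[TN, FP], [FN, TP]]; unpacking it makes the recall value above explicit (a sketch):
tn, fp, fn, tp = confusion_matrix(y_test, lg_pred).ravel()
tp / (tp + fn)  # same value as recall_score(y_test, lg_pred)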
  • KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
knn_pred = knn.predict(X_test)
recall_eval('K-Nearest Neighbor', knn_pred, y_test)
  • Decision Tree
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=10, random_state=42)
dt.fit(X_train, y_train)
dt_pred = dt.predict(X_test)
recall_eval('DecisionTree', dt_pred, y_test)
  • RandomForest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=3, random_state=42)
rfc.fit(X_train, y_train)
rfc_pred = rfc.predict(X_test)
recall_eval('RandomForest Ensemble', rfc_pred, y_test)
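  • n_estimators=3 is far below the sklearn default of 100 trees; a sketch of a fuller ensemble for comparison (the rfc100 name is illustrative):
rfc100 = RandomForestClassifier(n_estimators=100, random_state=42)
rfc100.fit(X_train, y_train)
recall_eval('RandomForest (100 trees)', rfc100.predict(X_test), y_test)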

<Deep Learning>

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

tf.random.set_seed(100)
batch_size = 16
epochs = 20
X_train.shape
y_train.shape
  • build a Sequential model
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(39,)))
model.add(Dense(3, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
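  • each Dense layer holds inputs × units weights plus units biases, so the summary should report 39×4+4 = 160, 4×3+3 = 15, and 3×1+1 = 4 trainable parameters (179 in total, assuming 39 input features).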
  • the same network with Dropout regularization between layers
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(39,)))
model.add(Dropout(0.3))
model.add(Dense(3, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
  • Binary Classification Model
model.compile(optimizer='adam', 
              loss='binary_crossentropy', 
              metrics=['accuracy']) 
model.fit(X_train, y_train, 
          validation_data=(X_test, y_test),
          epochs=10, 
          batch_size=10)
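  • a quick held-out check of the trained binary model (a sketch):
loss, acc = model.evaluate(X_test, y_test)
print(f'test loss: {loss:.4f}, test accuracy: {acc:.4f}')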
  • the same task configured as a multiclass-style DNN (2-unit softmax output)
model = Sequential()
model.add(Dense(5, activation='relu', input_shape=(39,)))
model.add(Dropout(0.3))
model.add(Dense(4, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(optimizer='adam', 
              loss='sparse_categorical_crossentropy', 
              metrics=['accuracy']) 
history = model.fit(X_train, y_train, 
          validation_data=(X_test, y_test),
          epochs=20, 
          batch_size=16)
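  • sparse_categorical_crossentropy pairs the 2-unit softmax output with integer (0/1) labels, so y_train needs no one-hot encoding here.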
  • Callback
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stop = EarlyStopping(monitor='val_loss', mode='min', 
                           verbose=1, patience=5)
check_point = ModelCheckpoint('best_model.h5', verbose=1, 
                              monitor='val_loss', mode='min', save_best_only=True)
history = model.fit(x=X_train, y=y_train, 
          epochs=50 , batch_size=20,
          validation_data=(X_test, y_test), verbose=1,
          callbacks=[early_stop, check_point])
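  • after training, the best checkpointed weights can be restored (a sketch, reading the best_model.h5 file written by the ModelCheckpoint above):
from tensorflow.keras.models import load_model
best_model = load_model('best_model.h5')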
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend(['acc', 'val_acc'])
plt.show()
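  • the same history object also carries the loss curves; a matching plot:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['loss', 'val_loss'])
plt.show()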
  • Evaluation
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
pred = model.predict(X_test)
pred.shape
y_pred = np.argmax(pred, axis=1)  # softmax probabilities -> class labels
accuracy_score(y_test, y_pred)
recall_score(y_test, y_pred)
print(classification_report(y_test, y_pred))
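  • argmax applies because the final model ends in a 2-unit softmax; for the earlier single-sigmoid model, a 0.5 threshold would recover hard labels instead (a sketch; sigmoid_model stands for that earlier model):
y_pred_binary = (sigmoid_model.predict(X_test) > 0.5).astype(int).ravel()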
