# pip install plotly_express  — shell command, not Python; run it in a
# terminal (or as "!pip install plotly_express" inside a notebook cell).
# Load the Titanic passenger table (.xls) directly from PinkWink's
# ML_tutorial repository. Requires network access; pandas reads legacy
# .xls files via the xlrd engine.
import pandas as pd

titanic_url = ('https://raw.githubusercontent.com/PinkWink/ML_tutorial'
               '/master/dataset/titanic.xls')
titanic = pd.read_excel(titanic_url)
titanic.head()

# Survival overview: class-balance pie chart next to the raw count plot.
import matplotlib.pyplot as plt
import seaborn as sns

fig, axes = plt.subplots(1, 2, figsize=(16, 8))
titanic['survived'].value_counts().plot.pie(
    ax=axes[0], autopct='%1.1f%%', shadow=True, explode=[0, 0.05])
axes[0].set_title('Pie plot - survived')
axes[0].set_ylabel('')
sns.countplot(x='survived', data=titanic, ax=axes[1])
axes[1].set_title('Count plot - survived')
plt.show()

# Passenger counts by sex, then the same counts split by survival outcome.
fig, axes = plt.subplots(1, 2, figsize=(16, 8))
sns.countplot(x='sex', data=titanic, ax=axes[0])
axes[0].set_title('Count of passengers of sex')
axes[0].set_ylabel('')
sns.countplot(x='sex', data=titanic, hue='survived', ax=axes[1])
axes[1].set_title('Sex : survived')
plt.show()

# Survival counts per passenger class; margins=True appends row/column totals.
pd.crosstab(titanic['pclass'], titanic['survived'], margins=True)
# Observations:
# • 1등실의 생존 가능성이 아주 높다 — first class had a much higher chance of survival.
# • 그런데 여성의 생존률도 높다 — but the survival rate of women is also high.
# • 그럼, 1등실에는 여성이 많이 타고 있었을까? — so, were many of the first-class passengers women?

# Age histograms faceted by class (rows) and sex (columns).
grid = sns.FacetGrid(titanic, row='pclass', col='sex', height=4, aspect=2)
grid.map(plt.hist, 'age', alpha=.8, bins=20)
grid.add_legend()

# Interactive age histogram rendered with plotly express.
import plotly.express as px

fig = px.histogram(titanic, x="age")
fig.show()

# Age histograms faceted by survival (columns) and class (rows).
grid = sns.FacetGrid(titanic, col='survived', row='pclass', height=4, aspect=2)
grid.map(plt.hist, 'age', alpha=.5, bins=20)
grid.add_legend()

# 나이를 5단계로 정리 — bin age into five labelled categories.
# Discretize age into five bands; include_lowest=True keeps age 0 inside
# the first ('baby') interval instead of dropping it to NaN.
age_bins = [0, 7, 15, 30, 60, 100]
age_labels = ['baby', 'teen', 'young', 'adult', 'old']
titanic['age_cat'] = pd.cut(titanic['age'], bins=age_bins,
                            include_lowest=True, labels=age_labels)
titanic.head()

# Mean survival rate (seaborn adds a bootstrap confidence interval) broken
# down by class, age band, and sex, side by side.
plt.figure(figsize=(12, 4))
for position, column in zip((131, 132, 133), ('pclass', 'age_cat', 'sex')):
    plt.subplot(position)
    sns.barplot(x=column, y='survived', data=titanic)
plt.subplots_adjust(top=1, bottom=0.1, left=0.1, right=1,
                    hspace=0.5, wspace=0.5)

# Age distribution of survivors vs. non-survivors, one panel per sex.
# Fixes: sns.distplot is deprecated (and removed in recent seaborn);
# histplot with the same bin counts reproduces the kde=False histograms.
# The legend labels were also misspelled ("servived").
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 6))
women = titanic[titanic['sex'] == 'female']
men = titanic[titanic['sex'] == 'male']

sns.histplot(women[women['survived'] == 1]['age'], bins=20,
             label='survived', ax=axes[0])
sns.histplot(women[women['survived'] == 0]['age'], bins=40,
             label='not survived', ax=axes[0])
axes[0].legend()
axes[0].set_title('Female')

sns.histplot(men[men['survived'] == 1]['age'], bins=18,
             label='survived', ax=axes[1])
sns.histplot(men[men['survived'] == 0]['age'], bins=40,
             label='not survived', ax=axes[1])
axes[1].legend()
axes[1].set_title('Male')

# Print every passenger name. The pasted original lost its loop-body
# indentation (IndentationError); iterating the column directly also avoids
# the per-row overhead of DataFrame.iterrows.
for name in titanic['name']:
    print(name)

import re

# Extract the honorific between ", " and "." from each name, e.g.
# "Allen, Miss. Elisabeth" -> "Miss".
# Fixes: the loop body had lost its indentation; the pattern is now a raw
# string (the original '\,' is an invalid escape sequence) and compiled
# once outside the loop.
title_pattern = re.compile(r',\s\w+(\s\w+)?\.')
# .group() is the full ", Title." match; [2:-1] strips ", " and the ".".
title = [title_pattern.search(name).group()[2:-1]
         for name in titanic['name']]
titanic['title'] = title
titanic.head()

# How each extracted title distributes over sex — sanity check before
# merging rare titles into grouped buckets.
pd.crosstab(titanic['title'], titanic['sex'])

# Normalize French honorifics to English equivalents, then collapse the
# long tail of rare titles into Rare_f / Rare_m buckets.
# Fixes: both loop bodies had lost their indentation; a single mapping
# passed to Series.replace performs all substitutions in one pass.
Rare_f = ['Dona', 'Lady', 'the Countess']
Rare_m = ['Capt', 'Col', 'Don', 'Major', 'Rev', 'Sir', 'Dr', 'Master',
          'Jonkheer']
title_map = {'Mlle': 'Miss', 'Ms': 'Miss', 'Mme': 'Mrs'}
title_map.update({each: 'Rare_f' for each in Rare_f})
title_map.update({each: 'Rare_m' for each in Rare_m})
titanic['title'] = titanic['title'].replace(title_map)
# Mean survival rate per consolidated title.
titanic[['title', 'survived']].groupby(['title'], as_index=False).mean()

# Inspect the raw sex labels.
titanic['sex'].unique()
# Encode sex as an integer 'gender' column for modelling. LabelEncoder
# assigns codes in sorted class order, so here female -> 0, male -> 1.
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
titanic['gender'] = le.fit_transform(titanic['sex'])
titanic.head()
# LabelEncoder는 값을 인코딩할 수 있게 바꿔주는 클래스 — LabelEncoder converts
# categorical values into integer codes: fit() learns the classes from the
# 'sex' column, and transform() then maps them to 0 / 1.

# 결측값 제거 — drop rows with missing values.
# Drop rows with missing age or fare, then train and evaluate a shallow
# decision tree on six numeric features.
# Fixes: dropna(subset=...) replaces the two separate notnull filters, and
# the fit/predict/score cell that was accidentally pasted twice in the
# notebook is now run once.
titanic = titanic.dropna(subset=['age', 'fare'])

from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

X = titanic[['pclass', 'age', 'sibsp', 'parch', 'fare', 'gender']]
y = titanic['survived']
# Fixed random_state keeps the split (and the accuracy below) reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=13)

dt = DecisionTreeClassifier(max_depth=4, random_state=13)
dt.fit(X_train, y_train)
pred = dt.predict(X_test)
print(accuracy_score(y_test, pred))
# -> 0.7655502392344498  (accuracy printed by the cell above)
import numpy as np

# Survival probability for two hypothetical passengers; feature order is
# [pclass, age, sibsp, parch, fare, gender]. Building a DataFrame with the
# training column names avoids sklearn's "X does not have valid feature
# names" warning that a bare ndarray triggers. Also fixes the misspelled
# "Decaprio" output label.
feature_names = ['pclass', 'age', 'sibsp', 'parch', 'fare', 'gender']

dicaprio = pd.DataFrame([[3, 18, 0, 0, 5, 1]], columns=feature_names)
print('Dicaprio : ', dt.predict_proba(dicaprio)[0, 1])
# -> Decaprio :  0.16728624535315986  (original pasted output)

winslet = pd.DataFrame([[1, 16, 1, 1, 100, 0]], columns=feature_names)
print('winslet : ', dt.predict_proba(winslet)[0, 1])
# -> winslet :  1.0  (original pasted output)