타이타닉?
1910년대 당시 최대 여객선 - 타이타닉
영국에서 미국 뉴욕으로 가던 국제선
데이터
pip install plotly_express
import pandas as pd

# Fetch the Titanic passenger manifest (xls) from the tutorial repository.
titanic = pd.read_excel(
    'https://raw.githubusercontent.com/PinkWink/ML_tutorial/master/dataset/titanic.xls'
)
titanic.head()
import matplotlib.pyplot as plt
import seaborn as sns

# Survival overview: share of survivors (pie) next to raw counts (bar).
fig, axes = plt.subplots(1, 2, figsize=(18, 8))

survived_counts = titanic['survived'].value_counts()
survived_counts.plot.pie(
    explode=[0, 0.05], autopct='%1.1f%%', ax=axes[0], shadow=True
)
axes[0].set_title('Pie plot - Survived')
axes[0].set_ylabel('')

# Newer seaborn versions require the column to be named explicitly via x=.
sns.countplot(x='survived', data=titanic, ax=axes[1])
axes[1].set_title('Count plot - Survived')
plt.show()
# Passenger counts by sex, then survival broken down within each sex.
fig, axes = plt.subplots(1, 2, figsize=(18, 8))

sns.countplot(x='sex', data=titanic, ax=axes[0])
axes[0].set_title('Count of Passengers of Sex')
axes[0].set_ylabel('')

sns.countplot(x='sex', hue='survived', data=titanic, ax=axes[1])
axes[1].set_title('Sex:Survived and Unsurvived')
plt.show()
남성의 생존 가능성이 더 낮다
경제력(객실 등급) 대비 생존률
# Age distributions split by passenger class and sex.
g = sns.FacetGrid(titanic, row='pclass', col='sex', height=4, aspect=2)
g.map(plt.hist, 'age', alpha=.8, bins=20)
g.add_legend()

# Interactive age histogram via plotly express.
import plotly.express as px

px.histogram(titanic, x='age').show()

# Age distributions split by survival outcome and passenger class.
g = sns.FacetGrid(titanic, col='survived', row='pclass', height=4, aspect=2)
g.map(plt.hist, 'age', alpha=.5, bins=20)
g.add_legend()
# Bucket ages into coarse life-stage categories for group-level comparison.
age_bins = [0, 7, 15, 30, 60, 100]
age_labels = ['baby', 'teen', 'young', 'adult', 'old']
titanic['age_cat'] = pd.cut(
    titanic['age'], bins=age_bins, labels=age_labels, include_lowest=True
)
titanic.head()

# Survival rate by class, age category, and sex, side by side.
plt.figure(figsize=(12, 4))
for pos, feature in zip((131, 132, 133), ('pclass', 'age_cat', 'sex')):
    plt.subplot(pos)
    sns.barplot(x=feature, y='survived', data=titanic)
plt.subplots_adjust(top=1, bottom=0.1, left=0.1, right=1, hspace=0.5, wspace=0.5)
# Age histograms of survivors vs. victims, one panel per sex.
# NOTE: sns.distplot was deprecated in seaborn 0.11 and removed in later
# releases; sns.histplot is the drop-in replacement (KDE is off by default,
# matching the original kde=False).
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 6))

women = titanic[titanic['sex'] == 'female']
men = titanic[titanic['sex'] == 'male']

ax = sns.histplot(women[women['survived'] == 1]['age'], bins=20, label='survived', ax=axes[0])
ax = sns.histplot(women[women['survived'] == 0]['age'], bins=40, label='not_survived', ax=axes[0])
ax.legend()
ax.set_title('Female')

ax = sns.histplot(men[men['survived'] == 1]['age'], bins=18, label='survived', ax=axes[1])
ax = sns.histplot(men[men['survived'] == 0]['age'], bins=40, label='not_survived', ax=axes[1])
ax.legend()
ax.set_title('Male')
# Print every passenger name.  Iterating the Series directly is much cheaper
# than DataFrame.iterrows(), which materializes a Series object per row.
for name in titanic['name']:
    print(name)
import re

# Show the honorific (", Mr." / ", Mrs." / ...) embedded in each name.
# Use a raw string: '\,' in a plain string is an invalid escape sequence
# (SyntaxWarning on Python 3.12+, error in future versions).  The pattern is
# compiled once outside the loop.
# NOTE(review): .group() raises AttributeError if a name lacks the pattern —
# assumed not to happen for this dataset's "Last, Title. First" format.
title_re = re.compile(r',\s\w+(\s\w+)?\.')
for idx, row in titanic.iterrows():
    print(idx)
    print(title_re.search(row['name']).group())
import re

# Extract each passenger's honorific, dropping the leading ', ' and trailing
# '.', and store it as a new 'title' column.  Raw string avoids the invalid
# escape sequence '\,' (SyntaxWarning on Python 3.12+); a comprehension over
# the name Series replaces the slow iterrows()/append loop.
title_re = re.compile(r',\s\w+(\s\w+)?\.')
titanic['title'] = [
    title_re.search(name).group()[2:-1] for name in titanic['name']
]
titanic.head()
# Honorific counts per sex, then fold the French variants into their
# English equivalents.
pd.crosstab(titanic['title'], titanic['sex'])

titanic['title'] = titanic['title'].replace(
    {'Mlle': 'Miss', 'Ms': 'Miss', 'Mme': 'Mrs'}
)
# Collapse the remaining rare honorifics into two buckets, split by the sex
# they are conventionally associated with.
rare_female = ['Dona', 'Dr', 'Lady', 'the Countess']
rare_male = ['Capt', 'Col', 'Don', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Master']

rare_map = {t: 'Rare_f' for t in rare_female}
rare_map.update({t: 'Rare_m' for t in rare_male})
titanic['title'] = titanic['title'].replace(rare_map)

titanic['title'].unique()
titanic[['title', 'survived']].groupby(['title'], as_index=False).mean()
빙산과 충돌한 타이타닉의 탈출 원칙
타이타닉의 최후
# Encode sex as a numeric 'gender' column and inspect feature correlations.
titanic.info()
titanic['sex'].unique()

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
le.fit(titanic['sex'])
titanic['gender'] = le.transform(titanic['sex'])
titanic.head()

# Drop rows missing age or fare — the model below cannot handle NaNs.
titanic = titanic[titanic['age'].notnull()]
titanic = titanic[titanic['fare'].notnull()]
titanic.info()

# pandas >= 2.0: DataFrame.corr() raises TypeError when the frame contains
# non-numeric columns (name, sex, title, ...), so restrict the computation
# to numeric columns explicitly.
correlation_matrix = titanic.corr(numeric_only=True).round(1)
sns.heatmap(data=correlation_matrix, annot=True, cmap='bwr')
# Hold out 20% of the rows and fit a shallow decision tree on the
# numeric features, then report held-out accuracy.
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

features = ['pclass', 'age', 'sibsp', 'parch', 'fare', 'gender']
x = titanic[features]
y = titanic['survived']
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=13
)

dt = DecisionTreeClassifier(max_depth=4, random_state=13)
dt.fit(x_train, y_train)
print(accuracy_score(y_test, dt.predict(x_test)))
import numpy as np

# Score two hypothetical passengers: [pclass, age, sibsp, parch, fare, gender].
# The tree was fitted on a DataFrame, so wrap the raw values in a DataFrame
# with the training column names — predicting on a bare ndarray triggers
# sklearn's "X does not have valid feature names" warning.
dicaprio = pd.DataFrame([[3, 18, 0, 0, 5, 1]], columns=x.columns)
print('Dicaprio', dt.predict_proba(dicaprio)[0, 1])

winslet = pd.DataFrame([[1, 16, 1, 1, 100, 0]], columns=x.columns)
print('Winslet', dt.predict_proba(winslet)[0, 1])