빅데이터 분석기사 (Big Data Analysis Engineer) Practical Exam Tips

김신영 · June 20, 2024
import pandas as pd
import numpy as np
import sklearn

# LabelEncoder
from sklearn.preprocessing import LabelEncoder
# from sklearn.preprocessing import OneHotEncoder 
pd.get_dummies(df, columns=[...], drop_first=False)
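
# A minimal encoding sketch (assumption: df is a DataFrame with a categorical column 'col' -- hypothetical names)
le = LabelEncoder()
df['col_encoded'] = le.fit_transform(df['col'])                       # integer labels
df_dummies = pd.get_dummies(df, columns=['col'], drop_first=False)    # one-hot columns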

# Transaction Encoder
from mlxtend.preprocessing import TransactionEncoder
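
# A quick usage sketch (assumption: transactions is a list of item lists -- hypothetical data)
transactions = [['milk', 'bread'], ['milk', 'eggs'], ['bread', 'eggs', 'milk']]
te = TransactionEncoder()
te_array = te.fit(transactions).transform(transactions)
df_onehot = pd.DataFrame(te_array, columns=te.columns_)   # boolean one-hot table, ready for apriori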

# Scaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import RobustScaler
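
# All four scalers share the same fit/transform interface; a sketch with StandardScaler
# (assumption: X is a numeric feature matrix -- hypothetical name)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)   # fit on train data, then reuse scaler.transform() on test data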

# Polynomial Features
from sklearn.preprocessing import PolynomialFeatures

# train/test split
from sklearn.model_selection import train_test_split
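
# A typical split (assumption: X are features, y is the target -- hypothetical names)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)   # stratify keeps class ratios (classification only)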

# model performance test (Regression)
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import root_mean_squared_error  # scikit-learn >= 1.4
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import r2_score
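
# A scoring sketch for a fitted regressor (assumption: y_test and y_pred already exist -- hypothetical names)
mae = mean_absolute_error(y_test, y_pred)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))   # RMSE without relying on the newer helper
r2 = r2_score(y_test, y_pred)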

# model performance test (Classification)
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
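
# A scoring sketch for a classifier (assumption: y_test, y_pred, and y_proba exist -- hypothetical names)
acc = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred, average='macro')   # 'macro' for multi-class
roc_auc = roc_auc_score(y_test, y_proba)         # y_proba = model.predict_proba(X_test)[:, 1] for binary
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))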

# PCA
from sklearn.decomposition import PCA
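
# A dimensionality-reduction sketch (assumption: X_scaled is a scaled feature matrix -- hypothetical name)
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)
print(pca.explained_variance_ratio_)   # variance explained by each component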

# sklearn.linear_model
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
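
# A fitting sketch with LogisticRegression; the other estimators follow the same fit/predict pattern
# (assumption: X_train, y_train, X_test exist -- hypothetical names)
model = LogisticRegression(max_iter=1000)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_proba = model.predict_proba(X_test)[:, 1]   # positive-class probability, e.g. for ROC-AUC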

# sklearn.svm
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import NuSVC
from sklearn.svm import NuSVR

SVC(C=1.0, kernel='rbf')

# sklearn.tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor

from sklearn.tree import plot_tree

plot_tree(clf, feature_names=clf.feature_names_in_, class_names=['0','1'], filled=True, fontsize=15)

# sklearn.ensemble 
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor

from xgboost import XGBClassifier
from xgboost import XGBRegressor
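
# A sketch with RandomForestClassifier and XGBClassifier
# (assumption: X_train, y_train, X_test exist and labels are integer-encoded, e.g. 0/1 -- hypothetical names)
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)

xgb = XGBClassifier(n_estimators=100, max_depth=3, random_state=42)
xgb.fit(X_train, y_train)
xgb_pred = xgb.predict(X_test)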

# sklearn.naive_bayes
from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer(binary=True)
X_train_cv = cv.fit_transform(X_train['content'])
from sklearn.naive_bayes import BernoulliNB

CountVectorizer(binary=False)
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
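
# Continuing the text-classification sketch above
# (assumption: X_train/X_test have a 'content' column and y_train is the label -- hypothetical names)
nb = BernoulliNB()
nb.fit(X_train_cv, y_train)
X_test_cv = cv.transform(X_test['content'])   # reuse the fitted vectorizer, do not refit on test data
y_pred = nb.predict(X_test_cv)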


# Classifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

# Regressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor

# Cluster
from sklearn.cluster import KMeans
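
# A clustering sketch (assumption: X_scaled is a scaled feature matrix -- hypothetical name)
kmeans = KMeans(n_clusters=3, random_state=42, n_init=10)
labels = kmeans.fit_predict(X_scaled)   # cluster label per row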

# Association Rule
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
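
# An association-rule sketch (assumption: df_onehot is the boolean table built with TransactionEncoder above)
frequent = apriori(df_onehot, min_support=0.1, use_colnames=True)
rules = association_rules(frequent, metric='confidence', min_threshold=0.5)
rules.sort_values('lift', ascending=False).head()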

# Descriptive Statistics
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
import scipy.stats as stats

# Linear Regression (OLS)
import statsmodels
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.stats.api as sms
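
# An OLS sketch with the formula interface (assumption: df has columns y, x1, x2 -- hypothetical names)
ols_model = smf.ols('y ~ x1 + x2', data=df).fit()
print(ols_model.summary())   # coefficients, p-values, R-squared
print(ols_model.params)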

# Association Rule
import mlxtend
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
from mlxtend.preprocessing import TransactionEncoder

# Hypothesis Test
# t-Test
from scipy.stats import shapiro
statistic, p_value = shapiro(data) # normality test

from scipy.stats import wilcoxon
statistic, p_value = wilcoxon(x, y=None) # when the data are not normally distributed

from scipy.stats import ttest_1samp
statistic, p_value = ttest_1samp(data, popmean=3, alternative='two-sided')

from scipy.stats import levene
statistic, p_value = levene(*samples)  # test for equal variances

from scipy.stats import ttest_ind
statistic, p_value = ttest_ind(data1, data2, equal_var=True, alternative='less')  # equal variances
statistic, p_value = ttest_ind(data1, data2, equal_var=False, alternative='less') # unequal variances

from scipy.stats import ttest_rel
statistic, p_value = ttest_rel(data1, data2, alternative='greater')
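
# A typical two-sample workflow combining the pieces above: check normality, then variances, then pick the test
# (assumption: data1 and data2 are independent samples -- hypothetical names)
from scipy.stats import mannwhitneyu
_, p_norm1 = shapiro(data1)
_, p_norm2 = shapiro(data2)
if min(p_norm1, p_norm2) > 0.05:                       # both samples look normal
    _, p_var = levene(data1, data2)
    statistic, p_value = ttest_ind(data1, data2, equal_var=(p_var > 0.05))
else:
    statistic, p_value = mannwhitneyu(data1, data2)    # non-parametric alternative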

# ANOVA (Analysis of Variance)
from scipy.stats import kruskal
statistic, p_value = kruskal(*data) # when normality does not hold, tests whether group medians are equal

from scipy.stats import levene
statistic, p_value = levene(*samples)  # test for equal variances

import pingouin as pg
pg.welch_anova(df, dv=feature_name, between=target_name) # Welch's ANOVA when the equal-variance assumption does not hold

from scipy.stats import f_oneway
f_oneway(*data) # one-way ANOVA

from statsmodels.stats.multicomp import pairwise_tukeyhsd

pairwise_tukeyhsd(data, groups, alpha=0.05) # post-hoc test (Tukey HSD)

import statsmodels.formula.api as smf
from statsmodels.stats.anova import anova_lm

model = smf.ols(formula='mpg ~ C(cyl) + C(am) + C(cyl):C(am)', data=df).fit()
anova_lm(model, typ=2) # two-way ANOVA with interaction


# Cross-tabulation analysis (chi-square tests)
from scipy.stats import chisquare
statistic, p_value = chisquare(f_obs, f_exp=None) # goodness-of-fit test

from scipy.stats import chi2_contingency
chi2, p, dof, expected = chi2_contingency(pd.crosstab(df['a'], df['b'])) # test of independence

# Correlation
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.stats import kendalltau

# Normality tests
from scipy.stats import shapiro
from scipy.stats import normaltest
from scipy.stats import kstest

kstest(data, 'norm')

from scipy.stats import mannwhitneyu # non-parametric test for two independent samples

# Util
import inspect

def get_object_type(obj):
    if inspect.ismodule(obj):
        return "module"
    elif inspect.isfunction(obj):
        return "function"
    elif inspect.ismethod(obj):
        return "method"
    elif inspect.isclass(obj):
        return "class"
    else:
        return "unknown"


def list_all_methods(module):
    return list_all(module, inspect.ismethod)


def list_all_functions(module):
    return list_all(module, inspect.isfunction)


def list_all_classes(module):
    return list_all(module, inspect.isclass)


def list_all(module, predicate=lambda x: True):
    return [(name, get_object_type(obj))
            for name, obj in inspect.getmembers(module)
            if not name.startswith('_') and predicate(obj)]

def hypothesis_test(p_value, alpha=0.05):
    if p_value <= alpha:
        print(f"p_value = {p_value} <= {alpha}, the null hypothesis is rejected.")
    else:
        print(f"p_value = {p_value} > {alpha}, the null hypothesis cannot be rejected.")
        
def predict(y_test, y_pred, score_func, **kwargs):
    # print the value of a metric function (e.g. accuracy_score, f1_score) for the given predictions
    print(f"{score_func.__name__} = {score_func(y_test, y_pred, **kwargs)}")
    

import inspect
from sklearn.ensemble import RandomForestClassifier
print(inspect.signature(RandomForestClassifier.fit))  # quick way to check a function's signature during the exam