# -*- coding: utf-8 -*-
"""
Created on Tue Mar  1 17:20:45 2022

@author: TANISH
"""


""" Linear Regression """

# Linear Regression is a statistical technique that models a
# target variable as a linear function of one or more
# explanatory (predictor) variables.
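#
# In symbols (a sketch; y is the target, x1..xk the explanatory
# variables, b0..bk the fitted coefficients):
#     y = b0 + b1*x1 + b2*x2 + ... + bk*xk + error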


# Data Link: 
# https://www.kaggle.com/fedesoriano/body-fat-prediction-dataset

import pandas as pd
import seaborn as sb


""" Data Import """

data = pd.read_csv(r"C:\Users\tanis\Desktop\Body Fat.csv")


""" Viz """

sb.heatmap(data.corr())
sb.kdeplot(data["BodyFat"])


""" Train Test Split """

X = data.loc[:,data.columns != "BodyFat"]
Y = data.loc[:,data.columns == "BodyFat"]

from sklearn import model_selection as ms

X_train,X_test,Y_train,Y_test = ms.train_test_split(X,Y,
                                      test_size = 0.30,
                                      random_state = 11)
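
# quick sanity check on the split sizes (roughly a 70/30 split)
X_train.shape, X_test.shape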


""" Training the Model """

from sklearn.linear_model import LinearRegression

lm = LinearRegression()

model = lm.fit(X_train, Y_train)


""" Validating the Model """

model.score(X_test,Y_test)
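
# score() returns R-squared on the held-out data. As a complementary
# error metric, a rough sketch of the test RMSE using sklearn.metrics:
import numpy as np
from sklearn.metrics import mean_squared_error

rmse = np.sqrt(mean_squared_error(Y_test, lm.predict(X_test)))
rmse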



""" Alternative """

import statsmodels.formula.api as smf

Train = pd.concat([X_train,Y_train],axis = 1)


model = smf.ols(formula = "BodyFat ~ ." + ,
                data = Train).fit()


fm = str()
z = 0

for i in X_train.columns:
    if(z == 0):
        fm = fm + i
        z = 1
    else:
        fm = fm + " + " + i


model = smf.ols(formula = "BodyFat ~ " + fm,
                data = Train).fit()
        

model.summary()


""" Actual v/s Fitted """

Y_test_Pred = pd.DataFrame()
Y_test_Pred["Pred"] = model.predict(X_test)


sb.scatterplot(x = Y_test["BodyFat"],
               y = Y_test_Pred["Pred"])



""" Feature Selection """

from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import RFE


# SelectKBest keeps the k best-scoring features (k = 10 by default);
# store the fitted selector under its own name so the OLS model above
# is not overwritten
select = SelectKBest(score_func = f_regression)
kbest = select.fit(X_train, Y_train.values.ravel())

kbest.scores_
index = kbest.get_support()
X_train.columns[index]
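
# RFE is imported above but not used; a minimal sketch of recursive
# feature elimination with the same linear model (n_features_to_select
# is an arbitrary choice here)
rfe = RFE(estimator = LinearRegression(), n_features_to_select = 5)
rfe = rfe.fit(X_train, Y_train.values.ravel())

X_train.columns[rfe.support_]
rfe.ranking_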


""" Assumption Testings """

# Errors follow Normal Dist

# Shapiro-Wilks Test
# H0: Errors are Normal

from scipy.stats import shapiro

shapiro(model.resid)
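
# If the p-value is above the chosen alpha (commonly 0.05), we fail to
# reject H0 and treat the residuals as approximately normal.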


# Q-Q Plot
import statsmodels.api as sm
sm.qqplot(model.resid)


# Errors have homogeneous variance

# Breusch-Pagan Test
# H0: Errors have constant variance (homoscedasticity)

import statsmodels.stats.diagnostic as diag

diag.het_breuschpagan(model.resid, model.model.exog)[1]
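
# The second element is the LM test p-value; a value below 0.05 would
# suggest heteroscedasticity.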


# Using Residual Plot

sb.scatterplot(x = model.fittedvalues, y = model.resid)


# Multicollinearity is absent

from statsmodels.stats.outliers_influence import variance_inflation_factor as VIF
import statsmodels.tools.tools as stt

# add_constant prepends an intercept column, which the VIF calculation needs
X_train_const = stt.add_constant(X_train)

vif = pd.DataFrame()
vif["Feature"] = X_train.columns

# skip column 0 (the constant) so each VIF lines up with its feature;
# a VIF above 10 is a common rule of thumb for problematic multicollinearity
vif["VIF"] = [VIF(X_train_const.values, i + 1)
              for i in range(X_train.shape[1])]

vif["VIF"]
vif[vif["VIF"] < 10]



""" Re-fitting the model """


# keep only the low-VIF features on the right-hand side
fm = " + ".join(vif[vif["VIF"] < 10]["Feature"])

model = smf.ols(formula = "BodyFat ~ " + fm,
                data = Train).fit()
        

model.summary()
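
# Adjusted R-squared offers a quick comparison between this reduced
# model and the full model fitted earlier
model.rsquared_adj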


""" Check for Outlier """

cooks_dist = pd.DataFrame()
cooks_dist["Value"] = model.get_influence().cooks_distance[0]
cooks_dist["p_value"] = model.get_influence().cooks_distance[1]

cooks_dist["Index"] = range(0,cooks_dist.shape[0])

sb.scatterplot(x = cooks_dist["Index"],
               y = cooks_dist["Value"])


cooks_dist[cooks_dist["Value"] >= 0.20]
cooks_dist[cooks_dist["p_value"] < 0.05]

