-
Notifications
You must be signed in to change notification settings - Fork 0
/
pca_feature_transformation.py
51 lines (37 loc) · 1.28 KB
/
pca_feature_transformation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
def read_data(file_name):
    """Load a CSV file from disk.

    Args:
        file_name: path to the CSV file.

    Returns:
        pandas.DataFrame holding the file's rows and columns.
    """
    frame = pd.read_csv(file_name)
    return frame
def remove_unwanted_features(dataset):
    """Split the dataframe into a feature matrix and a target vector.

    Keeps columns 3..12 as features and column 13 as the target; columns
    0-2 are dropped (presumably row id / customer id / surname for the
    churn dataset — verify against the CSV's actual layout).

    Args:
        dataset: pandas.DataFrame with at least 14 columns.

    Returns:
        Tuple ``(X, y)`` of NumPy arrays: features of shape (n, 10) and
        the target of shape (n,).
    """
    feature_frame = dataset.iloc[:, 3:13]
    target_column = dataset.iloc[:, 13]
    return feature_frame.values, target_column.values
def label_encoder(X):
    """Integer-encode the categorical columns at index 1 and 2, in place.

    Each column is fit independently (re-fitting per column matches the
    original behavior of reusing one encoder instance).

    NOTE(review): ``LabelEncoder`` is documented for *targets*; for feature
    columns ``OrdinalEncoder`` is the recommended tool, though the result
    for a single column is the same.

    Args:
        X: object ndarray whose columns 1 and 2 hold category labels.

    Returns:
        The same array with columns 1 and 2 replaced by integer codes.
    """
    for column_index in (1, 2):
        codes = LabelEncoder().fit_transform(X[:, column_index])
        X[:, column_index] = codes
    return X
def hot_encoder(X):
    """One-hot encode column 1 of ``X``.

    The original used ``OneHotEncoder(categorical_features=[1])``, a
    parameter deprecated in scikit-learn 0.20 and removed in 0.22 — the
    call raises ``TypeError`` on any modern release. ``ColumnTransformer``
    is the supported replacement and reproduces the old column layout:
    the dummy columns come first, followed by the untouched remainder.

    Args:
        X: 2-D array whose column 1 holds (already integer-encoded)
            category codes.

    Returns:
        Dense ndarray with column 1 expanded into one-hot dummy columns.
    """
    transformer = ColumnTransformer(
        [("one_hot", OneHotEncoder(), [1])],
        remainder="passthrough",
        sparse_threshold=0,  # force dense output, matching the old .toarray()
    )
    return transformer.fit_transform(X)
def feature_scaling(dataset):
    """Standardize the numeric churn columns to zero mean / unit variance.

    Mutates ``dataset`` in place (the listed columns are overwritten) and
    returns the same dataframe for chaining.

    Args:
        dataset: pandas.DataFrame containing the named numeric columns.

    Returns:
        The input dataframe with the numeric columns standardized.
    """
    numeric_columns = [
        "CreditScore",
        "Age",
        "Tenure",
        "Balance",
        "EstimatedSalary",
    ]
    scaled = StandardScaler().fit_transform(dataset[numeric_columns])
    dataset[numeric_columns] = scaled
    return dataset
def feature_transform(X):
    """Project ``X`` onto its first 8 principal components.

    The original assigned ``pca.explained_variance_`` to a local that was
    never used; that dead assignment is removed. The fitted eigenvalues
    remain available on the ``pca`` object should a caller ever need them.

    Args:
        X: 2-D array with at least 8 feature columns.

    Returns:
        ndarray of shape (n_samples, 8) — the PCA-transformed features.
    """
    pca = PCA(n_components=8)
    return pca.fit_transform(X)
if __name__ == '__main__':
    # Show full arrays (no "..." truncation) when printing results interactively.
    np.set_printoptions(threshold=np.inf)
    dataset = read_data("Churn_Modelling.csv")
    # NOTE(review): scaling runs on the full dataframe BEFORE the categorical
    # columns are label/one-hot encoded below, so the dummy columns produced
    # by hot_encoder reach PCA unscaled — confirm this ordering is intended.
    dataset = feature_scaling(dataset)
    # X = columns 3..12, y = column 13 (the churn target; unused below).
    X, y = remove_unwanted_features(dataset)
    X = label_encoder(X)
    X = hot_encoder(X)
    # Reduce the encoded feature matrix to 8 principal components.
    X = feature_transform(X)