-
Notifications
You must be signed in to change notification settings - Fork 1
/
model_tester.py
111 lines (87 loc) · 3.88 KB
/
model_tester.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score
import learners.DecisionTreeClassification as dt
import learners.NeuralNetworkClassification as mlp
import learners.KNeighborsClassification as knn
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
import learners.SVMClassification as svmc
def read_data(file_name):
    """Load a CSV dataset from *file_name* into a pandas DataFrame."""
    return pd.read_csv(file_name)
def remove_unwanted_features(dataset):
    """Split the frame into a feature matrix and target vector.

    Keeps positional columns 3..12 as features and column 13 as the
    label, dropping everything else (e.g. id-style leading columns).
    Returns (X, y) as NumPy arrays.
    """
    features = dataset.iloc[:, 3:13].values
    target = dataset.iloc[:, 13].values
    return features, target
def label_encoder(X):
    """Integer-encode the two categorical feature columns in place.

    Columns 1 and 2 are re-encoded as integer codes (presumably the
    Geography and Gender columns of the churn dataset — confirm against
    the CSV layout).  The same encoder instance is refit per column,
    which is safe because ``fit_transform`` refits from scratch.
    """
    encoder = LabelEncoder()
    for col in (1, 2):
        X[:, col] = encoder.fit_transform(X[:, col])
    return X
def hot_encoder(X):
    """One-hot encode feature column 1, returning a dense float matrix.

    The original implementation used ``OneHotEncoder(categorical_features=[1])``,
    a constructor argument deprecated in scikit-learn 0.20 and removed in
    0.22, so it raises ``TypeError`` on current versions.  This version
    encodes column 1 explicitly and concatenates the dummy columns in
    front of the remaining columns — the same layout the old
    ``categorical_features`` behavior produced.

    NOTE(review): assumes the non-encoded columns are already numeric
    (label_encoder has run first) so the float cast is lossless.
    """
    encoder = OneHotEncoder()
    # Encode only column 1; reshape to the (n_samples, 1) shape the API expects.
    dummies = encoder.fit_transform(X[:, 1].reshape(-1, 1)).toarray()
    # Old behavior: dummy columns first, all other columns after, as floats.
    remainder = np.delete(X, 1, axis=1).astype(float)
    return np.concatenate((dummies, remainder), axis=1)
def feature_scaling(dataset):
    """Standardize the continuous columns of the frame and return it.

    Applies z-score scaling (zero mean, unit variance) to the named
    numeric columns, mutating *dataset* in place.
    """
    numeric_cols = ["CreditScore", "Age", "Tenure", "Balance", "EstimatedSalary"]
    dataset[numeric_cols] = StandardScaler().fit_transform(dataset[numeric_cols])
    return dataset
def run_decision_tree(X_train, y_train, X_test, y_test):
    """Train the project decision-tree wrapper and print test accuracy.

    Hyperparameters are fixed (entropy criterion, random splitter,
    depth 7, aggressive split/leaf minimums to curb overfitting).
    Output goes to stdout only; nothing is returned.
    """
    args = {"criterion": 'entropy', "splitter": "random", "max_depth": 7,
            "min_samples_split": 300, "min_samples_leaf": 50}
    model = dt.DecisionTreeClassification(args)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # A confusion matrix was previously computed here but never used; removed.
    print(metrics.accuracy_score(y_test, y_pred))
def run_decision_tree_cross_validation(X, y):
    """Print the mean 10-fold cross-validated accuracy of the decision tree.

    Uses the same hyperparameters as run_decision_tree so the two
    results are directly comparable.
    """
    tree_args = {"criterion": 'entropy', "splitter": "random", "max_depth": 7,
                 "min_samples_split": 300, "min_samples_leaf": 50}
    wrapper = dt.DecisionTreeClassification(tree_args)
    # The wrapper exposes its underlying sklearn estimator for cross_val_score.
    fold_scores = cross_val_score(estimator=wrapper.get_skl_learner(), X=X, y=y, cv=10)
    print(fold_scores.mean())
def run_neural_network(X_train, y_train, X_test, y_test):
    """Train the project MLP wrapper and print test accuracy.

    Architecture: 10 hidden layers of 11 units each, tanh activation,
    up to 1000 training iterations.  Output goes to stdout only.
    """
    layers = (11,) * 10
    args = {"activation": "tanh", "max_iter": 1000, "hidden_layer_sizes": layers}
    model = mlp.NeuralNetworkClassification(args)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # A confusion matrix was previously computed here but never used; removed.
    print(metrics.accuracy_score(y_test, y_pred))
def run_neural_network_cross_validation(X, y):
    """Print the mean 10-fold cross-validated accuracy of the MLP setup.

    Mirrors the configuration in run_neural_network (10 x 11-unit tanh
    layers) so holdout and CV numbers can be compared side by side.
    """
    hidden = tuple(11 for _ in range(10))
    mlp_args = {"activation": "tanh", "max_iter": 1000, "hidden_layer_sizes": hidden}
    wrapper = mlp.NeuralNetworkClassification(mlp_args)
    fold_scores = cross_val_score(estimator=wrapper.get_learner(), X=X, y=y, cv=10)
    print(fold_scores.mean())
def run_k_neighbors(X_train, y_train, X_test, y_test):
    """Train the project k-NN wrapper and print test accuracy.

    Fixed configuration: brute-force search, Manhattan (cityblock)
    distance, k=9, uniform weights, all CPU cores.  Output goes to
    stdout only.
    """
    args = {"algorithm": 'brute', "leaf_size": 30, "metric": 'cityblock',
            "n_jobs": -1, "n_neighbors": 9, "weights": 'uniform'}
    model = knn.KNeighborsClassification(args)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # A confusion matrix was previously computed here but never used; removed.
    print(metrics.accuracy_score(y_test, y_pred))
def run_svm(X_train, y_train, X_test, y_test):
    """Train the project SVM wrapper (gamma=0.9) and print test accuracy."""
    svm_args = {"gamma": 0.9}
    model = svmc.SVMClassification(svm_args)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(metrics.accuracy_score(y_test, predictions))
if __name__ == '__main__':
    # Print full arrays without truncation when inspecting intermediate output.
    np.set_printoptions(threshold=np.inf)
    dataset = read_data("Churn_Modelling.csv")
    # Scale BEFORE dropping columns: feature_scaling addresses columns by
    # name on the DataFrame, while remove_unwanted_features returns bare
    # ndarrays — the order of these two calls must not be swapped.
    dataset = feature_scaling(dataset)
    X, y = remove_unwanted_features(dataset)
    X = label_encoder(X)
    # NOTE(review): all dummy columns are kept — the dummy-variable trap is
    # not handled here; confirm the downstream models tolerate collinearity.
    X = hot_encoder(X)
    # Fixed random_state for a reproducible 70/30 split across runs.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    run_decision_tree(X_train, y_train, X_test, y_test)
    run_decision_tree_cross_validation(X, y)
    run_neural_network(X_train, y_train, X_test, y_test)
    run_neural_network_cross_validation(X, y)
    run_k_neighbors(X_train, y_train, X_test, y_test)
    run_svm(X_train, y_train, X_test, y_test)