-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathRunning different algorithms and calculate accuracy.py
151 lines (140 loc) · 6.26 KB
/
Running different algorithms and calculate accuracy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
import matplotlib.pyplot as plt
import nltk  # required: code below calls nltk.NaiveBayesClassifier.train
import numpy as np
from matplotlib.colors import ListedColormap
from nltk.classify.scikitlearn import SklearnClassifier  # required: used for every sklearn wrapper below
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
# Accumulators shared by the cross-validation loop below.
# (Fix: removed the original bare `crossValidationData[0][0]` expression —
# a no-op inspection leftover that could also raise IndexError on empty data.)
final_data = []      # one row per classified test record: [features, true_label, 9 predictions]
totalAccuracy = []   # per-fold lists of per-classifier metric rows
# For every cross-validation fold: train one classifier of each kind on the
# fold's training split, classify the fold's test split with all of them, then
# compute accuracy / precision / recall / F-score per classifier.
#
# Relies on names defined elsewhere (earlier cell / module):
#   crossValidationData — list of (training_split, test_split) pairs, where each
#       record is (feature_dict, label) and labels are 1 (positive) / -1 (negative)
#       — TODO confirm against the producer of this structure.
#
# NOTE(review): final_data is initialised once outside this loop and never reset
# per fold, so fold k's metrics are computed over the accumulated records of
# folds 0..k, not fold k alone — confirm this cumulative evaluation is intended.
for k in range(len(crossValidationData)):
    print("Cross Validation ------" + str(k))
    ktrainingData = crossValidationData[k][0]
    kAccuracyResults = []

    # --- Train the battery of classifiers on this fold's training split ---
    # (NuSVC and MLP were disabled in the original and remain excluded.)
    print("Naive Bayesian")
    classifier = nltk.NaiveBayesClassifier.train(ktrainingData)

    print("Multinomial Naive Bayesian")
    MNB_classifier = SklearnClassifier(MultinomialNB())
    MNB_classifier.train(ktrainingData)

    print("Bernoulli Naive Bayesian")
    BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
    BernoulliNB_classifier.train(ktrainingData)

    print("Logistic Regression")
    LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
    LogisticRegression_classifier.train(ktrainingData)

    print("Linear SVC Classification")
    LinearSVC_classifier = SklearnClassifier(LinearSVC())
    LinearSVC_classifier.train(ktrainingData)

    print("K nearest Neighbors Classification")
    Kn_classifier = SklearnClassifier(KNeighborsClassifier(20))
    Kn_classifier.train(ktrainingData)

    print("Decision Tree Classification")
    DecisionTree_classifier = SklearnClassifier(DecisionTreeClassifier(max_depth=5))
    DecisionTree_classifier.train(ktrainingData)

    print("Random Forest Classification")
    RandomForest_classifier = SklearnClassifier(
        RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1))
    RandomForest_classifier.train(ktrainingData)

    print("Adaboost Classification")
    adaBoost_classifier = SklearnClassifier(AdaBoostClassifier())
    adaBoost_classifier.train(ktrainingData)

    # --- Classify this fold's test split with every trained classifier ---
    # Row layout: [features, true_label, pred_0 .. pred_8] — the prediction
    # columns must stay in this exact order for the metric loop below.
    print("Classifying test data")
    for record in crossValidationData[k][1]:
        final_data.append([record[0], record[1],
                           classifier.classify(record[0]),
                           MNB_classifier.classify(record[0]),
                           BernoulliNB_classifier.classify(record[0]),
                           LogisticRegression_classifier.classify(record[0]),
                           LinearSVC_classifier.classify(record[0]),
                           Kn_classifier.classify(record[0]),
                           DecisionTree_classifier.classify(record[0]),
                           RandomForest_classifier.classify(record[0]),
                           adaBoost_classifier.classify(record[0])])

    # --- Per-classifier metrics; classifier i's prediction is column i+2 ---
    for i in range(9):  # 9 active classifiers (NuSVC/MLP disabled)
        print("accuracy of classifier --- " + str(i))
        accfcst = 0      # correct predictions
        totalTweets = 0  # records scored
        truePos = 0      # predicted 1 and actually 1
        trueNeg = 0      # predicted -1 and actually -1
        actPos = 0       # actual positives
        actNeg = 0       # actual negatives
        fcstPos = 0      # predicted positives
        fcstNeg = 0      # predicted negatives
        for record in final_data:
            totalTweets += 1
            if record[1] == record[i + 2]:
                accfcst += 1
                if record[i + 2] == 1:
                    truePos += 1
                if record[i + 2] == -1:
                    trueNeg += 1
            if record[1] == 1:
                actPos += 1
            if record[1] == -1:
                actNeg += 1
            if record[i + 2] == 1:
                fcstPos += 1
            if record[i + 2] == -1:
                fcstNeg += 1
        accuracy = accfcst / totalTweets
        # Every ratio is guarded against an empty denominator.
        # (Fix: the original guarded only fcstPos/fcstNeg and would raise
        # ZeroDivisionError on a fold with no actual positives or negatives.)
        posPrec = truePos / fcstPos if fcstPos else 0
        posRec = truePos / actPos if actPos else 0
        if posPrec + posRec == 0:
            posFScore = 0
        else:
            posFScore = 2 * posPrec * posRec / (posPrec + posRec)
        negPrec = trueNeg / fcstNeg if fcstNeg else 0
        negRec = trueNeg / actNeg if actNeg else 0
        if negPrec + negRec == 0:
            negFScore = 0
        else:
            negFScore = 2 * negPrec * negRec / (negPrec + negRec)
        # (Fix: dropped the unused `classifieraccuracy = []` local from the original.)
        classifierAccuracy = [accuracy, posPrec, posRec, negPrec, negRec,
                              posFScore, negFScore]
        kAccuracyResults.append(classifierAccuracy)
    totalAccuracy.append(kAccuracyResults)