-
Notifications
You must be signed in to change notification settings - Fork 0
/
evaluate.py
119 lines (105 loc) · 3.24 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
'''
Created on Apr 15, 2016
Evaluate the performance of Top-K recommendation:
Protocol: leave-1-out evaluation
Measures: Hit Ratio and NDCG
(more details are in: Xiangnan He, et al. Fast Matrix Factorization for Online Recommendation with Implicit Feedback. SIGIR'16)
@author: hexiangnan
'''
import math
import heapq # for retrieval topK
import multiprocessing
import numpy as np
from time import time
import tensorflow as tf
# Global variables that are shared across processes
# (populated by evaluate_model; read by eval_one_rating, including the
# worker processes forked by multiprocessing.Pool).
_model = None  # model object whose `predict` op is run in the TF session
_testRatings = None  # list of (user, held-out positive item) test pairs
_testNegatives = None  # per-test list of sampled negative item ids
_K = None  # cutoff of the top-K rank list
_sess = None  # TensorFlow session used to evaluate _model.predict
_input_user = None  # feed target for the user-id column vector
_input_item = None  # feed target for the candidate item-id column vector
_rating_matrix = None  # feed target for the training matrix (_train)
_train = None  # training data fed to _rating_matrix -- exact shape not visible here
def evaluate_model(model, testRatings, testNegatives, K, num_thread, sess, input_user, input_item, rating_matrix, train):
    """
    Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation.

    Publishes the arguments into module-level globals so eval_one_rating
    (and any multiprocessing workers) can read them, then scores every
    leave-one-out test case either in a process pool or serially.

    Returns:
        (hits, ndcgs): two parallel lists with one HR value and one NDCG
        value per entry of testRatings.
    """
    global _model
    global _testRatings
    global _testNegatives
    global _K
    global _sess
    global _input_user
    global _input_item
    global _rating_matrix
    global _train
    _model = model
    _testRatings = testRatings
    _testNegatives = testNegatives
    _K = K
    _sess = sess
    _input_user = input_user
    _input_item = input_item
    _rating_matrix = rating_matrix
    _train = train
    hits, ndcgs = [], []
    if num_thread > 1:  # Multi-thread
        pool = multiprocessing.Pool(processes=num_thread)
        res = pool.map(eval_one_rating, range(len(_testRatings)))
        pool.close()
        pool.join()
        hits = [r[0] for r in res]
        ndcgs = [r[1] for r in res]
        return (hits, ndcgs)
    # Single thread
    num_cases = len(_testRatings)
    # Progress is printed roughly every 1% of cases; max(..., 1) guards
    # against num_cases < 100, where the original step of
    # len(_testRatings)//100 == 0 raised ZeroDivisionError.
    step = max(num_cases // 100, 1)
    time_1 = time()
    for idx in range(num_cases):
        if idx % step == 0:
            # The original applied % to the *result* of print(), which is a
            # TypeError on Python 3; format inside the call instead.
            print("%d/100 done...,[%.1f s]" % (idx // step, time() - time_1))
            time_1 = time()
        (hr, ndcg) = eval_one_rating(idx)
        hits.append(hr)
        ndcgs.append(ndcg)
    return (hits, ndcgs)
def eval_one_rating(idx):
    """
    Score one leave-one-out test case.

    Reads the shared globals set by evaluate_model, runs the model's
    predict op over the sampled negatives plus the held-out positive,
    and ranks the candidates.

    Returns:
        (hr, ndcg): hit ratio (0/1) and NDCG of the held-out item within
        the top-_K ranked candidates.
    """
    rating = _testRatings[idx]
    u = rating[0]
    gtItem = rating[1]
    # Candidate set = sampled negatives + the held-out positive.
    # Build a fresh list instead of the original append()/pop() on the
    # shared _testNegatives entry: an exception between the two calls
    # would have left the global list permanently corrupted.
    items = _testNegatives[idx] + [gtItem]
    # Get prediction scores for all candidates in one batched run.
    users = np.full(len(items), u, dtype='int32')
    predictions = _sess.run(_model.predict,
                            feed_dict={_input_user: np.expand_dims(users, axis=1),
                                       _input_item: np.expand_dims(np.array(items), axis=1),
                                       _rating_matrix: _train})
    # assumes predictions is index-aligned with `items` -- TODO confirm
    map_item_score = dict(zip(items, predictions))
    # Evaluate top-K rank list
    ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get)
    hr = getHitRatio(ranklist, gtItem)
    ndcg = getNDCG(ranklist, gtItem)
    return (hr, ndcg)
def getHitRatio(ranklist, gtItem):
    """Return 1 if the ground-truth item appears anywhere in the rank list, else 0."""
    return 1 if gtItem in ranklist else 0
def getNDCG(ranklist, gtItem):
    """
    Return the NDCG of a single held-out item within the rank list.

    With exactly one relevant item, NDCG reduces to 1/log2(rank + 2) for a
    0-based hit position, and 0 when the item is missing from the list.
    (`xrange` replaced with enumerate: identical on Python 2, and does not
    raise NameError on Python 3.)
    """
    for i, item in enumerate(ranklist):
        if item == gtItem:
            # log(2)/log(i+2) == 1/log2(i+2): DCG of one hit at position i.
            return math.log(2) / math.log(i + 2)
    return 0