import unicodedata
import json
import joblib
import string
import nltk
import numpy as np
from scipy import sparse
from sklearn.preprocessing import LabelEncoder
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm.autonotebook import tqdm
FEVER_LABELS = {'SUPPORTS': 0, 'REFUTES': 1}
def tokenize_helper(inp):
    # Helper for mapping over (encoder, claim) pairs in parallel; delegates to
    # ClaimEncoder.tokenize_claim.
    encoder, claim = inp[0], inp[1]
    return encoder.tokenize_claim(claim)
class ClaimEncoder(object):
def __init__(self):
self.feature_encoder = joblib.load("feature_encoder.pkl")
self.encoder = joblib.load("encoder.pkl")
def tokenize_claim(self, c):
"""
Input: a string that represents a single claim
        Output: a list of sparse 3 x |vocabulary| matrices, one per word trigram,
            with a 1 wherever a letter trigram of that word occurs.
"""
encoded_vector = []
c = preprocess_article_name(c)
c = "! {} !".format(c)
for ngram in nltk.ngrams(nltk.word_tokenize(c), 3):
            arr = sparse.lil_matrix((3, len(self.encoder.classes_)))
for idx, word in enumerate(ngram):
for letter_gram in nltk.ngrams("#" + word + "#", 3):
s = "".join(letter_gram)
if s in self.feature_encoder:
letter_idx = self.feature_encoder[s]
else:
                        # Unseen letter-grams fall back to the uppercase 'OOV' sentinel; real
                        # tokens are lowercased, so 'OOV' never occurs as a genuine trigram.
letter_idx = self.feature_encoder['OOV']
arr[idx, letter_idx] = 1
encoded_vector.append(arr)
return encoded_vector
def create_encodings(self, claims, train_dict, write_to_file=False):
processed_claims = generate_all_tokens(claims)
all_evidence = []
for query in tqdm(train_dict):
all_evidence.extend([preprocess_article_name(i) for i in query['evidence']])
processed_claims.extend(generate_all_tokens(list(set(all_evidence))))
possible_tokens = list(set(processed_claims))
possible_tokens.append("OOV")
self.encoder = LabelEncoder()
self.encoder.fit(np.array(sorted(possible_tokens)))
self.feature_encoder = {}
for idx, e in tqdm(enumerate(self.encoder.classes_)):
self.feature_encoder[e] = idx
if write_to_file:
joblib.dump(self.feature_encoder, "feature_encoder.pkl")
joblib.dump(self.encoder, "encoder.pkl")
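def _demo_claim_encoder():
    # Illustrative sketch, not part of the original module. Assumes the pickled
    # files "feature_encoder.pkl" and "encoder.pkl" exist in the working
    # directory; they are produced by ClaimEncoder.create_encodings with
    # write_to_file=True.
    encoder = ClaimEncoder()
    matrices = encoder.tokenize_claim("Colin Kaepernick became a free agent")
    # tokenize_claim yields one sparse 3 x |vocabulary| matrix per word trigram.
    for m in matrices:
        assert m.shape == (3, len(encoder.encoder.classes_))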
def generate_all_tokens(arr):
all_tokens = []
for unprocessed_claim in tqdm(arr):
c = preprocess_article_name(unprocessed_claim)
c = "! {} !".format(c)
for word in c.split():
letter_tuples = list(nltk.ngrams("#" + word + "#", 3))
letter_grams = []
for l in letter_tuples:
letter_grams.append("".join(l))
all_tokens.extend(letter_grams)
return all_tokens
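def _demo_generate_all_tokens():
    # Illustrative sketch, not part of the original module: the letter trigrams
    # produced for a single claim. generate_all_tokens lowercases the text, adds
    # '!' sentinels around the claim, and pads each word with '#' boundary markers.
    tokens = generate_all_tokens(["Colin Kaepernick"])
    assert "#co" in tokens and "ck#" in tokens  # trigrams from "colin" / "kaepernick"
    assert "#!#" in tokens                      # sentinel token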
def extract_fever_jsonl_data(path):
'''
HELPER FUNCTION
    Extracts lists of claims, labels, articles, and a set of
all distinct claims from a given FEVER jsonl file.
Inputs:
path: path to FEVER jsonl file
Outputs:
claims: list of claims for each data point
        labels: list of labels for each claim (see FEVER_LABELS
            defined at the top of this module)
article_list: list of names of articles corresponding to
each claim
        claim_set: set of distinct claims
        claim_to_article: dict mapping each claim to the list of
            evidence article names used for it
'''
num_train = 0
total_ev = 0
claims = []
labels = []
article_list = []
claim_set = set()
claim_to_article = {}
with open(path, 'r') as f:
for item in f:
data = json.loads(item)
claim_set.add(data["claim"])
if data["verifiable"] == "VERIFIABLE":
evidence_articles = set()
for evidence in data["evidence"][0]:
article_name = unicodedata.normalize('NFC', evidence[2])
article_name = preprocess_article_name(article_name)
# Ignore evidence if the same article has
# already been used before as we are using
# the entire article and not the specified
# sentence.
if article_name in evidence_articles:
continue
else:
article_list.append(article_name)
evidence_articles.add(article_name)
claims.append(data["claim"])
labels.append(FEVER_LABELS[data["label"]])
if data['claim'] not in claim_to_article:
claim_to_article[data['claim']] = [article_name]
else:
claim_to_article[data['claim']].append(article_name)
total_ev += 1
num_train += 1
print("Num Distinct Claims", num_train)
print("Num Data Points", total_ev)
return claims, labels, article_list, claim_set, claim_to_article
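def _demo_extract_fever_jsonl_data():
    # Illustrative sketch, not part of the original module. Assumes a FEVER
    # jsonl file named "train.jsonl" (the default used by sparsify_evidences)
    # is available in the working directory.
    claims, labels, articles, claim_set, claim_to_article = extract_fever_jsonl_data("train.jsonl")
    # The three lists are parallel: claims[i] is labelled labels[i] and is
    # supported/refuted by the article named articles[i].
    assert len(claims) == len(labels) == len(articles)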
def preprocess_article_name(s):
s = s.replace("_", " ")
s = s.replace("-LRB-", "(")
s = s.replace("-RRB-", ")")
s = s.translate(str.maketrans(string.punctuation, ' '*len(string.punctuation)))
s = strip_accents(s)
s = s.replace("’", "")
s = s.replace("“", '')
s = ''.join([i if ord(i) < 128 else ' ' for i in s])
s = ' '.join(s.split())
return s.lower().rstrip()
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
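def _demo_preprocess_article_name():
    # Illustrative sketch, not part of the original module: how Wikipedia-style
    # FEVER article names are normalised (underscores, -LRB-/-RRB- brackets,
    # punctuation, accents and case).
    assert preprocess_article_name("Savages_-LRB-2012_film-RRB-") == "savages 2012 film"
    assert preprocess_article_name("Beyoncé") == "beyonce"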
def char_ngrams(s, n):
s = "#" + s + "#"
    return [s[i:i+n] for i in range(len(s) - n + 1)]
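def _demo_char_ngrams():
    # Illustrative sketch, not part of the original module: char_ngrams pads the
    # token with '#' boundary markers and slides a window of width n over it.
    assert char_ngrams("cat", 3) == ["#ca", "cat", "at#"]
    assert char_ngrams("a", 3) == ["#a#"]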
def parallel_process(array, function, n_jobs=12, use_kwargs=False, front_num=3):
"""
A parallel version of the map function with a progress bar.
Args:
array (array-like): An array to iterate over.
function (function): A python function to apply to the elements of array
        n_jobs (int, default=12): The number of cores to use
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
keyword arguments to function
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
Useful for catching bugs
Returns:
[function(array[0]), function(array[1]), ...]
"""
    # We run the first few iterations serially to catch bugs.
    front = []
    if front_num > 0:
        front = [function(**a) if use_kwargs else function(a) for a in array[:front_num]]
#If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
if n_jobs==1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
#Assemble the workers
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
#Pass the elements of array into function
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
#Print out the progress as tasks complete
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
#Get the results from the futures.
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out
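def _demo_parallel_process():
    # Illustrative sketch, not part of the original module. ProcessPoolExecutor
    # needs a picklable, module-level callable, so preprocess_article_name is
    # used as the mapped function here.
    titles = ["Colin_Kaepernick", "Savages_-LRB-2012_film-RRB-", "Beyoncé", "Telemundo"]
    cleaned = parallel_process(titles, preprocess_article_name, n_jobs=2, front_num=1)
    assert cleaned[0] == "colin kaepernick"
    assert len(cleaned) == len(titles)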
def sparsify_evidences(train, n_jobs=15, jsonl_file="train.jsonl"):
encoder = ClaimEncoder()
evidence_set = []
for fact in train:
evidence_set.extend(fact['evidence'])
_, _, _, _, claim_to_article = extract_fever_jsonl_data(jsonl_file)
flattened_claims = [item for sublist in (list(claim_to_article.values())) for item in sublist]
evidence_set.extend(flattened_claims)
evidence_set = list(set(evidence_set))
print("Total number of evidences: {}".format(len(evidence_set)))
result = joblib.Parallel(n_jobs=n_jobs, verbose=1, prefer="threads")(joblib.delayed(process)(evidence=i, encoder=encoder) for i in evidence_set)
# result = parallel_process(evidence_set, process, n_jobs=15)
evidences = {}
for e in result:
if e is not None:
k, v = e
evidences[k] = v
return evidences
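def _demo_sparsify_evidences():
    # Illustrative sketch, not part of the original module. Assumes the pickled
    # encoders used by ClaimEncoder and a FEVER "train.jsonl" file are available,
    # and that `train` is a list of dicts with an 'evidence' key, as expected by
    # sparsify_evidences.
    train = [{'evidence': ["Colin_Kaepernick", "Telemundo"]}]
    evidences = sparsify_evidences(train, n_jobs=2, jsonl_file="train.jsonl")
    # The result maps a preprocessed article name to a stacked sparse matrix of
    # its letter-trigram encodings.
    assert "colin kaepernick" in evidences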
def process(evidence, encoder):
if "http://wikipedia" in evidence:
processed = preprocess_article_name(evidence.split("http://wikipedia.org/wiki/")[1])
else:
processed = preprocess_article_name(evidence)
evidence = encoder.tokenize_claim(processed)
if len(evidence)>0:
evidence = sparse.vstack(evidence)
return processed, evidence
return None
def calculate_recall(retrieved, relevant, k=None):
"""
retrieved: a list of sorted documents that were retrieved
relevant: a list of sorted documents that are relevant
k: how many documents to consider, all by default.
"""
    if k is None:
k = len(retrieved)
return len(set(retrieved[:k]).intersection(set(relevant))) / len(set(relevant))
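def _demo_calculate_recall():
    # Illustrative sketch, not part of the original module: recall and recall@k
    # on a toy retrieval run.
    retrieved = ["doc1", "doc2", "doc3", "doc4"]
    relevant = ["doc2", "doc4", "doc9"]
    assert calculate_recall(retrieved, relevant) == 2 / 3       # 2 of 3 relevant docs retrieved
    assert calculate_recall(retrieved, relevant, k=2) == 1 / 3  # only doc2 within the top 2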