utils.py
import math
from typing import List

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def input_transpose(sents, pad_token):
    """Transpose a batch of sentences from batch-major to time-major order,
    padding shorter sentences with `pad_token`."""
    max_len = max(len(s) for s in sents)
    batch_size = len(sents)

    sents_t = []
    for i in range(max_len):
        sents_t.append([sents[k][i] if len(sents[k]) > i else pad_token for k in range(batch_size)])

    return sents_t


def read_corpus(file_path, source):
    """Read a whitespace-tokenized corpus, one sentence per line; wrap
    target-side sentences with the sentence markers <s> and </s>."""
    data = []
    with open(file_path) as f:
        for line in f:
            sent = line.strip().split(' ')
            # only append <s> and </s> to the target sentence
            if source == 'tgt':
                sent = ['<s>'] + sent + ['</s>']
            data.append(sent)

    return data


def batch_iter(data, batch_size, shuffle=False):
    """Yield batches of (src_sents, tgt_sents) from `data`, a list of
    (source sentence, target sentence) pairs."""
    batch_num = math.ceil(len(data) / batch_size)
    index_array = list(range(len(data)))

    if shuffle:
        np.random.shuffle(index_array)

    for i in range(batch_num):
        indices = index_array[i * batch_size: (i + 1) * batch_size]
        examples = [data[idx] for idx in indices]

        # sort by descending source sentence length (convenient for packed RNN inputs)
        examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)
        src_sents = [e[0] for e in examples]
        tgt_sents = [e[1] for e in examples]

        yield src_sents, tgt_sents
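
# A minimal sketch of how these helpers are typically wired together during
# training; the file names and batch size below are hypothetical and not part
# of this module:
#
#     train_src = read_corpus('data/train.de', source='src')
#     train_tgt = read_corpus('data/train.en', source='tgt')
#     train_data = list(zip(train_src, train_tgt))
#     for src_sents, tgt_sents in batch_iter(train_data, batch_size=32, shuffle=True):
#         ...  # encode src_sents, score tgt_sents, take a gradient step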


class LabelSmoothingLoss(nn.Module):
    """
    label smoothing

    Code adapted from OpenNMT-py
    """
    def __init__(self, label_smoothing, tgt_vocab_size, padding_idx=0):
        assert 0.0 < label_smoothing <= 1.0
        self.padding_idx = padding_idx
        super(LabelSmoothingLoss, self).__init__()

        smoothing_value = label_smoothing / (tgt_vocab_size - 2)  # -1 for pad, -1 for gold-standard word
        one_hot = torch.full((tgt_vocab_size,), smoothing_value)
        one_hot[self.padding_idx] = 0
        self.register_buffer('one_hot', one_hot.unsqueeze(0))

        self.confidence = 1.0 - label_smoothing
    def forward(self, output, target):
        """
        output (FloatTensor): batch_size x tgt_vocab_size, log-probabilities over the target vocabulary
        target (LongTensor): batch_size
        """
        # (batch_size, tgt_vocab_size)
        true_dist = self.one_hot.repeat(target.size(0), 1)

        # fill in gold-standard word position with confidence value
        true_dist.scatter_(1, target.unsqueeze(-1), self.confidence)

        # fill padded entries with zeros
        true_dist.masked_fill_((target == self.padding_idx).unsqueeze(-1), 0.)

        # per-example negative KL divergence between the smoothed target
        # distribution and the model's log-probabilities
        loss = -F.kl_div(output, true_dist, reduction='none').sum(-1)

        return loss
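

# A small self-contained check of LabelSmoothingLoss. This is only a sketch:
# the vocabulary size, batch size, smoothing value and target indices below
# are made up for illustration and are not part of the original module.
if __name__ == '__main__':
    vocab_size, batch_size = 10, 4
    criterion = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=vocab_size, padding_idx=0)

    # random log-probabilities over the target vocabulary, one row per example
    log_probs = F.log_softmax(torch.randn(batch_size, vocab_size), dim=-1)
    # gold word indices; index 0 is the padding index and should contribute zero loss
    target = torch.tensor([3, 7, 0, 1])

    per_example = criterion(log_probs, target)
    print(per_example)        # one value per example; the padded entry is 0
    print(per_example.sum())  # a training loop would typically reduce over the batch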