loader.py (forked from ASSERT-KTH/RewardRepair)
import torch
from torch.utils.data import Dataset


class GeneratorDataset(Dataset):
    """Tokenizes (bugid, buggy, patch) triples for generator training."""

    def __init__(self, dataframe, tokenizer, source_len, summ_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.source_len = source_len
        self.summ_len = summ_len
        self.bugid = self.data.bugid
        self.buggy = self.data.buggy
        self.patch = self.data.patch

    def __len__(self):
        return len(self.patch)

    def __getitem__(self, index):
        # Collapse whitespace so the tokenizer sees single-space-separated code.
        buggy = ' '.join(str(self.buggy[index]).split())
        patch = ' '.join(str(self.patch[index]).split())

        # padding='max_length' and truncation=True replace the deprecated
        # pad_to_max_length=True flag.
        source = self.tokenizer.batch_encode_plus(
            [buggy], max_length=self.source_len, padding='max_length',
            truncation=True, return_tensors='pt')
        target = self.tokenizer.batch_encode_plus(
            [patch], max_length=self.summ_len, padding='max_length',
            truncation=True, return_tensors='pt')

        source_ids = source['input_ids'].squeeze()
        source_mask = source['attention_mask'].squeeze()
        target_ids = target['input_ids'].squeeze()

        return {
            'bugid': torch.tensor(self.bugid[index], dtype=torch.long),
            'source_ids': source_ids.to(dtype=torch.long),
            'source_mask': source_mask.to(dtype=torch.long),
            'target_ids': target_ids.to(dtype=torch.long),
            'target_ids_y': target_ids.to(dtype=torch.long)
        }
class CustomDataset(Dataset):
    """Tokenizes (buggy, patch) pairs for training and evaluation."""

    def __init__(self, dataframe, tokenizer, source_len, summ_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.source_len = source_len
        self.summ_len = summ_len
        self.buggy = self.data.buggy
        self.patch = self.data.patch

    def __len__(self):
        return len(self.patch)

    def __getitem__(self, index):
        # Collapse whitespace so the tokenizer sees single-space-separated code.
        buggy = ' '.join(str(self.buggy[index]).split())
        patch = ' '.join(str(self.patch[index]).split())

        source = self.tokenizer.batch_encode_plus(
            [buggy], max_length=self.source_len, padding='max_length',
            truncation=True, return_tensors='pt')
        target = self.tokenizer.batch_encode_plus(
            [patch], max_length=self.summ_len, padding='max_length',
            truncation=True, return_tensors='pt')

        source_ids = source['input_ids'].squeeze()
        source_mask = source['attention_mask'].squeeze()
        target_ids = target['input_ids'].squeeze()

        return {
            'source_ids': source_ids.to(dtype=torch.long),
            'source_mask': source_mask.to(dtype=torch.long),
            'target_ids': target_ids.to(dtype=torch.long),
            'target_ids_y': target_ids.to(dtype=torch.long)
        }
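

# --- Usage sketch (illustrative, not part of the upstream file) ---
# A minimal example of feeding CustomDataset to a DataLoader. The T5 checkpoint,
# the toy DataFrame contents, and the length limits below are assumptions chosen
# for illustration; the real training scripts define their own values.
if __name__ == '__main__':
    import pandas as pd
    from torch.utils.data import DataLoader
    from transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained('t5-small')  # assumed checkpoint
    df = pd.DataFrame({
        'buggy': ['int a = b + 1 ;'],
        'patch': ['int a = b - 1 ;'],
    })
    dataset = CustomDataset(df, tokenizer, source_len=512, summ_len=128)
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    for batch in loader:
        print(batch['source_ids'].shape, batch['target_ids'].shape)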