You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Traceback (most recent call last):
File "F:\project\code\python\bert4torch\examples\seq2seq\task_question_answer_generation_by_seq2seq.py", line 191, in
valid(valid_data)
File "F:\project\code\python\bert4torch\examples\seq2seq\task_question_answer_generation_by_seq2seq.py", line 165, in valid
q,a = qag.generate(d[0])
File "F:\project\code\python\bert4torch\examples\seq2seq\task_question_answer_generation_by_seq2seq.py", line 145, in generate
q_ids = self.beam_search([token_ids, segment_ids], topk=topk)[0] # 基于beam search
File "F:\project\code\python\bert4torch\bert4torch\generation.py", line 375, in beam_search
inputs = self._trans2tensors(inputs)
File "F:\project\code\python\bert4torch\bert4torch\generation.py", line 197, in trans2tensors
input_new = torch.tensor(sequence_padding(input, value=self.pad_token_id, mode=self.pad_mode), dtype=torch.long, device=self.device)
File "F:\project\code\python\bert4torch\bert4torch\snippets\data_process.py", line 196, in sequence_padding
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
File "F:\project\code\python\bert4torch\bert4torch\snippets\data_process.py", line 196, in
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
File "D:\Develop\anaconda\envs\NovelSpeech\lib\site-packages\numpy\core\fromnumeric.py", line 2024, in shape
result = asarray(a).shape
File "D:\Develop\anaconda\envs\NovelSpeech\lib\site-packages\torch_tensor.py", line 1030, in array
return self.numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
提问时请尽可能提供如下信息:
基本信息
核心代码
# 请在此处贴上你的核心代码
def generate(self, passage, topk=1, topp=0.95):
    """Generate a (question, answer) pair from *passage*.

    First randomly samples an answer span conditioned on the passage, then
    runs beam search to produce a question for that answer.

    Args:
        passage: Input text to generate a QA pair from.
        topk: Beam width for the question beam search.
        topp: Nucleus-sampling threshold for the answer sampling.

    Returns:
        Tuple ``(question, answer)`` of decoded strings.
    """
    token_ids, segment_ids = tokenizer.encode(passage, maxlen=max_p_len)
    # Sample the answer ids (returned as a tensor on self.device, possibly CUDA).
    a_ids = self.random_sample([token_ids, segment_ids], n=1, topp=topp)[0]
    # BUGFIX: move the sampled ids to host memory before splicing them into the
    # plain Python lists below. Otherwise list(a_ids) holds 0-d CUDA tensors and
    # sequence_padding() inside beam_search calls np.shape on them, raising
    # "TypeError: can't convert cuda:0 device type tensor to numpy".
    a_ids = a_ids.cpu().numpy()
    token_ids += list(a_ids)
    segment_ids += [1] * len(a_ids)
    # Beam search for the question, conditioned on passage + sampled answer.
    q_ids = self.beam_search([token_ids, segment_ids], topk=topk)[0]
    return (tokenizer.decode(q_ids.cpu().numpy()), tokenizer.decode(a_ids))
输出信息
# 请在此处贴上你的调试输出
Traceback (most recent call last):
File "F:\project\code\python\bert4torch\examples\seq2seq\task_question_answer_generation_by_seq2seq.py", line 191, in
valid(valid_data)
File "F:\project\code\python\bert4torch\examples\seq2seq\task_question_answer_generation_by_seq2seq.py", line 165, in valid
q,a = qag.generate(d[0])
File "F:\project\code\python\bert4torch\examples\seq2seq\task_question_answer_generation_by_seq2seq.py", line 145, in generate
q_ids = self.beam_search([token_ids, segment_ids], topk=topk)[0] # 基于beam search
File "F:\project\code\python\bert4torch\bert4torch\generation.py", line 375, in beam_search
inputs = self._trans2tensors(inputs)
File "F:\project\code\python\bert4torch\bert4torch\generation.py", line 197, in trans2tensors
input_new = torch.tensor(sequence_padding(input, value=self.pad_token_id, mode=self.pad_mode), dtype=torch.long, device=self.device)
File "F:\project\code\python\bert4torch\bert4torch\snippets\data_process.py", line 196, in sequence_padding
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
File "F:\project\code\python\bert4torch\bert4torch\snippets\data_process.py", line 196, in
length = np.max([np.shape(x)[:seq_dims] for x in inputs], axis=0)
File "D:\Develop\anaconda\envs\NovelSpeech\lib\site-packages\numpy\core\fromnumeric.py", line 2024, in shape
result = asarray(a).shape
File "D:\Develop\anaconda\envs\NovelSpeech\lib\site-packages\torch_tensor.py", line 1030, in array
return self.numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
自我尝试
此处请贴上你的自我尝试过程
def generate(self, passage, topk=1, topp=0.95):
    """Produce a (question, answer) pair for *passage*: sample an answer
    with nucleus sampling, then beam-search a question for it.

    Args:
        passage: Source text.
        topk: Beam width used when searching for the question.
        topp: Nucleus-sampling threshold used when sampling the answer.

    Returns:
        Tuple ``(question, answer)`` of decoded strings.
    """
    token_ids, segment_ids = tokenizer.encode(passage, maxlen=max_p_len)
    answer_ids = self.random_sample([token_ids, segment_ids], n=1, topp=topp)[0]
    # The sampled ids live on the model's device; bring them to host memory
    # so they can be appended to the plain Python id lists below.
    answer_ids = answer_ids.cpu().numpy()
    token_ids += list(answer_ids)
    segment_ids += [1] * len(answer_ids)
    question_ids = self.beam_search([token_ids, segment_ids], topk=topk)[0]
    question = tokenizer.decode(question_ids.cpu().numpy())
    answer = tokenizer.decode(answer_ids)
    return (question, answer)
The text was updated successfully, but these errors were encountered: