I got an error when I used AudioClassificationData.from_files. How can I change the type of tensors? #744
Unanswered
aykutcayir34 asked this question in Q&A
Replies: 1 comment · 8 replies
-
Hi @aykutcayir34, my guess is that something is wrong in the transforms. We load spectrogram images as numpy arrays first and then convert them to tensors, which can sometimes turn them into double tensors. Could you share the full code snippet that produces the error? Thanks 😃
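To illustrate the mechanism the reply describes (this is not the actual Flash internals, just the general numpy-to-tensor behavior): numpy arrays default to float64, and torch.from_numpy preserves that dtype, so a spectrogram loaded this way becomes a double tensor unless it is cast explicitly. A minimal sketch with a hypothetical spectrogram shape:

import numpy as np
import torch

spec = np.random.rand(3, 64, 64)   # numpy defaults to float64
x = torch.from_numpy(spec)         # dtype is preserved
print(x.dtype)                     # torch.float64 (double)

x = x.float()                      # explicit cast to float32, matching the model weights
print(x.dtype)                     # torch.float32

In Flash the cast would have to happen inside the preprocessing transforms, and the exact hook depends on the Flash version, so treat the snippet above as the mechanism rather than a drop-in fix.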
-
I got an error when I used AudioClassificationData.from_files. How can I change the type of the tensors? I think I need to change the type of the input tensor from double to float. I need help :(
RuntimeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_9060/4058120394.py in <module>
1 trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
----> 2 trainer.finetune(model, datamodule=datamodule, strategy=FreezeUnfreeze(unfreeze_epoch=1))
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\trainer.py in finetune(self, model, train_dataloader, val_dataloaders, datamodule, strategy)
163 """
164 self._resolve_callbacks(model, strategy)
--> 165 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
166
167 def _resolve_callbacks(self, model, strategy):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader)
550 self.checkpoint_connector.resume_start()
551
--> 552 self._run(model)
553
554 assert self.state.stopped
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run(self, model)
915
916 # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 917 self._dispatch()
918
919 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _dispatch(self)
983 self.accelerator.start_predicting(self)
984 else:
--> 985 self.accelerator.start_training(self)
986
987 def run_stage(self):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in start_training(self, trainer)
90
91 def start_training(self, trainer: "pl.Trainer") -> None:
---> 92 self.training_type_plugin.start_training(trainer)
93
94 def start_evaluating(self, trainer: "pl.Trainer") -> None:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\plugins\training_type\training_type_plugin.py in start_training(self, trainer)
159 def start_training(self, trainer: "pl.Trainer") -> None:
160 # double dispatch to initiate the training loop
--> 161 self._results = trainer.run_stage()
162
163 def start_evaluating(self, trainer: "pl.Trainer") -> None:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in run_stage(self)
993 if self.predicting:
994 return self._run_predict()
--> 995 return self._run_train()
996
997 def _pre_training_routine(self):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run_train(self)
1028 self.progress_bar_callback.disable()
1029
-> 1030 self._run_sanity_check(self.lightning_module)
1031
1032 # enable train mode
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\trainer.py in _run_sanity_check(self, ref_model)
91 def _run_sanity_check(self, ref_model):
92 if hasattr(super(), "_run_sanity_check"):
---> 93 super()._run_sanity_check(ref_model)
94
95 self.run_sanity_check(ref_model)
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run_sanity_check(self, ref_model)
1112 # run eval step
1113 with torch.no_grad():
-> 1114 self._evaluation_loop.run()
1115
1116 self.on_sanity_check_end()
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\base.py in run(self, *args, **kwargs)
109 try:
110 self.on_advance_start(*args, **kwargs)
--> 111 self.advance(*args, **kwargs)
112 self.on_advance_end()
113 self.iteration_count += 1
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\dataloader\evaluation_loop.py in advance(self, *args, **kwargs)
108 dl_max_batches = self._max_batches[self.current_dataloader_idx]
109
--> 110 dl_outputs = self.epoch_loop.run(
111 dataloader_iter, self.current_dataloader_idx, dl_max_batches, self.num_dataloaders
112 )
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\base.py in run(self, *args, **kwargs)
109 try:
110 self.on_advance_start(*args, **kwargs)
--> 111 self.advance(*args, **kwargs)
112 self.on_advance_end()
113 self.iteration_count += 1
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\epoch\evaluation_epoch_loop.py in advance(self, dataloader_iter, dataloader_idx, dl_max_batches, num_dataloaders)
108 # lightning module methods
109 with self.trainer.profiler.profile("evaluation_step_and_end"):
--> 110 output = self.evaluation_step(batch, batch_idx, dataloader_idx)
111 output = self.evaluation_step_end(output)
112
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\epoch\evaluation_epoch_loop.py in evaluation_step(self, batch, batch_idx, dataloader_idx)
152 self.trainer.lightning_module._current_fx_name = "validation_step"
153 with self.trainer.profiler.profile("validation_step"):
--> 154 output = self.trainer.accelerator.validation_step(step_kwargs)
155
156 return output
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in validation_step(self, step_kwargs)
209 """
210 with self.precision_plugin.val_step_context(), self.training_type_plugin.val_step_context():
--> 211 return self.training_type_plugin.validation_step(*step_kwargs.values())
212
213 def test_step(self, step_kwargs: Dict[str, Union[Any, int]]) -> Optional[STEP_OUTPUT]:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\plugins\training_type\training_type_plugin.py in validation_step(self, *args, **kwargs)
176
177 def validation_step(self, *args, **kwargs):
--> 178 return self.model.validation_step(*args, **kwargs)
179
180 def test_step(self, *args, **kwargs):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\image\classification\model.py in validation_step(self, batch, batch_idx)
124 def validation_step(self, batch: Any, batch_idx: int) -> Any:
125 batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
--> 126 return super().validation_step(batch, batch_idx)
127
128 def test_step(self, batch: Any, batch_idx: int) -> Any:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\model.py in validation_step(self, batch, batch_idx)
385
386 def validation_step(self, batch: Any, batch_idx: int) -> None:
--> 387 output = self.step(batch, batch_idx, self.val_metrics)
388 self.log_dict({f"val_{k}": v for k, v in output["logs"].items()}, on_step=False, on_epoch=True, prog_bar=True)
389
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\model.py in step(self, batch, batch_idx, metrics)
330 """
331 x, y = batch
--> 332 y_hat = self(x)
333 y, y_hat = self.apply_filtering(y, y_hat)
334 output = {"y_hat": y_hat}
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\image\classification\model.py in forward(self, x)
137
138 def forward(self, x) -> torch.Tensor:
--> 139 x = self.backbone(x)
140 if x.dim() == 4:
141 x = x.mean(-1).mean(-1)
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\timm\models\efficientnet.py in forward(self, x)
475
476 def forward(self, x):
--> 477 x = self.forward_features(x)
478 x = self.global_pool(x)
479 if self.drop_rate > 0.:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\timm\models\efficientnet.py in forward_features(self, x)
465
466 def forward_features(self, x):
--> 467 x = self.conv_stem(x)
468 x = self.bn1(x)
469 x = self.act1(x)
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\conv.py in forward(self, input)
397
398 def forward(self, input: Tensor) -> Tensor:
--> 399 return self._conv_forward(input, self.weight, self.bias)
400
401 class Conv3d(_ConvNd):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\conv.py in _conv_forward(self, input, weight, bias)
393 weight, bias, self.stride,
394 _pair(0), self.dilation, self.groups)
--> 395 return F.conv2d(input, weight, bias, self.stride,
396 self.padding, self.dilation, self.groups)
397
RuntimeError: expected scalar type Double but found Float
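For reference, the error in the final frame can be reproduced in isolation: a Conv2d layer's weights are float32 by default, so feeding it a float64 input triggers the same dtype mismatch. A minimal sketch with hypothetical shapes, not the author's actual model:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3)               # weights default to float32
x = torch.randn(1, 3, 32, 32, dtype=torch.float64)  # double input, as produced by from_numpy

try:
    conv(x)                                         # mismatched dtypes
except RuntimeError as e:
    print(e)                                        # expected scalar type Double but found Float

y = conv(x.float())                                 # casting the input resolves it
print(y.dtype)                                      # torch.float32

Casting the input to float32 (rather than calling model.double()) is usually the cheaper fix, since it keeps the pretrained backbone weights in their native precision.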