Does using the wgpu backend require Torch to be installed? #11

Open
tiero opened this issue Aug 28, 2023 · 1 comment

Comments

tiero (Contributor) commented Aug 28, 2023

I am following the example in the README on my Mac M1. When I reach the point of running it, I choose the wgpu backend and it panics. Before the refactor you did a few days ago, I was able to run this project locally on my Mac (no Torch installed whatsoever).

cargo run --release --features wgpu-backend --bin transcribe tiny_en audio16k.wav transcription.txt
warning: unused imports: `Bool`, `Int`, `activation::relu`
 --> src/model/load.rs:6:9
  |
6 |         activation::relu,
  |         ^^^^^^^^^^^^^^^^
7 |         Tensor,
8 |         Int,
  |         ^^^
9 |         Bool,
  |         ^^^^
  |
  = note: `#[warn(unused_imports)]` on by default

warning: unused import: `Conv1dRecord`
 --> src/model/mod.rs:8:45
  |
8 |     nn::{self, conv::{Conv1d, Conv1dConfig, Conv1dRecord}, PaddingConfig1d},
  |                                             ^^^^^^^^^^^^

warning: unused import: `Tokenizer`
 --> src/token.rs:4:18
  |
4 | use tokenizers::{Tokenizer, AddedToken};
  |                  ^^^^^^^^^

warning: unused import: `crate::helper::*`
 --> src/transcribe.rs:2:5
  |
2 | use crate::helper::*;
  |     ^^^^^^^^^^^^^^^^

warning: unused imports: `Float`, `Int`, `config::Config`, `self`
  --> src/transcribe.rs:9:5
   |
9  |     config::Config,
   |     ^^^^^^^^^^^^^^
...
13 |         backend::{self, Backend},
   |                   ^^^^
...
16 |         Int,
   |         ^^^
17 |         Float,
   |         ^^^^^

warning: unused variable: `n_batch`
   --> src/model/mod.rs:122:14
    |
122 |         let [n_batch, seq_len] = x.dims();
    |              ^^^^^^^ help: if this is intentional, prefix it with an underscore: `_n_batch`
    |
    = note: `#[warn(unused_variables)]` on by default

warning: unused variable: `new_text`
  --> src/transcribe.rs:38:14
   |
38 |         let (new_text, new_tokens) = mels_to_text(whisper, bpe, mel, &prev_normal_tokens[..], padding)?;
   |              ^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_new_text`

warning: unused variable: `n_channel`
   --> src/transcribe.rs:119:10
    |
119 |     let [n_channel, n_mel, n_ctx] = mels.dims();
    |          ^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_n_channel`

warning: unused variable: `start_of_prev_token`
   --> src/transcribe.rs:130:9
    |
130 |     let start_of_prev_token = bpe.special_token(SpecialToken::StartofPrev).unwrap();
    |         ^^^^^^^^^^^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_start_of_prev_token`

warning: unused variable: `n_batch`
   --> src/transcribe.rs:158:14
    |
158 |         let [n_batch, n_token, n_dict] = out.dims();
    |              ^^^^^^^ help: if this is intentional, prefix it with an underscore: `_n_batch`

warning: unused variable: `n_dict`
   --> src/transcribe.rs:158:32
    |
158 |         let [n_batch, n_token, n_dict] = out.dims();
    |                                ^^^^^^ help: if this is intentional, prefix it with an underscore: `_n_dict`

warning: unused variable: `prev_normal_tokens`
   --> src/transcribe.rs:113:92
    |
113 | ...Tokenizer, mels: Tensor<B, 3>, prev_normal_tokens: &[usize], padding: usize) -> token::Result<(St...
    |                                   ^^^^^^^^^^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_prev_normal_tokens`

warning: function `get_mel_filters` is never used
  --> src/audio.rs:66:4
   |
66 | fn get_mel_filters<B: Backend>(sample_rate: f64, n_fft: usize, n_mels: usize, htk: bool) -> Tensor<B,...
   |    ^^^^^^^^^^^^^^^
   |
   = note: `#[warn(dead_code)]` on by default

warning: function `fft_frequencies` is never used
   --> src/audio.rs:128:4
    |
128 | fn fft_frequencies<B: Backend>(sample_rate: f64, n_fft: usize) -> Tensor<B, 1> {
    |    ^^^^^^^^^^^^^^^

warning: function `test_fft_frequencies` is never used
   --> src/audio.rs:137:4
    |
137 | fn test_fft_frequencies<B: Backend>() {
    |    ^^^^^^^^^^^^^^^^^^^^

warning: function `test_mel_frequencies` is never used
   --> src/audio.rs:144:4
    |
144 | fn test_mel_frequencies<B: Backend>(htk: bool) {
    |    ^^^^^^^^^^^^^^^^^^^^

warning: function `mel_frequencies` is never used
   --> src/audio.rs:152:4
    |
152 | fn mel_frequencies<B: Backend>(n_mels: usize, fmin: f64, fmax: f64, htk: bool) -> Tensor<B, 1> {
    |    ^^^^^^^^^^^^^^^

warning: `whisper` (lib) generated 17 warnings (run `cargo fix --lib -p whisper` to apply 12 suggestions)
warning: unused import: `std::collections::HashMap`
 --> src/bin/transcribe/main.rs:1:5
  |
1 | use std::collections::HashMap;
  |     ^^^^^^^^^^^^^^^^^^^^^^^^^
  |
  = note: `#[warn(unused_imports)]` on by default

warning: unused import: `std::iter`
 --> src/bin/transcribe/main.rs:2:5
  |
2 | use std::iter;
  |     ^^^^^^^^^

warning: unused import: `whisper::helper::*`
 --> src/bin/transcribe/main.rs:5:5
  |
5 | use whisper::helper::*;
  |     ^^^^^^^^^^^^^^^^^^

warning: unused import: `whisper::token`
 --> src/bin/transcribe/main.rs:6:5
  |
6 | use whisper::token;
  |     ^^^^^^^^^^^^^^

warning: unused imports: `Data`, `Float`, `Int`, `Tensor`, `self`, `self`
  --> src/bin/transcribe/main.rs:21:9
   |
21 |         self,
   |         ^^^^
22 |         backend::{self, Backend},
   |                   ^^^^
23 |         Data,
   |         ^^^^
24 |         Tensor,
   |         ^^^^^^
25 |         Int,
   |         ^^^
26 |         Float,
   |         ^^^^^

warning: unused import: `num_traits::ToPrimitive`
  --> src/bin/transcribe/main.rs:60:5
   |
60 | use num_traits::ToPrimitive;
   |     ^^^^^^^^^^^^^^^^^^^^^^^

warning: unused import: `whisper::audio::prep_audio`
  --> src/bin/transcribe/main.rs:61:5
   |
61 | use whisper::audio::prep_audio;
   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^

warning: unused import: `SpecialToken`
  --> src/bin/transcribe/main.rs:62:37
   |
62 | use whisper::token::{Gpt2Tokenizer, SpecialToken};
   |                                     ^^^^^^^^^^^^

warning: unused variable: `duration`
  --> src/bin/transcribe/main.rs:36:9
   |
36 |     let duration = reader.duration() as usize;
   |         ^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_duration`
   |
   = note: `#[warn(unused_variables)]` on by default

warning: unused variable: `bits_per_sample`
  --> src/bin/transcribe/main.rs:39:9
   |
39 |     let bits_per_sample = spec.bits_per_sample;
   |         ^^^^^^^^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_bits_per_sample`

warning: variable does not need to be mutable
  --> src/bin/transcribe/main.rs:33:9
   |
33 |     let mut reader = hound::WavReader::open(filename)?;
   |         ----^^^^^^
   |         |
   |         help: remove this `mut`
   |
   = note: `#[warn(unused_mut)]` on by default

warning: unused variable: `tokens`
   --> src/bin/transcribe/main.rs:132:16
    |
132 |     let (text, tokens) = match waveform_to_text(&whisper, &bpe, waveform, sample_rate) {
    |                ^^^^^^ help: if this is intentional, prefix it with an underscore: `_tokens`

warning: `whisper` (bin "transcribe") generated 12 warnings (run `cargo fix --bin "transcribe"` to apply 12 suggestions)
    Finished release [optimized] target(s) in 0.28s
warning: the following packages contain code that will be rejected by a future version of Rust: nom v1.2.4, nom v3.2.1
note: to see what the problems were, use the option `--future-incompat-report`, or run `cargo report future-incompatibilities --id 2`
     Running `target/release/transcribe tiny_en audio16k.wav transcription.txt`
Loading waveform...
Loading model...
thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: Torch("Could not run 'aten::empty_strided' with arguments from the 'CUDA' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build process (if using custom build). If you are a Facebook employee using PyTorch on mobile, please visit https://fburl.com/ptmfixes for possible resolutions. 'aten::empty_strided' is only available for these backends: [CPU, MPS, Meta, QuantizedCPU, BackendSelect, Python, FuncTorchDynamicLayerBackMode, Functionalize, Named, Conjugate, Negative, ZeroTensor, ADInplaceOrView, AutogradOther, AutogradCPU, AutogradCUDA, AutogradHIP, AutogradXLA, AutogradMPS, AutogradIPU, AutogradXPU, AutogradHPU, AutogradVE, AutogradLazy, AutogradMeta, AutogradMTIA, AutogradPrivateUse1, AutogradPrivateUse2, AutogradPrivateUse3, AutogradNestedTensor, Tracer, AutocastCPU, AutocastCUDA, FuncTorchBatched, FuncTorchVmapMode, Batched, VmapMode, FuncTorchGradWrapper, PythonTLSSnapshot, FuncTorchDynamicLayerFrontMode, PythonDispatcher].\n\nCPU: registered at /Users/runner/work/pytorch/pytorch/pytorch/build/aten/src/ATen/RegisterCPU.cpp:31034 [kernel]\nMPS: registered at /Users/runner/work/pytorch/pytorch/pytorch/build/aten/src/ATen/RegisterMPS.cpp:22748 [kernel]\nMeta: registered at /Users/runner/work/pytorch/pytorch/pytorch/build/aten/src/ATen/RegisterMeta.cpp:26824 [kernel]\nQuantizedCPU: registered at /Users/runner/work/pytorch/pytorch/pytorch/build/aten/src/ATen/RegisterQuantizedCPU.cpp:929 [kernel]\nBackendSelect: registered at /Users/runner/work/pytorch/pytorch/pytorch/build/aten/src/ATen/RegisterBackendSelect.cpp:726 [kernel]\nPython: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/core/PythonFallbackKernel.cpp:144 [backend fallback]\nFuncTorchDynamicLayerBackMode: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/functorch/DynamicLayer.cpp:491 [backend fallback]\nFunctionalize: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/FunctionalizeFallbackKernel.cpp:280 [backend fallback]\nNamed: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/core/NamedRegistrations.cpp:7 [backend fallback]\nConjugate: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/ConjugateFallback.cpp:21 [kernel]\nNegative: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/NegateFallback.cpp:23 [kernel]\nZeroTensor: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/ZeroTensorFallback.cpp:90 [kernel]\nADInplaceOrView: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/core/VariableFallbackKernel.cpp:63 [backend fallback]\nAutogradOther: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradCPU: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradCUDA: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradHIP: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradXLA: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradMPS: registered at 
/Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradIPU: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradXPU: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradHPU: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradVE: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradLazy: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradMeta: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradMTIA: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradPrivateUse1: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradPrivateUse2: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradPrivateUse3: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nAutogradNestedTensor: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/VariableType_2.cpp:17488 [autograd kernel]\nTracer: registered at /Users/runner/work/pytorch/pytorch/pytorch/torch/csrc/autograd/generated/TraceType_2.cpp:16726 [kernel]\nAutocastCPU: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/autocast_mode.cpp:487 [backend fallback]\nAutocastCUDA: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/autocast_mode.cpp:354 [backend fallback]\nFuncTorchBatched: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/functorch/LegacyBatchingRegistrations.cpp:815 [backend fallback]\nFuncTorchVmapMode: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/functorch/VmapModeRegistrations.cpp:28 [backend fallback]\nBatched: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/LegacyBatchingRegistrations.cpp:1073 [backend fallback]\nVmapMode: fallthrough registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/VmapModeRegistrations.cpp:33 [backend fallback]\nFuncTorchGradWrapper: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/functorch/TensorWrapper.cpp:210 [backend fallback]\nPythonTLSSnapshot: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/core/PythonFallbackKernel.cpp:152 [backend fallback]\nFuncTorchDynamicLayerFrontMode: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/functorch/DynamicLayer.cpp:487 [backend fallback]\nPythonDispatcher: registered at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/core/PythonFallbackKernel.cpp:148 [backend fallback]\n\nException raised from reportError at /Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/core/dispatch/OperatorEntry.cpp:548 (most recent call first):\nframe #0: c10::impl::OperatorEntry::reportError(c10::DispatchKey) const + 
588 (0x10bef7020 in libtorch_cpu.dylib)\nframe #1: c10::impl::OperatorEntry::lookup(c10::DispatchKeySet) const + 164 (0x10bbbfd4c in libtorch_cpu.dylib)\nframe #2: at::Tensor c10::Dispatcher::redispatch<at::Tensor, c10::ArrayRef<c10::SymInt>, c10::ArrayRef<c10::SymInt>, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool> >(c10::TypedOperatorHandle<at::Tensor (c10::ArrayRef<c10::SymInt>, c10::ArrayRef<c10::SymInt>, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>)> const&, c10::DispatchKeySet, c10::ArrayRef<c10::SymInt>, c10::ArrayRef<c10::SymInt>, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>) const + 88 (0x10cd1a6d4 in libtorch_cpu.dylib)\nframe #3: at::_ops::empty_strided::redispatch(c10::DispatchKeySet, c10::ArrayRef<c10::SymInt>, c10::ArrayRef<c10::SymInt>, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>) + 172 (0x10cc3af58 in libtorch_cpu.dylib)\nframe #4: at::_ops::empty_strided::call(c10::ArrayRef<c10::SymInt>, c10::ArrayRef<c10::SymInt>, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>) + 300 (0x10cc3aaec in libtorch_cpu.dylib)\nframe #5: at::native::_to_copy(at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>) + 2624 (0x10c3c5244 in libtorch_cpu.dylib)\nframe #6: at::_ops::_to_copy::redispatch(c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>) + 188 (0x10c8f9f70 in libtorch_cpu.dylib)\nframe #7: at::_ops::_to_copy::redispatch(c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>) + 188 (0x10c8f9f70 in libtorch_cpu.dylib)\nframe #8: c10::impl::wrap_kernel_functor_unboxed_<c10::impl::detail::WrapFunctionIntoFunctor_<c10::CompileTimeFunctionPointer<at::Tensor (c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>), &(torch::autograd::VariableType::(anonymous namespace)::_to_copy(c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>))>, at::Tensor, c10::guts::typelist::typelist<c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat> > >, at::Tensor (c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>)>::call(c10::OperatorKernel*, c10::DispatchKeySet, at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>) + 1104 (0x10e673864 in libtorch_cpu.dylib)\nframe #9: at::_ops::_to_copy::call(at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, 
c10::optional<c10::Device>, c10::optional<bool>, bool, c10::optional<c10::MemoryFormat>) + 340 (0x10c8f9c30 in libtorch_cpu.dylib)\nframe #10: at::_ops::to_dtype_layout::call(at::Tensor const&, c10::optional<c10::ScalarType>, c10::optional<c10::Layout>, c10::optional<c10::Device>, c10::optional<bool>, bool, bool, c10::optional<c10::MemoryFormat>) + 348 (0x10cacdc30 in libtorch_cpu.dylib)\nframe #11: atg_to + 104 (0x101004cdc in transcribe)\nframe #12: tch::wrappers::tensor_generated::_$LT$impl$u20$tch..wrappers..tensor..Tensor$GT$::to::hfeb248388ea0a533 + 88 (0x100ff6ba8 in transcribe)\nframe #13: burn_tch::ops::tensor::_$LT$impl$u20$burn_tensor..tensor..ops..tensor..TensorOps$LT$burn_tch..backend..TchBackend$LT$E$GT$$GT$$u20$for$u20$burn_tch..backend..TchBackend$LT$E$GT$$GT$::to_device::h39a91cf7f00bf6ef + 64 (0x100fc4108 in transcribe)\nframe #14: _$LT$burn_core..nn..conv..conv1d..Conv1d$LT$B$GT$$u20$as$u20$burn_core..module..base..Module$LT$B$GT$$GT$::map::h58a9338e15b1b63e + 64 (0x100fbc49c in transcribe)\nframe #15: _$LT$whisper..model..Whisper$LT$B$GT$$u20$as$u20$burn_core..module..base..Module$LT$B$GT$$GT$::map::h8a2e896e5556644c + 104 (0x100fe515c in transcribe)\nframe #16: transcribe::main::hfd8ee1d1f4f6714c + 1044 (0x100fc64d0 in transcribe)\nframe #17: std::sys_common::backtrace::__rust_begin_short_backtrace::h847fc7e56d1202dc + 12 (0x100fec7b8 in transcribe)\nframe #18: std::rt::lang_start::_$u7b$$u7b$closure$u7d$$u7d$::h9b3e7ad23a57bf45 + 16 (0x100fec7d0 in transcribe)\nframe #19: std::rt::lang_start_internal::hdd06e3566639fc5b + 648 (0x1013301d4 in transcribe)\nframe #20: main + 52 (0x100fc6b20 in transcribe)\nframe #21: start + 520 (0x1019dd08c in dyld)\n")', /Users/tiero/.cargo/registry/src/index.crates.io-6f17d22bba15001f/tch-0.13.0/src/wrappers/tensor_generated.rs:17243:27
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
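The backtrace frames (`atg_to` from tch, `burn_tch::ops::tensor::...::to_device`) suggest the tch/libtorch backend is still being exercised despite `--features wgpu-backend`, and that the requested tch device is CUDA, for which libtorch's macOS build has no kernels. For illustration, a minimal sketch of feature-gated backend selection in a burn binary (type names assumed from burn ~0.8; this is not the repo's actual code):

```rust
// Sketch only: feature-gated backend selection for a burn binary.
// Type names are assumed from burn ~0.8 and may differ across versions.

#[cfg(feature = "wgpu-backend")]
mod chosen {
    use burn_wgpu::{AutoGraphicsApi, WgpuBackend, WgpuDevice};
    pub type Backend = WgpuBackend<AutoGraphicsApi, f32, i32>;
    pub fn device() -> WgpuDevice {
        WgpuDevice::default()
    }
}

#[cfg(not(feature = "wgpu-backend"))]
mod chosen {
    use burn_tch::{TchBackend, TchDevice};
    pub type Backend = TchBackend<f32>;
    pub fn device() -> TchDevice {
        // Hard-coding TchDevice::Cuda(0) here would reproduce the panic
        // above on a Mac: libtorch's macOS build has no CUDA kernel for
        // `aten::empty_strided`.
        TchDevice::Cpu
    }
}

fn main() {
    // Load the model onto whichever device the features selected, e.g.:
    // let whisper: Whisper<chosen::Backend> = load_model("tiny_en", &chosen::device());
    let _device = chosen::device();
}
```

If the `cfg` logic in such a setup is wrong (or the tch path is compiled unconditionally), the binary silently falls through to the torch backend even when `--features wgpu-backend` is passed, which matches the behavior reported here.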
Gadersd (Owner) commented Aug 28, 2023

There was a bug that caused wgpu not to be used even when specified. Hopefully it works now.
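A suggestion beyond what the thread covers: one quick way to check that a wgpu-only build really avoids libtorch is to confirm `tch` is absent from the resolved dependency tree:

```
cargo tree --no-default-features --features wgpu-backend | grep tch
```

No output means `tch` (and therefore Torch) is not part of that build; if it still appears, the feature gating is not actually excluding the torch backend.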
