diff --git a/.github/workflows/build_and_deploy.yml b/.github/workflows/build_and_deploy.yml index e9b5e9eb5..9283b273d 100644 --- a/.github/workflows/build_and_deploy.yml +++ b/.github/workflows/build_and_deploy.yml @@ -61,123 +61,90 @@ jobs: includes='[ { "os": "windows-2019", - "features": "", "target": "x86_64-pc-windows-msvc", - "artifact_name": "windows-x64-cpu", + "artifact_name": "windows-x64", "c_release_format": "plain-cdylib", - "whl_local_version": "cpu", - "can_skip_in_simple_test": true - }, - { - "os": "windows-2019", - "features": "directml", - "target": "x86_64-pc-windows-msvc", - "artifact_name": "windows-x64-directml", - "c_release_format": "plain-cdylib", - "whl_local_version": "directml", + "python_whl": true, "can_skip_in_simple_test": false }, { "os": "windows-2019", - "features": "cuda", - "target": "x86_64-pc-windows-msvc", - "artifact_name": "windows-x64-cuda", - "c_release_format": "plain-cdylib", - "whl_local_version": "cuda", - "can_skip_in_simple_test": true - }, - { - "os": "windows-2019", - "features": "", "target": "i686-pc-windows-msvc", - "artifact_name": "windows-x86-cpu", - "c_release_format": "plain-cdylib", - "whl_local_version": "cpu", - "can_skip_in_simple_test": true - }, - { - "os": "ubuntu-20.04", - "features": "", - "target": "x86_64-unknown-linux-gnu", - "artifact_name": "linux-x64-cpu", + "artifact_name": "windows-x86", "c_release_format": "plain-cdylib", - "whl_local_version": "cpu", + "python_whl": true, "can_skip_in_simple_test": true }, { "os": "ubuntu-20.04", - "features": "cuda", "target": "x86_64-unknown-linux-gnu", - "artifact_name": "linux-x64-gpu", + "artifact_name": "linux-x64", "c_release_format": "plain-cdylib", - "whl_local_version": "cuda", + "python_whl": true, "can_skip_in_simple_test": false }, { "os": "ubuntu-20.04", - "features": "", "target": "aarch64-unknown-linux-gnu", - "artifact_name": "linux-arm64-cpu", + "artifact_name": "linux-arm64", "c_release_format": "plain-cdylib", - "whl_local_version": "cpu", + "python_whl": true, "can_skip_in_simple_test": true }, { "os": "ubuntu-20.04", - "features": "", "target": "aarch64-linux-android", - "artifact_name": "android-arm64-cpu", + "artifact_name": "android-arm64", "c_release_format": "plain-cdylib", + "python_whl": false, "can_skip_in_simple_test": true }, { "os": "ubuntu-20.04", - "features": "", "target": "x86_64-linux-android", - "artifact_name": "android-x86_64-cpu", + "artifact_name": "android-x86_64", "c_release_format": "plain-cdylib", + "python_whl": false, "can_skip_in_simple_test": true }, { "os": "macos-12", - "features": "", "target": "aarch64-apple-darwin", - "artifact_name": "osx-arm64-cpu", + "artifact_name": "osx-arm64", "c_release_format": "plain-cdylib", - "whl_local_version": "cpu", + "python_whl": true, "can_skip_in_simple_test": false }, { "os": "macos-12", - "features": "", "target": "x86_64-apple-darwin", - "artifact_name": "osx-x64-cpu", + "artifact_name": "osx-x64", "c_release_format": "plain-cdylib", - "whl_local_version": "cpu", + "python_whl": true, "can_skip_in_simple_test": true }, { "os": "macos-12", - "features": "", "target": "aarch64-apple-ios", "artifact_name": "ios-arm64-cpu", "c_release_format": "ios-xcframework", + "python_whl": false, "can_skip_in_simple_test": true }, { "os": "macos-12", - "features": "", "target": "aarch64-apple-ios-sim", "artifact_name": "ios-arm64-cpu-sim", "c_release_format": "ios-xcframework", + "python_whl": false, "can_skip_in_simple_test": true }, { "os": "macos-12", - "features": "", "target": 
"x86_64-apple-ios", "artifact_name": "ios-x64-cpu", "c_release_format": "ios-xcframework", + "python_whl": false, "can_skip_in_simple_test": true } ]' @@ -216,7 +183,7 @@ jobs: git -c user.name=dummy -c user.email=dummy@dummy.dummy merge FETCH_HEAD ) > /dev/null 2>&1 - name: Set up Python 3.8 - if: matrix.whl_local_version + if: matrix.python_whl uses: actions/setup-python@v5 with: python-version: "3.8" @@ -258,7 +225,7 @@ jobs: - name: set cargo version run: | cargo set-version "$VERSION" --exclude voicevox_core_python_api --exclude downloader --exclude xtask - if ${{ !!matrix.whl_local_version }}; then cargo set-version "$VERSION+"${{ matrix.whl_local_version }} -p voicevox_core_python_api; fi + if ${{ matrix.python_whl }}; then cargo set-version "$VERSION" -p voicevox_core_python_api; fi - name: cache target uses: Swatinem/rust-cache@v2 if: ${{ !inputs.is_production }} @@ -270,7 +237,7 @@ jobs: ios-xcframework) linking=link-onnxruntime ;; esac function build() { - cargo build -p voicevox_core_c_api -vv --features "$linking",${{ matrix.features }} --target ${{ matrix.target }} --release + cargo build -p voicevox_core_c_api -vv --features "$linking" --target ${{ matrix.target }} --release } if ${{ !inputs.is_production }}; then build @@ -280,7 +247,7 @@ jobs: env: RUSTFLAGS: -C panic=abort - name: build voicevox_core_python_api - if: matrix.whl_local_version + if: matrix.python_whl id: build-voicevox-core-python-api run: | rm -rf ./target/wheels @@ -288,7 +255,7 @@ jobs: poetry config virtualenvs.create false (cd crates/voicevox_core_python_api && poetry install --with dev) function build() { - maturin build --manifest-path ./crates/voicevox_core_python_api/Cargo.toml --features ${{ matrix.features }}, --target ${{ matrix.target }} --release + maturin build --manifest-path ./crates/voicevox_core_python_api/Cargo.toml --target ${{ matrix.target }} --release } if ${{ !inputs.is_production }}; then build @@ -300,7 +267,7 @@ jobs: if: contains(matrix.target, 'android') run: | function build() { - cargo build -p voicevox_core_java_api -vv --features ${{ matrix.features }}, --target ${{ matrix.target }} --release + cargo build -p voicevox_core_java_api -vv --target ${{ matrix.target }} --release } if ${{ !inputs.is_production }}; then build @@ -318,9 +285,6 @@ jobs: > "artifact/${{ env.ASSET_NAME }}/voicevox_core.h" cp -v target/${{ matrix.target }}/release/*voicevox_core.{dll,so,dylib} "artifact/${{ env.ASSET_NAME }}" || true cp -v target/${{ matrix.target }}/release/voicevox_core.dll.lib "artifact/${{ env.ASSET_NAME }}/voicevox_core.lib" || true - cp -v -n target/${{ matrix.target }}/release/{,lib}onnxruntime*.{dll,so.*,so,dylib} "artifact/${{ env.ASSET_NAME }}" || true - # libonnxruntimeについてはバージョン付のshared libraryを使用するためバージョンがついてないものを削除する - rm -f artifact/${{ env.ASSET_NAME }}/libonnxruntime.{so,dylib} cp -v README.md "artifact/${{ env.ASSET_NAME }}/README.txt" echo "${{ env.VERSION }}" > "artifact/${{ env.ASSET_NAME }}/VERSION" @@ -354,7 +318,7 @@ jobs: ${{ env.ASSET_NAME }}.zip target_commitish: ${{ github.sha }} - name: Upload Python whl to Release - if: fromJson(needs.config.outputs.deploy) && matrix.whl_local_version + if: fromJson(needs.config.outputs.deploy) && matrix.python_whl uses: softprops/action-gh-release@v2 with: prerelease: true @@ -475,17 +439,17 @@ jobs: run: cargo set-version "$VERSION" -p voicevox_core_java_api - - name: "Download artifact (android-arm64-cpu)" + - name: "Download artifact (android-arm64)" uses: actions/download-artifact@v4 with: - name: 
voicevox_core_java_api-android-arm64-cpu - path: artifact/android-arm64-cpu + name: voicevox_core_java_api-android-arm64 + path: artifact/android-arm64 - - name: "Download artifact (android-x86_64-cpu)" + - name: "Download artifact (android-x86_64)" uses: actions/download-artifact@v4 with: - name: voicevox_core_java_api-android-x86_64-cpu - path: artifact/android-x86_64-cpu + name: voicevox_core_java_api-android-x86_64 + path: artifact/android-x86_64 - name: Print tree run: tree artifact @@ -494,8 +458,8 @@ jobs: run: | rm -rf crates/voicevox_core_java_api/lib/src/main/resources/dll cat < = Lazy::new(|| { @@ -74,13 +76,17 @@ struct Args { #[arg(short, long, value_name("GIT_TAG_OR_LATEST"), default_value("latest"))] version: String, + /// ダウンロードするONNX Runtimeのバージョンの指定 + #[arg(long, value_name("GIT_TAG_OR_LATEST"), default_value("latest"))] + onnxruntime_version: String, + /// 追加でダウンロードするライブラリのバージョン #[arg(long, value_name("GIT_TAG_OR_LATEST"), default_value("latest"))] additional_libraries_version: String, /// ダウンロードするデバイスを指定する(cudaはlinuxのみ) - #[arg(value_enum, long, default_value(<&str>::from(Device::default())))] - device: Device, + #[arg(value_enum, long, num_args(1..), default_value(<&str>::from(Device::default())))] + devices: Vec, /// ダウンロードするcpuのアーキテクチャを指定する #[arg(value_enum, long, default_value(CpuArch::default_opt().map(<&str>::from)))] @@ -93,6 +99,13 @@ struct Args { #[arg(long, value_name("REPOSITORY"), default_value(DEFAULT_CORE_REPO))] core_repo: RepoName, + #[arg( + long, + value_name("REPOSITORY"), + default_value(DEFAULT_ONNXRUNTIME_BUILDER_REPO) + )] + onnxruntime_builder_repo: RepoName, + #[arg( long, value_name("REPOSITORY"), @@ -105,11 +118,14 @@ struct Args { enum DownloadTarget { Core, Models, + Onnxruntime, AdditionalLibraries, Dict, } -#[derive(Default, ValueEnum, Display, IntoStaticStr, Clone, Copy, PartialEq)] +#[derive( + Default, ValueEnum, Display, IntoStaticStr, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, +)] #[strum(serialize_all = "kebab-case")] enum Device { #[default] @@ -156,7 +172,7 @@ impl Os { } #[derive(parse_display::FromStr, parse_display::Display, Clone)] -#[from_str(regex = "(?[a-zA-Z0-9_]+)/(?[a-zA-Z0-9_]+)")] +#[from_str(regex = "(?[a-zA-Z0-9_-]+)/(?[a-zA-Z0-9_-]+)")] #[display("{owner}/{repo}")] struct RepoName { owner: String, @@ -173,13 +189,16 @@ async fn main() -> anyhow::Result<()> { min, output, version, + onnxruntime_version, additional_libraries_version, - device, + devices, cpu_arch, os, core_repo, + onnxruntime_builder_repo, additional_libraries_repo, } = Args::parse(); + let devices = devices.into_iter().collect::>(); let targets: HashSet<_> = if !only.is_empty() { assert!(exclude.is_empty() && !min); @@ -224,9 +243,9 @@ async fn main() -> anyhow::Result<()> { `additional-libraries-version`はダウンロード対象から除外されています", ); } - if device == Device::Cpu { + if devices == [Device::Cpu].into() { warn!( - "`--device`が指定されていない、もしくは`--device=cpu`が指定されていますが、\ + "`--devices`が指定されていない、もしくは`--devices=cpu`が指定されていますが、\ `additional-libraries-version`はダウンロード対象から除外されています", ); } @@ -234,44 +253,67 @@ async fn main() -> anyhow::Result<()> { let octocrab = &octocrab()?; - let core = find_gh_asset(octocrab, &core_repo, &version, |tag| { - let device = match (os, device) { - (Os::Linux, Device::Cuda) => "gpu", - (_, device) => device.into(), - }; - format!("{LIB_NAME}-{os}-{cpu_arch}-{device}-{tag}.zip") + let core = find_gh_asset(octocrab, &core_repo, &version, |tag, _| { + Ok(format!("{LIB_NAME}-{os}-{cpu_arch}-{tag}.zip")) }) .await?; - let model = 
find_gh_asset(octocrab, &core_repo, &version, |tag| { - format!("model-{tag}.zip") + let model = find_gh_asset(octocrab, &core_repo, &version, |tag, _| { + Ok(format!("model-{tag}.zip")) }) .await?; - let additional_libraries = OptionFuture::from((device != Device::Cpu).then(|| { - find_gh_asset( - octocrab, - &additional_libraries_repo, - &additional_libraries_version, - |_| { - let device = match device { - Device::Cpu => unreachable!(), - Device::Cuda => "CUDA", - Device::Directml => "DirectML", - }; - format!("{device}-{os}-{cpu_arch}.zip") - }, - ) - })) - .await - .transpose()?; + let onnxruntime = find_gh_asset( + octocrab, + &onnxruntime_builder_repo, + &onnxruntime_version, + |_, body| { + let body = body.with_context(|| "リリースノートがありません")?; + find_onnxruntime(body, os, cpu_arch, &devices) + }, + ) + .await?; + + let additional_libraries = devices + .iter() + .filter(|&&device| device != Device::Cpu) + .map(|&device| { + find_gh_asset( + octocrab, + &additional_libraries_repo, + &additional_libraries_version, + move |_, _| { + Ok({ + let device = match device { + Device::Cpu => unreachable!(), + Device::Cuda => "CUDA", + Device::Directml => "DirectML", + }; + format!("{device}-{os}-{cpu_arch}.zip") + }) + }, + ) + }) + .collect::>() + .try_collect::>() + .await?; info!("対象OS: {os}"); info!("対象CPUアーキテクチャ: {cpu_arch}"); - info!("ダウンロードデバイスタイプ: {device}"); + info!( + "ダウンロードデバイスタイプ: {}", + devices.iter().format(", "), + ); info!("ダウンロード{LIB_NAME}バージョン: {}", core.tag); - if let Some(GhAsset { tag, .. }) = &additional_libraries { - info!("ダウンロード追加ライブラリバージョン: {tag}"); + info!("ダウンロードONNX Runtimeバージョン: {}", onnxruntime.tag); + if !additional_libraries.is_empty() { + info!( + "ダウンロード追加ライブラリバージョン: {}", + additional_libraries + .iter() + .map(|GhAsset { tag, .. }| tag) + .format(", "), + ); } let progresses = MultiProgress::new(); @@ -294,8 +336,16 @@ async fn main() -> anyhow::Result<()> { &progresses, )?); } + if targets.contains(&DownloadTarget::Onnxruntime) { + tasks.spawn(download_and_extract_from_gh( + onnxruntime, + Stripping::FirstDir, + &output.join("onnxruntime"), + &progresses, + )?); + } if targets.contains(&DownloadTarget::AdditionalLibraries) { - if let Some(additional_libraries) = additional_libraries { + for additional_libraries in additional_libraries { tasks.spawn(download_and_extract_from_gh( additional_libraries, Stripping::FirstDir, @@ -348,11 +398,15 @@ async fn find_gh_asset( octocrab: &Arc, repo: &RepoName, git_tag_or_latest: &str, - asset_name: impl FnOnce(&str) -> String, + asset_name: impl FnOnce( + &str, // タグ名 + Option<&str>, // リリースノートの内容 + ) -> anyhow::Result, ) -> anyhow::Result { let Release { html_url, tag_name, + body, assets, .. } = { @@ -364,7 +418,11 @@ async fn find_gh_asset( }? }; - let asset_name = asset_name(&tag_name); + let asset_name = asset_name(&tag_name, body.as_deref()).with_context(|| { + format!( + "`{repo}`の`{tag_name}`の中から条件に合致するビルドが見つけることができませんでした", + ) + })?; let Asset { id, name, size, .. } = assets .into_iter() .find(|Asset { name, .. }| *name == asset_name) @@ -380,6 +438,82 @@ async fn find_gh_asset( }) } +/// `find_gh_asset`に用いる。 +/// +/// 候補が複数あった場合、「デバイス」の数が最も小さいもののうち最初のものを選ぶ。 +fn find_onnxruntime( + body: &str, // リリースの"body" (i.e. リリースノートの内容) + os: Os, + cpu_arch: CpuArch, + devices: &BTreeSet, +) -> anyhow::Result { + macro_rules! selector { + ($expr:expr $(,)?) 
=> {{ + static SELECTOR: Lazy = + Lazy::new(|| scraper::Selector::parse($expr).expect("should be valid")); + &SELECTOR + }}; + } + + const TARGET: &str = "table\ + [data-voicevox-onnxruntime-specs-format-version=\"1\"]\ + [data-voicevox-onnxruntime-specs-type=\"dylibs\"]"; + + comrak::parse_document(&Default::default(), body, &Default::default()) + .descendants() + .flat_map(|node| match &node.data.borrow().value { + comrak::nodes::NodeValue::HtmlBlock(comrak::nodes::NodeHtmlBlock { + literal, .. + }) => Some(scraper::Html::parse_fragment(literal)), + _ => None, + }) + .collect::>() + .iter() + .flat_map(|html_block| html_block.select(selector!(TARGET))) + .exactly_one() + .map_err(|err| match err.count() { + 0 => anyhow!("リリースノートの中に`{TARGET}`が見つかりませんでした"), + _ => anyhow!("リリースノートの中に`{TARGET}`が複数ありました"), + })? + .select(selector!("tbody > tr")) + .map(|tr| { + tr.select(selector!("td")) + .map(|td| td.text().exactly_one().ok()) + .collect::>>() + .and_then(|text| text.try_into().ok()) + .with_context(|| format!("リリースノート中の`{TARGET}`をパースできませんでした")) + }) + .collect::, _>>()? + .into_iter() + .filter(|&[spec_os, spec_cpu_arch, spec_devices, _]| { + spec_os + == match os { + Os::Windows => "Windows", + Os::Linux => "Linux", + Os::Osx => "macOS", + } + && spec_cpu_arch + == match cpu_arch { + CpuArch::X86 => "x86", + CpuArch::X64 => "x86_64", + CpuArch::Arm64 => "AArch64", + } + && devices.iter().all(|device| { + spec_devices.split('/').any(|spec_device| { + spec_device + == match device { + Device::Cpu => "CPU", + Device::Cuda => "CUDA", + Device::Directml => "DirectML", + } + }) + }) + }) + .min_by_key(|&[.., spec_devices, _]| spec_devices.split('/').count()) + .map(|[.., name]| name.to_owned()) + .with_context(|| "指定されたOS, アーキテクチャ, デバイスを含むものが見つかりませんでした") +} + fn download_and_extract_from_gh( GhAsset { octocrab, diff --git a/crates/voicevox_core/Cargo.toml b/crates/voicevox_core/Cargo.toml index 527fa7494..c9ed52725 100644 --- a/crates/voicevox_core/Cargo.toml +++ b/crates/voicevox_core/Cargo.toml @@ -8,17 +8,12 @@ publish.workspace = true features = ["load-onnxruntime", "link-onnxruntime"] rustdoc-args = ["--cfg", "docsrs"] +# rustdocを参照 [features] default = [] - -# ONNX Runtimeのリンク方法を決めるフィーチャ(rustdocを参照)。 load-onnxruntime = ["voicevox-ort/load-dynamic"] link-onnxruntime = [] -# GPUを利用可能にするフィーチャ(rustdocを参照)。 -cuda = ["voicevox-ort/cuda"] -directml = ["voicevox-ort/directml"] - [dependencies] anyhow.workspace = true async_zip = { workspace = true, features = ["deflate"] } diff --git a/crates/voicevox_core/src/devices.rs b/crates/voicevox_core/src/devices.rs index dfe8d7e64..f3027e741 100644 --- a/crates/voicevox_core/src/devices.rs +++ b/crates/voicevox_core/src/devices.rs @@ -1,10 +1,76 @@ +use std::{ + collections::BTreeMap, + fmt::{self, Display}, + ops::Index, +}; + +use derive_more::BitAnd; use serde::{Deserialize, Serialize}; -/// このライブラリで利用可能なデバイスの情報。 +pub(crate) fn test_gpus( + gpus: impl IntoIterator, + inference_rt_name: &'static str, + devices_supported_by_inference_rt: SupportedDevices, + test: impl Fn(GpuSpec) -> anyhow::Result<()>, +) -> DeviceAvailabilities { + DeviceAvailabilities( + gpus.into_iter() + .map(|gpu| { + let availability = test_gpu( + gpu, + inference_rt_name, + devices_supported_by_inference_rt, + &test, + ); + (gpu, availability) + }) + .collect(), + ) +} + +fn test_gpu( + gpu: GpuSpec, + inference_rt_name: &'static str, + devices_supported_by_inference_rt: SupportedDevices, + test: impl Fn(GpuSpec) -> anyhow::Result<()>, +) -> DeviceAvailability { + if 
!SupportedDevices::THIS[gpu] { + DeviceAvailability::NotSupportedByThisLib + } else if !devices_supported_by_inference_rt[gpu] { + DeviceAvailability::NotSupportedByCurrentLoadedInferenceRuntime(inference_rt_name) + } else { + match test(gpu) { + Ok(()) => DeviceAvailability::Ok, + Err(err) => DeviceAvailability::Err(err), + } + } +} + +/// 利用可能なデバイスの情報。 /// -/// あくまで本ライブラリが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったと +/// あくまで本ライブラリもしくはONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったと /// しても`cuda`や`dml`は`true`を示しうる。 -#[derive(Debug, Serialize, Deserialize)] +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() -> anyhow::Result<()> { +/// use voicevox_core::{tokio::Onnxruntime, SupportedDevices}; +/// +/// # voicevox_core::blocking::Onnxruntime::load_once() +/// # .filename(if cfg!(windows) { +/// # // Windows\System32\onnxruntime.dllを回避 +/// # test_util::ONNXRUNTIME_DYLIB_PATH +/// # } else { +/// # voicevox_core::blocking::Onnxruntime::LIB_VERSIONED_FILENAME +/// # }) +/// # .exec()?; +/// # +/// let onnxruntime = Onnxruntime::get().unwrap(); +/// dbg!(SupportedDevices::THIS & onnxruntime.supported_devices()?); +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Copy, PartialEq, Eq, Debug, BitAnd, Serialize, Deserialize)] pub struct SupportedDevices { /// CPUが利用可能。 /// @@ -27,7 +93,146 @@ pub struct SupportedDevices { } impl SupportedDevices { - pub fn to_json(&self) -> serde_json::Value { + /// このライブラリで利用可能なデバイスの情報。 + /// + /// `load-onnxruntime`のフィーチャが有効化されているときはすべて`true`となる。 + /// + #[cfg_attr(feature = "load-onnxruntime", doc = "```")] + #[cfg_attr(not(feature = "load-onnxruntime"), doc = "```no_run")] + /// # use voicevox_core::SupportedDevices; + /// assert!(SupportedDevices::THIS.cuda); + /// assert!(SupportedDevices::THIS.dml); + /// ``` + /// + /// `link-onnxruntime`のフィーチャが有効化されているときは`cpu`を除き`false`となる。 + /// + #[cfg_attr(feature = "link-onnxruntime", doc = "```")] + #[cfg_attr(not(feature = "link-onnxruntime"), doc = "```no_run")] + /// # use voicevox_core::SupportedDevices; + /// assert!(!SupportedDevices::THIS.cuda); + /// assert!(!SupportedDevices::THIS.dml); + /// ``` + pub const THIS: Self = if cfg!(feature = "load-onnxruntime") { + Self { + cpu: true, + cuda: true, + dml: true, + } + } else if cfg!(feature = "link-onnxruntime") { + Self { + cpu: true, + cuda: false, + dml: false, + } + } else { + panic!("either `load-onnxruntime` or `link-onnxruntime` must be enabled"); + }; + + pub fn to_json(self) -> serde_json::Value { serde_json::to_value(self).expect("should not fail") } } + +#[derive(Debug)] +pub(crate) struct DeviceAvailabilities(BTreeMap); + +impl DeviceAvailabilities { + pub(crate) fn oks(&self) -> Vec { + self.0 + .iter() + .filter(|(_, result)| matches!(result, DeviceAvailability::Ok)) + .map(|(&gpu, _)| gpu) + .collect() + } +} + +impl Display for DeviceAvailabilities { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for (gpu, availability) in &self.0 { + match availability { + DeviceAvailability::Ok => writeln!(f, "* {gpu}: OK"), + DeviceAvailability::Err(err) => { + writeln!(f, "* {gpu}: {err}", err = err.to_string().trim_end()) + } + DeviceAvailability::NotSupportedByThisLib => { + writeln!( + f, + "* {gpu}: この`{name}`のビルドでは利用できません", + name = env!("CARGO_PKG_NAME"), + ) + } + DeviceAvailability::NotSupportedByCurrentLoadedInferenceRuntime(name) => { + writeln!(f, "* {gpu}: {name}では利用できません") + } + }?; + } + Ok(()) + } +} + +#[derive(Debug)] +enum DeviceAvailability { + Ok, + Err(anyhow::Error), + NotSupportedByThisLib, + 
NotSupportedByCurrentLoadedInferenceRuntime(&'static str), +} + +#[derive(Clone, Copy, PartialEq, Debug, derive_more::Display)] +pub(crate) enum DeviceSpec { + #[display(fmt = "CPU")] + Cpu, + + #[display(fmt = "{_0}")] + Gpu(GpuSpec), +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, derive_more::Display)] +pub(crate) enum GpuSpec { + #[display(fmt = "CUDA (device_id=0)")] + Cuda, + + #[display(fmt = "DirectML (device_id=0)")] + Dml, +} + +impl GpuSpec { + pub(crate) fn defaults() -> Vec { + vec![Self::Cuda, Self::Dml] + } +} + +impl Index for SupportedDevices { + type Output = bool; + + fn index(&self, gpu: GpuSpec) -> &Self::Output { + match gpu { + GpuSpec::Cuda => &self.cuda, + GpuSpec::Dml => &self.dml, + } + } +} + +#[cfg(test)] +mod tests { + use pretty_assertions::assert_eq; + + use super::{GpuSpec, SupportedDevices}; + + #[test] + fn gpu_spec_defaults_is_exhaustive() { + static SUPPORTED_DEVICES: SupportedDevices = SupportedDevices::THIS; // whatever + + assert_eq!( + { + #[forbid(unused_variables)] + let SupportedDevices { cpu: _, cuda, dml } = &SUPPORTED_DEVICES; + [cuda as *const _, dml as *const _] + }, + *GpuSpec::defaults() + .into_iter() + .map(|gpu| &SUPPORTED_DEVICES[gpu] as *const _) + .collect::>(), + ); + } +} diff --git a/crates/voicevox_core/src/error.rs b/crates/voicevox_core/src/error.rs index d0e7fced0..0125b1cc6 100644 --- a/crates/voicevox_core/src/error.rs +++ b/crates/voicevox_core/src/error.rs @@ -1,4 +1,5 @@ use crate::{ + devices::DeviceAvailabilities, engine::{FullContextLabelError, KanaParseError}, user_dict::InvalidWordError, StyleId, StyleType, VoiceModelId, @@ -33,7 +34,7 @@ impl Error { pub fn kind(&self) -> ErrorKind { match &self.0 { ErrorRepr::NotLoadedOpenjtalkDict => ErrorKind::NotLoadedOpenjtalkDict, - ErrorRepr::GpuSupport => ErrorKind::GpuSupport, + ErrorRepr::GpuSupport(_) => ErrorKind::GpuSupport, ErrorRepr::InitInferenceRuntime { .. } => ErrorKind::InitInferenceRuntime, ErrorRepr::LoadModel(LoadModelError { context, .. 
}) => match context { LoadModelErrorKind::OpenZipFile => ErrorKind::OpenZipFile, @@ -63,8 +64,8 @@ pub(crate) enum ErrorRepr { #[error("OpenJTalkの辞書が読み込まれていません")] NotLoadedOpenjtalkDict, - #[error("GPU機能をサポートすることができません")] - GpuSupport, + #[error("GPU機能をサポートすることができません:\n{_0}")] + GpuSupport(DeviceAvailabilities), #[error("{runtime_display_name}のロードまたは初期化ができませんでした")] InitInferenceRuntime { diff --git a/crates/voicevox_core/src/infer.rs b/crates/voicevox_core/src/infer.rs index cffd0d524..112ca6b53 100644 --- a/crates/voicevox_core/src/infer.rs +++ b/crates/voicevox_core/src/infer.rs @@ -11,16 +11,25 @@ use enum_map::{Enum, EnumMap}; use ndarray::{Array, ArrayD, Dimension, ShapeError}; use thiserror::Error; -use crate::{StyleType, SupportedDevices}; +use crate::{ + devices::{DeviceSpec, GpuSpec}, + StyleType, SupportedDevices, +}; pub(crate) trait InferenceRuntime: 'static { // TODO: "session"とは何なのかを定め、ドキュメントを書く。`InferenceSessionSet`も同様。 type Session: Sized + Send + 'static; type RunContext<'a>: From<&'a mut Self::Session> + PushInputTensor; - /// このライブラリで利用可能なデバイスの情報を取得する。 + /// 名前。 + const DISPLAY_NAME: &'static str; + + /// このランタイムで利用可能なデバイスの情報を取得する。 fn supported_devices(&self) -> crate::Result; + /// GPUが実際に利用できそうかどうか判定する。 + fn test_gpu(&self, gpu: GpuSpec) -> anyhow::Result<()>; + #[allow(clippy::type_complexity)] fn new_session( &self, @@ -187,7 +196,7 @@ impl ParamInfo { #[derive(new, Clone, Copy, PartialEq, Debug)] pub(crate) struct InferenceSessionOptions { pub(crate) cpu_num_threads: u16, - pub(crate) use_gpu: bool, + pub(crate) device: DeviceSpec, } #[derive(Error, Debug)] diff --git a/crates/voicevox_core/src/infer/runtimes/onnxruntime.rs b/crates/voicevox_core/src/infer/runtimes/onnxruntime.rs index 74dc8a601..15ba963eb 100644 --- a/crates/voicevox_core/src/infer/runtimes/onnxruntime.rs +++ b/crates/voicevox_core/src/infer/runtimes/onnxruntime.rs @@ -8,7 +8,10 @@ use ort::{ GraphOptimizationLevel, IntoTensorElementType, TensorElementType, ValueType, }; -use crate::{devices::SupportedDevices, error::ErrorRepr}; +use crate::{ + devices::{DeviceSpec, GpuSpec, SupportedDevices}, + error::ErrorRepr, +}; use super::super::{ DecryptModelError, InferenceRuntime, InferenceSessionOptions, InputScalarKind, @@ -22,6 +25,14 @@ impl InferenceRuntime for self::blocking::Onnxruntime { type Session = ort::Session; type RunContext<'a> = OnnxruntimeRunContext<'a>; + const DISPLAY_NAME: &'static str = if cfg!(feature = "load-onnxruntime") { + "現在ロードされているONNX Runtime" + } else if cfg!(feature = "link-onnxruntime") { + "現在リンクされているONNX Runtime" + } else { + panic!("either `load-onnxruntime` or `link-onnxruntime` must be enabled"); + }; + fn supported_devices(&self) -> crate::Result { (|| { let cpu = CPUExecutionProvider::default().is_available()?; @@ -40,6 +51,15 @@ impl InferenceRuntime for self::blocking::Onnxruntime { .map_err(Into::into) } + fn test_gpu(&self, gpu: GpuSpec) -> anyhow::Result<()> { + let sess_builder = &ort::SessionBuilder::new()?; + match gpu { + GpuSpec::Cuda => CUDAExecutionProvider::default().register(sess_builder), + GpuSpec::Dml => DirectMLExecutionProvider::default().register(sess_builder), + } + .map_err(Into::into) + } + fn new_session( &self, model: impl FnOnce() -> std::result::Result, DecryptModelError>, @@ -53,14 +73,18 @@ impl InferenceRuntime for self::blocking::Onnxruntime { .with_optimization_level(GraphOptimizationLevel::Level1)? 
.with_intra_threads(options.cpu_num_threads.into())?; - if options.use_gpu && cfg!(feature = "directml") { - builder = builder - .with_parallel_execution(false)? - .with_memory_pattern(false)?; - DirectMLExecutionProvider::default().register(&builder)?; - } else if options.use_gpu && cfg!(feature = "cuda") { - CUDAExecutionProvider::default().register(&builder)?; - } + match options.device { + DeviceSpec::Cpu => {} + DeviceSpec::Gpu(GpuSpec::Cuda) => { + CUDAExecutionProvider::default().register(&builder)?; + } + DeviceSpec::Gpu(GpuSpec::Dml) => { + builder = builder + .with_parallel_execution(false)? + .with_memory_pattern(false)?; + DirectMLExecutionProvider::default().register(&builder)?; + } + }; let model = model()?; let sess = builder.commit_from_memory(&{ model })?; @@ -365,7 +389,7 @@ pub(crate) mod blocking { } } - /// このライブラリで利用可能なデバイスの情報を取得する。 + /// ONNX Runtimeとして利用可能なデバイスの情報を取得する。 pub fn supported_devices(&self) -> crate::Result { ::supported_devices(self) } @@ -517,7 +541,7 @@ pub(crate) mod tokio { .map(Self::from_blocking) } - /// このライブラリで利用可能なデバイスの情報を取得する。 + /// ONNX Runtimeとして利用可能なデバイスの情報を取得する。 pub fn supported_devices(&self) -> crate::Result { self.0.supported_devices() } diff --git a/crates/voicevox_core/src/lib.rs b/crates/voicevox_core/src/lib.rs index 25ff64f9e..fedf538cf 100644 --- a/crates/voicevox_core/src/lib.rs +++ b/crates/voicevox_core/src/lib.rs @@ -2,39 +2,25 @@ //! //! # Feature flags //! -//! ## ONNX Runtimeのリンク方法を決めるフィーチャ -//! //! このクレートの利用にあたっては以下の二つの[Cargoフィーチャ]のうちどちらかを有効にしなければなり //! ません。両方の有効化はコンパイルエラーとなります。[`Onnxruntime`]の初期化方法はこれらの //! フィーチャによって決まります。 //! -//! - **`load-onnxruntime`**: ONNX Runtimeを`dlopen`/`LoadLibraryExW`で開きます。 +//! - **`load-onnxruntime`**: ONNX Runtimeを`dlopen`/`LoadLibraryExW`で +//! 開きます。[CUDA]と[DirectML]が利用できます。 //! - **`link-onnxruntime`**: ONNX Runtimeをロード時動的リンクします。iOSのような`dlopen`の利用が //! 困難な環境でのみこちらを利用するべきです。_Note_: //! [動的リンク対象のライブラリ名]は`onnxruntime`で固定です。変更 -//! は`patchelf(1)`や`install_name_tool(1)`で行ってください。 -//! -//! ## GPUを利用可能にするフィーチャ -//! -//! - **`cuda`** -//! - **`directml`** -// TODO: こんな感じ(↓)で書く -////! - **`cuda`**: [CUDAを用いた機械学習推論]を可能にします。 -////! - ❗ [acceleration\_mode]={Gpu,Auto}のときの挙動が変化します。`directml`と共に -////! 有効化したときの挙動は未規定です。 -////! - **`directml`**: [DirectMLを用いた機械学習推論]を可能にします。 -////! - ❗ 〃 -////! -////! [CUDAを用いた機械学習推論]: -////! https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html -////! [DirectMLを用いた機械学習推論]: -////! https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html -////! [acceleration\_mode]: InitializeOptions::acceleration_mode +//! は`patchelf(1)`や`install_name_tool(1)`で行ってください。また、[ONNX RuntimeのGPU機能]を使う +//! ことはできません。 //! //! [Cargoフィーチャ]: https://doc.rust-lang.org/stable/cargo/reference/features.html +//! [CUDA]: https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html +//! [DirectML]: https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html //! [動的リンク対象のライブラリ名]: //! https://doc.rust-lang.org/cargo/reference/build-scripts.html#rustc-link-lib //! [`Onnxruntime`]: blocking::Onnxruntime +//! 
[ONNX RuntimeのGPU機能]: https://onnxruntime.ai/docs/execution-providers/ #![cfg_attr(docsrs, feature(doc_cfg))] diff --git a/crates/voicevox_core/src/status.rs b/crates/voicevox_core/src/status.rs index 8c75d64d9..419be52f5 100644 --- a/crates/voicevox_core/src/status.rs +++ b/crates/voicevox_core/src/status.rs @@ -350,6 +350,7 @@ mod tests { use rstest::rstest; use crate::{ + devices::{DeviceSpec, GpuSpec}, infer::{ domains::{InferenceDomainMap, TalkOperation}, InferenceSessionOptions, @@ -360,16 +361,16 @@ mod tests { use super::Status; #[rstest] - #[case(true, 0)] - #[case(true, 1)] - #[case(true, 8)] - #[case(false, 2)] - #[case(false, 4)] - #[case(false, 8)] - #[case(false, 0)] - fn status_new_works(#[case] use_gpu: bool, #[case] cpu_num_threads: u16) { - let light_session_options = InferenceSessionOptions::new(cpu_num_threads, false); - let heavy_session_options = InferenceSessionOptions::new(cpu_num_threads, use_gpu); + #[case(DeviceSpec::Gpu(GpuSpec::Cuda), 0)] + #[case(DeviceSpec::Gpu(GpuSpec::Cuda), 1)] + #[case(DeviceSpec::Gpu(GpuSpec::Cuda), 8)] + #[case(DeviceSpec::Cpu, 2)] + #[case(DeviceSpec::Cpu, 4)] + #[case(DeviceSpec::Cpu, 8)] + #[case(DeviceSpec::Cpu, 0)] + fn status_new_works(#[case] device_for_heavy: DeviceSpec, #[case] cpu_num_threads: u16) { + let light_session_options = InferenceSessionOptions::new(cpu_num_threads, DeviceSpec::Cpu); + let heavy_session_options = InferenceSessionOptions::new(cpu_num_threads, device_for_heavy); let session_options = InferenceDomainMap { talk: enum_map! { TalkOperation::PredictDuration @@ -404,7 +405,7 @@ mod tests { let status = Status::new( crate::blocking::Onnxruntime::from_test_util_data().unwrap(), InferenceDomainMap { - talk: enum_map!(_ => InferenceSessionOptions::new(0, false)), + talk: enum_map!(_ => InferenceSessionOptions::new(0, DeviceSpec::Cpu)), }, ); let model = &crate::tokio::VoiceModel::sample().await.unwrap(); @@ -420,7 +421,7 @@ mod tests { let status = Status::new( crate::blocking::Onnxruntime::from_test_util_data().unwrap(), InferenceDomainMap { - talk: enum_map!(_ => InferenceSessionOptions::new(0, false)), + talk: enum_map!(_ => InferenceSessionOptions::new(0, DeviceSpec::Cpu)), }, ); let vvm = &crate::tokio::VoiceModel::sample().await.unwrap(); diff --git a/crates/voicevox_core/src/synthesizer.rs b/crates/voicevox_core/src/synthesizer.rs index a11af2d5b..adae7c4b5 100644 --- a/crates/voicevox_core/src/synthesizer.rs +++ b/crates/voicevox_core/src/synthesizer.rs @@ -45,7 +45,7 @@ impl Default for TtsOptions { } /// ハードウェアアクセラレーションモードを設定する設定値。 -#[derive(Default, Debug, PartialEq, Eq)] +#[derive(Default, Clone, Copy, Debug, PartialEq, Eq)] pub enum AccelerationMode { /// 実行環境に合った適切なハードウェアアクセラレーションモードを選択する。 #[default] @@ -74,8 +74,10 @@ pub(crate) mod blocking { use std::io::{Cursor, Write as _}; use enum_map::enum_map; + use tracing::info; use crate::{ + devices::{DeviceSpec, GpuSpec}, engine::{create_kana, mora_to_text, Mora, OjtPhoneme}, error::ErrorRepr, infer::{ @@ -84,7 +86,7 @@ pub(crate) mod blocking { PredictDurationOutput, PredictIntonationInput, PredictIntonationOutput, TalkDomain, TalkOperation, }, - InferenceSessionOptions, + InferenceRuntime as _, InferenceSessionOptions, }, status::Status, text_analyzer::{KanaAnalyzer, OpenJTalkAnalyzer, TextAnalyzer}, @@ -150,31 +152,44 @@ pub(crate) mod blocking { #[cfg(windows)] list_windows_video_cards(); - let use_gpu = match options.acceleration_mode { - AccelerationMode::Auto => { - let supported_devices = onnxruntime.supported_devices()?; + let test_gpus = || { 
+ info!("GPUをテストします:"); + let availabilities = crate::devices::test_gpus( + GpuSpec::defaults(), + crate::blocking::Onnxruntime::DISPLAY_NAME, + onnxruntime.supported_devices()?, + |gpu| onnxruntime.test_gpu(gpu), + ); + for line in availabilities.to_string().lines() { + info!(" {line}"); + } + crate::Result::Ok(availabilities) + }; - if cfg!(feature = "directml") { - supported_devices.dml - } else { - supported_devices.cuda + let device_for_heavy = match options.acceleration_mode { + AccelerationMode::Auto => match *test_gpus()?.oks() { + [] => DeviceSpec::Cpu, + [gpu, ..] => DeviceSpec::Gpu(gpu), + }, + AccelerationMode::Cpu => DeviceSpec::Cpu, + AccelerationMode::Gpu => { + let availabilities = test_gpus()?; + match *availabilities.oks() { + [] => return Err(ErrorRepr::GpuSupport(availabilities).into()), + [gpu, ..] => DeviceSpec::Gpu(gpu), } } - AccelerationMode::Cpu => false, - AccelerationMode::Gpu => true, }; - if use_gpu && !can_support_gpu_feature(onnxruntime)? { - return Err(ErrorRepr::GpuSupport.into()); - } + info!("{device_for_heavy}を利用します"); // 軽いモデルはこちらを使う let light_session_options = - InferenceSessionOptions::new(options.cpu_num_threads, false); + InferenceSessionOptions::new(options.cpu_num_threads, DeviceSpec::Cpu); // 重いモデルはこちらを使う let heavy_session_options = - InferenceSessionOptions::new(options.cpu_num_threads, use_gpu); + InferenceSessionOptions::new(options.cpu_num_threads, device_for_heavy); let status = Status::new( onnxruntime, @@ -187,22 +202,14 @@ pub(crate) mod blocking { }, ); - return Ok(Self { + let use_gpu = matches!(device_for_heavy, DeviceSpec::Gpu(_)); + + Ok(Self { status, open_jtalk_analyzer: OpenJTalkAnalyzer::new(open_jtalk), kana_analyzer: KanaAnalyzer, use_gpu, - }); - - fn can_support_gpu_feature(onnxruntime: &crate::blocking::Onnxruntime) -> Result { - let supported_devices = onnxruntime.supported_devices()?; - - if cfg!(feature = "directml") { - Ok(supported_devices.dml) - } else { - Ok(supported_devices.cuda) - } - } + }) } pub fn onnxruntime(&self) -> &'static crate::blocking::Onnxruntime { @@ -993,13 +1000,13 @@ pub(crate) mod blocking { CreateDXGIFactory, IDXGIFactory, DXGI_ADAPTER_DESC, DXGI_ERROR_NOT_FOUND, }; - info!("検出されたGPU (DirectMLには1番目のGPUが使われます):"); + info!("検出されたGPU (DirectMLにはGPU 0が使われます):"); match list_windows_video_cards() { Ok(descs) => { - for desc in descs { + for (device_id, desc) in descs.into_iter().enumerate() { let description = OsString::from_wide(trim_nul(&desc.Description)); let vram = humansize::format_size(desc.DedicatedVideoMemory, BINARY); - info!(" - {description:?} ({vram})"); + info!(" GPU {device_id}: {description:?} ({vram})"); } } Err(err) => error!("{err}"), diff --git a/crates/voicevox_core_c_api/Cargo.toml b/crates/voicevox_core_c_api/Cargo.toml index 29b66e55a..1b86521d1 100644 --- a/crates/voicevox_core_c_api/Cargo.toml +++ b/crates/voicevox_core_c_api/Cargo.toml @@ -15,8 +15,6 @@ name = "e2e" [features] load-onnxruntime = ["voicevox_core/load-onnxruntime"] link-onnxruntime = ["voicevox_core/link-onnxruntime"] -cuda = ["voicevox_core/cuda"] -directml = ["voicevox_core/directml"] [dependencies] anstream = { workspace = true, default-features = false, features = ["auto"] } diff --git a/crates/voicevox_core_c_api/include/voicevox_core.h b/crates/voicevox_core_c_api/include/voicevox_core.h index fe19a4c2e..fe514dfa9 100644 --- a/crates/voicevox_core_c_api/include/voicevox_core.h +++ b/crates/voicevox_core_c_api/include/voicevox_core.h @@ -806,11 +806,11 @@ __declspec(dllimport) char 
*voicevox_synthesizer_create_metas_json(const struct VoicevoxSynthesizer *synthesizer); /** - * このライブラリで利用可能なデバイスの情報を、JSONで取得する。 + * ONNX Runtimeとして利用可能なデバイスの情報を、JSONで取得する。 * * JSONの解放は ::voicevox_json_free で行う。 * - * あくまで本ライブラリが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても`cuda`や`dml`は`true`を示しうる。 + * あくまでONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても`cuda`や`dml`は`true`を示しうる。 * * @param [in] onnxruntime * @param [out] output_supported_devices_json サポートデバイス情報のJSON文字列 diff --git a/crates/voicevox_core_c_api/src/lib.rs b/crates/voicevox_core_c_api/src/lib.rs index a634e6ae8..6f157bf6b 100644 --- a/crates/voicevox_core_c_api/src/lib.rs +++ b/crates/voicevox_core_c_api/src/lib.rs @@ -650,11 +650,11 @@ pub extern "C" fn voicevox_synthesizer_create_metas_json( C_STRING_DROP_CHECKER.whitelist(metas).into_raw() } -/// このライブラリで利用可能なデバイスの情報を、JSONで取得する。 +/// ONNX Runtimeとして利用可能なデバイスの情報を、JSONで取得する。 /// /// JSONの解放は ::voicevox_json_free で行う。 /// -/// あくまで本ライブラリが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても`cuda`や`dml`は`true`を示しうる。 +/// あくまでONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても`cuda`や`dml`は`true`を示しうる。 /// /// @param [in] onnxruntime /// @param [out] output_supported_devices_json サポートデバイス情報のJSON文字列 diff --git a/crates/voicevox_core_c_api/tests/e2e/log_mask.rs b/crates/voicevox_core_c_api/tests/e2e/log_mask.rs index 9b08c9af7..93114976e 100644 --- a/crates/voicevox_core_c_api/tests/e2e/log_mask.rs +++ b/crates/voicevox_core_c_api/tests/e2e/log_mask.rs @@ -30,7 +30,7 @@ impl Utf8Output { pub(crate) fn mask_windows_video_cards(self) -> Self { self.mask_stderr( static_regex!( - r#"(?m)^\{timestamp\} INFO voicevox_core::synthesizer::blocking: 検出されたGPU \(DirectMLには1番目のGPUが使われます\):(\n\{timestamp\} INFO voicevox_core::synthesizer::blocking: - "[^"]+" \([0-9.]+ [a-zA-Z]+\))+"#, + r#"(?m)^\{timestamp\} INFO voicevox_core::synthesizer::blocking: 検出されたGPU \(DirectMLにはGPU 0が使われます\):(\n\{timestamp\} INFO voicevox_core::synthesizer::blocking: GPU [0-9]+: "[^"]+" \([0-9.]+ [a-zA-Z]+\))+"#, ), "{windows-video-cards}", ) diff --git a/crates/voicevox_core_c_api/tests/e2e/snapshots.toml b/crates/voicevox_core_c_api/tests/e2e/snapshots.toml index b623119dd..17ccd61f8 100644 --- a/crates/voicevox_core_c_api/tests/e2e/snapshots.toml +++ b/crates/voicevox_core_c_api/tests/e2e/snapshots.toml @@ -53,9 +53,11 @@ metas = ''' stderr.windows = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' {windows-video-cards} +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' stderr.unix = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' [compatible_engine_load_model_before_initialize] @@ -97,9 +99,11 @@ output."こんにちは、音声合成の世界へようこそ".wav_length = 176 stderr.windows = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' {windows-video-cards} +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' stderr.unix = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' [synthesizer_new_output_json] @@ -157,9 +161,11 @@ metas = ''' stderr.windows = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' {windows-video-cards} +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' stderr.unix = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version 
'{onnxruntime_version}' +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' [tts_via_audio_query] @@ -167,9 +173,11 @@ output."こんにちは、音声合成の世界へようこそ".wav_length = 176 stderr.windows = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' {windows-video-cards} +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' stderr.unix = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' # FIXME: "user_dict_load"のはず @@ -177,9 +185,11 @@ stderr.unix = ''' stderr.windows = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' {windows-video-cards} +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' stderr.unix = ''' {timestamp} INFO ort: Loaded ONNX Runtime dylib with version '{onnxruntime_version}' +{timestamp} INFO voicevox_core::synthesizer::blocking: CPUを利用します ''' [user_dict_manipulate] diff --git a/crates/voicevox_core_java_api/Cargo.toml b/crates/voicevox_core_java_api/Cargo.toml index e9cced8b2..ec4e2d616 100644 --- a/crates/voicevox_core_java_api/Cargo.toml +++ b/crates/voicevox_core_java_api/Cargo.toml @@ -7,10 +7,6 @@ publish.workspace = true [lib] crate-type = ["cdylib"] -[features] -cuda = ["voicevox_core/cuda"] -directml = ["voicevox_core/directml"] - [dependencies] android_logger.workspace = true chrono = { workspace = true, default-features = false, features = ["clock"] } diff --git a/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java b/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java index 010e69073..496c2ccc4 100644 --- a/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java +++ b/crates/voicevox_core_java_api/lib/src/main/java/jp/hiroshiba/voicevoxcore/GlobalInfo.java @@ -24,10 +24,10 @@ public static String getVersion() { // FIXME: `Onnxruntime`に移すか、独立させる /** - * このライブラリで利用可能なデバイスの情報。 + * ONNX Runtime利用可能なデバイスの情報。 * - *
<p>
あくまで本ライブラリが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても {@link #cuda} や {@link #dml} は {@code - * true} を示しうる。 + *
<p>
あくまでONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても {@link #cuda} や {@link #dml} は + * {@code true} を示しうる。 */ public static class SupportedDevices { /** diff --git a/crates/voicevox_core_python_api/Cargo.toml b/crates/voicevox_core_python_api/Cargo.toml index 48c92dfb4..e0877b623 100644 --- a/crates/voicevox_core_python_api/Cargo.toml +++ b/crates/voicevox_core_python_api/Cargo.toml @@ -7,10 +7,6 @@ publish.workspace = true [lib] crate-type = ["cdylib"] -[features] -cuda = ["voicevox_core/cuda"] -directml = ["voicevox_core/directml"] - [dependencies] camino.workspace = true easy-ext.workspace = true diff --git a/crates/voicevox_core_python_api/python/voicevox_core/_models.py b/crates/voicevox_core_python_api/python/voicevox_core/_models.py index f7929fae2..941ed84fc 100644 --- a/crates/voicevox_core_python_api/python/voicevox_core/_models.py +++ b/crates/voicevox_core_python_api/python/voicevox_core/_models.py @@ -90,9 +90,9 @@ class SpeakerMeta: @pydantic.dataclasses.dataclass class SupportedDevices: """ - このライブラリで利用可能なデバイスの情報。 + ONNX Runtimeとして利用可能なデバイスの情報。 - あくまで本ライブラリが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても + あくまでONNX Runtimeが対応しているデバイスの情報であることに注意。GPUが使える環境ではなかったとしても ``cuda`` や ``dml`` は ``True`` を示しうる。 """ diff --git a/docs/downloader.md b/docs/downloader.md index aeff5b4a8..76148197f 100644 --- a/docs/downloader.md +++ b/docs/downloader.md @@ -49,7 +49,7 @@ download または ``` -download --device cpu +download --devices cpu ``` @@ -57,7 +57,7 @@ download --device cpu ## DirectML 版をダウンロードする場合 ``` -download --device directml +download --devices directml ``` @@ -65,7 +65,7 @@ download --device directml ## CUDA 版をダウンロードする場合 ``` -download --device cuda +download --devices cuda ``` diff --git a/docs/usage.md b/docs/usage.md index e828ae220..067250126 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -31,10 +31,10 @@ chmod +x download ./download # DirectML版を利用する場合 -./download --device directml +./download --devices directml # CUDA版を利用する場合 -./download --device cuda +./download --devices cuda ``` `voicevox_core`ディレクトリにファイル一式がダウンロードされています。以降の説明ではこのディレクトリで作業を行います。 diff --git a/example/cpp/windows/README.md b/example/cpp/windows/README.md index 660d4190c..4012acdf9 100644 --- a/example/cpp/windows/README.md +++ b/example/cpp/windows/README.md @@ -14,7 +14,7 @@ Visual Studio Installerを使用しインストールしてください。 出力フォルダを作成するために、一度ビルドします。「windows_example.sln」をVisual Studioで開き、メニューの「ビルド」→「ソリューションのビルド」を押します。 この段階では、ビルドは失敗します。「bin」フォルダと「lib」フォルダが生成されていればOKです。 -[Releases](https://github.com/VOICEVOX/voicevox_core/releases/latest)から「voicevox_core-windows-x64-cpu-{バージョン名}.zip」をダウンロードし、展開します。[ダウンローダー](https://github.com/VOICEVOX/voicevox_core/blob/main/docs/downloader.md)を使うと便利です。 +[Releases](https://github.com/VOICEVOX/voicevox_core/releases/latest)から「voicevox_core-windows-x64-{バージョン名}.zip」をダウンロードし、展開します。[ダウンローダー](https://github.com/VOICEVOX/voicevox_core/blob/main/docs/downloader.md)を使うと便利です。 展開してできたファイル・フォルダをそれぞれ下記のフォルダへ配置します。 - simple_tts に配置 diff --git a/example/python/README.md b/example/python/README.md index 97303eb81..48678edb2 100644 --- a/example/python/README.md +++ b/example/python/README.md @@ -18,6 +18,8 @@ https://github.com/VOICEVOX/voicevox_core/releases/latest 2. ダウンローダーを使って環境構築します。 +FIXME: 今は`--exclude core`がある + linux/mac の場合 download-linux-x64 のところはアーキテクチャや OS によって適宜読み替えてください。
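A note on the new `find_onnxruntime` parser added to the downloader: it looks in the ONNX Runtime builder's release notes for a `<table>` carrying `data-voicevox-onnxruntime-specs-format-version="1"` and `data-voicevox-onnxruntime-specs-type="dylibs"`, reads each row as (OS, CPU architecture, `/`-separated device list, asset name), and returns the matching row that lists the fewest devices. The sketch below is a hypothetical unit test, not part of this patch; the table contents and asset file names are invented, and it assumes it sits in the same module as `find_onnxruntime` (which is private to the downloader binary).

```rust
// Hypothetical test for the downloader's (private) `find_onnxruntime`; it is
// NOT part of this patch and would have to live in the same module. The
// release-note body below is an invented example of the format the parser
// expects: a <table> carrying the two `data-voicevox-onnxruntime-specs-*`
// attributes, whose rows are (OS, CPU arch, "/"-separated devices, asset name).
#[cfg(test)]
mod find_onnxruntime_format_example {
    use std::collections::BTreeSet;

    use super::{find_onnxruntime, CpuArch, Device, Os};

    #[test]
    fn picks_row_with_fewest_devices() {
        // Asset file names are made up for illustration only.
        const BODY: &str = r#"
<table data-voicevox-onnxruntime-specs-format-version="1" data-voicevox-onnxruntime-specs-type="dylibs">
  <tbody>
    <tr><td>Linux</td><td>x86_64</td><td>CPU</td><td>onnxruntime-linux-x64-example.tgz</td></tr>
    <tr><td>Linux</td><td>x86_64</td><td>CPU/CUDA</td><td>onnxruntime-linux-x64-gpu-example.tgz</td></tr>
  </tbody>
</table>
"#;

        // Only CPU is requested, so both rows match and the one listing the
        // fewest devices wins.
        let name = find_onnxruntime(
            BODY,
            Os::Linux,
            CpuArch::X64,
            &BTreeSet::from([Device::Cpu]),
        )
        .unwrap();
        assert_eq!("onnxruntime-linux-x64-example.tgz", name);
    }
}
```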
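For library users, the net effect of the `SupportedDevices` changes can be sketched as follows. This is a minimal downstream sketch, not part of the patch: it only reuses APIs shown in the diff above (`SupportedDevices::THIS`, the new `BitAnd` impl, `Onnxruntime::load_once`, `supported_devices`, `to_json`) and assumes the `load-onnxruntime` feature plus `tokio` and `anyhow` on the caller's side.

```rust
// Minimal downstream sketch (not part of this patch). Assumes the
// `load-onnxruntime` feature and that the caller depends on `tokio` and
// `anyhow`; it only uses APIs that appear in this diff.
use voicevox_core::{tokio::Onnxruntime, SupportedDevices};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Load the ONNX Runtime dynamic library once.
    voicevox_core::blocking::Onnxruntime::load_once()
        .filename(voicevox_core::blocking::Onnxruntime::LIB_VERSIONED_FILENAME)
        .exec()?;
    let onnxruntime = Onnxruntime::get().unwrap();

    // `SupportedDevices` is now `Copy` and implements `BitAnd`, so what this
    // build of the library supports can be intersected with what the loaded
    // ONNX Runtime supports.
    let usable = SupportedDevices::THIS & onnxruntime.supported_devices()?;
    println!("{}", usable.to_json());
    Ok(())
}
```

Under `AccelerationMode::Auto`, the synthesizer now goes a step further than this query: `test_gpus` actually tries to register each GPU's execution provider on a session builder and falls back to CPU when none succeeds.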