Restrict item visibility to the minimum necessary (アイテムの可視性を必要最低限にする)
qryxip committed Mar 2, 2024
1 parent 9f87faf commit ff4f705
Showing 13 changed files with 55 additions and 56 deletions.
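
Every change in this commit follows the same pattern: items that were declared pub but are only used inside the crate (or inside their parent module) are narrowed to pub(crate), pub(super), or plain private. As a reminder of what each visibility level means, here is a minimal standalone sketch — the module and function names (engine, parser, parse, …) are made up for illustration and are not part of this repository:

mod engine {
    pub mod parser {
        pub(crate) fn parse() {
            internal(); // ok: private items are visible anywhere in their own module
        }

        pub(super) fn helper() {} // visible only to the parent module, `engine`

        fn internal() {} // private to `parser`
    }

    pub fn run() {
        parser::parse();  // ok: `pub(crate)` is visible throughout this crate
        parser::helper(); // ok: `engine` is the parent of `parser`
        // parser::internal(); // would not compile: `internal` is private
    }
}

fn main() {
    engine::run();
    engine::parser::parse(); // still fine from anywhere inside the crate
    // From another crate, only items exported as plain `pub` along the whole path
    // would be reachable, which is exactly the surface this commit trims down.
}

Narrowing visibility like this lets the compiler flag accidental uses of internal helpers and warn about genuinely dead code, without changing behavior for users of the public API.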
2 changes: 1 addition & 1 deletion crates/test_util/src/lib.rs
@@ -22,7 +22,7 @@ pub const OPEN_JTALK_DIC_DIR: &str = concat!(
"/data/open_jtalk_dic_utf_8-1.11"
);

-pub const EXAMPLE_DATA_JSON: &str = include_str!(concat!(
+const EXAMPLE_DATA_JSON: &str = include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/data/example_data.json"
));
10 changes: 5 additions & 5 deletions crates/voicevox_core/src/engine/acoustic_feature_extractor.rs
@@ -61,7 +61,7 @@ static PHONEME_MAP: Lazy<HashMap<&str, i64>> = Lazy::new(|| {
});

#[derive(Debug, Clone, PartialEq, new, Default, Getters)]
-pub struct OjtPhoneme {
+pub(crate) struct OjtPhoneme {
phoneme: String,
#[allow(dead_code)]
start: f32,
@@ -70,23 +70,23 @@ pub struct OjtPhoneme {
}

impl OjtPhoneme {
-pub fn num_phoneme() -> usize {
+pub(crate) fn num_phoneme() -> usize {
PHONEME_MAP.len()
}

-pub fn space_phoneme() -> String {
+fn space_phoneme() -> String {
"pau".into()
}

-pub fn phoneme_id(&self) -> i64 {
+pub(crate) fn phoneme_id(&self) -> i64 {
if self.phoneme.is_empty() {
-1
} else {
*PHONEME_MAP.get(&self.phoneme.as_str()).unwrap()
}
}

-pub fn convert(phonemes: &[OjtPhoneme]) -> Vec<OjtPhoneme> {
+pub(crate) fn convert(phonemes: &[OjtPhoneme]) -> Vec<OjtPhoneme> {
let mut phonemes = phonemes.to_owned();
if let Some(first_phoneme) = phonemes.first_mut() {
if first_phoneme.phoneme.contains("sil") {
2 changes: 1 addition & 1 deletion crates/voicevox_core/src/engine/kana_parser.rs
@@ -163,7 +163,7 @@ pub(crate) fn parse_kana(text: &str) -> KanaParseResult<Vec<AccentPhraseModel>>
Ok(parsed_result)
}

-pub fn create_kana(accent_phrases: &[AccentPhraseModel]) -> String {
+pub(crate) fn create_kana(accent_phrases: &[AccentPhraseModel]) -> String {
let mut text = String::new();
for phrase in accent_phrases {
let moras = phrase.moras();
2 changes: 1 addition & 1 deletion crates/voicevox_core/src/engine/mora_list.rs
@@ -186,7 +186,7 @@ pub(super) const MORA_LIST_MINIMUM: &[[&str; 3]] = &[
["ア", "", "a"],
];

-pub fn mora2text(mora: &str) -> &str {
+pub(crate) fn mora2text(mora: &str) -> &str {
for &[text, consonant, vowel] in MORA_LIST_MINIMUM {
if mora.len() >= consonant.len()
&& &mora[..consonant.len()] == consonant
14 changes: 7 additions & 7 deletions crates/voicevox_core/src/infer/status.rs
@@ -31,14 +31,14 @@ pub(crate) struct Status<R: InferenceRuntime, D: InferenceDomain> {
}

impl<R: InferenceRuntime, D: InferenceDomain> Status<R, D> {
-pub fn new(session_options: EnumMap<D::Operation, InferenceSessionOptions>) -> Self {
+pub(crate) fn new(session_options: EnumMap<D::Operation, InferenceSessionOptions>) -> Self {
Self {
loaded_models: Default::default(),
session_options,
}
}

-pub fn insert_model(
+pub(crate) fn insert_model(
&self,
model_header: &VoiceModelHeader,
model_bytes: &EnumMap<D::Operation, Vec<u8>>,
@@ -64,30 +64,30 @@ impl<R: InferenceRuntime, D: InferenceDomain> Status<R, D> {
Ok(())
}

-pub fn unload_model(&self, voice_model_id: &VoiceModelId) -> Result<()> {
+pub(crate) fn unload_model(&self, voice_model_id: &VoiceModelId) -> Result<()> {
self.loaded_models.lock().unwrap().remove(voice_model_id)
}

-pub fn metas(&self) -> VoiceModelMeta {
+pub(crate) fn metas(&self) -> VoiceModelMeta {
self.loaded_models.lock().unwrap().metas()
}

pub(crate) fn ids_for(&self, style_id: StyleId) -> Result<(VoiceModelId, ModelInnerId)> {
self.loaded_models.lock().unwrap().ids_for(style_id)
}

-pub fn is_loaded_model(&self, voice_model_id: &VoiceModelId) -> bool {
+pub(crate) fn is_loaded_model(&self, voice_model_id: &VoiceModelId) -> bool {
self.loaded_models
.lock()
.unwrap()
.contains_voice_model(voice_model_id)
}

-pub fn is_loaded_model_by_style_id(&self, style_id: StyleId) -> bool {
+pub(crate) fn is_loaded_model_by_style_id(&self, style_id: StyleId) -> bool {
self.loaded_models.lock().unwrap().contains_style(style_id)
}

-pub fn validate_speaker_id(&self, style_id: StyleId) -> bool {
+pub(crate) fn validate_speaker_id(&self, style_id: StyleId) -> bool {
self.is_loaded_model_by_style_id(style_id)
}

2 changes: 1 addition & 1 deletion crates/voicevox_core/src/lib.rs
@@ -12,13 +12,13 @@ mod numerics;
mod result;
mod synthesizer;
mod task;
+mod text_analyzer;
mod user_dict;
mod version;
mod voice_model;

pub mod __internal;
pub mod blocking;
-pub mod text_analyzer;
pub mod tokio;

#[cfg(test)]
2 changes: 1 addition & 1 deletion crates/voicevox_core/src/test_util.rs
@@ -2,7 +2,7 @@ use std::path::PathBuf;

use crate::Result;

-pub async fn open_default_vvm_file() -> crate::tokio::VoiceModel {
+pub(crate) async fn open_default_vvm_file() -> crate::tokio::VoiceModel {
crate::tokio::VoiceModel::from_path(
::test_util::convert_zip_vvm(
PathBuf::from(env!("CARGO_WORKSPACE_DIR"))
8 changes: 4 additions & 4 deletions crates/voicevox_core/src/text_analyzer.rs
@@ -3,13 +3,13 @@ use crate::{
AccentPhraseModel, FullcontextExtractor, Result,
};

-pub trait TextAnalyzer {
+pub(crate) trait TextAnalyzer {
fn analyze(&self, text: &str) -> Result<Vec<AccentPhraseModel>>;
}

/// AquesTalk風記法からAccentPhraseの配列を生成するTextAnalyzer
#[derive(Clone)]
-pub struct KanaAnalyzer;
+pub(crate) struct KanaAnalyzer;

impl TextAnalyzer for KanaAnalyzer {
fn analyze(&self, text: &str) -> Result<Vec<AccentPhraseModel>> {
@@ -22,10 +22,10 @@ impl TextAnalyzer for KanaAnalyzer {

/// OpenJtalkからAccentPhraseの配列を生成するTextAnalyzer
#[derive(Clone)]
-pub struct OpenJTalkAnalyzer<O>(O);
+pub(crate) struct OpenJTalkAnalyzer<O>(O);

impl<O> OpenJTalkAnalyzer<O> {
-pub fn new(open_jtalk: O) -> Self {
+pub(crate) fn new(open_jtalk: O) -> Self {
Self(open_jtalk)
}
}
37 changes: 18 additions & 19 deletions crates/voicevox_core/src/user_dict/part_of_speech_data.rs
@@ -1,37 +1,36 @@
-use derive_getters::Getters;
use once_cell::sync::Lazy;
use std::collections::HashMap;

use crate::UserDictWordType;

/// 最小の優先度。
-pub static MIN_PRIORITY: u32 = 0;
+pub(super) static MIN_PRIORITY: u32 = 0;
/// 最大の優先度。
-pub static MAX_PRIORITY: u32 = 10;
+pub(super) static MAX_PRIORITY: u32 = 10;

/// 品詞ごとの情報。
-#[derive(Debug, Getters)]
-pub struct PartOfSpeechDetail {
+#[derive(Debug)]
+pub(super) struct PartOfSpeechDetail {
/// 品詞。
-pub part_of_speech: &'static str,
+pub(super) part_of_speech: &'static str,
/// 品詞細分類1。
-pub part_of_speech_detail_1: &'static str,
+pub(super) part_of_speech_detail_1: &'static str,
/// 品詞細分類2。
-pub part_of_speech_detail_2: &'static str,
+pub(super) part_of_speech_detail_2: &'static str,
/// 品詞細分類3。
-pub part_of_speech_detail_3: &'static str,
+pub(super) part_of_speech_detail_3: &'static str,
/// 文脈IDは辞書の左・右文脈IDのこと。
///
/// 参考: <https://github.com/VOICEVOX/open_jtalk/blob/427cfd761b78efb6094bea3c5bb8c968f0d711ab/src/mecab-naist-jdic/_left-id.def>
-pub context_id: i32,
+pub(super) context_id: i32,
/// コストのパーセンタイル。
-pub cost_candidates: Vec<i32>,
+cost_candidates: Vec<i32>,
/// アクセント結合規則の一覧。
-pub accent_associative_rules: Vec<&'static str>,
+_accent_associative_rules: Vec<&'static str>, // unused for now
}

// 元データ: https://github.com/VOICEVOX/voicevox_engine/blob/master/voicevox_engine/part_of_speech_data.py
-pub static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDetail>> =
+pub(super) static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDetail>> =
Lazy::new(|| {
HashMap::from_iter([
(
@@ -45,7 +44,7 @@ pub static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDet
cost_candidates: vec![
-988, 3488, 4768, 6048, 7328, 8609, 8734, 8859, 8984, 9110, 14176,
],
-accent_associative_rules: vec!["*", "C1", "C2", "C3", "C4", "C5"],
+_accent_associative_rules: vec!["*", "C1", "C2", "C3", "C4", "C5"],
},
),
(
@@ -59,7 +58,7 @@ pub static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDet
cost_candidates: vec![
-4445, 49, 1473, 2897, 4321, 5746, 6554, 7362, 8170, 8979, 15001,
],
-accent_associative_rules: vec!["*", "C1", "C2", "C3", "C4", "C5"],
+_accent_associative_rules: vec!["*", "C1", "C2", "C3", "C4", "C5"],
},
),
(
@@ -73,7 +72,7 @@ pub static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDet
cost_candidates: vec![
3100, 6160, 6360, 6561, 6761, 6962, 7414, 7866, 8318, 8771, 13433,
],
-accent_associative_rules: vec!["*"],
+_accent_associative_rules: vec!["*"],
},
),
(
@@ -87,7 +86,7 @@ pub static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDet
cost_candidates: vec![
1527, 3266, 3561, 3857, 4153, 4449, 5149, 5849, 6549, 7250, 10001,
],
-accent_associative_rules: vec!["*"],
+_accent_associative_rules: vec!["*"],
},
),
(
@@ -101,7 +100,7 @@ pub static PART_OF_SPEECH_DETAIL: Lazy<HashMap<UserDictWordType, PartOfSpeechDet
cost_candidates: vec![
4399, 5373, 6041, 6710, 7378, 8047, 9440, 10834, 12228, 13622, 15847,
],
-accent_associative_rules: vec!["*", "C1", "C2", "C3", "C4", "C5"],
+_accent_associative_rules: vec!["*", "C1", "C2", "C3", "C4", "C5"],
},
),
])
@@ -115,7 +114,7 @@ fn search_cost_candidates(context_id: i32) -> &'static [i32] {
.cost_candidates
}

-pub fn priority2cost(context_id: i32, priority: u32) -> i32 {
+pub(super) fn priority2cost(context_id: i32, priority: u32) -> i32 {
let cost_candidates = search_cost_candidates(context_id);
cost_candidates[(MAX_PRIORITY - priority) as usize]
}
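
For context on priority2cost above: a user-dictionary priority between MIN_PRIORITY (0) and MAX_PRIORITY (10) selects one of the eleven cost_candidates, with a higher priority picking a lower cost (a lower MeCab cost means the entry is preferred during segmentation). A minimal standalone sketch of that mapping — it takes the cost table directly instead of looking it up by context_id as the real function does, and the sample values are copied from the first cost_candidates list in this diff (its word-type key is collapsed in this view, so treat them purely as example data):

const MAX_PRIORITY: u32 = 10;

fn priority2cost(cost_candidates: &[i32], priority: u32) -> i32 {
    // Priority 10 maps to index 0 (the cheapest cost); priority 0 maps to index 10.
    cost_candidates[(MAX_PRIORITY - priority) as usize]
}

fn main() {
    let cost_candidates = [
        -988, 3488, 4768, 6048, 7328, 8609, 8734, 8859, 8984, 9110, 14176,
    ];
    assert_eq!(priority2cost(&cost_candidates, 10), -988); // highest priority, lowest cost
    assert_eq!(priority2cost(&cost_candidates, 0), 14176); // lowest priority, highest cost
    println!("priority 5 -> cost {}", priority2cost(&cost_candidates, 5));
}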
2 changes: 1 addition & 1 deletion crates/voicevox_core/src/user_dict/word.rs
@@ -219,7 +219,7 @@ pub enum UserDictWordType
}

impl UserDictWord {
-pub fn to_mecab_format(&self) -> String {
+pub(super) fn to_mecab_format(&self) -> String {
let pos = PART_OF_SPEECH_DETAIL.get(&self.word_type).unwrap();
format!(
"{},{},{},{},{},{},{},{},{},{},{},{},{},{}/{},{}",
2 changes: 1 addition & 1 deletion crates/voicevox_core_c_api/src/helpers.rs
@@ -63,7 +63,7 @@ pub(crate) fn into_result_code_with_error(result: CApiResult<()>) -> VoicevoxRes
pub(crate) type CApiResult<T> = std::result::Result<T, CApiError>;

#[derive(Error, Debug)]
-pub enum CApiError {
+pub(crate) enum CApiError {
#[error("{0}")]
RustApi(#[from] voicevox_core::Error),
#[error("UTF-8として不正な入力です")]
4 changes: 2 additions & 2 deletions crates/voicevox_core_java_api/src/common.rs
@@ -29,7 +29,7 @@ macro_rules! enum_object {
};
}

-pub fn throw_if_err<T, F>(mut env: JNIEnv<'_>, fallback: T, inner: F) -> T
+pub(crate) fn throw_if_err<T, F>(mut env: JNIEnv<'_>, fallback: T, inner: F) -> T
where
F: FnOnce(&mut JNIEnv<'_>) -> Result<T, JavaApiError>,
{
@@ -155,7 +155,7 @@ where
}

#[derive(From, Debug)]
-pub enum JavaApiError {
+pub(crate) enum JavaApiError {
#[from]
RustApi(voicevox_core::Error),

