functions.py
import asyncio
import io
import time
from pathlib import Path

import numpy as np
import librosa
import soundfile
import edge_tts  # version 6.1.3

from slicer import Slicer


async def _main(text, voice, output_file) -> None:
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(output_file)


def generate_wav(text, voice, output_file):
    """Synchronous wrapper: synthesize `text` with edge-tts and save it to `output_file`."""
    # asyncio.run() replaces the deprecated get_event_loop().run_until_complete() pattern.
    asyncio.run(_main(text, voice, output_file))


def split(audio, sample_rate, hop_size, db_thresh=-40, min_len=5000):
    """Slice `audio` into voiced chunks and return (start_frame, samples) pairs."""
    slicer = Slicer(
        sr=sample_rate,
        threshold=db_thresh,
        min_length=min_len)
    chunks = dict(slicer.slice(audio))
    result = []
    for chunk in chunks.values():
        # "split_time" holds the chunk boundaries in samples as "start,end".
        tag = chunk["split_time"].split(",")
        if tag[0] != tag[1]:
            start_frame = int(int(tag[0]) // hop_size)
            end_frame = int(int(tag[1]) // hop_size)
            if end_frame > start_frame:
                result.append((
                    start_frame,
                    audio[int(start_frame * hop_size): int(end_frame * hop_size)]))
    return result


def cross_fade(a: np.ndarray, b: np.ndarray, idx: int):
    """Linearly cross-fade `b` into `a`, with the overlap starting at sample index `idx`."""
    result = np.zeros(idx + b.shape[0])
    fade_len = a.shape[0] - idx  # length of the overlapping region
    np.copyto(dst=result[:idx], src=a[:idx])
    k = np.linspace(0, 1.0, num=fade_len, endpoint=True)
    result[idx: a.shape[0]] = (1 - k) * a[idx:] + k * b[:fade_len]
    np.copyto(dst=result[a.shape[0]:], src=b[fade_len:])
    return result
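

# Hypothetical usage sketch (not part of the original module): synthesize speech
# with edge-tts, reload it, and slice it into voiced segments. The voice name,
# file name, and hop size below are example values, not project settings.
if __name__ == "__main__":
    generate_wav("Hello, world.", "en-US-AriaNeural", "tts_output.wav")
    audio, sr = librosa.load("tts_output.wav", sr=None)
    segments = split(audio, sr, hop_size=512)
    print(f"Got {len(segments)} voiced segments")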