-
Notifications
You must be signed in to change notification settings - Fork 0
/
Recognition.py
57 lines (43 loc) · 1.62 KB
/
Recognition.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import pyaudio
import wave
import nemo.collections.asr as nemo_asr
class Recognition:
    """Record microphone audio to a WAV file and transcribe it with a
    NeMo QuartzNet CTC ASR model restored from a local checkpoint."""

    def __init__(self):
        # Capture parameters: 16 kHz, mono, 16-bit PCM, read in 1024-frame chunks.
        self.__CHUNK = 1024
        self.__FORMAT = pyaudio.paInt16
        self.__CHANNELS = 1
        self.__RATE = 16000
        self.__OUTPUT_FILENAME = "processed_audio.wav"
        self.__AUDIO_PATH = f"./{self.__OUTPUT_FILENAME}"
        # Restore the pre-trained checkpoint once at construction; loading is
        # expensive and the model is reused for every transcription.
        self.__sber_quartzNet = nemo_asr.models.EncDecCTCModel.restore_from("./ZMv")

    def record(self, is_recording):
        """Capture microphone audio while ``is_recording[0]`` is truthy and
        write the result to ``processed_audio.wav``.

        :param is_recording: one-element mutable container (e.g. a list) whose
            first item another thread flips to a falsy value to stop capture.
        """
        p = pyaudio.PyAudio()
        stream = p.open(format=self.__FORMAT,
                        channels=self.__CHANNELS,
                        rate=self.__RATE,
                        input=True,
                        frames_per_buffer=self.__CHUNK)
        print("* recording")
        frames = []
        try:
            while is_recording[0]:
                frames.append(stream.read(self.__CHUNK))
        finally:
            # Always release the audio device, even if a read fails
            # mid-loop (e.g. on an input overflow).
            print("* done recording")
            stream.stop_stream()
            stream.close()
            p.terminate()
        # Context manager guarantees the WAV header is finalized and the
        # file handle closed even if a write raises.
        with wave.open(self.__OUTPUT_FILENAME, 'wb') as wf:
            wf.setnchannels(self.__CHANNELS)
            wf.setsampwidth(p.get_sample_size(self.__FORMAT))
            wf.setframerate(self.__RATE)
            wf.writeframes(b''.join(frames))

    def __recognize(self, language: str) -> str:
        """Run the ASR model on the last recorded audio file.

        :param language: currently unused by the model call; kept for
            interface compatibility with callers.
        :return: transcript of the recorded audio.
        """
        transcripts = self.__sber_quartzNet.transcribe(
            paths2audio_files=[self.__AUDIO_PATH])
        print("* done transcribing")
        return transcripts[0]

    def recognize_speech(self, language: str) -> str:
        """Transcribe the most recently recorded audio and return the text."""
        transcript = self.__recognize(language)
        print("* done speech recognize")
        return transcript