speechrecognition.py
# importing libraries
import speech_recognition as sr
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence
# create a speech recognition object
r = sr.Recognizer()
# a function to recognize speech in the audio file
# so that we don't repeat ourselves in other functions
def transcribe_audio(path):
    # use the audio file as the audio source
    with sr.AudioFile(path) as source:
        audio_listened = r.record(source)
        # try converting it to text
        text = r.recognize_google(audio_listened)
    return text
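# A minimal offline variant (a sketch, not part of the original script):
# recognize_google sends the audio to Google's free web API, while
# recognize_sphinx from the same library runs locally, assuming the
# pocketsphinx package is installed.
def transcribe_audio_offline(path):
    # use the audio file as the audio source
    with sr.AudioFile(path) as source:
        audio_listened = r.record(source)
        # CMU Sphinx runs offline; accuracy is lower than the web API
        text = r.recognize_sphinx(audio_listened)
    return text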
# a function that splits the audio file into chunks on silence
# and applies speech recognition
def get_large_audio_transcription_on_silence(path):
    """
    Split the large audio file into chunks
    and apply speech recognition on each of these chunks
    """
    # open the audio file using pydub
    sound = AudioSegment.from_file(path)
    # split the audio where silence is 500 milliseconds or more and get chunks
    chunks = split_on_silence(sound,
        # experiment with this value for your target audio file
        min_silence_len=500,
        # adjust this per requirement
        silence_thresh=sound.dBFS-14,
        # keep 500 ms of silence at the chunk boundaries, adjustable as well
        keep_silence=500,
    )
    folder_name = "audio-chunks"
    # create a directory to store the audio chunks
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    whole_text = ""
    # process each chunk
    for i, audio_chunk in enumerate(chunks, start=1):
        # export the audio chunk and save it in
        # the `folder_name` directory
        chunk_filename = os.path.join(folder_name, f"chunk{i}.wav")
        audio_chunk.export(chunk_filename, format="wav")
        # recognize the chunk
        with sr.AudioFile(chunk_filename) as source:
            audio_listened = r.record(source)
            # try converting it to text
            try:
                text = r.recognize_google(audio_listened)
            except sr.UnknownValueError as e:
                print("Error:", str(e))
            else:
                text = f"{text.capitalize()}. "
                print(chunk_filename, ":", text)
                whole_text += text
    # return the text for all chunks detected
    return whole_text
# a function that splits the audio file into fixed interval chunks
# and applies speech recognition
def get_large_audio_transcription_fixed_interval(path, minutes=5):
    """
    Split the large audio file into fixed-interval chunks
    and apply speech recognition on each of these chunks
    """
    # open the audio file using pydub
    sound = AudioSegment.from_file(path)
    # split the audio file into chunks
    chunk_length_ms = int(1000 * 60 * minutes)  # convert to milliseconds
    chunks = [sound[i:i + chunk_length_ms] for i in range(0, len(sound), chunk_length_ms)]
    folder_name = "audio-fixed-chunks"
    # create a directory to store the audio chunks
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    whole_text = ""
    # process each chunk
    for i, audio_chunk in enumerate(chunks, start=1):
        # export the audio chunk and save it in
        # the `folder_name` directory
        chunk_filename = os.path.join(folder_name, f"chunk{i}.wav")
        audio_chunk.export(chunk_filename, format="wav")
        # recognize the chunk
        with sr.AudioFile(chunk_filename) as source:
            audio_listened = r.record(source)
            # try converting it to text
            try:
                text = r.recognize_google(audio_listened)
            except sr.UnknownValueError as e:
                print("Error:", str(e))
            else:
                text = f"{text.capitalize()}. "
                print(chunk_filename, ":", text)
                whole_text += text
    # return the text for all chunks detected
    return whole_text
if __name__ == "__main__":
    print(get_large_audio_transcription_on_silence("7601-291468-0006.wav"))
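Note that sr.AudioFile only reads WAV, AIFF, and FLAC sources, so other formats need converting first. A minimal sketch with pydub, assuming a hypothetical input.mp3 in the working directory:

from pydub import AudioSegment
from speechrecognition import transcribe_audio

# "input.mp3" is an illustrative filename, not one of the sample files
AudioSegment.from_file("input.mp3").export("input.wav", format="wav")
print(transcribe_audio("input.wav"))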
whisper_api.py
import openai
# API key
openai.api_key = "<API_KEY>"
def get_openai_api_transcription(audio_filename):
    # open the audio file
    with open(audio_filename, "rb") as audio_file:
        # transcribe the audio file
        transcription = openai.Audio.transcribe("whisper-1", audio_file)  # whisper-1 is the model name
    return transcription
if __name__ == "__main__":
    transcription = get_openai_api_transcription("7601-291468-0006.wav")
    print(transcription.get("text"))
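openai.Audio.transcribe is the pre-1.0 SDK interface; on openai>=1.0 the equivalent call goes through a client object. A minimal sketch, assuming the key is set in the OPENAI_API_KEY environment variable:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def get_openai_api_transcription_v1(audio_filename):
    with open(audio_filename, "rb") as audio_file:
        transcription = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
    return transcription.text  # v1 returns an object with a .text attribute, not a dict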
whisper_api_long.py
from pydub.silence import split_on_silence
from pydub import AudioSegment
from whisper_api import get_openai_api_transcription
import os
# a function that splits the audio file into chunks
# and applies speech recognition
def get_large_audio_transcription_on_silence(path):
    """
    Split the large audio file into chunks
    and apply speech recognition on each of these chunks
    """
    # open the audio file using pydub
    sound = AudioSegment.from_file(path)
    # split the audio where silence is 500 milliseconds or more and get chunks
    chunks = split_on_silence(sound,
        # experiment with this value for your target audio file
        min_silence_len=500,
        # adjust this per requirement
        silence_thresh=sound.dBFS-14,
        # keep 500 ms of silence at the chunk boundaries, adjustable as well
        keep_silence=500,
    )
    folder_name = "audio-chunks"
    # create a directory to store the audio chunks
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    whole_text = ""
    # process each chunk
    for i, audio_chunk in enumerate(chunks, start=1):
        # export the audio chunk and save it in
        # the `folder_name` directory
        chunk_filename = os.path.join(folder_name, f"chunk{i}.wav")
        audio_chunk.export(chunk_filename, format="wav")
        # recognize the chunk
        transcription = get_openai_api_transcription(chunk_filename)
        print(f"{chunk_filename}: {transcription.get('text')}")
        whole_text += " " + transcription.get("text")
    # return the text for all chunks detected
    return whole_text
# a function that splits the audio file into fixed interval chunks
# and applies speech recognition
def get_large_audio_transcription_fixed_interval(path, minutes=5):
    """
    Split the large audio file into fixed-interval chunks
    and apply speech recognition on each of these chunks
    """
    # open the audio file using pydub
    sound = AudioSegment.from_file(path)
    # split the audio file into chunks
    chunk_length_ms = int(1000 * 60 * minutes)  # convert to milliseconds
    chunks = [sound[i:i + chunk_length_ms] for i in range(0, len(sound), chunk_length_ms)]
    folder_name = "audio-fixed-chunks"
    # create a directory to store the audio chunks
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    whole_text = ""
    # process each chunk
    for i, audio_chunk in enumerate(chunks, start=1):
        # export the audio chunk and save it in
        # the `folder_name` directory
        chunk_filename = os.path.join(folder_name, f"chunk{i}.wav")
        audio_chunk.export(chunk_filename, format="wav")
        # recognize the chunk
        transcription = get_openai_api_transcription(chunk_filename)
        print(f"{chunk_filename}: {transcription.get('text')}")
        whole_text += " " + transcription.get("text")
    # return the text for all chunks detected
    return whole_text
if __name__ == "__main__":
    # print("\nFull text:", get_large_audio_transcription_fixed_interval("032.mp3", minutes=1))
    print("\nFull text:", get_large_audio_transcription_on_silence("7601-291468-0006.wav"))
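Chunking is only needed because the Whisper API caps uploads at 25 MB. A minimal sketch of a size check that picks between a direct call and the silence-based splitter; transcribe_any_size is an illustrative helper meant to live in this same file:

def transcribe_any_size(path):
    # 25 MB is OpenAI's documented upload limit for the Whisper API
    if os.path.getsize(path) <= 25 * 1024 * 1024:
        return get_openai_api_transcription(path).get("text")
    # too large for a single request: fall back to silence-based chunking
    return get_large_audio_transcription_on_silence(path)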
transformers_whisper.py
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import torch
import torchaudio
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# whisper_model_name = "openai/whisper-tiny.en" # English-only, ~ 151 MB
# whisper_model_name = "openai/whisper-base.en" # English-only, ~ 290 MB
# whisper_model_name = "openai/whisper-small.en" # English-only, ~ 967 MB
# whisper_model_name = "openai/whisper-medium.en" # English-only, ~ 3.06 GB
whisper_model_name = "openai/whisper-tiny" # multilingual, ~ 151 MB
# whisper_model_name = "openai/whisper-base" # multilingual, ~ 290 MB
# whisper_model_name = "openai/whisper-small" # multilingual, ~ 967 MB
# whisper_model_name = "openai/whisper-medium" # multilingual, ~ 3.06 GB
# whisper_model_name = "openai/whisper-large-v2" # multilingual, ~ 6.17 GB
# load the model and the processor
whisper_processor = WhisperProcessor.from_pretrained(whisper_model_name)
whisper_model = WhisperForConditionalGeneration.from_pretrained(whisper_model_name).to(device)
def load_audio(audio_path):
    """Load the audio file & convert to 16,000 sampling rate"""
    # load our wav file
    speech, sr = torchaudio.load(audio_path)
    resampler = torchaudio.transforms.Resample(sr, 16000)
    speech = resampler(speech)
    return speech.squeeze()
def get_transcription_whisper(audio_path, model, processor, language="english", skip_special_tokens=True):
    # resample from whatever the audio sampling rate is to 16,000
    speech = load_audio(audio_path)
    # get the input features from the audio file
    input_features = processor(speech, return_tensors="pt", sampling_rate=16000).input_features.to(device)
    # get the forced decoder ids
    forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task="transcribe")
    # generate the transcription
    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
    # decode the predicted ids
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=skip_special_tokens)[0]
    return transcription
if __name__ == "__main__":
    english_transcription = get_transcription_whisper("7601-291468-0006.wav",
                                                      whisper_model,
                                                      whisper_processor,
                                                      language="english",
                                                      skip_special_tokens=True)
    print("English transcription:", english_transcription)
    arabic_transcription = get_transcription_whisper("arabic-audio.wav",
                                                     whisper_model,
                                                     whisper_processor,
                                                     language="arabic",
                                                     skip_special_tokens=True)
    print("Arabic transcription:", arabic_transcription)
    spanish_transcription = get_transcription_whisper("cual-es-la-fecha-cumple.mp3",
                                                      whisper_model,
                                                      whisper_processor,
                                                      language="spanish",
                                                      skip_special_tokens=True)
    print("Spanish transcription:", spanish_transcription)
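The same multilingual checkpoints can also translate speech into English by switching the task in the forced decoder ids from "transcribe" to "translate". A minimal sketch reusing load_audio and the model/processor above; get_translation_whisper is an illustrative helper, not part of the original script:

def get_translation_whisper(audio_path, model, processor, language="arabic"):
    speech = load_audio(audio_path)
    input_features = processor(speech, return_tensors="pt", sampling_rate=16000).input_features.to(device)
    # task="translate" makes Whisper emit English text regardless of the spoken language
    forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task="translate")
    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]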
transformers_whisper_long.py
from transformers import pipeline
import torch
import torchaudio
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# whisper_model_name = "openai/whisper-tiny.en" # English-only, ~ 151 MB
# whisper_model_name = "openai/whisper-base.en" # English-only, ~ 290 MB
# whisper_model_name = "openai/whisper-small.en" # English-only, ~ 967 MB
# whisper_model_name = "openai/whisper-medium.en" # English-only, ~ 3.06 GB
whisper_model_name = "openai/whisper-tiny" # multilingual, ~ 151 MB
# whisper_model_name = "openai/whisper-base" # multilingual, ~ 290 MB
# whisper_model_name = "openai/whisper-small" # multilingual, ~ 967 MB
# whisper_model_name = "openai/whisper-medium" # multilingual, ~ 3.06 GB
# whisper_model_name = "openai/whisper-large-v2" # multilingual, ~ 6.17 GB
def load_audio(audio_path):
    """Load the audio file & convert to 16,000 sampling rate"""
    # load our wav file
    speech, sr = torchaudio.load(audio_path)
    resampler = torchaudio.transforms.Resample(sr, 16000)
    speech = resampler(speech)
    return speech.squeeze()
def get_long_transcription_whisper(audio_path, pipe, return_timestamps=True,
                                   chunk_length_s=10, stride_length_s=1):
    """Get the transcription of a long audio file using the Whisper model"""
    return pipe(load_audio(audio_path).numpy(), return_timestamps=return_timestamps,
                chunk_length_s=chunk_length_s, stride_length_s=stride_length_s)
if __name__ == "__main__":
    # initialize the pipeline
    pipe = pipeline("automatic-speech-recognition",
                    model=whisper_model_name, device=device)
    # get the transcription of a sample long audio file
    output = get_long_transcription_whisper(
        "7601-291468-0006.wav", pipe, chunk_length_s=10, stride_length_s=2)
    print(f"Transcription: {output}")
    print("="*50)
    for chunk in output["chunks"]:
        # print the timestamp and the text
        print(chunk["timestamp"], ":", chunk["text"])
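Since the pipeline returns timestamped chunks, they map naturally onto subtitle formats. A minimal sketch that writes them out as an SRT file; chunks_to_srt and "output.srt" are illustrative names, not part of the original script:

def chunks_to_srt(chunks, srt_path="output.srt"):
    def fmt(seconds):
        # render seconds as the HH:MM:SS,mmm format SRT expects
        ms = int(seconds * 1000)
        h, ms = divmod(ms, 3_600_000)
        m, ms = divmod(ms, 60_000)
        s, ms = divmod(ms, 1_000)
        return f"{h:02}:{m:02}:{s:02},{ms:03}"
    with open(srt_path, "w", encoding="utf-8") as f:
        for i, chunk in enumerate(chunks, start=1):
            start, end = chunk["timestamp"]
            # the final chunk's end timestamp can be None; reuse the start as a fallback
            end = start if end is None else end
            f.write(f"{i}\n{fmt(start)} --> {fmt(end)}\n{chunk['text'].strip()}\n\n")

# e.g., at the end of the __main__ block above:
# chunks_to_srt(output["chunks"])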