Create app.py
app.py ADDED
@@ -0,0 +1,70 @@
import os
import uuid
import torch
import requests
import gradio as gr
from moviepy.editor import VideoFileClip
from speechbrain.pretrained.interfaces import foreign_class

# Load the pretrained model
classifier = foreign_class(
    source="Jzuluaga/accent-id-commonaccent_xlsr-en-english",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier"
)

def extract_audio(video_path, output_wav="output.wav"):
    video = VideoFileClip(video_path)
    audio = video.audio
    audio.write_audiofile(output_wav, codec='pcm_s16le', fps=16000)
    return output_wav

def download_video(url, filename="temp.mp4"):
    response = requests.get(url, stream=True)
    with open(filename, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    return filename

def classify_video_accent(video_url):
    uid = str(uuid.uuid4())
    video_path = f"{uid}.mp4"
    wav_path = f"{uid}.wav"

    try:
        download_video(video_url, video_path)
        extract_audio(video_path, wav_path)

        out_prob, score, index, text_lab = classifier.classify_file(wav_path)
        confidence = torch.max(out_prob).item() * 100

        return {
            "accent": text_lab,
            "confidence_score": f"{confidence:.2f}%",
            "summary": f"The speaker is most likely using a(n) {text_lab} English accent."
        }
    finally:
        for f in [video_path, wav_path]:
            if os.path.exists(f):
                os.remove(f)

def gradio_accent_classifier(video_url):
    try:
        result = classify_video_accent(video_url)
        return f"Accent: {result['accent']}\nConfidence: {result['confidence_score']}\nSummary: {result['summary']}"
    except Exception as e:
        return f"Error: {str(e)}"

iface = gr.Interface(
    fn=gradio_accent_classifier,
    inputs=gr.Textbox(label="Public .mp4 Video URL"),
    outputs="text",
    title="English Accent Classifier",
    description="Paste a direct link to a public .mp4 file to classify the English accent spoken in the video."
)

if __name__ == "__main__":
    iface.launch()
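
Once the Space is running, the interface above can also be queried programmatically. The snippet below is a minimal sketch using gradio_client; the Space id is a hypothetical placeholder, and it assumes the single default endpoint ("/predict") that gr.Interface registers.

from gradio_client import Client

# Hypothetical Space id -- replace with the real owner/space-name once deployed.
client = Client("your-username/english-accent-classifier")

# The Textbox input maps to a single positional string argument.
result = client.predict(
    "https://example.com/sample.mp4",  # direct link to a public .mp4 file
    api_name="/predict",
)
print(result)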