ACloudCenter committed
Commit 09eca75 · verified · 1 Parent(s): 7b150af

Update app.py

Files changed (1): app.py (+4, -4)
app.py CHANGED
@@ -10,8 +10,8 @@ from transformers import MoonshineForConditionalGeneration, AutoProcessor
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
-model = MoonshineForConditionalGeneration.from_pretrained('UsefulSensors/moonshine-tiny').to(device).to(torch_dtype)
-processor = AutoProcessor.from_pretrained('UsefulSensors/moonshine-tiny')
+model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny").to(device).to(torch_dtype)
+processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
 
 TOKENS_PER_SEC = 12.0
 MIN_NEW_TOKENS = 48
@@ -28,13 +28,13 @@ def transcribe_audio(audio_file):
     if sr != target_sr:
         audio_array = librosa.resample(audio_array, orig_sr=sr, target_sr=target_sr)
     inputs = processor(audio_array, sampling_rate=target_sr, return_tensors="pt")
-    inputs = {k: v.to(device) for k, v in inputs.items()}
+    inputs = {k: v.to(device=device, dtype=torch_dtype) for k, v in inputs.items()}
     duration_sec = len(audio_array) / float(target_sr)
     max_new_tokens = min(MAX_NEW_TOKENS_CAP, max(MIN_NEW_TOKENS, int(math.ceil(duration_sec * TOKENS_PER_SEC))))
     generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=max_new_tokens, no_repeat_ngram_size=4, repetition_penalty=1.05)
     return processor.decode(generated_ids[0], skip_special_tokens=True)
 
-theme = gr.themes.Ocean(primary_hue="indigo", secondary_hue="fuchsia", neutral_hue="slate").set(button_large_radius='*radius_sm')
+theme = gr.themes.Ocean(primary_hue="indigo", secondary_hue="fuchsia", neutral_hue="slate").set(button_large_radius="*radius_sm")
 
 with gr.Blocks(theme=theme) as demo:
     gr.Markdown("## Moonshine Tiny STT - 27M Parameters")