Spaces:
Running
on
Zero
Running
on
Zero
Generate 3 outputs
#6
by
Fabrice-TIERCELIN
- opened
app.py
CHANGED
|
@@ -52,13 +52,13 @@ class Tango:
|
|
| 52 |
for i in range(0, len(lst), n):
|
| 53 |
yield lst[i:i + n]
|
| 54 |
|
| 55 |
-
def generate(self, prompt, steps=100, guidance=3, samples=
|
| 56 |
""" Generate audio for a single prompt string. """
|
| 57 |
with torch.no_grad():
|
| 58 |
latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
|
| 59 |
mel = self.vae.decode_first_stage(latents)
|
| 60 |
wave = self.vae.decode_to_waveform(mel)
|
| 61 |
-
return wave
|
| 62 |
|
| 63 |
def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
|
| 64 |
""" Generate audio for a list of prompt strings. """
|
|
@@ -86,10 +86,17 @@ tango.model.to(device_type)
|
|
| 86 |
def gradio_generate(prompt, steps, guidance):
|
| 87 |
output_wave = tango.generate(prompt, steps, guidance)
|
| 88 |
# output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
# description_text = """
|
| 95 |
# <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
|
|
@@ -118,7 +125,9 @@ Generate audio using Tango2 by providing a text prompt. Tango2 was built from Ta
|
|
| 118 |
"""
|
| 119 |
# Gradio input and output components
|
| 120 |
input_text = gr.Textbox(lines=2, label="Prompt")
|
| 121 |
-
|
|
|
|
|
|
|
| 122 |
denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
|
| 123 |
guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
|
| 124 |
|
|
@@ -126,7 +135,7 @@ guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guid
|
|
| 126 |
gr_interface = gr.Interface(
|
| 127 |
fn=gradio_generate,
|
| 128 |
inputs=[input_text, denoising_steps, guidance_scale],
|
| 129 |
-
outputs=[
|
| 130 |
title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
|
| 131 |
description=description_text,
|
| 132 |
allow_flagging=False,
|
|
|
|
| 52 |
for i in range(0, len(lst), n):
|
| 53 |
yield lst[i:i + n]
|
| 54 |
|
| 55 |
+
def generate(self, prompt, steps=100, guidance=3, samples=3, disable_progress=True):
|
| 56 |
""" Generate audio for a single prompt string. """
|
| 57 |
with torch.no_grad():
|
| 58 |
latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
|
| 59 |
mel = self.vae.decode_first_stage(latents)
|
| 60 |
wave = self.vae.decode_to_waveform(mel)
|
| 61 |
+
return wave
|
| 62 |
|
| 63 |
def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
|
| 64 |
""" Generate audio for a list of prompt strings. """
|
|
|
|
| 86 |
def gradio_generate(prompt, steps, guidance):
|
| 87 |
output_wave = tango.generate(prompt, steps, guidance)
|
| 88 |
# output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
|
| 89 |
+
|
| 90 |
+
output_filename_1 = "tmp1_.wav"
|
| 91 |
+
wavio.write(output_filename_1, output_wave[0], rate=16000, sampwidth=2)
|
| 92 |
+
|
| 93 |
+
output_filename_2 = "tmp2_.wav"
|
| 94 |
+
wavio.write(output_filename_2, output_wave[1], rate=16000, sampwidth=2)
|
| 95 |
+
|
| 96 |
+
output_filename_3 = "tmp3_.wav"
|
| 97 |
+
wavio.write(output_filename_3, output_wave[2], rate=16000, sampwidth=2)
|
| 98 |
+
|
| 99 |
+
return [output_filename_1, output_filename_2, output_filename_3]
|
| 100 |
|
| 101 |
# description_text = """
|
| 102 |
# <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
|
|
|
|
| 125 |
"""
|
| 126 |
# Gradio input and output components
|
| 127 |
input_text = gr.Textbox(lines=2, label="Prompt")
|
| 128 |
+
output_audio_1 = gr.Audio(label="Generated Audio #1/3", type="filepath")
|
| 129 |
+
output_audio_2 = gr.Audio(label="Generated Audio #2/3", type="filepath")
|
| 130 |
+
output_audio_3 = gr.Audio(label="Generated Audio #3/3", type="filepath")
|
| 131 |
denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
|
| 132 |
guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
|
| 133 |
|
|
|
|
| 135 |
gr_interface = gr.Interface(
|
| 136 |
fn=gradio_generate,
|
| 137 |
inputs=[input_text, denoising_steps, guidance_scale],
|
| 138 |
+
outputs=[output_audio_1, output_audio_2, output_audio_3],
|
| 139 |
title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
|
| 140 |
description=description_text,
|
| 141 |
allow_flagging=False,
|