Commit 1: Add 50 file(s)
demos/audio_debugger/run.ipynb
CHANGED
@@ -1 +1 @@
-
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "\n", "# get_audio returns the path to the audio file\n", "audio_file = gr.get_audio(\"cantina.wav\")\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file, buttons=[\"download\"])\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(\n", " lambda x: x
+
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "\n", "# get_audio returns the path to the audio file\n", "audio_file = gr.get_audio(\"cantina.wav\")\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file, buttons=[\"download\"])\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(\n", " lambda x: x,\n", " gr.Audio(),\n", " gr.Audio(),\n", " examples=[audio_file],\n", " cache_examples=True,\n", " api_name=\"predict\"\n", " )\n", " with gr.Tab(\"Streaming\"):\n", " gr.Interface(\n", " lambda x: x,\n", " gr.Audio(streaming=True),\n", " \"audio\",\n", " examples=[audio_file],\n", " cache_examples=True,\n", " api_name=\"predict\"\n", " )\n", " with gr.Tab(\"console\"):\n", " ip = gr.Textbox(label=\"User IP Address\")\n", " gr.Interface(\n", " lambda cmd: subprocess.run([cmd], capture_output=True, shell=True, check=False)\n", " .stdout.decode(\"utf-8\")\n", " .strip(),\n", " \"text\",\n", " \"text\",\n", " api_name=\"predict\"\n", " )\n", "\n", " def get_ip(request: gr.Request):\n", " return request.client.host\n", "\n", " demo.load(get_ip, None, ip)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/audio_debugger/run.py
CHANGED
@@ -9,10 +9,10 @@ with gr.Blocks() as demo:
         gr.Audio(audio_file, buttons=["download"])
     with gr.Tab("Interface"):
         gr.Interface(
-            lambda x: x,
+            lambda x: x,
             gr.Audio(),
             gr.Audio(),
-            examples=[audio_file],
+            examples=[audio_file],
             cache_examples=True,
             api_name="predict"
         )
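For reference, a minimal standalone sketch of the cached-examples pattern the updated run.py relies on. It assumes a Gradio build that provides gr.get_audio and accepts api_name on gr.Interface (both used in the demo above); the identity lambda stands in for a real audio model.

import gradio as gr

# gr.get_audio() resolves a sample file bundled with Gradio, as in the demo above;
# any local audio path works the same way.
audio_file = gr.get_audio("cantina.wav")

echo = gr.Interface(
    fn=lambda x: x,       # identity: return the uploaded audio unchanged
    inputs=gr.Audio(),
    outputs=gr.Audio(),
    examples=[audio_file],
    cache_examples=True,  # pre-compute the example's output at startup
    api_name="predict",   # stable endpoint name for API clients
)

if __name__ == "__main__":
    echo.launch()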
demos/cancel_events/run.ipynb
CHANGED
@@ -1 +1 @@
-
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: cancel_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import time\n", "import gradio as gr\n", "import atexit\n", "import pathlib\n", "\n", "log_file = pathlib.Path(__file__).parent / \"cancel_events_output_log.txt\"\n", "\n", "def fake_diffusion(steps):\n", " log_file.write_text(\"\")\n", " for i in range(steps):\n", " print(f\"Current step: {i}\")\n", " with log_file.open(\"a\") as f:\n", " f.write(f\"Current step: {i}\\n\")\n", " time.sleep(0.2)\n", " yield str(i)\n", "\n", "def long_prediction(*args, **kwargs):\n", " time.sleep(4)\n", " return 42, 42\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n", " run = gr.Button(value=\"Start Iterating\")\n", " output = gr.Textbox(label=\"Iterative Output\")\n", " stop = gr.Button(value=\"Stop Iterating\")\n", " with gr.Column():\n", " textbox = gr.Textbox(label=\"Prompt\")\n", " loading_box = gr.Textbox(label=\"Loading indicator for expensive calculation\")\n", " loading_box2 = gr.Textbox(label=\"Loading indicator for expensive calculation\")\n", " prediction = gr.Number(label=\"Expensive Calculation\")\n", " prediction2 = gr.Number(label=\"Expensive Calculation\")\n", " run_pred = gr.Button(value=\"Run Expensive Calculation\")\n", " with gr.Column():\n", " cancel_on_change = gr.Textbox(\n", " label=\"Cancel Iteration and Expensive Calculation on Change\"\n", " )\n", " cancel_on_submit = gr.Textbox(\n", " label=\"Cancel Iteration and Expensive Calculation on Submit\"\n", " )\n", " echo = gr.Textbox(label=\"Echo\")\n", " with gr.Row():\n", " with gr.Column():\n", " image = gr.Image(\n", " sources=[\"webcam\"], label=\"Cancel on clear\", interactive=True\n", " )\n", " with gr.Column():\n", " video = gr.Video(\n", " sources=[\"webcam\"], label=\"Cancel on start recording\", interactive=True\n", " )\n", "\n", " click_event = run.click(fake_diffusion, n, output)\n", " stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n", " pred_event = run_pred.click(\n", " fn=long_prediction, inputs=[textbox], outputs=[prediction, prediction2], show_progress_on=[loading_box, loading_box2]
+
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: cancel_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import time\n", "import gradio as gr\n", "import atexit\n", "import pathlib\n", "\n", "log_file = pathlib.Path(__file__).parent / \"cancel_events_output_log.txt\"\n", "\n", "\n", "def fake_diffusion(steps):\n", " log_file.write_text(\"\")\n", " for i in range(steps):\n", " print(f\"Current step: {i}\")\n", " with log_file.open(\"a\") as f:\n", " f.write(f\"Current step: {i}\\n\")\n", " time.sleep(0.2)\n", " yield str(i)\n", "\n", "\n", "def long_prediction(*args, **kwargs):\n", " time.sleep(4)\n", " return 42, 42\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n", " run = gr.Button(value=\"Start Iterating\")\n", " output = gr.Textbox(label=\"Iterative Output\")\n", " stop = gr.Button(value=\"Stop Iterating\")\n", " with gr.Column():\n", " textbox = gr.Textbox(label=\"Prompt\")\n", " loading_box = gr.Textbox(\n", " label=\"Loading indicator for expensive calculation\"\n", " )\n", " loading_box2 = gr.Textbox(\n", " label=\"Loading indicator for expensive calculation\"\n", " )\n", " prediction = gr.Number(label=\"Expensive Calculation\")\n", " prediction2 = gr.Number(label=\"Expensive Calculation\")\n", " run_pred = gr.Button(value=\"Run Expensive Calculation\")\n", " with gr.Column():\n", " cancel_on_change = gr.Textbox(\n", " label=\"Cancel Iteration and Expensive Calculation on Change\"\n", " )\n", " cancel_on_submit = gr.Textbox(\n", " label=\"Cancel Iteration and Expensive Calculation on Submit\"\n", " )\n", " echo = gr.Textbox(label=\"Echo\")\n", " with gr.Row():\n", " with gr.Column():\n", " image = gr.Image(\n", " sources=[\"webcam\"], label=\"Cancel on clear\", interactive=True\n", " )\n", " with gr.Column():\n", " video = gr.Video(\n", " sources=[\"webcam\"], label=\"Cancel on start recording\", interactive=True\n", " )\n", "\n", " click_event = run.click(fake_diffusion, n, output)\n", " stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n", " pred_event = run_pred.click(\n", " fn=long_prediction,\n", " inputs=[textbox],\n", " outputs=[prediction, prediction2],\n", " show_progress_on=[loading_box, loading_box2],\n", " )\n", "\n", " cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])\n", " cancel_on_submit.submit(\n", " lambda s: s, cancel_on_submit, echo, cancels=[click_event, pred_event]\n", " )\n", " image.clear(None, None, None, cancels=[click_event, pred_event])\n", " video.start_recording(None, None, None, cancels=[click_event, pred_event])\n", "\n", " demo.queue(max_size=20)\n", " atexit.register(lambda: log_file.unlink(True))\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/cancel_events/run.py
CHANGED
@@ -5,6 +5,7 @@ import pathlib
 
 log_file = pathlib.Path(__file__).parent / "cancel_events_output_log.txt"
 
+
 def fake_diffusion(steps):
     log_file.write_text("")
     for i in range(steps):
@@ -14,10 +15,12 @@ def fake_diffusion(steps):
         time.sleep(0.2)
         yield str(i)
 
+
 def long_prediction(*args, **kwargs):
     time.sleep(4)
     return 42, 42
 
+
 with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
@@ -27,8 +30,12 @@ with gr.Blocks() as demo:
             stop = gr.Button(value="Stop Iterating")
         with gr.Column():
             textbox = gr.Textbox(label="Prompt")
-            loading_box = gr.Textbox(label="Loading indicator for expensive calculation")
-            loading_box2 = gr.Textbox(label="Loading indicator for expensive calculation")
+            loading_box = gr.Textbox(
+                label="Loading indicator for expensive calculation"
+            )
+            loading_box2 = gr.Textbox(
+                label="Loading indicator for expensive calculation"
+            )
             prediction = gr.Number(label="Expensive Calculation")
             prediction2 = gr.Number(label="Expensive Calculation")
             run_pred = gr.Button(value="Run Expensive Calculation")
@@ -53,7 +60,10 @@ with gr.Blocks() as demo:
     click_event = run.click(fake_diffusion, n, output)
     stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])
     pred_event = run_pred.click(
-        fn=long_prediction, inputs=[textbox], outputs=[prediction, prediction2], show_progress_on=[loading_box, loading_box2]
+        fn=long_prediction,
+        inputs=[textbox],
+        outputs=[prediction, prediction2],
+        show_progress_on=[loading_box, loading_box2],
     )
 
     cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])
@@ -64,7 +74,7 @@ with gr.Blocks() as demo:
     video.start_recording(None, None, None, cancels=[click_event, pred_event])
 
     demo.queue(max_size=20)
-    atexit.register(lambda: log_file.unlink())
+    atexit.register(lambda: log_file.unlink(True))
 
 if __name__ == "__main__":
     demo.launch()
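For reference, a minimal sketch of the cancellation wiring this demo exercises: a second button cancels an in-flight generator event via cancels=[...], and show_progress_on= (the argument introduced in this version of the demo) pins the loading indicator to specific components. Assumes a Gradio build that supports show_progress_on; the component names are illustrative.

import time

import gradio as gr

def slow_count(steps):
    # deliberately slow generator so there is something to cancel
    for i in range(int(steps)):
        time.sleep(0.5)
        yield str(i)

with gr.Blocks() as demo:
    n = gr.Slider(1, 20, value=10, step=1, label="Steps")
    out = gr.Textbox(label="Output")
    start = gr.Button("Start")
    stop = gr.Button("Stop")

    # show_progress_on routes the loading indicator to the listed components
    run_event = start.click(slow_count, n, out, show_progress_on=[out])
    # fn=None registers no callback; the click only cancels the running event
    stop.click(fn=None, inputs=None, outputs=None, cancels=[run_event])

if __name__ == "__main__":
    demo.launch()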
demos/chatbot_multimodal/run.ipynb
CHANGED
@@ -1 +1 @@
-
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def add_message(history, message):\n", " user_msg = {\"role\": \"user\", \"content\": []}\n", " for x in message[\"files\"]:\n", " user_msg[\"content\"].append({\"path\": x})\n", " if message[\"text\"] is not None:\n", " user_msg[\"content\"].append(message[\"text\"])\n", " history.append(user_msg)\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1][\"content\"] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(elem_id=\"chatbot\")\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " sources=[\"microphone\", \"upload\"],\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None
+
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def add_message(history, message):\n", " user_msg = {\"role\": \"user\", \"content\": []}\n", " for x in message[\"files\"]:\n", " user_msg[\"content\"].append({\"path\": x})\n", " if message[\"text\"] is not None:\n", " user_msg[\"content\"].append(message[\"text\"])\n", " history.append(user_msg)\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1][\"content\"] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(elem_id=\"chatbot\", like_user_message=True)\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " sources=[\"microphone\", \"upload\"],\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
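For reference, a trimmed sketch of the submit/then chain the notebook above builds: the multimodal textbox is disabled while the bot streams its reply, then re-enabled once streaming finishes. It assumes Gradio's messages-format Chatbot; the canned reply is a placeholder for a real model.

import time

import gradio as gr

def add_message(history, message):
    # message is a dict with "text" and "files"; only the text is used here
    history.append({"role": "user", "content": message["text"]})
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history):
    # stream a canned reply character by character
    history.append({"role": "assistant", "content": ""})
    for ch in "**That's cool!**":
        history[-1]["content"] += ch
        time.sleep(0.05)
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    chat_input = gr.MultimodalTextbox(show_label=False)

    chain = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    chain.then(bot, chatbot, chatbot).then(
        lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input]
    )

if __name__ == "__main__":
    demo.launch()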
demos/chatbot_multimodal/run.py
CHANGED
@@ -28,7 +28,7 @@ def bot(history: list):
 
 
 with gr.Blocks() as demo:
-    chatbot = gr.Chatbot(elem_id="chatbot")
+    chatbot = gr.Chatbot(elem_id="chatbot", like_user_message=True)
 
     chat_input = gr.MultimodalTextbox(
         interactive=True,
@@ -44,7 +44,7 @@ with gr.Blocks() as demo:
     bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
     bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
 
-    chatbot.like(print_like_dislike, None, None
+    chatbot.like(print_like_dislike, None, None)
 
 if __name__ == "__main__":
     demo.launch()
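For reference, a minimal sketch of the like/dislike hookup changed here: like_user_message=True (the argument this diff adds to the Chatbot constructor) lets users react to their own messages as well, and .like() delivers a gr.LikeData payload. Assumes a Gradio build that accepts like_user_message on gr.Chatbot.

import gradio as gr

def print_like_dislike(x: gr.LikeData):
    # index of the message, its content, and whether it was liked or disliked
    print(x.index, x.value, x.liked)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        value=[{"role": "assistant", "content": "Hello!"}],
        type="messages",
        like_user_message=True,
    )
    chatbot.like(print_like_dislike, None, None)

if __name__ == "__main__":
    demo.launch()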
demos/clear_components/run.ipynb
CHANGED
@@ -1 +1 @@
-
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas matplotlib "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "# get_audio(), get_video() return file paths to sample media included with Gradio\n", "from gradio.media import get_audio, get_video, MEDIA_ROOT\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " 
choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: get_video(\"world.mp4\")),\n", " gr.Audio(value=lambda: get_audio(\"cantina.wav\")),\n", " gr.File(\n", " value=gr.get_file\n", " ),\n", " gr.Dataframe(\n", "
+
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy pandas matplotlib "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "# get_audio(), get_video() return file paths to sample media included with Gradio\n", "from gradio.media import get_audio, get_video, MEDIA_ROOT\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " 
choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: get_video(\"world.mp4\")),\n", " gr.Audio(value=lambda: get_audio(\"cantina.wav\")),\n", " gr.File(\n", " value=gr.get_file\n", " ),\n", " # gr.Dataframe(\n", " # value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"]) # type: ignore\n", " # ),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " # gr.HTML(\n", " # value=lambda: random.choice(\n", " # [\n", " # '<p style=\"color:red;\">I am red</p>',\n", " # '<p style=\"color:blue;\">I am blue</p>',\n", " # ]\n", " # )\n", " # ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=gr.get_model3d),\n", " # gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any()) # type: ignore\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " clear_button_and_components = components + [clear]\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in clear_button_and_components],\n", " inputs=[],\n", " outputs=clear_button_and_components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch(allowed_paths=[str(MEDIA_ROOT)])"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/clear_components/run.py
CHANGED
@@ -99,9 +99,9 @@ components = [
     gr.File(
         value=gr.get_file
     ),
-    gr.Dataframe(
-        value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"]) # type: ignore
-    ),
+    # gr.Dataframe(
+    #     value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"]) # type: ignore
+    # ),
     gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])),
     gr.Label(value=lambda: random.choice(["Pedestrian", "Car", "Cyclist"])),
     gr.HighlightedText(
@@ -113,19 +113,19 @@ components = [
         ),
     ),
     gr.JSON(value=lambda: random.choice([{"a": 1}, {"b": 2}])),
-    gr.HTML(
-        value=lambda: random.choice(
-            [
-                '<p style="color:red;">I am red</p>',
-                '<p style="color:blue;">I am blue</p>',
-            ]
-        )
-    ),
+    # gr.HTML(
+    #     value=lambda: random.choice(
+    #         [
+    #             '<p style="color:red;">I am red</p>',
+    #             '<p style="color:blue;">I am blue</p>',
+    #         ]
+    #     )
+    # ),
     gr.Gallery(
         value=lambda: images
     ),
     gr.Model3D(value=gr.get_model3d),
-    gr.Plot(value=random_plot),
+    # gr.Plot(value=random_plot),
     gr.Markdown(value=lambda: f"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}"),
 ]
 
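For reference, a minimal sketch of the core clear_components pattern: components whose default values are callables (re-evaluated on each page load) plus a gr.ClearButton that resets them all at once. The two components here are illustrative; the demo above registers a much longer list.

import random
from datetime import datetime

import gradio as gr

with gr.Blocks() as demo:
    time_box = gr.Textbox(value=lambda: str(datetime.now()), label="Current Time")
    pct = gr.Number(value=lambda: random.random(), label="Random Percentage")
    clear = gr.ClearButton(value="Clear", components=[time_box, pct])

if __name__ == "__main__":
    demo.launch()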