from transformers import pipeline
from PIL import Image
import gradio as gr
import numpy as np

# Load the Hugging Face depth estimation pipelines
pipe_base = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
pipe_small = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf")
pipe_intel = pipeline(task="depth-estimation", model="Intel/dpt-swinv2-tiny-256")
pipe_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384")

def normalize_depth(depth_map):
    # Normalize depth map values to the range [0, 255] for visualization
    depth_map = np.array(depth_map, dtype=np.float32)
    normalized_depth = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min()) * 255
    return normalized_depth.astype(np.uint8)

def process_and_display(pipe):
    # Build the prediction function for a pipeline; Gradio passes the input image
    # in and expects the processed result to be returned, not assigned to a component
    def process_image(image):
        depth_map = pipe(image)["depth"]  # the pipeline returns a PIL depth image
        return normalize_depth(depth_map)
    return process_image

# Create a Gradio interface for each pipeline
iface_base = gr.Interface(
    process_and_display(pipe_base),
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy", label="LiheYoung/depth-anything-base-hf"),
    title="Depth Estimation - LiheYoung/depth-anything-base-hf",
)
iface_small = gr.Interface(
    process_and_display(pipe_small),
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy", label="LiheYoung/depth-anything-small-hf"),
    title="Depth Estimation - LiheYoung/depth-anything-small-hf",
)
iface_intel = gr.Interface(
    process_and_display(pipe_intel),
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy", label="Intel/dpt-swinv2-tiny-256"),
    title="Depth Estimation - Intel/dpt-swinv2-tiny-256",
)
iface_beit = gr.Interface(
    process_and_display(pipe_beit),
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="numpy", label="Intel/dpt-beit-base-384"),
    title="Depth Estimation - Intel/dpt-beit-base-384",
)

# Launch all four interfaces from a single app, one tab per model
gr.TabbedInterface(
    [iface_base, iface_small, iface_intel, iface_beit],
    ["depth-anything-base", "depth-anything-small", "dpt-swinv2-tiny-256", "dpt-beit-base-384"],
).launch()
| """ | |
| from transformers import pipeline | |
| from PIL import Image | |
| import requests | |
| # load pipe | |
| pipe = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf") | |
| # load image | |
| url = 'http://images.cocodataset.org/val2017/000000039769.jpg' | |
| image = Image.open(requests.get(url, stream=True).raw) | |
| # inference | |
| depth = pipe(image)["depth"] | |
| """ |