import gradio as gr
import torch
from transformers import pipeline
from PIL import Image
import numpy as np
from diffusers import DiffusionPipeline
# Ensure PyTorch runs on GPU if available
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load translation model (Arabic to English), on GPU when available
translator = pipeline(
    "translation_ar_to_en",
    model="Helsinki-NLP/opus-mt-ar-en",
    device=0 if torch.cuda.is_available() else -1,
)
# Load the image generation model and move it to the selected device
pipe = DiffusionPipeline.from_pretrained("sairajg/Text_To_Image")
pipe = pipe.to(device)
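# Note (a sketch of the expected interface, assuming the standard transformers
# translation-pipeline output format): calling the translator returns a list of
# dicts with a 'translation_text' key, e.g.
#   translator("مرحبا بالعالم")  ->  [{'translation_text': 'Hello world'}]
# which is why translate_and_generate() below indexes [0]['translation_text'].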
def translate_and_generate(arabic_text):
    debug_info = ""
    try:
        # Translate the Arabic prompt to English
        translated_text = translator(arabic_text)[0]['translation_text']
        # Generate an image from the translated prompt
        result = pipe(translated_text).images[0]
        # Ensure we get the actual image
        # if isinstance(result, tuple):
        #     result = result[0]  # Extract first element
        # if isinstance(result, torch.Tensor):
        #     result = result.cpu().numpy()  # Convert to NumPy array
        # if isinstance(result, np.ndarray):
        #     result = Image.fromarray((result * 255).astype(np.uint8))  # Ensure proper pixel range
        # elif not isinstance(result, Image.Image):
        #     raise ValueError(f"Unexpected output type: {type(result)}")
        debug_info += f"Translated Prompt: {translated_text}\nResult Type: {type(result)}"
        return result, debug_info
    except Exception as e:
        debug_info += f"Error: {e}"
        return None, debug_info
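# Minimal headless usage sketch (illustrative only; not part of the original app):
# the translation + generation path can be exercised without the Gradio UI:
#
#   image, info = translate_and_generate("قطة صغيرة تلعب في الحديقة")
#   print(info)
#   if image is not None:
#       image.save("output.png")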
with gr.Blocks() as interface:
    gr.Markdown("### Arabic to Image Generator")
    text_input = gr.Textbox(label="Enter Arabic Prompt:", placeholder="اكتب هنا...")
    generate_button = gr.Button("Generate Image")
    image_output = gr.Image(label="Generated Image")
    text_output = gr.Textbox(label="Debug Output")
    generate_button.click(translate_and_generate, inputs=text_input, outputs=[image_output, text_output])

interface.launch()