# app.py
# Gradio application to demonstrate text generation with distilbert/distilgpt2

import gradio as gr
from transformers import pipeline
import torch  # Ensure torch is imported if needed by the backend

print("Loading text generation pipeline for distilbert/distilgpt2...")
try:
    # Load the text-generation pipeline using the specified model.
    # This automatically handles downloading the model and tokenizer.
    # Specify device_map="auto" or device=0 for GPU acceleration if available
    # and configured. For CPU or basic Spaces, omitting the device argument
    # is usually fine.
    generator = pipeline('text-generation', model='distilbert/distilgpt2')
    print("Pipeline loaded successfully.")
except Exception as e:
    print(f"Error loading pipeline: {e}")
    # Handle pipeline loading failure gracefully in a real app.
    # For this demo, we fall back to None and report the failure per-request.
    generator = None  # Ensure generator is None if loading fails


def generate_text(prompt):
    """
    Generates text continuation using the loaded DistilGPT-2 pipeline.

    Args:
        prompt (str): The input text prompt from the user.

    Returns:
        str: The generated text continuation or an error message.
    """
    if generator is None:
        return "Error: Text generation model could not be loaded."
    # Reject empty AND whitespace-only prompts (the original `if not prompt`
    # let strings like "   " through to the model).
    if not prompt or not prompt.strip():
        return "Please enter a prompt."
    try:
        # Generate text using the pipeline.
        # NOTE(review): max_length counts the prompt's tokens as well, so a
        # long prompt leaves little room for new text — consider switching to
        # max_new_tokens if that becomes a problem.
        # num_return_sequences=1 ensures we get one output; pad_token_id is set
        # explicitly because GPT-2 has no pad token of its own.
        results = generator(
            prompt,
            max_length=75,
            num_return_sequences=1,
            pad_token_id=generator.tokenizer.eos_token_id,
        )
        # The pipeline returns a list of dictionaries; extract the generated
        # text from the first (and only) result.
        generated_text = results[0]['generated_text']
        return generated_text
    except Exception as e:
        print(f"Error during text generation: {e}")
        return f"Sorry, an error occurred during generation: {str(e)}"


# Create the Gradio interface
iface = gr.Interface(
    fn=generate_text,  # The core function performing the generation
    inputs=gr.Textbox(
        lines=3,
        placeholder="Type your starting text here and press Enter or click Submit...",
        label="Input Prompt",
    ),
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generation with DistilGPT-2",
    description="Enter a text prompt and see what the `distilbert/distilgpt2` model generates next. This demo uses the Hugging Face `transformers` library pipeline for easy interaction.",
    examples=["Once upon a time"],
    allow_flagging='never',
)

# Launch the interface
# In a Hugging Face Space, this is automatically handled.
# For local execution, it starts a web server.
if __name__ == "__main__":
    iface.launch()