Hugging Face Space — status: Sleeping
# Third-party dependencies: transformers supplies the generation pipeline,
# gradio supplies the web UI.
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

import gradio as gr

# Prediction backend: a text-generation pipeline wrapping the SmolLM
# prompt-generation checkpoint. Loaded once at module import time.
model = pipeline("text-generation", model="zenai-org/SmolLM-prompt-generation")
def predict(prompt):
    """Return a text completion for *prompt* from the global `model` pipeline.

    Parameters
    ----------
    prompt : str
        The text prompt to complete.

    Returns
    -------
    str
        The generated text (includes the prompt, as is the pipeline default).

    NOTE(review): `max_length` counts the prompt's tokens too, so a prompt
    near 77 tokens leaves little or no room for new text — consider
    `max_new_tokens` if that becomes a problem.
    """
    outputs = model(
        prompt,
        max_length=77,       # Max total length (prompt + completion) in tokens
        min_length=10,       # Minimum length of the generated sequence
        do_sample=True,      # Enable sampling instead of greedy decoding
        top_k=50,            # Top-k sampling
        top_p=0.95,          # Top-p (nucleus) sampling
        temperature=0.7,     # Control the creativity of the output
        eos_token_id=0,      # End-of-sequence token for this checkpoint
        # Use the pipeline's own tokenizer for the pad id — the original
        # commented-out line referenced an undefined `tokenizer` name.
        # Setting this silences the "pad_token_id not set" warning.
        pad_token_id=model.tokenizer.eos_token_id,
    )
    return outputs[0]["generated_text"]
# Build the web UI: a single text box in, generated text out.
# The CSS rule hides Gradio's footer; share=True exposes a public link.
demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    css=".footer{display:none !important}",
)
demo.launch(share=True)