"""Minimal Gradio chatbot demo backed by a Hugging Face text-generation pipeline."""

import torch  # noqa: F401  # kept from original; ensures torch backend is importable
import gradio as gr  # was missing in the original -> NameError at gr.Interface
from transformers import pipeline

# Model/tokenizer id used for both weights and tokenizer.
MODEL_ID = "EpistemeAI/Fireball-Meta-Llama-3.1-8B-Instruct-Agent-0.003-128K"

# Load the model and tokenizer.
# NOTE(review): the original used task 'conversation', which is not a valid
# transformers pipeline task; 'text-generation' is the task whose output shape
# matches the response[0]['generated_text'] access below.
model = pipeline(
    "text-generation",
    model=MODEL_ID,
    tokenizer=MODEL_ID,
)


def chatbot(input_text):
    """Return the model's generated text for a single user prompt.

    Args:
        input_text: Raw user prompt string from the Gradio textbox.

    Returns:
        The generated text of the first (and only) pipeline result.
    """
    response = model(input_text)
    return response[0]["generated_text"]


# Create a Gradio interface wiring the textbox input to the pipeline.
demo = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(label="User Input"),
    outputs=gr.Textbox(label="Chatbot Response"),
    title="Chatbot Demo",
)

# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()