from fastapi import FastAPI
from pydantic import BaseModel
import os

from groq import Groq

app = FastAPI(title="Simple AI Chat")

# Initialize the Groq client (reads GROQ_API_KEY from the environment).
# The client is only created when the key is present, so the app can start
# and return a friendly error instead of raising at import time.
api_key = os.environ.get("GROQ_API_KEY")
client = Groq(api_key=api_key) if api_key else None


class ChatRequest(BaseModel):
    message: str


class ChatResponse(BaseModel):
    response: str


@app.get("/")
def home():
    # Simple health-check endpoint.
    return {"message": "AI Chat API is running"}


@app.post("/chat")
def chat(request: ChatRequest):
    try:
        if client is None:
            return ChatResponse(response="❌ GROQ_API_KEY not configured")

        # Forward the user's message to Groq's chat completions endpoint.
        completion = client.chat.completions.create(
            model="llama-3.1-8b-instant",
            messages=[
                {"role": "system", "content": "You are a helpful AI assistant."},
                {"role": "user", "content": request.message},
            ],
            temperature=0.7,
            max_tokens=1024,
        )
        response = completion.choices[0].message.content
        return ChatResponse(response=response)
    except Exception as e:
        return ChatResponse(response=f"Error: {str(e)}")
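

# A minimal sketch for running the app directly. The filename "main" and the
# host/port are assumptions, not part of the original code; any ASGI server
# works, uvicorn is simply the common choice alongside FastAPI.
if __name__ == "__main__":
    import uvicorn

    # Equivalent to running:  uvicorn main:app --reload
    # (assuming this file is saved as main.py)
    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request once the server is up (hypothetical message text):
#   curl -X POST http://localhost:8000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Explain FastAPI in one sentence."}'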