Upload 3 files
- Dockerfile +13 -0
- app.py +34 -0
- requirements.txt +4 -0
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.9
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
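To sanity-check the image before pushing it to the Space, a local build and run along these lines should work (the prompt-refiner tag is illustrative, not part of the commit; 7860 matches the port in the CMD line):

docker build -t prompt-refiner .
docker run -p 7860:7860 prompt-refiner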
app.py
ADDED
@@ -0,0 +1,34 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+app = FastAPI()
+
+# Load model and tokenizer
+model_name = "alibaba-pai/Qwen2-1.5B-Instruct-Refine"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name).to("cuda" if torch.cuda.is_available() else "cpu")
+
+# Define request model
+class UserPrompt(BaseModel):
+    prompt: str
+
+@app.post("/refine-prompt")
+async def refine_prompt(user_prompt: UserPrompt):
+    if model is None or tokenizer is None:
+        raise HTTPException(status_code=500, detail="Model not loaded.")
+
+    system_prompt = (
+        "You are a professional prompt refiner. Your task is to take a user's prompt and improve it by correcting "
+        "grammar, spelling, and sentence structure. Enhance fluency, clarity, and natural tone without changing "
+        "the original intent. Add slight descriptive detail only if it improves understanding. Do not over-extend, "
+        "repeat, or remove any important information. Return only the refined prompt, nothing else."
+    )
+
+    formatted_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt.prompt}<|im_end|>\n<|im_start|>assistant\n"
+    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
+    outputs = model.generate(**inputs, max_new_tokens=40)
+    refined_prompt = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
+
+    return {"refined_prompt": refined_prompt}
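A minimal client sketch for exercising the endpoint once the server is up. The base URL and sample prompt are assumptions (uvicorn listens on port 7860 per the Dockerfile; substitute the Space's public URL when deployed), and the requests library is a client-side dependency, deliberately absent from requirements.txt:

import requests

# Assumed base URL; replace with the deployed Space's URL.
BASE_URL = "http://localhost:7860"

# POST a rough prompt to the /refine-prompt route defined in app.py.
resp = requests.post(
    f"{BASE_URL}/refine-prompt",
    json={"prompt": "write me story about dragon who learn to fly"},
)
resp.raise_for_status()
print(resp.json()["refined_prompt"])

One design note: the handler assembles the ChatML string by hand; tokenizer.apply_chat_template on a list of system/user messages would produce the same format and is less brittle if the model's chat template ever changes.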
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+fastapi
+uvicorn
+transformers
+torch