Upload 4 files
- app.py +159 -80
- handler.py +1 -1
- requirements.txt +2 -4
app.py
CHANGED
@@ -11,12 +11,13 @@ from typing import List, Dict, Any, Optional
 from PIL import Image, ImageOps
 import io
 import logging
+from datetime import datetime
 
-app = FastAPI(title="Orcan VisionTrace GPU Service", version="1.0.0")
+app = FastAPI(title="Orcan VisionTrace Hybrid GPU Service", version="1.0.0")
 
 # Global models
 face_app = None
-
+use_gpu_face_recognition = False
 
 class BatchEmbeddingRequest(BaseModel):
     images: List[str]  # Base64 encoded images
@@ -30,34 +31,76 @@ class IndexCreationRequest(BaseModel):
 
 @app.on_event("startup")
 async def startup_event():
-    global face_app,
+    global face_app, use_gpu_face_recognition
+
+    print("Starting Orcan VisionTrace Hybrid Service...")
+
+    # Check GPU availability
+    use_gpu_face_recognition = torch.cuda.is_available()
+    print(f"CUDA Available: {use_gpu_face_recognition}")
+
+    if use_gpu_face_recognition:
+        print("GPU detected - Using CUDA for face recognition")
+        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        ctx_id = 0
+    else:
+        print("No GPU detected - Using CPU for face recognition")
+        providers = ['CPUExecutionProvider']
+        ctx_id = -1
+
+    try:
+        # Initialize InsightFace
+        face_app = insightface.app.FaceAnalysis(
+            providers=providers,
+            allowed_modules=['detection', 'recognition']
+        )
+        face_app.prepare(ctx_id=ctx_id, det_size=(640, 640))
+        print("InsightFace initialized successfully")
+
+    except Exception as e:
+        print(f"Error initializing InsightFace: {e}")
+        # Fallback to CPU
+        face_app = insightface.app.FaceAnalysis(
+            providers=['CPUExecutionProvider'],
+            allowed_modules=['detection', 'recognition']
+        )
+        face_app.prepare(ctx_id=-1, det_size=(640, 640))
+        use_gpu_face_recognition = False
+        print("Fallback to CPU face recognition")
+
+    print(f"Service ready - Face Recognition: {'GPU' if use_gpu_face_recognition else 'CPU'}, FAISS: CPU")
+
+@app.get("/")
+async def root():
+    return {
+        "service": "Orcan VisionTrace Hybrid GPU Service",
+        "status": "running",
+        "face_recognition": "GPU" if use_gpu_face_recognition else "CPU",
+        "faiss_indexing": "CPU",
+        "version": "1.0.0"
+    }
 
 @app.get("/health")
 async def health_check():
     return {
         "status": "healthy",
         "gpu_available": torch.cuda.is_available(),
-        "face_model_loaded": face_app is not None
+        "face_model_loaded": face_app is not None,
+        "using_gpu_face_recognition": use_gpu_face_recognition,
+        "faiss_mode": "CPU",
+        "timestamp": datetime.utcnow().isoformat()
     }
 
 @app.post("/extract_embeddings_batch")
 async def extract_embeddings_batch(request: BatchEmbeddingRequest):
+    """Extract face embeddings from multiple images using GPU acceleration"""
     try:
        embeddings = []
        extraction_info = []
 
+        print(f"Processing batch of {len(request.images)} images")
+
+        for idx, img_b64 in enumerate(request.images):
            try:
                # Decode base64 image
                img_data = base64.b64decode(img_b64)
@@ -66,116 +109,149 @@ async def extract_embeddings_batch(request: BatchEmbeddingRequest):
 
                if img is None:
                    embeddings.append(None)
-                    extraction_info.append({"error": "Failed to decode image"})
+                    extraction_info.append({"error": "Failed to decode image", "index": idx})
                    continue
 
                # Apply enhancement if requested
                if request.enhance_quality:
-                    img =
+                    img = enhance_image(img, request.aggressive_enhancement)
 
-                # Extract face embeddings
+                # Extract face embeddings using GPU/CPU
                faces = face_app.get(img)
 
                if len(faces) == 0:
                    embeddings.append(None)
                    extraction_info.append({
                        "face_count": 0,
-                        "strategy_used": "gpu_batch",
-                        "enhancement_used": request.enhance_quality
+                        "strategy_used": "gpu_batch" if use_gpu_face_recognition else "cpu_batch",
+                        "enhancement_used": request.enhance_quality,
+                        "index": idx
                    })
                    continue
 
-                # Get best face
+                # Get best face (largest bounding box)
                face = max(faces, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]))
                embedding = face.embedding
+
+                # Normalize embedding
                embedding = embedding / np.linalg.norm(embedding)
 
                embeddings.append(embedding.tolist())
+
+                # Calculate quality metrics
+                bbox_area = (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1])
+                img_area = img.shape[0] * img.shape[1]
+                face_size_ratio = bbox_area / img_area
+
                extraction_info.append({
                    "face_count": len(faces),
-                    "confidence": float(
-                    "strategy_used": "gpu_batch",
+                    "confidence": float(face_size_ratio),
+                    "strategy_used": "gpu_batch" if use_gpu_face_recognition else "cpu_batch",
                    "enhancement_used": request.enhance_quality,
-                    "quality_score": 0.
+                    "quality_score": min(face_size_ratio * 2.0, 1.0),
+                    "bbox_area": float(bbox_area),
+                    "index": idx
                })
 
            except Exception as e:
                embeddings.append(None)
-                extraction_info.append({"error": str(e)})
+                extraction_info.append({"error": str(e), "index": idx})
+
+        successful_count = len([e for e in embeddings if e is not None])
+        print(f"Batch processing complete: {successful_count}/{len(request.images)} successful")
 
        return {
            "embeddings": embeddings,
            "extraction_info": extraction_info,
            "total_processed": len(request.images),
-            "successful":
+            "successful": successful_count,
+            "processing_mode": "gpu" if use_gpu_face_recognition else "cpu"
        }
 
    except Exception as e:
+        print(f"Batch processing error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
 
-def
-    """
+def enhance_image(img, aggressive=False):
+    """Enhanced image quality improvement"""
+    try:
+        if aggressive:
+            # Aggressive enhancement for very poor quality images
+            img = cv2.bilateralFilter(img, 15, 90, 90)
+
+            # Histogram equalization
+            lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
+            l, a, b = cv2.split(lab)
+            clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8,8))
+            l = clahe.apply(l)
+            img = cv2.merge([l, a, b])
+            img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
+
+            # Strong sharpening
+            kernel = np.array([[-1,-1,-1], [-1, 12,-1], [-1,-1,-1]])
+            img = cv2.filter2D(img, -1, kernel)
+
+            # Gamma correction
+            gamma = 1.4
+            inv_gamma = 1.0 / gamma
+            table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
+            img = cv2.LUT(img, table)
+
+        else:
+            # Standard enhancement
+            img = cv2.bilateralFilter(img, 9, 75, 75)
+
+            # Sharpening
+            kernel = np.array([[-1,-1,-1], [-1, 9,-1], [-1,-1,-1]])
+            img = cv2.filter2D(img, -1, kernel)
+
+            # CLAHE
+            lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
+            l, a, b = cv2.split(lab)
+            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
+            l = clahe.apply(l)
+            img = cv2.merge([l, a, b])
+            img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
+
+        return img
+    except Exception as e:
+        print(f"Enhancement error: {e}")
+        return img
 
 @app.post("/create_faiss_index")
 async def create_faiss_index(request: IndexCreationRequest):
+    """Create FAISS index using CPU (hybrid approach)"""
    try:
        embeddings_array = np.array(request.embeddings, dtype='float32')
+        print(f"Creating FAISS index for {embeddings_array.shape[0]} vectors")
 
-        # Choose index type based on dataset size
+        # Choose optimal index type based on dataset size
        if request.dataset_size < 1000:
            index = faiss.IndexFlatL2(request.dimension)
            index_type = "IndexFlatL2"
+            params = {}
        elif request.dataset_size < 50000:
            nlist = max(4, min(request.dataset_size // 39, 100))
            quantizer = faiss.IndexFlatL2(request.dimension)
            index = faiss.IndexIVFFlat(quantizer, request.dimension, nlist)
            index_type = "IndexIVFFlat"
+            params = {"nlist": nlist}
        else:
            nlist = max(100, min(request.dataset_size // 39, 1000))
            quantizer = faiss.IndexFlatL2(request.dimension)
            index = faiss.IndexIVFPQ(quantizer, request.dimension, nlist, 64, 8)
            index_type = "IndexIVFPQ"
+            params = {"nlist": nlist, "m": 64, "nbits": 8}
 
-                index = faiss.index_gpu_to_cpu(index_gpu)
-            else:
-                # Flat index - direct GPU processing
-                index_gpu = faiss.index_cpu_to_gpu(gpu_resources, 0, index)
-                index_gpu.add(embeddings_array)
-                index = faiss.index_gpu_to_cpu(index_gpu)
-        else:
-            # CPU fallback
-            if hasattr(index, 'train') and not index.is_trained:
-                index.train(embeddings_array)
-            index.add(embeddings_array)
+        # Train index if needed
+        if hasattr(index, 'train') and not index.is_trained:
+            print(f"Training {index_type} index...")
+            index.train(embeddings_array)
+            print("Index training completed")
+
+        # Add vectors to index
+        index.add(embeddings_array)
+        print(f"Added {index.ntotal} vectors to index")
 
        # Serialize index
        index_data = faiss.serialize_index(index)
@@ -183,16 +259,19 @@ async def create_faiss_index(request: IndexCreationRequest):
 
        return {
            "index_data": index_b64,
-            "index_type": f"
-            "index_params":
-            "vectors_added": index.ntotal
+            "index_type": f"CPU_{index_type}",
+            "index_params": params,
+            "vectors_added": index.ntotal,
+            "dataset_size": request.dataset_size
        }
 
    except Exception as e:
+        print(f"Index creation error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
 
 @app.post("/search_faiss")
 async def search_faiss(request: dict):
+    """Perform similarity search using CPU FAISS"""
    try:
        # Deserialize index
        index_data = base64.b64decode(request["index_data"])
@@ -201,22 +280,22 @@ async def search_faiss(request: dict):
        query_embedding = np.array([request["query_embedding"]], dtype='float32')
        k = request.get("k", 25)
 
-        else:
-            distances, indices = index.search(query_embedding, k)
+        print(f"Searching index with {index.ntotal} vectors for top-{k}")
+
+        # Perform search on CPU
+        distances, indices = index.search(query_embedding, k)
 
        return {
            "distances": distances[0].tolist(),
            "indices": indices[0].tolist(),
-            "total_vectors": index.ntotal
+            "total_vectors": index.ntotal,
+            "search_mode": "cpu"
        }
 
    except Exception as e:
+        print(f"Search error: {e}")
        raise HTTPException(status_code=500, detail=str(e))
 
 if __name__ == "__main__":
    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=
+    uvicorn.run(app, host="0.0.0.0", port=8000)
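
For orientation, a minimal client-side sketch of the round trip these three endpoints support: extract embeddings, build an index, then search it. The base URL and image path are assumptions, not part of this commit; request field names are taken from the handler code above, and `requests` is a client-side dependency (this commit removes it from the service's own requirements.txt):

    import base64
    import requests  # client-side only; no longer in the service requirements

    BASE = "http://localhost:8000"  # assumed deployment address

    # 1. Base64-encode an input image (hypothetical path) and extract embeddings
    with open("face.jpg", "rb") as f:
        img_b64 = base64.b64encode(f.read()).decode()
    out = requests.post(f"{BASE}/extract_embeddings_batch", json={
        "images": [img_b64],
        "enhance_quality": True,
        "aggressive_enhancement": False,
    }).json()
    embeddings = [e for e in out["embeddings"] if e is not None]

    # 2. Build a CPU FAISS index from the normalized embeddings
    out = requests.post(f"{BASE}/create_faiss_index", json={
        "embeddings": embeddings,
        "dimension": len(embeddings[0]),
        "dataset_size": len(embeddings),
    }).json()

    # 3. Search the serialized index, using the first embedding as the query
    hits = requests.post(f"{BASE}/search_faiss", json={
        "index_data": out["index_data"],
        "query_embedding": embeddings[0],
        "k": 5,
    }).json()
    print(hits["indices"], hits["distances"])
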
handler.py
CHANGED
@@ -7,4 +7,4 @@ def handler(event, context):
 # For direct FastAPI deployment
 if __name__ == "__main__":
    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=
+    uvicorn.run(app, host="0.0.0.0", port=8000)
requirements.txt
CHANGED
@@ -2,13 +2,11 @@ fastapi==0.104.1
 uvicorn[standard]==0.24.0
 torch==2.1.0
 torchvision==0.16.0
-faiss-
+faiss-cpu==1.7.4
 insightface==0.7.3
 opencv-python-headless==4.8.1.78
 Pillow==10.1.0
 numpy==1.24.3
 pydantic==2.4.2
 python-multipart==0.0.6
-onnxruntime
+onnxruntime==1.16.0
-scikit-image==0.21.0
-requests==2.31.0
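
One caveat worth verifying after this dependency change: the plain `onnxruntime` wheel is the CPU-only build (CUDA support ships separately as `onnxruntime-gpu`), while `startup_event()` in app.py requests `CUDAExecutionProvider` whenever torch reports a GPU. A small sanity-check sketch, assuming the pinned packages above are installed:

    # Confirm which ONNX Runtime execution providers this build actually offers.
    import torch
    import onnxruntime as ort

    print("torch sees CUDA:", torch.cuda.is_available())
    print("ORT providers:", ort.get_available_providers())
    # If 'CUDAExecutionProvider' is absent, InsightFace runs detection and
    # recognition on CPU even though the service logs "GPU detected".
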