from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List
import csv
import os
from datetime import datetime

# Import the model instance and the GoogleNews instance used to fetch news
from app.model.loader import model_instance
from app.services.news_client import news_instance  # make sure the module is named news_client.py

# 1. Initialize the app
app = FastAPI(
    title="Reputation Monitor API",
    description="API for sentiment analysis of corporate reputation, based on Google news results for a given company/subject",
    version="1.0.0"
)

# --- MONITORING (CSV logging) ---
LOG_FILE = "reputation_logs.csv"
if not os.path.exists(LOG_FILE):
    with open(LOG_FILE, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(["timestamp", "query", "text", "sentiment", "confidence"])

def log_prediction(query, text, sentiment, confidence):
    with open(LOG_FILE, mode='a', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow([datetime.now(), query, text, sentiment, confidence])
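
# For reference, a logged CSV row would look roughly like this (illustrative
# values only, not real output):
#   2024-01-01 12:00:00.000000,ACME Corp,"Stock rallies after earnings beat",positive,0.97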

# --- DATA MODELS (Pydantic) ---
# 1. Model for the single-text request (/predict)
class SentimentRequest(BaseModel):
    text: str

# 2. Model for the full analysis request (/analyze)
class AnalysisRequest(BaseModel):
    query: str
    limit: int = 5

# 3. Model for a single result (used by both endpoints)
class SingleResult(BaseModel):
    text: str
    sentiment: str
    confidence: float

class AnalysisResponse(BaseModel):
    query: str
    results: List[SingleResult]
    summary: dict
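
# Illustrative payload shapes for /analyze (a sketch; the values below are made
# up, only the field names come from the Pydantic models above):
#
#   Request:  {"query": "ACME Corp", "limit": 3}
#   Response: {"query": "ACME Corp",
#              "results": [{"text": "...", "sentiment": "positive", "confidence": 0.97}],
#              "summary": {"positive": 1, "neutral": 0, "negative": 0}}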

# --- ENDPOINTS ---

@app.get("/health")
def health_check():
    # CHECK: verify the model has been loaded into memory
    if model_instance.model is not None:
        return {"status": "ok", "model_loaded": True}
    raise HTTPException(status_code=503, detail="Model not loaded")

# --- ENDPOINT 1: PURE PREDICTION (usable for direct integrations) ---
@app.post("/predict", response_model=SingleResult)
def predict_sentiment(request: SentimentRequest):
    """
    Analizza un singolo testo manuale.
    Utile per test unitari o integrazioni dirette.
    """
    try:
        sentiment, confidence = model_instance.predict(request.text)
        
        # Log with "MANUAL_INPUT" as the query so these rows stand out in the CSV
        log_prediction("MANUAL_INPUT", request.text, sentiment, confidence)
        
        return {
            "text": request.text,
            "sentiment": sentiment,
            "confidence": confidence
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# --- ENDPOINT 2: Scraping + AI classification ---
@app.post("/analyze", response_model=AnalysisResponse)
def analyze_company(request: AnalysisRequest):
    """
    1. Search Google for news
    2. Analyze the sentiment of each article
    3. Save the logs
    4. Return the full report
    """
    # --- DEBUG PRINT ---
    print(f"🔥 API RECEIVED REQUEST -> Query: {request.query}, Limit: {request.limit}")
    # -------------------
    try:
        # 1. Fetch the news
        news_texts = news_instance.search_news(request.query, limit=request.limit)
        
        if not news_texts:
            return {"query": request.query, "results": [], "summary": {}}

        analyzed_results = []
        sentiment_counts = {"positive": 0, "neutral": 0, "negative": 0}

        # 2. Analyze each article with the model
        for text in news_texts:
            sentiment, confidence = model_instance.predict(text)
            
            # Update counts (.get guards against an unexpected label from the model)
            sentiment_counts[sentiment] = sentiment_counts.get(sentiment, 0) + 1
            
            # Append to the results list
            analyzed_results.append({
                "text": text,
                "sentiment": sentiment,
                "confidence": confidence
            })
            
            # 3. Log for monitoring (point 2 of the exercise)
            log_prediction(request.query, text, sentiment, confidence)

        return {
            "query": request.query,
            "results": analyzed_results,
            "summary": sentiment_counts
        }

    except Exception as e:
        # If anything goes wrong, return a 500 error
        raise HTTPException(status_code=500, detail=str(e))
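
# Minimal local-run sketch. Assumptions: uvicorn is installed and the host/port
# below suit your setup; adjust them to your project layout.
#
# Example calls once the server is up (illustrative values):
#   curl -X POST http://localhost:8000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Stock rallies after earnings beat"}'
#   curl -X POST http://localhost:8000/analyze \
#        -H "Content-Type: application/json" \
#        -d '{"query": "ACME Corp", "limit": 3}'
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)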