# app.py
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# --- Load model and tokenizer ---
model_name = "taufiqdp/indonesian-sentiment"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# --- Prediction function ---
def predict_sentiment(text):
    if not text.strip():
        return "⚠️ Mohon masukkan teks."

    inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True).to(device)
    with torch.inference_mode():
        logits = model(**inputs).logits

    pred_class = torch.argmax(logits, dim=1).item()
    labels = ['Negatif', 'Netral', 'Positif']
    sentiment = labels[pred_class]
    return f"**Hasil Analisis Sentimen:** {sentiment}"

# --- Build Gradio UI ---
title = "🇮🇩 Indonesian Sentiment Analyzer"
description = "Masukkan teks Bahasa Indonesia untuk mengetahui apakah sentimennya **Positif**, **Netral**, atau **Negatif**."

interface = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(lines=4, placeholder="Contoh: Pelayanan cepat dan memuaskan"),
    outputs="markdown",
    title=title,
    description=description,
    theme="gradio/soft",
    examples=[
        ["Makanan di restoran ini sangat enak dan lezat."],
        ["Pelayanan biasa saja, tidak buruk tapi tidak istimewa."],
        ["Saya kecewa dengan produk ini."]
    ]
)

if __name__ == "__main__":
    interface.launch()