# app.py

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("sonoisa/t5-base-japanese")
model = AutoModelForSeq2SeqLM.from_pretrained("sonoisa/t5-base-japanese")

def summarize(text):
    # Prepend the "要約: " (summarization) prompt prefix and tokenize, truncating long inputs
    inputs = tokenizer("要約: " + text, return_tensors="pt", max_length=512, truncation=True)
    # Generate a summary with beam search
    summary_ids = model.generate(
        inputs["input_ids"],
        max_length=150,
        min_length=30,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True
    )
    # Decode the generated token IDs back into text, dropping special tokens
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary

# Simple Gradio UI: one text box in, one text box out
iface = gr.Interface(fn=summarize, inputs="text", outputs="text")
iface.launch()
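
# Run note (a sketch of an assumed environment, not part of the original file):
# the app needs gradio, transformers, torch, and sentencepiece installed,
# e.g. `pip install gradio transformers torch sentencepiece`, then `python app.py`.
# gr.Interface.launch() serves the UI locally (Gradio's default port is 7860).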