import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# --- Load the model and tokenizer directly ---
model_id = "xingyu1996/tiger-gpt2"
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # use the original GPT-2 tokenizer
model = AutoModelForCausalLM.from_pretrained(model_id)
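# Optional: if this Space is ever given GPU hardware, one line moves the model
# over; on the default CPU hardware this is a no-op (a safeguard, not something
# the original app requires).
model = model.to("cuda" if torch.cuda.is_available() else "cpu")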

def respond(
    message,
    history: list[tuple[str, str]],  # ignored: each turn is an independent completion
    max_tokens,
    temperature,
    top_p,
):
    # Convert the input text to token IDs
    input_ids = tokenizer.encode(message, return_tensors="pt").to(model.device)

    # Prepare the generation parameters
    gen_kwargs = {
        "max_length": input_ids.shape[1] + max_tokens,
        "do_sample": temperature > 0,
        "pad_token_id": tokenizer.eos_token_id,  # GPT-2 has no pad token; avoids a generate() warning
    }
    if temperature > 0:
        gen_kwargs["temperature"] = temperature
    if top_p < 1.0:
        gen_kwargs["top_p"] = top_p
    # Generate text
    with torch.no_grad():
        output_ids = model.generate(input_ids, **gen_kwargs)

    # Keep only the newly generated part
    new_tokens = output_ids[0, input_ids.shape[1]:]

    # Decode the generated token IDs
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return response
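
# A streaming variant is also possible with transformers' TextIteratorStreamer:
# generation runs in a background thread and the chat window fills in as tokens
# arrive. This is a minimal sketch, not part of the original app; the name
# respond_stream is mine, and you would pass it to gr.ChatInterface below
# instead of respond.
from threading import Thread
from transformers import TextIteratorStreamer

def respond_stream(message, history, max_tokens, temperature, top_p):
    input_ids = tokenizer.encode(message, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(
        target=model.generate,
        kwargs={
            "input_ids": input_ids,
            "max_new_tokens": max_tokens,
            "do_sample": temperature > 0,
            "temperature": temperature,
            "top_p": top_p,
            "pad_token_id": tokenizer.eos_token_id,
            "streamer": streamer,
        },
    ).start()
    partial = ""
    for piece in streamer:  # blocks until the next decoded chunk is ready
        partial += piece
        yield partial  # Gradio treats a generator as a streaming response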

# The rest of the Gradio interface code is unchanged
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=512, value=325, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    title=f"Inference test: {model_id}",
    description="Enter Chinese text and the model will complete it.",
)

if __name__ == "__main__":
    demo.launch()
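
# To run locally (assuming gradio, transformers, and torch are installed):
# save this file as app.py, the entry point Hugging Face Spaces expects,
# and start it with `python app.py`; Gradio prints the local URL it serves on.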