雷娃 committed on
Commit
a108bc1
·
1 Parent(s): de8bf82

add system prompt

Browse files
Files changed (2) hide show
  1. app.py +6 -0
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,6 +1,7 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
2
  from threading import Thread
3
  import gradio as gr
 
4
 
5
  # load model and tokenizer
6
  model_name = "inclusionAI/Ling-mini-2.0"
@@ -25,12 +26,17 @@ def respond(
25
  """
26
  #client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
27
 
 
 
 
28
  messages = [{"role": "system", "content": system_message}]
29
 
30
  messages.extend(history)
31
 
32
  messages.append({"role": "user", "content": message})
33
 
 
 
34
  text = tokenizer.apply_chat_template(
35
  messages,
36
  tokenize=False,
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
2
  from threading import Thread
3
  import gradio as gr
4
+ import json
5
 
6
  # load model and tokenizer
7
  model_name = "inclusionAI/Ling-mini-2.0"
 
26
  """
27
  #client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
28
 
29
+ if len(system_message) == 0:
30
+ system_message = "## 你是谁\n\n我是百灵(Ling),一个由蚂蚁集团(Ant Group) 开发的AI智能助手"
31
+
32
  messages = [{"role": "system", "content": system_message}]
33
 
34
  messages.extend(history)
35
 
36
  messages.append({"role": "user", "content": message})
37
 
38
+ print(f"system_prompt: {json.dumps(messages, ensure_ascii=False, indent=2)}")
39
+
40
  text = tokenizer.apply_chat_template(
41
  messages,
42
  tokenize=False,
requirements.txt CHANGED
@@ -3,3 +3,4 @@ transformers
3
  torch
4
  accelerate
5
  openai
 
 
3
  torch
4
  accelerate
5
  openai
6
+ # NOTE: `json` is part of the Python standard library — it must NOT be listed as a pip requirement