hiko1999 committed
Commit bb49a7b · verified · 1 parent: c9e461d

Update app.py

Files changed (1)
  1. app.py +28 -24
app.py CHANGED
@@ -5,21 +5,25 @@ import gradio as gr
 from PIL import Image
 from huggingface_hub import login
 import os
+import warnings
+
+# 抑制警告
+warnings.filterwarnings("ignore", category=RuntimeWarning)
 
 # ========== 使用你的 secret 名称 fmv 登录 ==========
 token = os.getenv("fmv")
 if token:
     login(token=token)
-    print("成功使用 token 登录!")
+    print("Successfully logged in with token!")
 else:
-    print("警告:未找到 token")
+    print("Warning: Token not found")
 # ==========================================
 
-# Hugging Face 模型仓库路径
+# Hugging Face model repository path
 model_path = "hiko1999/Qwen2-Wildfire-2B"
 
-# 加载模型和 processor
-print(f"正在加载模型: {model_path}")
+# Load model and processor
+print(f"Loading model: {model_path}")
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = Qwen2VLForConditionalGeneration.from_pretrained(
     model_path,
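The hunk above adds a RuntimeWarning filter (the new comment # 抑制警告 means "suppress warnings") and swaps the Chinese status prints for English ones ("成功使用 token 登录!" is roughly "logged in with token successfully"; "警告:未找到 token" is "warning: token not found"). Login still reads the token from a Space secret named fmv; a quick way to confirm that the secret actually resolves to an account is huggingface_hub.whoami(). A minimal sketch, not part of the commit:

# Hypothetical sanity check: whoami() raises if the token is missing or invalid.
from huggingface_hub import whoami

try:
    print(f"Logged in as: {whoami()['name']}")
except Exception:
    print("No valid token; public repos will still download anonymously.")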
@@ -27,27 +31,27 @@ model = Qwen2VLForConditionalGeneration.from_pretrained(
     device_map="cpu"
 )
 processor = AutoProcessor.from_pretrained(model_path)
-print("模型加载完成!")
+print("Model loaded successfully!")
 
-# 定义预测函数
+# Define prediction function
 def predict(image):
-    """处理图片并生成描述"""
+    """Process image and generate description"""
     if image is None:
-        return "错误:未上传图片"
+        return "Error: No image uploaded"
 
     try:
-        # 构建消息
+        # Build message with English prompt
         messages = [
             {
                 "role": "user",
                 "content": [
                     {"type": "image", "image": image},
-                    {"type": "text", "text": "请描述这张图片中的火灾情况。"}
+                    {"type": "text", "text": "Describe this wildfire scene in English. Include details about the fire intensity, affected area, and visible environmental conditions."}
                 ]
             }
         ]
 
-        # 处理输入
+        # Process input
         text = processor.apply_chat_template(
             messages,
             tokenize=False,
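The replacement prompt (formerly the Chinese "请描述这张图片中的火灾情况。", "please describe the fire situation in this image") now asks for an English description with specific detail, and the Qwen2-VL chat template wraps it before tokenization. For reference, the same preprocessing looks roughly like this outside Gradio; a sketch assuming the processor loaded above and a hypothetical local wildfire.jpg:

# Standalone preprocessing sketch (wildfire.jpg is a hypothetical file).
from PIL import Image

img = Image.open("wildfire.jpg")
msgs = [{"role": "user", "content": [
    {"type": "image", "image": img},
    {"type": "text", "text": "Describe this wildfire scene in English."},
]}]
prompt = processor.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
batch = processor(text=[prompt], images=[img], return_tensors="pt")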
@@ -62,10 +66,10 @@ def predict(image):
             return_tensors="pt"
         )
 
-        # 确保在 CPU 上运行
+        # Ensure running on CPU
         inputs = inputs.to("cpu")
 
-        # 生成输出
+        # Generate output
         generated_ids = model.generate(
             **inputs,
             max_new_tokens=256,
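Everything stays pinned to the CPU, consistent with device_map="cpu" at load time. If the Space were later moved to GPU hardware, a common device-agnostic variant (an assumption, not in this commit) is to follow the model's device instead of hard-coding it:

# Hypothetical device-agnostic variant: move inputs wherever the model lives.
inputs = inputs.to(model.device)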
@@ -73,7 +77,7 @@
             temperature=0.7
         )
 
-        # 解码输出
+        # Decode output
         generated_ids_trimmed = [
             out_ids[len(in_ids):]
             for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
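One thing the commit keeps unchanged is temperature=0.7, which has no effect as written: transformers applies sampling parameters only when do_sample=True and otherwise decodes greedily (newer versions warn about exactly this). A hedged fix, if sampling is actually intended:

# Assumption, not in the commit: enable sampling so temperature is honored.
generated_ids = model.generate(
    **inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7
)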
@@ -87,23 +91,23 @@ def predict(image):
         return output_text[0]
 
     except Exception as e:
-        return f"预测失败: {str(e)}"
+        return f"Prediction failed: {str(e)}"
 
-# Gradio 界面函数
+# Gradio interface function
 def gradio_interface(image):
-    """Gradio 界面的主函数"""
+    """Main function for Gradio interface"""
     result = predict(image)
     return result
 
-# 创建 Gradio 界面 (移除了 theme 和 allow_flagging)
+# Create Gradio interface (all in English)
 interface = gr.Interface(
     fn=gradio_interface,
-    inputs=gr.Image(type="pil", label="上传火灾图片"),
-    outputs=gr.Textbox(label="AI 分析结果", lines=10),
-    title="🔥 火灾场景智能分析系统",
-    description="上传火灾相关图片,AI 将自动分析并描述火灾情况。"
+    inputs=gr.Image(type="pil", label="Upload Wildfire Image"),
+    outputs=gr.Textbox(label="AI Analysis Result", lines=10),
+    title="🔥 Wildfire Scene Analysis System",
+    description="Upload a wildfire-related image and AI will automatically analyze and describe the fire situation in English."
 )
 
-# 启动接口 (把 theme 移到这里)
+# Launch interface
 if __name__ == "__main__":
     interface.launch(share=False)
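This final hunk moves the UI strings to English as well (the old title "🔥 火灾场景智能分析系统" was roughly "intelligent wildfire scene analysis system", and "预测失败" is "prediction failed"). With that done, the app can be smoke-tested without starting the Gradio server by calling predict directly; a hypothetical check, not part of the commit:

# Hypothetical smoke test; sample_fire.jpg is a stand-in local image path.
from PIL import Image

print(predict(Image.open("sample_fire.jpg")))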
 