SlowPacer committed
Commit 1b73145 · 1 Parent(s): 953ae18

Update handler.py
Files changed (1):
  handler.py  +2 -4
handler.py CHANGED
@@ -1,4 +1,3 @@
-# +
 from typing import Dict, List, Any
 from PIL import Image
 import base64
@@ -6,7 +5,6 @@ import torch
 import os
 from io import BytesIO
 from transformers import BlipForConditionalGeneration, BlipProcessor
-# -
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
@@ -32,7 +30,7 @@ class EndpointHandler():
         """
         inputs = data.pop("inputs", data)
         parameters = data.pop("parameters", {})
-
+
         raw_images = [Image.open(BytesIO(base64.b64decode(_img))) for _img in inputs]
 
         processed_images = self.processor(images=raw_images, return_tensors="pt")
@@ -43,4 +41,4 @@ class EndpointHandler():
         out = self.model.generate(**processed_images)
         captions = self.processor.batch_decode(out, skip_special_tokens=True)
 
-        return {"captions": captions}
+        return {"captions": captions}
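
For context, a minimal client-side sketch of how a request could be built for this handler. The encode_image helper, the example file name, and the commented-out constructor call are assumptions for illustration; the only parts taken from the diff are the "inputs"/"parameters" keys, the base64 decoding in __call__, and the {"captions": [...]} return shape.

import base64
from io import BytesIO

from PIL import Image


def encode_image(path: str) -> str:
    # Hypothetical helper: load an image file and return it as a base64 string,
    # matching the base64.b64decode(_img) call in __call__ above.
    with Image.open(path) as img:
        buffer = BytesIO()
        img.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")


# "inputs" is a list of base64-encoded images; "parameters" is an optional
# dict that the handler pops but does not pass to generate in this diff.
payload = {
    "inputs": [encode_image("example.jpg")],   # example path, assumed
    "parameters": {},
}

# handler = EndpointHandler(path=".")   # assuming the handler class defined in this repo
# captions = handler(payload)           # expected shape: {"captions": ["..."]}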