jamesoncrate committed on
Commit
1ee8357
·
1 Parent(s): 83afffe

update device

Browse files
Files changed (1) hide show
  1. app.py +3 -11
app.py CHANGED
@@ -8,6 +8,7 @@ import os
8
 
9
  # Global variable to store the text pipeline
10
  text_pipe = None
 
11
 
12
  def load_model():
13
  """Load the T5 text encoder model"""
@@ -24,23 +25,14 @@ def load_model():
24
  load_in_8bit=True,
25
  variant="8bit",
26
  token=token
27
- )
28
  text_pipe = DiffusionPipeline.from_pretrained(
29
  "DeepFloyd/IF-I-L-v1.0",
30
  text_encoder=text_encoder,
31
  unet=None,
32
  token=token,
33
  )
34
-
35
- # Set the execution device to match the text encoder's device
36
- # This ensures tokenizer outputs go to the correct device
37
- if hasattr(text_encoder, 'device'):
38
- text_pipe._execution_device = text_encoder.device
39
- else:
40
- # Find the device of the first parameter
41
- text_pipe._execution_device = next(text_encoder.parameters()).device
42
-
43
- print(f"Model loaded successfully on device: {text_pipe._execution_device}")
44
  return text_pipe
45
 
46
  @spaces.GPU
 
8
 
9
  # Global variable to store the text pipeline
10
  text_pipe = None
11
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
12
 
13
  def load_model():
14
  """Load the T5 text encoder model"""
 
25
  load_in_8bit=True,
26
  variant="8bit",
27
  token=token
28
+ ).to(device)
29
  text_pipe = DiffusionPipeline.from_pretrained(
30
  "DeepFloyd/IF-I-L-v1.0",
31
  text_encoder=text_encoder,
32
  unet=None,
33
  token=token,
34
  )
35
+ print("Model loaded successfully!")
 
 
 
 
 
 
 
 
 
36
  return text_pipe
37
 
38
  @spaces.GPU