ybbwcwaps
committed on
Commit
·
2fc658c
1
Parent(s):
be0b14c
cpu
Browse files- networks/base_model.py +2 -1
- options/base_options.py +1 -1
- requirements.txt +2 -1
- run.py +0 -2
networks/base_model.py
CHANGED
|
@@ -16,7 +16,8 @@ class BaseModel(nn.Module):
|
|
| 16 |
self.device= torch.device('cuda:{}'.format(opt.gpu_ids[0]))
|
| 17 |
else:
|
| 18 |
print("gpu is not available! ")
|
| 19 |
-
exit()
|
|
|
|
| 20 |
# self.device = torch.device('cuda')
|
| 21 |
|
| 22 |
def save_networks(self, save_filename):
|
|
|
|
| 16 |
self.device= torch.device('cuda:{}'.format(opt.gpu_ids[0]))
|
| 17 |
else:
|
| 18 |
print("gpu is not available! ")
|
| 19 |
+
# exit()
|
| 20 |
+
self.device = torch.device('cpu')
|
| 21 |
# self.device = torch.device('cuda')
|
| 22 |
|
| 23 |
def save_networks(self, save_filename):
|
options/base_options.py
CHANGED
|
@@ -28,7 +28,7 @@ class BaseOptions():
|
|
| 28 |
|
| 29 |
parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
|
| 30 |
parser.add_argument('--cropSize', type=int, default=224, help='then crop to this size')
|
| 31 |
-
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
|
| 32 |
|
| 33 |
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
|
| 34 |
parser.add_argument('--name', type=str, default='experiment', help='name of the experiment. It decides where to store samples and models')
|
|
|
|
| 28 |
|
| 29 |
parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
|
| 30 |
parser.add_argument('--cropSize', type=int, default=224, help='then crop to this size')
|
| 31 |
+
parser.add_argument('--gpu_ids', type=str, default='-1', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
|
| 32 |
|
| 33 |
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
|
| 34 |
parser.add_argument('--name', type=str, default='experiment', help='name of the experiment. It decides where to store samples and models')
|
requirements.txt
CHANGED
|
@@ -13,4 +13,5 @@ termcolor==2.4.0
|
|
| 13 |
torch==2.1.0
|
| 14 |
torchinfo==1.8.0
|
| 15 |
torchvision==0.16.0
|
| 16 |
-
tqdm==4.66.1
|
|
|
|
|
|
| 13 |
torch==2.1.0
|
| 14 |
torchinfo==1.8.0
|
| 15 |
torchvision==0.16.0
|
| 16 |
+
tqdm==4.66.1
|
| 17 |
+
PyAV==12.0.5
|
run.py
CHANGED
|
@@ -28,8 +28,6 @@ def detect_video(video_path):
|
|
| 28 |
print(f"working...")
|
| 29 |
|
| 30 |
model = Validator(val_opt)
|
| 31 |
-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 32 |
-
model.to(device), model.model.to(device)
|
| 33 |
model.load_state_dict(val_opt.ckpt)
|
| 34 |
print("ckpt loaded!")
|
| 35 |
|
|
|
|
| 28 |
print(f"working...")
|
| 29 |
|
| 30 |
model = Validator(val_opt)
|
|
|
|
|
|
|
| 31 |
model.load_state_dict(val_opt.ckpt)
|
| 32 |
print("ckpt loaded!")
|
| 33 |
|