add license
- app.py +15 -0
- data.py +15 -12
- original_models.py +25 -0
- requirements.txt +2 -4
app.py
CHANGED
@@ -1,3 +1,18 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 import gradio as gr
 import numpy as np
 import time
data.py
CHANGED
@@ -1,4 +1,4 @@
-# This file is
+# This file is copied from https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/data.py
 
 # MIT License
 
@@ -22,14 +22,19 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-
 from typing import Callable, Dict
+
 import torch
+
 from torch.utils.data import Dataset
+
 import torchvision.transforms.functional as F
-from
+from torchvision import transforms
 import pytorch_lightning as pl
+
 from collections.abc import Iterable
+
+
 # image reader writer
 from pathlib import Path
 from PIL import Image
@@ -92,7 +97,6 @@ def prepare_data(data_path: Path, read_func: Callable = read_image_tensor) -> Dict
     """
     Takes a data_path of a folder which contains subfolders with input, target, etc.
     lablelled by the same names.
-
     :param data_path: Path of the folder containing data
     :param read_func: function that reads data and returns a tensor
     """
@@ -103,7 +107,8 @@ def prepare_data(data_path: Path, read_func: Callable = read_image_tensor) -> Dict
     # checks only files for which there is an target
     # TODO check for images
     name_ls = [file.name for file in (
-        data_path / "target").iterdir() if file.is_file()]
+        data_path / "target").iterdir() if file.is_file()]
+
     subdirs = [data_path / sdn for sdn in subdir_names]
     for sd in subdirs:
         if sd.is_dir():
@@ -130,7 +135,6 @@ class DataDictLoader():
                  max_length: int = 128,
                  shuffle: bool = False):
         """
-
         """
 
         self.batch_size = batch_size
@@ -139,7 +143,7 @@ class DataDictLoader():
         self.batch_size = batch_size
 
         self.data_dict = data_dict
-        self.dataset_len = data_dict['len']
+        self.dataset_len = data_dict['len']
         self.len = self.dataset_len if max_length is None else min(
             self.dataset_len, max_length)
         # Calculate # batches
@@ -147,7 +151,6 @@ class DataDictLoader():
         if remainder > 0:
             num_batches += 1
         self.num_batches = num_batches
-
 
     def __iter__(self):
         if self.shuffle:
@@ -182,8 +185,8 @@ class PatchDataModule(pl.LightningDataModule):
         self.len = data_dict['len']
 
         self.batch_size = batch_size
-        self.patch_size = patch_size
-        self.patch_num = patch_num
+        self.patch_size = patch_size
+        self.patch_num = patch_num
 
     def dataloader(self, data_dict, **kwargs):
         return DataDictLoader(data_dict, **kwargs)
@@ -191,7 +194,7 @@ class PatchDataModule(pl.LightningDataModule):
     def train_dataloader(self):
         patches = self.cut_patches()
         return self.dataloader(patches, batch_size=self.batch_size, shuffle=True,
-                               max_length=self.patch_num)
+                               max_length=self.patch_num)
 
     def val_dataloader(self):
         return self.dataloader(self.data_dict, batch_size=1)
@@ -227,4 +230,4 @@ class ImageDataset(Dataset):
         return read_image_tensor(file), file.name
 
     def __len__(self) -> int:
-        return len(self.file_paths)
+        return len(self.file_paths)
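For context (not part of the commit itself): the batch-count bookkeeping touched in the DataDictLoader hunks amounts to a ceiling division of the effective dataset length by the batch size, so a partial final batch still counts as one batch. A minimal standalone sketch of that arithmetic, using a hypothetical helper name count_batches rather than the Space's actual code:

def count_batches(dataset_len: int, batch_size: int) -> int:
    # floor division gives the number of full batches;
    # any leftover samples form one additional, smaller batch
    num_batches, remainder = divmod(dataset_len, batch_size)
    if remainder > 0:
        num_batches += 1
    return num_batches

print(count_batches(10, 4))  # 3 -> batches of 4, 4, 2
print(count_batches(8, 4))   # 2 -> batches of 4, 4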
original_models.py
CHANGED
@@ -1,3 +1,28 @@
+# This file is copied from https://github.com/rnwzd/FSPBT-Image-Translation/blob/master/original_models.py
+
+# MIT License
+
+# Copyright (c) 2022 Lorenzo Breschi
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
 import torch
 import torch.nn as nn
 from torch.autograd import Variable
requirements.txt
CHANGED
@@ -2,13 +2,11 @@
 bigdl-nano[pytorch]==2.1.0b20220606
 setuptools==58.0.4
 protobuf==3.20.1
-
+
 # inference
 neural_compressor
 onnx==1.9.0
 onnxruntime==1.10.0
 onnxruntime-extensions
 
-
-pathlib
-gif
+pathlib