Upload lora-scripts/sd-scripts/finetune/make_captions.py with huggingface_hub
lora-scripts/sd-scripts/finetune/make_captions.py
ADDED
@@ -0,0 +1,210 @@
import argparse
import glob
import os
import json
import random
import sys

from pathlib import Path
from PIL import Image
from tqdm import tqdm
import numpy as np

import torch
from library.device_utils import init_ipex, get_preferred_device
init_ipex()

from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
sys.path.append(os.path.dirname(__file__))
from blip.blip import blip_decoder, is_url
import library.train_util as train_util
from library.utils import setup_logging
setup_logging()
import logging
logger = logging.getLogger(__name__)

DEVICE = get_preferred_device()
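# get_preferred_device() picks the best available backend (e.g. CUDA when
# present, otherwise CPU); init_ipex() above enables Intel IPEX support when
# it is installed.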


IMAGE_SIZE = 384

# A square resize may not be ideal, but the original BLIP source does it this way, so we follow it
IMAGE_TRANSFORM = transforms.Compose(
    [
        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE), interpolation=InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ]
)
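# Note: the Normalize mean/std above are the standard CLIP image statistics,
# which BLIP's preprocessing also uses.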


# Would be nice to share this with the other scripts, but the processing differs slightly...
class ImageLoadingTransformDataset(torch.utils.data.Dataset):
    def __init__(self, image_paths):
        self.images = image_paths

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_path = self.images[idx]

        try:
            image = Image.open(img_path).convert("RGB")
            # convert to tensor temporarily so dataloader will accept it
            tensor = IMAGE_TRANSFORM(image)
        except Exception as e:
            logger.error(f"Could not load image path / 画像を読み込めません: {img_path}, error: {e}")
            return None

        return (tensor, img_path)


def collate_fn_remove_corrupted(batch):
    """Collate function that removes corrupted examples from the dataloader.
    It expects the dataloader to return 'None' when that occurs.
    The 'None's in the batch are removed.
    """
    # Filter out all the Nones (corrupted examples)
    batch = list(filter(lambda x: x is not None, batch))
    return batch
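
# If every sample in a batch fails to load, collate_fn_remove_corrupted returns
# an empty list; the loop in main() then simply has nothing to iterate for that batch.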


def main(args):
    # fix the seed for reproducibility
    seed = args.seed  # + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    if not os.path.exists("blip"):
        args.train_data_dir = os.path.abspath(args.train_data_dir)  # convert to absolute path

        cwd = os.getcwd()
        logger.info(f"Current Working Directory is: {cwd}")
        os.chdir("finetune")
        if not is_url(args.caption_weights) and not os.path.isfile(args.caption_weights):
            args.caption_weights = os.path.join("..", args.caption_weights)

    logger.info(f"load images from {args.train_data_dir}")
    train_data_dir_path = Path(args.train_data_dir)
    image_paths = train_util.glob_images_pathlib(train_data_dir_path, args.recursive)
    logger.info(f"found {len(image_paths)} images.")

    logger.info(f"loading BLIP caption: {args.caption_weights}")
    model = blip_decoder(pretrained=args.caption_weights, image_size=IMAGE_SIZE, vit="large", med_config="./blip/med_config.json")
    model.eval()
    model = model.to(DEVICE)
    logger.info("BLIP loaded")
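
    # blip_decoder accepts either a local checkpoint path or a URL (hence the
    # is_url check above); with the default --caption_weights, the ViT-L
    # captioning checkpoint is fetched from the remote URL.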

    # run captioning
    def run_batch(path_imgs):
        imgs = torch.stack([im for _, im in path_imgs]).to(DEVICE)

        with torch.no_grad():
            if args.beam_search:
                captions = model.generate(
                    imgs, sample=False, num_beams=args.num_beams, max_length=args.max_length, min_length=args.min_length
                )
            else:
                captions = model.generate(
                    imgs, sample=True, top_p=args.top_p, max_length=args.max_length, min_length=args.min_length
                )

        for (image_path, _), caption in zip(path_imgs, captions):
            with open(os.path.splitext(image_path)[0] + args.caption_extension, "wt", encoding="utf-8") as f:
                f.write(caption + "\n")
                if args.debug:
                    logger.info(f"{image_path} {caption}")

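    # run_batch writes each caption next to its source image, as
    # <image basename> + args.caption_extension, one caption file per image.
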
    # option to use a DataLoader to speed up image loading
    if args.max_data_loader_n_workers is not None:
        dataset = ImageLoadingTransformDataset(image_paths)
        data = torch.utils.data.DataLoader(
            dataset,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.max_data_loader_n_workers,
            collate_fn=collate_fn_remove_corrupted,
            drop_last=False,
        )
    else:
        data = [[(None, ip)] for ip in image_paths]

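    # In both branches, each entry of `data` is a list of (tensor, path) pairs;
    # in the fallback branch the tensor slot is None and the image is loaded
    # lazily inside the loop below.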
    b_imgs = []
    for data_entry in tqdm(data, smoothing=0.0):
        for data in data_entry:
            if data is None:
                continue

            img_tensor, image_path = data
            if img_tensor is None:
                try:
                    raw_image = Image.open(image_path)
                    if raw_image.mode != "RGB":
                        raw_image = raw_image.convert("RGB")
                    img_tensor = IMAGE_TRANSFORM(raw_image)
                except Exception as e:
                    logger.error(f"Could not load image path / 画像を読み込めません: {image_path}, error: {e}")
                    continue

            b_imgs.append((image_path, img_tensor))
            if len(b_imgs) >= args.batch_size:
                run_batch(b_imgs)
                b_imgs.clear()
    if len(b_imgs) > 0:
        run_batch(b_imgs)

    logger.info("done!")


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()
    parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ")
    parser.add_argument(
        "--caption_weights",
        type=str,
        default="https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth",
        help="BLIP caption weights (model_large_caption.pth) / BLIP captionの重みファイル(model_large_caption.pth)",
    )
    parser.add_argument(
        "--caption_extention",
        type=str,
        default=None,
        help="extension of caption file (for backward compatibility) / 出力されるキャプションファイルの拡張子(スペルミスしていたのを残してあります)",
    )
    parser.add_argument("--caption_extension", type=str, default=".caption", help="extension of caption file / 出力されるキャプションファイルの拡張子")
    parser.add_argument(
        "--beam_search",
        action="store_true",
        help="use beam search (default Nucleus sampling) / beam searchを使う(このオプション未指定時はNucleus sampling)",
    )
    parser.add_argument("--batch_size", type=int, default=1, help="batch size in inference / 推論時のバッチサイズ")
    parser.add_argument(
        "--max_data_loader_n_workers",
        type=int,
        default=None,
        help="enable image reading by DataLoader with this number of workers (faster) / DataLoaderによる画像読み込みを有効にしてこのワーカー数を適用する(読み込みを高速化)",
    )
    parser.add_argument("--num_beams", type=int, default=1, help="num of beams in beam search / beam search時のビーム数(多いと精度が上がるが時間がかかる)")
    parser.add_argument("--top_p", type=float, default=0.9, help="top_p in Nucleus sampling / Nucleus sampling時のtop_p")
    parser.add_argument("--max_length", type=int, default=75, help="max length of caption / captionの最大長")
    parser.add_argument("--min_length", type=int, default=5, help="min length of caption / captionの最小長")
    parser.add_argument("--seed", default=42, type=int, help="seed for reproducibility / 再現性を確保するための乱数seed")
    parser.add_argument("--debug", action="store_true", help="debug mode")
    parser.add_argument("--recursive", action="store_true", help="search for images in subfolders recursively / サブフォルダを再帰的に検索する")

    return parser


if __name__ == "__main__":
    parser = setup_parser()

    args = parser.parse_args()

    # restore the misspelled option for backward compatibility
    if args.caption_extention is not None:
        args.caption_extension = args.caption_extention

    main(args)
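For reference, a typical invocation from the sd-scripts root might look like the following (the directory name and option values are illustrative, not part of the script):

    python finetune/make_captions.py --batch_size 8 --max_data_loader_n_workers 4 --caption_extension .txt train_data

This writes one .txt caption file per image into train_data, generated with nucleus sampling by default; pass --beam_search (optionally with --num_beams) for deterministic beam-search captions instead.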