Runtime error
Upload 3 files
- app.py +68 -0
- checkpoint.tar +3 -0
- requirements.txt +1 -0
app.py
ADDED
@@ -0,0 +1,68 @@
+import os
+import torch
+import torch.nn as nn
+import timm
+from torchvision import transforms
+from torchvision.transforms.functional import InterpolationMode
+import gradio as gr
+from PIL import Image
+import torch.nn.functional as F
+
+# Global configuration
+CFG = {
+    'IMG_SIZE': 224
+}
+
+class MultiLabelClassificationModel(nn.Module):
+    def __init__(self, num_labels):
+        super(MultiLabelClassificationModel, self).__init__()
+
+        # Image feature extractor
+        self.cnn = timm.create_model("timm/convnext_base.clip_laion2b_augreg_ft_in12k_in1k", pretrained=True, drop_rate=0.05, drop_path_rate=0.05, in_chans=3)
+
+        # Multi-label classification head
+        self.classification_head = nn.Linear(1000, num_labels)
+
+    def forward(self, images):
+        # CNN backbone features
+        features = self.cnn(images)
+        features_flat = features.view(features.size(0), -1)
+
+        # Multi-label classification
+        logits = self.classification_head(features_flat)
+        # probs = torch.sigmoid(logits)
+
+        return logits
+
+test_transform = transforms.Compose([
+    transforms.Resize(size=(CFG['IMG_SIZE'], CFG['IMG_SIZE']), interpolation=InterpolationMode.BICUBIC),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+])
+
+model = MultiLabelClassificationModel(num_labels=13)
+model.load_state_dict(torch.load('checkpoint.tar', map_location='cpu')['model_state_dict'])
+model.eval()  # set the model to evaluation mode
+
+# Predefined list of labels
+labels = ['Mold', 'blight', 'greening', 'healthy', 'measles',
+          'mildew', 'mite', 'rot', 'rust', 'scab', 'scorch', 'spot', 'virus']
+
+def predict(image_path):
+    image = Image.open(image_path).convert('RGB')  # force 3 channels for the transform
+    image = test_transform(image).unsqueeze(0)
+    with torch.no_grad():
+        logits = model(image)
+        probs = F.softmax(logits, dim=1)  # apply softmax to turn logits into probabilities
+    result = {label: float(probs[0][i]) for i, label in enumerate(labels)}
+    return result
+
+app = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type='filepath'),
+    outputs=gr.Label(),
+    title='Multi-Label Image Classification',
+    description='Automatically classify images into the following categories: ' + ', '.join(labels) + '.'
+)
+
+app.launch(share=True)
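Note: gr.Interface wraps a plain Python function, so the classifier can also be exercised directly before launching the app. A minimal sketch, assuming app.py and checkpoint.tar sit in the working directory and "leaf.jpg" is a placeholder test image, not a file in this repository:

    from app import predict

    scores = predict("leaf.jpg")          # hypothetical local test image
    top = max(scores, key=scores.get)
    print(f"{top}: {scores[top]:.3f}")    # highest-probability label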
checkpoint.tar
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16ff8a0bc11b36857571ace7153d223c26860e7afbffa1de4b16a91ba9e75085
+size 1063686374
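Note: app.py expects this LFS file to be a torch.save archive containing a 'model_state_dict' key. A minimal sketch of writing a compatible checkpoint at training time; the nn.Linear stand-in is purely illustrative, in practice it would be the trained MultiLabelClassificationModel:

    import torch
    import torch.nn as nn

    model = nn.Linear(10, 13)  # placeholder for the trained model
    torch.save({'model_state_dict': model.state_dict()}, 'checkpoint.tar')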
requirements.txt
ADDED
@@ -0,0 +1 @@
+huggingface_hub
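Note: app.py also imports torch, timm, torchvision, gradio and Pillow. Unless the Space's base image already provides them, a fuller requirements.txt would likely need to list them as well, e.g. (standard PyPI names, unpinned; versions left to the builder):

    huggingface_hub
    torch
    torchvision
    timm
    gradio
    Pillow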