Upload 3 files
- README.md +12 -0
- app.py +158 -0
- requirements.txt +2 -0
README.md
ADDED
@@ -0,0 +1,12 @@
---
title: Fashion Classification
emoji: ⚡
colorFrom: purple
colorTo: pink
sdk: gradio
sdk_version: 5.29.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,158 @@
import gradio as gr
import open_clip
import torch
import requests
import numpy as np
from PIL import Image
from io import BytesIO

# Sidebar content
sidebar_markdown = """
This project is designed to classify clothing types to help Khun Nu get dressed each day 🧥👗👕
It supports classification of up to 200 categories.
## Documentation
📚 [Blog Post](https://www.marqo.ai/blog/search-model-for-fashion)
📝 [Use Case Blog Post](https://www.marqo.ai/blog/ecommerce-image-classification-with-marqo-fashionclip)
## Code
💻 [GitHub Repo](https://github.com/marqo-ai/marqo-FashionCLIP)
🤝 [Google Colab](https://colab.research.google.com/drive/1nq978xFJjJcnyrJ2aE5l82GHAXOvTmfd?usp=sharing)
🤗 [Hugging Face Collection](https://huggingface.co/collections/Marqo/marqo-fashionclip-and-marqo-fashionsiglip-66b43f2d09a06ad2368d4af6)
## Citation
If you use Fashion-Classification, please cite us:
```
@software{Jung_Marqo-FashionCLIP_and_Marqo-FashionSigLIP_2024,
author = {Jung, Myong Chol and Clark, Jesse},
month = aug,
title = {{Marqo-FashionCLIP and Marqo-FashionSigLIP}},
url = {https://github.com/marqo-ai/marqo-FashionCLIP},
version = {1.0.0},
year = {2024}
}
```
"""

# List of fashion items
items = [
    'abaya', 'anorak', 'apron', 'ball gown', 'bandanna', 'baseball cap',
    'beanie', 'belt', 'beret', 'Bermuda shorts', 'baby clothes',
    'bib', 'bikini', 'blazer', 'blouse', 'boots', 'bow tie', 'boxer shorts', 'boxers', 'bra',
    'bracelet', 'breeches', 'buckle', 'button', 'camouflage', 'cap', 'cape', 'cardigan', 'cloak', 'clogs',
    'coat', 'corset', 'crown', 'cuff links', 'dress', 'dress shirt', 'dungarees', 'earmuffs',
    'earrings', 'flannel shirt', 'flip-flops', 'fur coat', 'gilet', 'glasses', 'gloves', 'gown', 'handbag',
    'hat', 'Hawaiian shirt', 'helmet', 'hijab', 'high heels', 'hoodie', 'hospital gown', 'jacket',
    'jeans', 'jewelry', 'jumper', 'jumpsuit', 'khakis', 'kilt', 'knickers', 'lab coat',
    'leather jacket', 'leggings', 'leotard', 'life jacket', 'lingerie', 'loafers',
    'miniskirt', 'mittens', 'necklace', 'nightgown', 'nightshirt', 'onesies', 'pajamas', 'pants',
    'pantsuit', 'pantyhose', 'parka', 'polo shirt', 'poncho', 'purse', 'raincoat',
    'ring', 'robe', 'rugby shirt', 'sandals', 'scarf', 'scrubs', 'shirt', 'shoes', 'shorts', 'skirt',
    'slippers', 'sneakers', 'socks', 'spacesuit', 'stockings', 'stole', 'suit',
    'sun hat', 'sundress', 'sunglasses', 'suspenders', 'sweater', 'sweatpants', 'sweatshirt', 'swimsuit',
    't-shirt', 'tank top', 'tiara', 'tie', 'tie clip', 'tights', 'toga', 'top', 'top coat', 'top hat', 'train',
    'trench coat', 'trousers', 'trunks', 'tube top', 'turban', 'turtleneck', 'tutu', 'tuxedo', 'umbrella',
    'veil', 'vest', 'waistcoat', 'wedding gown', 'wetsuit',
    'windbreaker', 'jogger', 'palazzo', 'cargo', 'dresspants', 'chinos',
    'crop top', 'romper', 'insulated jacket', 'fleece', 'rain jacket',
    'running jacket', 'graphic top', 'legging', 'skort',
    'sports bra', 'water shorts', 'goggle', 'glove', 'mitten',
    'leg gaiter', 'neck gaiter', 'watch', 'bag', 'swim trunk',
    'pocket watch', 'insoles', 'climbing shoes',
]


# Initialize the model and tokenizer
model_name = 'hf-hub:Marqo/marqo-fashionSigLIP'
model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms(model_name)
tokenizer = open_clip.get_tokenizer(model_name)

def generate_description(item):
    # Build a natural-language prompt for each category. Entries in `items` are
    # lowercase, so the comparisons below use lowercase names as well.
    if "pants" in item or item in ["leggings", "jogger", "cargo", "chinos", "palazzo", "dresspants", "sweatpants", "pant", "legging", "skort", "trouser"]:
        return f"A pair of {item} pants"
    elif item in ["dress", "blouse", "t-shirt", "tank top", "sweater", "cardigan", "hoodie", "coat", "jacket", "polo shirt", "crop top", "romper", "blazer", "vest", "bodysuit", "maxi dress", "graphic top", "shirt", "base layer top", "base layer bottom", "swimsuit", "rashguard", "cover up", "tuxedo"]:
        return f"A {item}"
    elif item in ["hat", "sunglasses", "glasses", "sun hat", "goggle", "balaclava"]:
        return f"A {item} worn on the head or face"
    elif item in ["shoes", "sandals", "heels", "trainers", "boots", "slippers", "sneakers", "insoles", "socks"]:
        return f"A pair of {item} worn on the feet"
    elif item in ["jeans", "skirt", "shorts", "dungarees", "poncho", "overalls", "boxers", "swim trunk", "ring", "necklace", "earrings", "pocket watch"]:
        return f"A {item} piece of clothing"
    elif item in ["boxing gloves", "glove", "mitten"]:
        return f"An item of {item} worn on the hands"
    else:
        return f"A fashion item called {item}"

items_desc = [generate_description(item) for item in items]
text = tokenizer(items_desc)

# Encode the text features for all category prompts once at startup
with torch.no_grad(), torch.amp.autocast('cuda'):
    text_features = model.encode_text(text)
    text_features /= text_features.norm(dim=-1, keepdim=True)

# Prediction function
def predict(image, url):
    if url:
        response = requests.get(url)
        image = Image.open(BytesIO(response.content))

    # Ensure a 3-channel RGB image before applying the preprocessing transform
    image = image.convert("RGB")
    processed_image = preprocess_val(image).unsqueeze(0)

    with torch.no_grad(), torch.amp.autocast('cuda'):
        image_features = model.encode_image(processed_image)
        image_features /= image_features.norm(dim=-1, keepdim=True)

        text_probs = (100 * image_features @ text_features.T).softmax(dim=-1)

    # Rank all categories by probability and keep the ten most likely
    sorted_confidences = sorted(
        {items[i]: float(text_probs[0, i]) for i in range(len(items))}.items(),
        key=lambda x: x[1],
        reverse=True
    )
    top_10_confidences = dict(sorted_confidences[:10])

    return image, top_10_confidences

# Clear function
def clear_fields():
    return None, ""

# Gradio interface
title = "Fashion Item Classifier with Chuai Khun Nu Taengtua Noi"
description = "Upload an image or provide a URL of a fashion item to classify it using [Marqo-FashionSigLIP](https://huggingface.co/Marqo/marqo-fashionSigLIP)!"

examples = [
    ["images/dress.jpg", "Dress"],
    ["images/sweatpants.jpg", "Sweatpants"],
    ["images/t-shirt.jpg", "T-Shirt"],
    ["images/hat.jpg", "Hat"],
    ["images/blouse.jpg", "Blouse"],
    ["images/cargo.jpg", "Cargos"],
    ["images/sunglasses.jpg", "Sunglasses"],
    ["images/polo-shirt.jpg", "Polo Shirt"],
]

with gr.Blocks(css="""
.remove-btn {
    font-size: 24px !important;  /* Increase the font size of the cross button */
    line-height: 24px !important;
    width: 30px !important;  /* Increase the width */
    height: 30px !important;  /* Increase the height */
}
""") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(f"# {title}")
            gr.Markdown(description)
            gr.Markdown(sidebar_markdown)
            gr.Markdown(" ", elem_id="vertical-line")  # Add an empty Markdown with a custom ID
        with gr.Column(scale=2):
            input_image = gr.Image(type="pil", label="Upload Fashion Item Image", height=312)
            input_url = gr.Textbox(label="Or provide an image URL")
            with gr.Row():
                predict_button = gr.Button("Classify")
                clear_button = gr.Button("Clear")
            gr.Markdown("Or click on one of the images below to classify it:")
            gr.Examples(examples=examples, inputs=input_image)
            output_label = gr.Label(num_top_classes=6)
    predict_button.click(predict, inputs=[input_image, input_url], outputs=[input_image, output_label])
    clear_button.click(clear_fields, outputs=[input_image, input_url])

# Launch the interface
demo.launch()
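For reference, the zero-shot classification logic in app.py can also be exercised outside the Gradio UI. The snippet below is a minimal sketch using the same open_clip calls as the app; the file name example.jpg and the short label list are placeholders, and it assumes the Marqo/marqo-fashionSigLIP weights can be downloaded from the Hugging Face Hub.

```python
import open_clip
import torch
from PIL import Image

model_name = 'hf-hub:Marqo/marqo-fashionSigLIP'
model, _, preprocess_val = open_clip.create_model_and_transforms(model_name)
tokenizer = open_clip.get_tokenizer(model_name)

# Placeholder labels and image path; substitute your own
labels = ['dress', 'jeans', 't-shirt', 'sneakers']
text = tokenizer([f"A fashion item called {label}" for label in labels])
image = preprocess_val(Image.open("example.jpg").convert("RGB")).unsqueeze(0)

with torch.no_grad():
    text_features = model.encode_text(text)
    image_features = model.encode_image(image)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    probs = (100 * image_features @ text_features.T).softmax(dim=-1)

# Print labels with their probabilities, highest first
for label, p in sorted(zip(labels, probs[0].tolist()), key=lambda x: -x[1]):
    print(f"{label}: {p:.3f}")
```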
requirements.txt
ADDED
@@ -0,0 +1,2 @@
open_clip_torch
transformers
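Note that app.py also imports gradio, torch, requests, numpy, and Pillow. On a Gradio Space these appear to arrive transitively (the SDK image ships gradio, and open_clip_torch pulls in torch), but for running the app elsewhere a fuller requirements file might look like the sketch below; the extra package names are inferred from the imports and are not part of this commit.

```
open_clip_torch
transformers
torch
gradio
requests
numpy
pillow
```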