""" |
|
|
RT-DETR Fine-tuning Script for Object Detection |
|
|
Fine-tunes RT-DETR on the CPPE-5 dataset (medical PPE detection) |
|
|
""" |

import os
from functools import partial

import torch

from datasets import load_dataset
from transformers import (
    RTDetrForObjectDetection,
    RTDetrImageProcessor,
    TrainingArguments,
    Trainer,
)
from huggingface_hub import login

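# Log in to the Hugging Face Hub when a token is available; required for
# push_to_hub at the end of training.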
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
    print("Logged in to Hugging Face Hub")

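# Model, dataset, and output locations.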
MODEL_NAME = "PekingU/rtdetr_r50vd"
DATASET_NAME = "cppe-5"
OUTPUT_DIR = "rtdetr-cppe5-detection"
HUB_MODEL_ID = "Godsonntungi2/rtdetr-cppe5-detection"

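# Training hyperparameters. MAX_TRAIN_SAMPLES caps the train split for quick
# experiments; set it to None to train on the full dataset.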
BATCH_SIZE = 4
LEARNING_RATE = 1e-5
NUM_EPOCHS = 10
MAX_TRAIN_SAMPLES = 500

print(f"Loading model: {MODEL_NAME}")
print(f"Dataset: {DATASET_NAME}")

print("\nLoading CPPE-5 dataset...") |
|
|
dataset = load_dataset(DATASET_NAME) |
|
|
print(f"Train samples: {len(dataset['train'])}") |
|
|
print(f"Test samples: {len(dataset['test'])}") |
|
|
|
|
|
|
|
|
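# Optionally subsample both splits so a full run finishes quickly.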
if MAX_TRAIN_SAMPLES:
    dataset["train"] = dataset["train"].select(range(min(MAX_TRAIN_SAMPLES, len(dataset["train"]))))
    dataset["test"] = dataset["test"].select(range(min(100, len(dataset["test"]))))
    print(f"Using {len(dataset['train'])} train, {len(dataset['test'])} test samples")

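# Build the label mappings from the dataset's category names; the model uses
# them to size and name its classification head.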
categories = dataset["train"].features["objects"]["category"].feature.names
id2label = {i: label for i, label in enumerate(categories)}
label2id = {label: i for i, label in enumerate(categories)}
print(f"Classes: {categories}")

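# ignore_mismatched_sizes=True swaps the pretrained COCO classification head
# for a freshly initialized one sized for the CPPE-5 classes.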
print("\nLoading image processor and model...") |
|
|
image_processor = RTDetrImageProcessor.from_pretrained(MODEL_NAME) |
|
|
|
|
|
model = RTDetrForObjectDetection.from_pretrained( |
|
|
MODEL_NAME, |
|
|
id2label=id2label, |
|
|
label2id=label2id, |
|
|
ignore_mismatched_sizes=True, |
|
|
) |
|
|
print(f"Model loaded with {len(id2label)} classes") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
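# CPPE-5 stores boxes as [x_min, y_min, width, height] (COCO convention),
# which matches the annotation format the RT-DETR image processor expects.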
def format_annotations(image_id, objects, image_size):
    """Convert dataset annotations to COCO format for RT-DETR."""
    annotations = []
    for i, (bbox, category) in enumerate(zip(objects["bbox"], objects["category"])):
        annotations.append({
            "id": i,
            "image_id": image_id,
            "category_id": category,
            "bbox": bbox,
            "area": bbox[2] * bbox[3],
            "iscrowd": 0,
        })
    return {
        "image_id": image_id,
        "annotations": annotations,
    }


def transform_batch(examples, image_processor):
    """Transform a batch of examples for RT-DETR."""
    images = []
    annotations = []

    for idx, (image, objects) in enumerate(zip(examples["image"], examples["objects"])):
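        # Ensure 3-channel RGB input (some images may be grayscale or RGBA).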
        if image.mode != "RGB":
            image = image.convert("RGB")
        images.append(image)

        anno = format_annotations(idx, objects, image.size)
        annotations.append(anno)

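    # The processor resizes and normalizes the images and converts the COCO
    # annotations into the label dicts RT-DETR trains on.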
    result = image_processor(
        images=images,
        annotations=annotations,
        return_tensors="pt",
    )

    return result


print("\nPreparing datasets...") |
|
|
transform_fn = partial(transform_batch, image_processor=image_processor) |
|
|
|
|
|
|
|
|
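# with_transform applies the preprocessing lazily, per batch, whenever
# examples are accessed, so nothing is precomputed or cached.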
train_dataset = dataset["train"].with_transform(transform_fn)
eval_dataset = dataset["test"].with_transform(transform_fn)


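# Pixel tensors share a shape after resizing and can be stacked, but each
# image has a different number of boxes, so the label dicts stay in a list.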
def collate_fn(batch):
    """Custom collate function for object detection."""
    pixel_values = torch.stack([item["pixel_values"] for item in batch])
    labels = [item["labels"] for item in batch]

    return {
        "pixel_values": pixel_values,
        "labels": labels,
    }


training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
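    # Optimization: schedule length, batch size, and regularization.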
    num_train_epochs=NUM_EPOCHS,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    learning_rate=LEARNING_RATE,
    weight_decay=0.01,
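    # Cosine decay with 10% warmup; fp16 requires GPU support. With gradient
    # accumulation the effective batch size is BATCH_SIZE * 4 = 16.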
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    fp16=True,
    gradient_accumulation_steps=4,
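    # Evaluate and checkpoint every epoch; keep at most two checkpoints and
    # reload the one with the lowest eval loss at the end.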
    logging_steps=10,
    eval_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
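    # Push the final model to the Hub once training ends.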
    push_to_hub=True,
    hub_model_id=HUB_MODEL_ID,
    hub_strategy="end",
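    # Keep the raw dataset columns so the on-the-fly transform can read them.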
    remove_unused_columns=False,
    dataloader_num_workers=2,
)

print("\nInitializing Trainer...") |
|
|
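# processing_class replaces the deprecated tokenizer argument; the Trainer
# saves and pushes the image processor together with the model.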
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    processing_class=image_processor,
    data_collator=collate_fn,
)

print("\nStarting training...") |
|
|
print(f" Epochs: {NUM_EPOCHS}") |
|
|
print(f" Batch size: {BATCH_SIZE}") |
|
|
print(f" Learning rate: {LEARNING_RATE}") |
|
|
print("="*60) |
|
|
|
|
|
trainer.train() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
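# Persist the best checkpoint and processor locally, then upload to the Hub.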
print("\nSaving model...") |
|
|
trainer.save_model() |
|
|
image_processor.save_pretrained(OUTPUT_DIR) |
|
|
|
|
|
print("\nPushing to Hub...") |
|
|
trainer.push_to_hub() |
|
|
|
|
|
print("\n" + "="*60) |
|
|
print("Training complete!") |
|
|
print(f"Model: https://huggingface.co/{HUB_MODEL_ID}") |
|
|
print("="*60) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
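# Quick sanity check: run the fine-tuned model on a single test image.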
print("\nRunning quick inference test...") |
|
|
test_image = dataset["test"][0]["image"] |
|
|
if test_image.mode != "RGB": |
|
|
test_image = test_image.convert("RGB") |
|
|
|
|
|
inputs = image_processor(images=test_image, return_tensors="pt") |
|
|
inputs = {k: v.to(model.device) for k, v in inputs.items()} |
|
|
|
|
|
with torch.no_grad():
    outputs = model(**inputs)

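# Rescale the normalized predictions back to the original image size;
# target_sizes expects (height, width), hence the reversed PIL size.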
results = image_processor.post_process_object_detection(
    outputs,
    target_sizes=torch.tensor([test_image.size[::-1]]),
    threshold=0.5,
)[0]

print(f"Detected {len(results['labels'])} objects:")
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f" - {id2label[label.item()]}: {score.item():.2f}")

print("\nDone!")