# /// script
# dependencies = [
#     "transformers>=4.46.0",  # eval_strategy and Trainer's processing_class need a recent release
#     "torch",
#     "torchvision",
#     "datasets",
#     "albumentations",
#     "accelerate",
#     "huggingface_hub",
#     "Pillow",
#     "evaluate",
#     "pycocotools",
# ]
# ///
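# Runnable as a self-contained script, e.g. `uv run train_rtdetr_detection.py`
# (uv resolves the inline dependencies declared above).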
"""
RT-DETR Fine-tuning Script for Object Detection
Fine-tunes RT-DETR on the CPPE-5 dataset (medical PPE detection)
"""
import os
from functools import partial

import torch
from datasets import load_dataset
from huggingface_hub import login
from transformers import (
    RTDetrForObjectDetection,
    RTDetrImageProcessor,
    Trainer,
    TrainingArguments,
)
# Login
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
    print("Logged in to Hugging Face Hub")
# ============================================================================
# Configuration
# ============================================================================
MODEL_NAME = "PekingU/rtdetr_r50vd" # RT-DETR with ResNet-50 backbone
DATASET_NAME = "cppe-5" # Medical PPE detection dataset (5 classes)
OUTPUT_DIR = "rtdetr-cppe5-detection"
HUB_MODEL_ID = "Godsonntungi2/rtdetr-cppe5-detection"
# Training parameters (optimized for A10G 24GB)
BATCH_SIZE = 4
LEARNING_RATE = 1e-5
NUM_EPOCHS = 10
MAX_TRAIN_SAMPLES = 500  # Limit for demo (the full train split has 1,000 images)
print(f"Loading model: {MODEL_NAME}")
print(f"Dataset: {DATASET_NAME}")
# ============================================================================
# Load Dataset
# ============================================================================
print("\nLoading CPPE-5 dataset...")
dataset = load_dataset(DATASET_NAME)
print(f"Train samples: {len(dataset['train'])}")
print(f"Test samples: {len(dataset['test'])}")
# Limit samples for demo
if MAX_TRAIN_SAMPLES:
    dataset["train"] = dataset["train"].select(range(min(MAX_TRAIN_SAMPLES, len(dataset["train"]))))
    # CPPE-5's test split has only 29 images, so min() keeps it intact
    dataset["test"] = dataset["test"].select(range(min(100, len(dataset["test"]))))
    print(f"Using {len(dataset['train'])} train, {len(dataset['test'])} test samples")
# Get categories - CPPE-5 uses ClassLabel in objects dict
categories = dataset["train"].features["objects"]["category"].feature.names
id2label = {i: label for i, label in enumerate(categories)}
label2id = {label: i for i, label in enumerate(categories)}
print(f"Classes: {categories}")
# ============================================================================
# Image Processor & Model
# ============================================================================
print("\nLoading image processor and model...")
image_processor = RTDetrImageProcessor.from_pretrained(MODEL_NAME)
model = RTDetrForObjectDetection.from_pretrained(
    MODEL_NAME,
    id2label=id2label,
    label2id=label2id,
    ignore_mismatched_sizes=True,  # Important: class head size changes
)
print(f"Model loaded with {len(id2label)} classes")
# ============================================================================
# Data Preprocessing
# ============================================================================
def format_annotations(image_id, objects):
    """Convert CPPE-5 annotations to the COCO format expected by RT-DETR."""
    annotations = []
    for i, (bbox, category) in enumerate(zip(objects["bbox"], objects["category"])):
        # CPPE-5 bbox format: [x, y, width, height] (already COCO-style)
        annotations.append({
            "id": i,
            "image_id": image_id,
            "category_id": category,
            "bbox": bbox,
            "area": bbox[2] * bbox[3],
            "iscrowd": 0,
        })
    return {
        "image_id": image_id,
        "annotations": annotations,
    }
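# Illustrative example: objects = {"bbox": [[10, 20, 50, 60]], "category": [2]}
# -> {"image_id": 0, "annotations": [{"id": 0, "image_id": 0, "category_id": 2,
#    "bbox": [10, 20, 50, 60], "area": 3000, "iscrowd": 0}]}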
def transform_batch(examples, image_processor):
    """Transform a batch of examples for RT-DETR"""
    images = []
    annotations = []
    for idx, (image, objects) in enumerate(zip(examples["image"], examples["objects"])):
        # Convert to RGB if needed
        if image.mode != "RGB":
            image = image.convert("RGB")
        images.append(image)
        # Format annotations
        annotations.append(format_annotations(idx, objects))
    # Process with image processor (resizes, normalizes, and converts boxes)
    result = image_processor(
        images=images,
        annotations=annotations,
        return_tensors="pt",
    )
    return result
# Apply transforms lazily (with_transform runs on each batch as it is accessed)
print("\nPreparing datasets...")
transform_fn = partial(transform_batch, image_processor=image_processor)
train_dataset = dataset["train"].with_transform(transform_fn)
eval_dataset = dataset["test"].with_transform(transform_fn)
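# Sanity check (illustrative): RTDetrImageProcessor resizes to a fixed 640x640 by
# default, which is what lets the collator below stack pixel_values directly.
first_item = train_dataset[0]
print(f"pixel_values shape: {tuple(first_item['pixel_values'].shape)}")  # expect (3, 640, 640)
print(f"label keys: {list(first_item['labels'].keys())}")  # e.g. class_labels, boxes, area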
# ============================================================================
# Custom Collator
# ============================================================================
def collate_fn(batch):
    """Custom collate function for object detection"""
    # Images share a fixed size after preprocessing, so they can be stacked;
    # labels stay a list because each image has a different number of boxes.
    pixel_values = torch.stack([item["pixel_values"] for item in batch])
    labels = [item["labels"] for item in batch]
    return {
        "pixel_values": pixel_values,
        "labels": labels,
    }
# ============================================================================
# Training Arguments
# ============================================================================
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    # Training params
    num_train_epochs=NUM_EPOCHS,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    learning_rate=LEARNING_RATE,
    weight_decay=0.01,
    # Optimization
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    fp16=True,  # Mixed precision
    gradient_accumulation_steps=4,
    # Logging & saving
    logging_steps=10,
    eval_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
    # Hub
    push_to_hub=True,
    hub_model_id=HUB_MODEL_ID,
    hub_strategy="end",
    # Other
    remove_unused_columns=False,  # keep "image"/"objects" so with_transform can see them
    dataloader_num_workers=2,
)
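# Note: with gradient_accumulation_steps=4, the effective batch size is 4 x 4 = 16.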
# ============================================================================
# Trainer
# ============================================================================
print("\nInitializing Trainer...")
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    processing_class=image_processor,
    data_collator=collate_fn,
)
# ============================================================================
# Train!
# ============================================================================
print("\nStarting training...")
print(f" Epochs: {NUM_EPOCHS}")
print(f" Batch size: {BATCH_SIZE}")
print(f" Learning rate: {LEARNING_RATE}")
print("="*60)
trainer.train()
# ============================================================================
# Save & Push to Hub
# ============================================================================
print("\nSaving model...")
trainer.save_model()
image_processor.save_pretrained(OUTPUT_DIR)
print("\nPushing to Hub...")
trainer.push_to_hub()
print("\n" + "="*60)
print("Training complete!")
print(f"Model: https://huggingface.co/{HUB_MODEL_ID}")
print("="*60)
# ============================================================================
# Quick Inference Test
# ============================================================================
print("\nRunning quick inference test...")
test_image = dataset["test"][0]["image"]
if test_image.mode != "RGB":
    test_image = test_image.convert("RGB")
inputs = image_processor(images=test_image, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
    outputs = model(**inputs)
# Post-process: map predictions back to the original image's (height, width) space
results = image_processor.post_process_object_detection(
    outputs,
    target_sizes=torch.tensor([test_image.size[::-1]]),  # PIL size is (w, h); reverse to (h, w)
    threshold=0.5,
)[0]
print(f"Detected {len(results['labels'])} objects:")
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"  - {id2label[label.item()]}: {score.item():.2f} at {[round(c, 1) for c in box.tolist()]}")
print("\nDone!")