Florence2LanguageForConditionalGeneration error
[✗] Error processing 39-aP8nZGwTiMmB2-m_aug3.jpg: 'Florence2LanguageForConditionalGeneration' object has no attribute 'generate'
[✗] Error processing 39-aP8nZGwTiMmB2-m_aug4.jpg: 'Florence2LanguageForConditionalGeneration' object has no attribute 'generate'
[✗] Error processing 39-R-QRYTw8kjVhw.jpg: 'Florence2LanguageForConditionalGeneration' object has no attribute 'generate'
The code I am using:
from transformers import AutoProcessor
from PIL import Image
import os
import json
import torch
import sys

# Point to your custom repo code (adjust as needed)
sys.path.append("E:/magidataset/magiv3")  # Path where modeling_florence2.py is located

# Import ONLY the top-level model
from modeling_florence2 import Florence2ForConditionalGeneration as MagiV3

# Model and processor
model_id = "ragavsachdeva/magiv3"
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = MagiV3.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    trust_remote_code=True
).cuda().eval()

print("Loaded model class:", type(model))

image_dir = "E:/magidataset/mlpimage"
output_jsonl = "magiv3_scenegraph_outputs.jsonl"

with open(output_jsonl, "w", encoding="utf-8") as f_out:
    for fname in os.listdir(image_dir):
        if not fname.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue

        img_path = os.path.join(image_dir, fname)
        image = Image.open(img_path).convert("RGB")
        output = {"file_name": fname}

        try:
            # These methods must exist in Florence2ForConditionalGeneration
            output["scenegraph"] = model.predict_detections_and_associations([image], processor)[0]
            output["ocr"] = model.predict_ocr([image], processor)[0]
            output["grounding"] = model.predict_character_grounding([image], ["<image>"], processor)[0]

            f_out.write(json.dumps(output, ensure_ascii=False) + "\n")
            print(f"[✓] Processed: {fname}")
        except Exception as e:
            print(f"[✗] Error processing {fname}: {e}")