furkanycy committed
Commit e3e98a1 · verified · 1 Parent(s): 270ea53

Update README.md

Files changed (1):
  1. README.md +52 -0
README.md CHANGED
@@ -14,6 +14,7 @@ For a complete overview of the project, including all related models, datasets,
 ```python
 from transformers import MllamaForConditionalGeneration, AutoProcessor
 from peft import PeftModel
+from PIL import Image
 
 # Load base model
 base_model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct"
@@ -23,6 +24,57 @@ processor = AutoProcessor.from_pretrained(base_model_name)
 # Load LoRA adapter
 adapter_path = "DermaVLM/DermatoLLama-200k"
 model = PeftModel.from_pretrained(model, adapter_path)
+
+# Inference
+image_path = "DERM12345.jpg"
+image = Image.open(image_path).convert("RGB")
+prompt_text = "Describe the image in detail."
+messages = []
+content_list = []
+if image:
+    content_list.append({"type": "image"})
+
+# Add the text part of the prompt
+content_list.append({"type": "text", "text": prompt_text})
+messages.append({"role": "user", "content": content_list})
+
+input_text = processor.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    tokenize=False,
+)
+
+# Prepare final inputs
+inputs = processor(
+    images=image,
+    text=input_text,
+    add_special_tokens=False,
+    return_tensors="pt",
+).to(model.device)
+
+generation_config = {
+    "max_new_tokens": 256,
+    "do_sample": True,
+    "temperature": 0.4,
+    "top_p": 0.95,
+}
+
+input_length = inputs.input_ids.shape[1]
+
+with torch.no_grad():
+    outputs = model.generate(
+        **inputs,
+        **generation_config,
+        pad_token_id=(
+            processor.tokenizer.pad_token_id
+            if processor.tokenizer.pad_token_id is not None
+            else processor.tokenizer.eos_token_id
+        ),
+    )
+generated_tokens = outputs[0][input_length:]
+raw_output = processor.decode(generated_tokens, skip_special_tokens=True)
+
+print(raw_output)
 ```
 
  ## Citation
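
As committed, the new inference block relies on two things that sit outside the hunk context: a `torch` import (needed for `torch.no_grad()`) and the base-model construction that defines `model` and `model.device` (old lines 20-22 are not shown in the diff). Below is a minimal sketch of that elided prologue; the `torch_dtype` and `device_map` arguments are assumptions for illustration, not values taken from the committed README.

```python
# Assumed prologue (elided from the diff): imports plus base model and processor setup.
import torch
from transformers import MllamaForConditionalGeneration, AutoProcessor
from peft import PeftModel
from PIL import Image

base_model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct"
model = MllamaForConditionalGeneration.from_pretrained(
    base_model_name,
    torch_dtype=torch.bfloat16,  # assumption: pick a dtype your hardware supports
    device_map="auto",           # assumption: let accelerate place the weights
)
processor = AutoProcessor.from_pretrained(base_model_name)

# Attach the LoRA adapter exactly as in the diff above.
model = PeftModel.from_pretrained(model, "DermaVLM/DermatoLLama-200k")
model.eval()
```

With a prologue along these lines, the `# Inference` section added by this commit should run as written.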