krishnateja95 committed
Commit 34c4389 · verified · 1 Parent(s): 673650d

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,188 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: apache-2.0
+ pipeline_tag: text-generation
+ tags:
+ - fp8
+ - quantized
+ - llm-compressor
+ - compressed-tensors
+ - red hat
+ base_model:
+ - Qwen/Qwen3-VL-32B-Instruct
+ ---
+
+ # Qwen3-VL-32B-Instruct-FP8-block
+
+ ## Model Overview
+ - **Model Architecture:** Qwen3VLForConditionalGeneration
+ - **Input:** Text, Image
+ - **Output:** Text
+ - **Model Optimizations:**
+   - **Weight quantization:** FP8
+   - **Activation quantization:** FP8
+ - **Release Date:**
+ - **Version:** 1.0
+ - **Model Developers:** Red Hat
+
+ Quantized version of [Qwen/Qwen3-VL-32B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-32B-Instruct).
+
+ ### Model Optimizations
+
+ This model was obtained by quantizing the weights and activations of [Qwen/Qwen3-VL-32B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-32B-Instruct) to the FP8 data type.
+ This optimization reduces the number of bits per parameter from 16 to 8, reducing the disk size and GPU memory requirements by approximately 50%.
+ Only the weights and activations of the linear operators within the transformer blocks of the language model are quantized; the vision encoder and `lm_head` remain in their original precision.
+
+ ## Deployment
+
+ ### Use with vLLM
+
+ 1. Initialize vLLM server:
+ ```
+ vllm serve RedHatAI/Qwen3-VL-32B-Instruct-FP8-block --tensor_parallel_size 2
+ ```
+
+ 2. Send requests to the server:
+
+ ```python
+ from openai import OpenAI
+
+ # Modify OpenAI's API key and API base to use vLLM's API server.
+ openai_api_key = "EMPTY"
+ openai_api_base = "http://<your-server-host>:8000/v1"
+
+ client = OpenAI(
+     api_key=openai_api_key,
+     base_url=openai_api_base,
+ )
+
+ model = "RedHatAI/Qwen3-VL-32B-Instruct-FP8-block"
+
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image_url",
+                 "image_url": {"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"},
+             },
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+
+ outputs = client.chat.completions.create(
+     model=model,
+     messages=messages,
+ )
+
+ generated_text = outputs.choices[0].message.content
+ print(generated_text)
+ ```
+
+ ## Creation
+
+ This model was quantized using the [llm-compressor](https://github.com/vllm-project/llm-compressor) library, as shown below.
+
+ <details>
+ <summary>Creation details</summary>
+
+ ```python
+ from transformers import AutoProcessor, Qwen3VLForConditionalGeneration
+
+ from llmcompressor import oneshot
+ from llmcompressor.modifiers.quantization import QuantizationModifier
+
+ # NOTE: Requires a minimum of transformers 4.57.0
+
+ MODEL_ID = "Qwen/Qwen3-VL-32B-Instruct"
+
+ # Load model.
+ model = Qwen3VLForConditionalGeneration.from_pretrained(MODEL_ID, torch_dtype="auto")
+ processor = AutoProcessor.from_pretrained(MODEL_ID)
+
+ # Configure the quantization algorithm and scheme.
+ # In this case, we:
+ #   * quantize the weights to fp8 with static, block-wise quantization (128x128 blocks)
+ #   * quantize the activations to fp8 with dynamic, per-token-group quantization
+ # NOTE: only data-free quantization is currently supported for Qwen3-VL models
+ recipe = QuantizationModifier(
+     targets="Linear",
+     scheme="FP8_BLOCK",
+     ignore=[
+         "re:.*lm_head",
+         "re:visual.*",
+         "re:model.visual.*",
+         "re:.*mlp.gate$",
+     ],
+ )
+
+ # Apply quantization.
+ oneshot(model=model, recipe=recipe)
+
+ # Save to disk in compressed-tensors format.
+ SAVE_DIR = MODEL_ID.rstrip("/").split("/")[-1] + "-FP8-BLOCK"
+ model.save_pretrained(SAVE_DIR)
+ processor.save_pretrained(SAVE_DIR)
+ ```
+ </details>
+
+ ## Evaluation
+
+ The model was evaluated on the ChartQA and MMLU benchmarks, using [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness).
+ [vLLM](https://docs.vllm.ai/en/stable/) was used as the inference engine for all evaluations.
+
+ <details>
+ <summary>Evaluation details</summary>
+
+ **ChartQA**
+ ```
+ lm_eval \
+   --model vllm-vlm \
+   --model_args pretrained="RedHatAI/Qwen3-VL-32B-Instruct-FP8-block",dtype=auto,add_bos_token=False,max_model_len=262144,tensor_parallel_size=2,gpu_memory_utilization=0.9,enable_chunked_prefill=True,trust_remote_code=True,max_images=10 \
+   --tasks chartqa \
+   --apply_chat_template \
+   --batch_size auto
+ ```
+
+ **MMLU**
+ ```
+ lm_eval \
+   --model vllm-vlm \
+   --model_args pretrained="RedHatAI/Qwen3-VL-32B-Instruct-FP8-block",dtype=auto,add_bos_token=False,max_model_len=262144,tensor_parallel_size=2,gpu_memory_utilization=0.9,enable_chunked_prefill=True,trust_remote_code=True,max_images=10 \
+   --tasks mmlu \
+   --apply_chat_template \
+   --batch_size auto
+ ```
+ </details>
+
+ ## Accuracy Comparison
+
+ ### ChartQA Results
+
+ | Model | Accuracy | Recovery (%) |
+ |-------|----------|--------------|
+ | Qwen/Qwen3-VL-32B-Instruct | 61.52 | 100.00 |
+ | Qwen/Qwen3-VL-32B-Instruct-FP8 | 86.92 | 141.32 |
+ | RedHatAI/Qwen3-VL-32B-Instruct-FP8-block | 86.60 | 140.82 |
+ | RedHatAI/Qwen3-VL-32B-Instruct-FP8-dynamic | 86.68 | 140.95 |
+
+ ### MMLU Results
+
+ | Model | Accuracy | Recovery (%) |
+ |-------|----------|--------------|
+ | Qwen/Qwen3-VL-32B-Instruct | 78.03 | 100.00 |
+ | Qwen/Qwen3-VL-32B-Instruct-FP8 | 77.80 | 99.71 |
+ | RedHatAI/Qwen3-VL-32B-Instruct-FP8-block | 77.72 | 99.60 |
+ | RedHatAI/Qwen3-VL-32B-Instruct-FP8-dynamic | 77.89 | 99.82 |
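
The Recovery column is the quantized checkpoint's score expressed as a percentage of the unquantized baseline. For example, for the FP8-block checkpoint on MMLU:

```python
# Recovery(%) = quantized accuracy / baseline accuracy * 100
baseline, fp8_block = 78.03, 77.72
print(f"{fp8_block / baseline * 100:.2f}")  # 99.60, matching the MMLU table
```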
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
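
These ids should line up with what the tokenizer reports once the files are downloaded. A minimal sanity-check sketch (assumes `transformers` is installed):

```python
from transformers import AutoTokenizer

# Load the tokenizer and confirm the special-token ids listed above.
tok = AutoTokenizer.from_pretrained("RedHatAI/Qwen3-VL-32B-Instruct-FP8-block")
print(tok.convert_tokens_to_ids("<|image_pad|>"))  # expected: 151655
print(tok.convert_tokens_to_ids("<|im_end|>"))     # expected: 151645
```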
chat_template.jinja ADDED
@@ -0,0 +1,120 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0].role == 'system' %}
+         {%- if messages[0].content is string %}
+             {{- messages[0].content }}
+         {%- else %}
+             {%- for content in messages[0].content %}
+                 {%- if 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '\n\n' }}
+     {%- endif %}
+     {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0].role == 'system' %}
+         {{- '<|im_start|>system\n' }}
+         {%- if messages[0].content is string %}
+             {{- messages[0].content }}
+         {%- else %}
+             {%- for content in messages[0].content %}
+                 {%- if 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- set image_count = namespace(value=0) %}
+ {%- set video_count = namespace(value=0) %}
+ {%- for message in messages %}
+     {%- if message.role == "user" %}
+         {{- '<|im_start|>' + message.role + '\n' }}
+         {%- if message.content is string %}
+             {{- message.content }}
+         {%- else %}
+             {%- for content in message.content %}
+                 {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+                     {%- set image_count.value = image_count.value + 1 %}
+                     {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+                     <|vision_start|><|image_pad|><|vision_end|>
+                 {%- elif content.type == 'video' or 'video' in content %}
+                     {%- set video_count.value = video_count.value + 1 %}
+                     {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+                     <|vision_start|><|video_pad|><|vision_end|>
+                 {%- elif 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role + '\n' }}
+         {%- if message.content is string %}
+             {{- message.content }}
+         {%- else %}
+             {%- for content_item in message.content %}
+                 {%- if 'text' in content_item %}
+                     {{- content_item.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {%- if message.tool_calls %}
+             {%- for tool_call in message.tool_calls %}
+                 {%- if (loop.first and message.content) or (not loop.first) %}
+                     {{- '\n' }}
+                 {%- endif %}
+                 {%- if tool_call.function %}
+                     {%- set tool_call = tool_call.function %}
+                 {%- endif %}
+                 {{- '<tool_call>\n{"name": "' }}
+                 {{- tool_call.name }}
+                 {{- '", "arguments": ' }}
+                 {%- if tool_call.arguments is string %}
+                     {{- tool_call.arguments }}
+                 {%- else %}
+                     {{- tool_call.arguments | tojson }}
+                 {%- endif %}
+                 {{- '}\n</tool_call>' }}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {%- if message.content is string %}
+             {{- message.content }}
+         {%- else %}
+             {%- for content in message.content %}
+                 {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+                     {%- set image_count.value = image_count.value + 1 %}
+                     {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+                     <|vision_start|><|image_pad|><|vision_end|>
+                 {%- elif content.type == 'video' or 'video' in content %}
+                     {%- set video_count.value = video_count.value + 1 %}
+                     {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+                     <|vision_start|><|video_pad|><|vision_end|>
+                 {%- elif 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
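
This template is what `apply_chat_template` renders at inference time. A minimal sketch of how a one-image user turn is serialized (the `demo.jpeg` path is hypothetical; exact whitespace depends on the processor version):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("RedHatAI/Qwen3-VL-32B-Instruct-FP8-block")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "demo.jpeg"},  # hypothetical local file
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Render the prompt string without tokenizing; per the template above, the image
# entry becomes <|vision_start|><|image_pad|><|vision_end|> in the user turn.
text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(text)
```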
config.json ADDED
@@ -0,0 +1,232 @@
+ {
+   "architectures": [
+     "Qwen3VLForConditionalGeneration"
+   ],
+   "dtype": "bfloat16",
+   "image_token_id": 151655,
+   "model_type": "qwen3_vl",
+   "quantization_config": {
+     "config_groups": {
+       "group_0": {
+         "format": "float-quantized",
+         "input_activations": {
+           "actorder": null,
+           "block_structure": null,
+           "dynamic": true,
+           "group_size": 128,
+           "num_bits": 8,
+           "observer": null,
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "group",
+           "symmetric": true,
+           "type": "float",
+           "zp_dtype": null
+         },
+         "output_activations": null,
+         "targets": [
+           "Linear"
+         ],
+         "weights": {
+           "actorder": null,
+           "block_structure": [
+             128,
+             128
+           ],
+           "dynamic": false,
+           "group_size": null,
+           "num_bits": 8,
+           "observer": "minmax",
+           "observer_kwargs": {},
+           "scale_dtype": null,
+           "strategy": "block",
+           "symmetric": true,
+           "type": "float",
+           "zp_dtype": null
+         }
+       }
+     },
+     "format": "float-quantized",
+     "global_compression_ratio": null,
+     "ignore": [
+       "model.visual.blocks.0.attn.qkv",
+       "model.visual.blocks.0.attn.proj",
+       "model.visual.blocks.0.mlp.linear_fc1",
+       "model.visual.blocks.0.mlp.linear_fc2",
+       "model.visual.blocks.1.attn.qkv",
+       "model.visual.blocks.1.attn.proj",
+       "model.visual.blocks.1.mlp.linear_fc1",
+       "model.visual.blocks.1.mlp.linear_fc2",
+       "model.visual.blocks.2.attn.qkv",
+       "model.visual.blocks.2.attn.proj",
+       "model.visual.blocks.2.mlp.linear_fc1",
+       "model.visual.blocks.2.mlp.linear_fc2",
+       "model.visual.blocks.3.attn.qkv",
+       "model.visual.blocks.3.attn.proj",
+       "model.visual.blocks.3.mlp.linear_fc1",
+       "model.visual.blocks.3.mlp.linear_fc2",
+       "model.visual.blocks.4.attn.qkv",
+       "model.visual.blocks.4.attn.proj",
+       "model.visual.blocks.4.mlp.linear_fc1",
+       "model.visual.blocks.4.mlp.linear_fc2",
+       "model.visual.blocks.5.attn.qkv",
+       "model.visual.blocks.5.attn.proj",
+       "model.visual.blocks.5.mlp.linear_fc1",
+       "model.visual.blocks.5.mlp.linear_fc2",
+       "model.visual.blocks.6.attn.qkv",
+       "model.visual.blocks.6.attn.proj",
+       "model.visual.blocks.6.mlp.linear_fc1",
+       "model.visual.blocks.6.mlp.linear_fc2",
+       "model.visual.blocks.7.attn.qkv",
+       "model.visual.blocks.7.attn.proj",
+       "model.visual.blocks.7.mlp.linear_fc1",
+       "model.visual.blocks.7.mlp.linear_fc2",
+       "model.visual.blocks.8.attn.qkv",
+       "model.visual.blocks.8.attn.proj",
+       "model.visual.blocks.8.mlp.linear_fc1",
+       "model.visual.blocks.8.mlp.linear_fc2",
+       "model.visual.blocks.9.attn.qkv",
+       "model.visual.blocks.9.attn.proj",
+       "model.visual.blocks.9.mlp.linear_fc1",
+       "model.visual.blocks.9.mlp.linear_fc2",
+       "model.visual.blocks.10.attn.qkv",
+       "model.visual.blocks.10.attn.proj",
+       "model.visual.blocks.10.mlp.linear_fc1",
+       "model.visual.blocks.10.mlp.linear_fc2",
+       "model.visual.blocks.11.attn.qkv",
+       "model.visual.blocks.11.attn.proj",
+       "model.visual.blocks.11.mlp.linear_fc1",
+       "model.visual.blocks.11.mlp.linear_fc2",
+       "model.visual.blocks.12.attn.qkv",
+       "model.visual.blocks.12.attn.proj",
+       "model.visual.blocks.12.mlp.linear_fc1",
+       "model.visual.blocks.12.mlp.linear_fc2",
+       "model.visual.blocks.13.attn.qkv",
+       "model.visual.blocks.13.attn.proj",
+       "model.visual.blocks.13.mlp.linear_fc1",
+       "model.visual.blocks.13.mlp.linear_fc2",
+       "model.visual.blocks.14.attn.qkv",
+       "model.visual.blocks.14.attn.proj",
+       "model.visual.blocks.14.mlp.linear_fc1",
+       "model.visual.blocks.14.mlp.linear_fc2",
+       "model.visual.blocks.15.attn.qkv",
+       "model.visual.blocks.15.attn.proj",
+       "model.visual.blocks.15.mlp.linear_fc1",
+       "model.visual.blocks.15.mlp.linear_fc2",
+       "model.visual.blocks.16.attn.qkv",
+       "model.visual.blocks.16.attn.proj",
+       "model.visual.blocks.16.mlp.linear_fc1",
+       "model.visual.blocks.16.mlp.linear_fc2",
+       "model.visual.blocks.17.attn.qkv",
+       "model.visual.blocks.17.attn.proj",
+       "model.visual.blocks.17.mlp.linear_fc1",
+       "model.visual.blocks.17.mlp.linear_fc2",
+       "model.visual.blocks.18.attn.qkv",
+       "model.visual.blocks.18.attn.proj",
+       "model.visual.blocks.18.mlp.linear_fc1",
+       "model.visual.blocks.18.mlp.linear_fc2",
+       "model.visual.blocks.19.attn.qkv",
+       "model.visual.blocks.19.attn.proj",
+       "model.visual.blocks.19.mlp.linear_fc1",
+       "model.visual.blocks.19.mlp.linear_fc2",
+       "model.visual.blocks.20.attn.qkv",
+       "model.visual.blocks.20.attn.proj",
+       "model.visual.blocks.20.mlp.linear_fc1",
+       "model.visual.blocks.20.mlp.linear_fc2",
+       "model.visual.blocks.21.attn.qkv",
+       "model.visual.blocks.21.attn.proj",
+       "model.visual.blocks.21.mlp.linear_fc1",
+       "model.visual.blocks.21.mlp.linear_fc2",
+       "model.visual.blocks.22.attn.qkv",
+       "model.visual.blocks.22.attn.proj",
+       "model.visual.blocks.22.mlp.linear_fc1",
+       "model.visual.blocks.22.mlp.linear_fc2",
+       "model.visual.blocks.23.attn.qkv",
+       "model.visual.blocks.23.attn.proj",
+       "model.visual.blocks.23.mlp.linear_fc1",
+       "model.visual.blocks.23.mlp.linear_fc2",
+       "model.visual.blocks.24.attn.qkv",
+       "model.visual.blocks.24.attn.proj",
+       "model.visual.blocks.24.mlp.linear_fc1",
+       "model.visual.blocks.24.mlp.linear_fc2",
+       "model.visual.blocks.25.attn.qkv",
+       "model.visual.blocks.25.attn.proj",
+       "model.visual.blocks.25.mlp.linear_fc1",
+       "model.visual.blocks.25.mlp.linear_fc2",
+       "model.visual.blocks.26.attn.qkv",
+       "model.visual.blocks.26.attn.proj",
+       "model.visual.blocks.26.mlp.linear_fc1",
+       "model.visual.blocks.26.mlp.linear_fc2",
+       "model.visual.merger.linear_fc1",
+       "model.visual.merger.linear_fc2",
+       "model.visual.deepstack_merger_list.0.linear_fc1",
+       "model.visual.deepstack_merger_list.0.linear_fc2",
+       "model.visual.deepstack_merger_list.1.linear_fc1",
+       "model.visual.deepstack_merger_list.1.linear_fc2",
+       "model.visual.deepstack_merger_list.2.linear_fc1",
+       "model.visual.deepstack_merger_list.2.linear_fc2",
+       "lm_head"
+     ],
+     "kv_cache_scheme": null,
+     "quant_method": "compressed-tensors",
+     "quantization_status": "compressed",
+     "sparsity_config": {},
+     "transform_config": {},
+     "version": "0.12.3.a20251114"
+   },
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "dtype": "bfloat16",
+     "eos_token_id": 151645,
+     "head_dim": 128,
+     "hidden_act": "silu",
+     "hidden_size": 5120,
+     "initializer_range": 0.02,
+     "intermediate_size": 25600,
+     "max_position_embeddings": 262144,
+     "model_type": "qwen3_vl_text",
+     "num_attention_heads": 64,
+     "num_hidden_layers": 64,
+     "num_key_value_heads": 8,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": {
+       "mrope_interleaved": true,
+       "mrope_section": [
+         24,
+         20,
+         20
+       ],
+       "rope_type": "default"
+     },
+     "rope_theta": 5000000,
+     "use_cache": true,
+     "vocab_size": 151936
+   },
+   "tie_word_embeddings": false,
+   "transformers_version": "4.57.3",
+   "video_token_id": 151656,
+   "vision_config": {
+     "deepstack_visual_indexes": [
+       8,
+       16,
+       24
+     ],
+     "depth": 27,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "in_channels": 3,
+     "initializer_range": 0.02,
+     "intermediate_size": 4304,
+     "model_type": "qwen3_vl",
+     "num_heads": 16,
+     "num_position_embeddings": 2304,
+     "out_hidden_size": 5120,
+     "patch_size": 16,
+     "spatial_merge_size": 2,
+     "temporal_patch_size": 2
+   },
+   "vision_end_token_id": 151653,
+   "vision_start_token_id": 151652
+ }
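
The `quantization_config` above records the FP8_BLOCK scheme: static 128x128 block scales for the weights (`"strategy": "block"`) and dynamic per-token-group scales for the input activations (`"strategy": "group"`, `"dynamic": true`). A small sketch for inspecting it without downloading any weights (assumes `transformers` is installed):

```python
from transformers import AutoConfig

# Fetch only config.json; no weight download required.
config = AutoConfig.from_pretrained("RedHatAI/Qwen3-VL-32B-Instruct-FP8-block")
group = config.quantization_config["config_groups"]["group_0"]

print(group["weights"]["strategy"], group["weights"]["block_structure"])  # block [128, 128]
print(group["input_activations"]["strategy"], group["input_activations"]["dynamic"])  # group True
```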
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.57.3"
+ }
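
These sampling defaults can be read back programmatically; a small sketch:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("RedHatAI/Qwen3-VL-32B-Instruct-FP8-block")
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)  # 0.7 0.8 20
```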
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f553256404487f4e614043b64f6239a97e74160085f3ab315e474d3a8cb98e0a
+ size 4922554816
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a6740f119745a257843eb3aaf08290ad76d794dd6bbaaac1b97ee50768496e3
+ size 4876706488
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43e175a905054a2f71a2858f2215512d95e79be89e26d3708565c9506211466f
+ size 4876706600
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3b64ca0de626ccd7ba41d8a1db5c2cabe6829ce9eacb894e9663f25f13bcc59
+ size 4876706600
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00f516469475c11018bd3d247284a16c4dd46d7f61e677172554897002d97679
+ size 4876706600
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a01eb8cd66765ead306ed9ea703a52990031b03e0d0b89e7a5d9be0e6d5e255
+ size 4876706600
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bcac0e9c6aec96b24d650c890179ba0d2cfe7cb62a92afa41bdf602edc07131
+ size 4651243520
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cf1aab5d6a34929aaf97ac0d6028a8af456567ce32f3914783356875457af7a
+ size 1555824768
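
Summing the eight shard sizes above gives the on-disk footprint of the FP8 checkpoint, roughly half of a BF16 copy of the same model (the vision tower remains unquantized):

```python
# Shard sizes in bytes, copied from the LFS pointers above.
shards = [
    4922554816, 4876706488, 4876706600, 4876706600,
    4876706600, 4876706600, 4651243520, 1555824768,
]
print(f"{sum(shards) / 2**30:.1f} GiB")  # ~33.1 GiB
```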
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "disable_grouping": null,
+   "do_center_crop": null,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_pad": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Qwen2VLImageProcessorFast",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "max_pixels": null,
+   "merge_size": 2,
+   "min_pixels": null,
+   "pad_size": null,
+   "patch_size": 16,
+   "processor_class": "Qwen3VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_tensors": null,
+   "size": {
+     "longest_edge": 16777216,
+     "shortest_edge": 65536
+   },
+   "temporal_patch_size": 2
+ }
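
With `patch_size` 16 and `merge_size` 2, each image token corresponds to a 32x32 pixel tile after resizing, and `shortest_edge`/`longest_edge` bound the total pixel area. A back-of-the-envelope token count, ignoring the processor's exact resize rounding:

```python
# One image token covers a (patch_size * merge_size)^2 = 32x32 pixel tile.
patch_size, merge_size = 16, 2
tile = patch_size * merge_size
h, w = 1024, 1024  # hypothetical input resolution
print((h // tile) * (w // tile))  # 1024 image tokens
```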
recipe.yaml ADDED
@@ -0,0 +1,6 @@
+ default_stage:
+   default_modifiers:
+     QuantizationModifier:
+       targets: [Linear]
+       ignore: ['re:.*lm_head', 're:visual.*', 're:model.visual.*', 're:.*mlp.gate$']
+       scheme: FP8_BLOCK
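
This YAML is the serialized form of the `QuantizationModifier` used in the creation script; llm-compressor's `oneshot` can also consume the recipe as a file path instead of a Python object. A sketch, assuming `recipe.yaml` is in the working directory:

```python
from transformers import Qwen3VLForConditionalGeneration
from llmcompressor import oneshot

model = Qwen3VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen3-VL-32B-Instruct", torch_dtype="auto"
)

# Equivalent to constructing QuantizationModifier(scheme="FP8_BLOCK", ...) in Python.
oneshot(model=model, recipe="recipe.yaml")
```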
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151666": {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151667": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151668": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 262144,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "Qwen3VLProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
video_preprocessor_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "do_center_crop": null,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "do_sample_frames": true,
+   "fps": 2,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "max_frames": 768,
+   "merge_size": 2,
+   "min_frames": 4,
+   "num_frames": null,
+   "pad_size": null,
+   "patch_size": 16,
+   "processor_class": "Qwen3VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_metadata": false,
+   "size": {
+     "longest_edge": 25165824,
+     "shortest_edge": 4096
+   },
+   "temporal_patch_size": 2,
+   "video_metadata": null,
+   "video_processor_type": "Qwen3VLVideoProcessor"
+ }
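
With `fps: 2` and the `min_frames`/`max_frames` clamps above, the frame count for a sampled clip follows, roughly, the rule below (a sketch; the processor also rounds to the temporal patch size):

```python
# Frames are sampled at 2 fps, then clamped to [min_frames, max_frames].
fps, min_frames, max_frames = 2, 4, 768
duration_s = 600  # hypothetical 10-minute clip
n_frames = min(max(round(duration_s * fps), min_frames), max_frames)
print(n_frames)  # 768 -> a 10-minute clip hits the max_frames cap
```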
vocab.json ADDED
The diff for this file is too large to render. See raw diff