cpatonn committed on
Commit 569c5a5 · verified · 0 Parent(s)

Super-squash branch 'main' using huggingface_hub
.gitattributes ADDED
@@ -0,0 +1,37 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,410 @@
---
library_name: transformers
license: apache-2.0
license_link: https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct/blob/main/LICENSE
pipeline_tag: text-generation
base_model:
- Qwen/Qwen3-Next-80B-A3B-Instruct
---

# Qwen3-Next-80B-A3B-Instruct AWQ - INT4

## Model Details

### Quantization Details

- **Quantization Method:** cyankiwi AWQ v1.0
- **Bits:** 4
- **Group Size:** 32
- **Calibration Dataset:** [nvidia/Llama-Nemotron-Post-Training-Dataset](https://huggingface.co/datasets/nvidia/Llama-Nemotron-Post-Training-Dataset)
- **Quantization Tool:** [llm-compressor](https://github.com/vllm-project/llm-compressor)

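The quantization recipe itself ships in this repository as `recipe.yaml` (see below). The following is a minimal sketch of how such a run could be reproduced with llm-compressor's one-shot flow; the sequence length and calibration sample count are illustrative assumptions, not the exact values used for this release.

```python
# Sketch: reproducing this AWQ int4 (group size 32) quantization with llm-compressor,
# using the recipe.yaml included in this repository.
from llmcompressor import oneshot

oneshot(
    model="Qwen/Qwen3-Next-80B-A3B-Instruct",
    dataset="nvidia/Llama-Nemotron-Post-Training-Dataset",  # calibration dataset listed above
    recipe="recipe.yaml",             # the AWQModifier recipe shipped in this repo
    max_seq_length=2048,              # assumed calibration sequence length
    num_calibration_samples=512,      # assumed number of calibration samples
    output_dir="Qwen3-Next-80B-A3B-Instruct-AWQ-4bit",
)
```
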
### Memory Usage

| **Type** | **Qwen3-Next-80B-A3B-Instruct** | **Qwen3-Next-80B-A3B-Instruct-AWQ-4bit** |
|:---------------:|:----------------:|:----------------:|
| **Memory Size** | 151.5 GB | 45.9 GB |

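As a rough sanity check on the 45.9 GB figure: 80B weights at 4 bits come to about 40 GB, and the group-size-32 scales plus the layers left unquantized (see the `ignore` list in `config.json` below) account for most of the remainder.
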
### Evaluations

| **Benchmarks** | **Qwen3-Next-80B-A3B-Instruct** | **Qwen3-Next-80B-A3B-Instruct-AWQ-4bit** |
|:---------------:|:----------------:|:----------------:|
| **Perplexity** | 1.48256 | 1.48602 |
| **GPQA Diamond** | 74.2 | 73.7 |
| **AIME25** | 70.0 | 70.0 |

- **Evaluation Context Length:** 16384

## Inference

### Prerequisite

```bash
VLLM_USE_PRECOMPILED=1 pip install git+https://github.com/vllm-project/vllm.git@main
```

### Basic Usage

```bash
VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 vllm serve cpatonn/Qwen3-Next-80B-A3B-Instruct-AWQ-4bit \
    --port 8000 \
    --tensor-parallel-size 4 \
    --max-model-len 262144 \
    --speculative-config '{"method":"qwen3_next_mtp","num_speculative_tokens":2}'
```

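The server exposes an OpenAI-compatible API at `http://localhost:8000/v1`. A minimal client sketch follows; the `EMPTY` API key is a placeholder assumption for a local, unauthenticated server.

```python
# Query the locally served quantized model through the OpenAI-compatible API.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.chat.completions.create(
    model="cpatonn/Qwen3-Next-80B-A3B-Instruct-AWQ-4bit",  # must match the served model path
    messages=[{"role": "user", "content": "Give me a short introduction to large language models."}],
)
print(response.choices[0].message.content)
```
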
## Additional Information

### Changelog

- **v1.0.0** - Add MTP layers.
- **v0.1.0** - Initial quantized release.

### Authors

- **Name:** Ton Cao
- **Contacts:** ton@cyan.kiwi

# Qwen3-Next-80B-A3B-Instruct
<a href="https://chat.qwen.ai/" target="_blank" style="margin: 2px;">
    <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/>
</a>

Over the past few months, we have observed increasingly clear trends toward scaling both total parameters and context lengths in the pursuit of more powerful and agentic artificial intelligence (AI).
We are excited to share our latest advancements in addressing these demands, centered on improving scaling efficiency through innovative model architecture.
We call this next generation of foundation models **Qwen3-Next**.

## Highlights

**Qwen3-Next-80B-A3B** is the first installment in the Qwen3-Next series and features the following key enhancements:
- **Hybrid Attention**: Replaces standard attention with a combination of **Gated DeltaNet** and **Gated Attention**, enabling efficient context modeling for ultra-long context lengths.
- **High-Sparsity Mixture-of-Experts (MoE)**: Achieves an extremely low activation ratio in MoE layers, drastically reducing FLOPs per token while preserving model capacity.
- **Stability Optimizations**: Includes techniques such as **zero-centered and weight-decayed layernorm**, and other stabilizing enhancements for robust pre-training and post-training.
- **Multi-Token Prediction (MTP)**: Boosts pretraining model performance and accelerates inference.

We are seeing strong performance in terms of both parameter efficiency and inference speed for Qwen3-Next-80B-A3B:
- Qwen3-Next-80B-A3B-Base outperforms Qwen3-32B-Base on downstream tasks with 10% of the total training cost and roughly 10x the inference throughput for contexts longer than 32K tokens.
- Qwen3-Next-80B-A3B-Instruct performs on par with Qwen3-235B-A22B-Instruct-2507 on certain benchmarks, while demonstrating significant advantages in handling ultra-long-context tasks of up to 256K tokens.

![Qwen3-Next-80B-A3B-Instruct Benchmark Comparison](https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3-Next/Qwen3-Next-80B-A3B-Instruct.001.jpeg)

For more details, please refer to our blog post [Qwen3-Next](https://qwenlm.github.io/blog/qwen3_next/).

## Model Overview

> [!Note]
> **Qwen3-Next-80B-A3B-Instruct** supports only instruct (non-thinking) mode and does not generate ``<think></think>`` blocks in its output.

**Qwen3-Next-80B-A3B-Instruct** has the following features:
- Type: Causal Language Models
- Training Stage: Pretraining (15T tokens) & Post-training
- Number of Parameters: 80B in total and 3B activated
- Number of Parameters (Non-Embedding): 79B
- Number of Layers: 48
- Hidden Dimension: 2048
- Hybrid Layout: 12 \* (3 \* (Gated DeltaNet -> MoE) -> (Gated Attention -> MoE))
- Gated Attention:
  - Number of Attention Heads: 16 for Q and 2 for KV
  - Head Dimension: 256
  - Rotary Position Embedding Dimension: 64
- Gated DeltaNet:
  - Number of Linear Attention Heads: 32 for V and 16 for QK
  - Head Dimension: 128
- Mixture of Experts:
  - Number of Experts: 512
  - Number of Activated Experts: 10
  - Number of Shared Experts: 1
  - Expert Intermediate Dimension: 512
- Context Length: 262,144 natively and extensible up to 1,010,000 tokens

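As a consistency check on the hybrid layout: 12 blocks × (3 Gated DeltaNet layers + 1 Gated Attention layer) = 48 layers, i.e. one full-attention layer every four layers. This matches `num_hidden_layers: 48`, `full_attention_interval: 4`, and the repeating `layer_types` pattern in the `config.json` below.
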
<img src="https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3-Next/model_architecture.png" height="384px" title="Qwen3-Next Model Architecture" />

## Performance

| | Qwen3-30B-A3B-Instruct-2507 | Qwen3-32B Non-Thinking | Qwen3-235B-A22B-Instruct-2507 | Qwen3-Next-80B-A3B-Instruct |
|--- | --- | --- | --- | --- |
| **Knowledge** | | | | |
| MMLU-Pro | 78.4 | 71.9 | **83.0** | 80.6 |
| MMLU-Redux | 89.3 | 85.7 | **93.1** | 90.9 |
| GPQA | 70.4 | 54.6 | **77.5** | 72.9 |
| SuperGPQA | 53.4 | 43.2 | **62.6** | 58.8 |
| **Reasoning** | | | | |
| AIME25 | 61.3 | 20.2 | **70.3** | 69.5 |
| HMMT25 | 43.0 | 9.8 | **55.4** | 54.1 |
| LiveBench 20241125 | 69.0 | 59.8 | 75.4 | **75.8** |
| **Coding** | | | | |
| LiveCodeBench v6 (25.02-25.05) | 43.2 | 29.1 | 51.8 | **56.6** |
| MultiPL-E | 83.8 | 76.9 | **87.9** | 87.8 |
| Aider-Polyglot | 35.6 | 40.0 | **57.3** | 49.8 |
| **Alignment** | | | | |
| IFEval | 84.7 | 83.2 | **88.7** | 87.6 |
| Arena-Hard v2* | 69.0 | 34.1 | 79.2 | **82.7** |
| Creative Writing v3 | 86.0 | 78.3 | **87.5** | 85.3 |
| WritingBench | 85.5 | 75.4 | 85.2 | **87.3** |
| **Agent** | | | | |
| BFCL-v3 | 65.1 | 63.0 | **70.9** | 70.3 |
| TAU1-Retail | 59.1 | 40.1 | **71.3** | 60.9 |
| TAU1-Airline | 40.0 | 17.0 | **44.0** | 44.0 |
| TAU2-Retail | 57.0 | 48.8 | **74.6** | 57.3 |
| TAU2-Airline | 38.0 | 24.0 | **50.0** | 45.5 |
| TAU2-Telecom | 12.3 | 24.6 | **32.5** | 13.2 |
| **Multilingualism** | | | | |
| MultiIF | 67.9 | 70.7 | **77.5** | 75.8 |
| MMLU-ProX | 72.0 | 69.3 | **79.4** | 76.7 |
| INCLUDE | 71.9 | 70.9 | **79.5** | 78.9 |
| PolyMATH | 43.1 | 22.5 | **50.2** | 45.9 |

*: For reproducibility, we report the win rates evaluated by GPT-4.1.

## Quickstart

The code for Qwen3-Next has been merged into the main branch of Hugging Face `transformers`.

```shell
pip install git+https://github.com/huggingface/transformers.git@main
```

With earlier versions, you will encounter the following error:
```
KeyError: 'qwen3_next'
```

The following code snippet illustrates how to use the model to generate content from a given input.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen3-Next-80B-A3B-Instruct"

# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    dtype="auto",
    device_map="auto",
)

# prepare the model input
prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "user", "content": prompt},
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# conduct text completion
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=16384,
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

content = tokenizer.decode(output_ids, skip_special_tokens=True)

print("content:", content)
```

> [!Note]
> Multi-Token Prediction (MTP) is not generally available in Hugging Face Transformers.

> [!Note]
> The efficiency or throughput improvement depends heavily on the implementation.
> It is recommended to adopt a dedicated inference framework, e.g., SGLang or vLLM, for inference tasks.

> [!Tip]
> Depending on the inference settings, you may observe better efficiency with [`flash-linear-attention`](https://github.com/fla-org/flash-linear-attention#installation) and [`causal-conv1d`](https://github.com/Dao-AILab/causal-conv1d).
> See the above links for detailed instructions and requirements.

## Deployment

For deployment, you can use the latest `sglang` or `vllm` to create an OpenAI-compatible API endpoint.

### SGLang

[SGLang](https://github.com/sgl-project/sglang) is a fast serving framework for large language models and vision language models.
SGLang can be used to launch a server with an OpenAI-compatible API service.

SGLang supports Qwen3-Next on its `main` branch, which can be installed from source:
```shell
pip install 'sglang[all] @ git+https://github.com/sgl-project/sglang.git@main#subdirectory=python'
```

The following command can be used to create an API endpoint at `http://localhost:30000/v1` with a maximum context length of 256K tokens, using tensor parallelism on 4 GPUs:
```shell
SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1 python -m sglang.launch_server --model-path Qwen/Qwen3-Next-80B-A3B-Instruct --port 30000 --tp-size 4 --context-length 262144 --mem-fraction-static 0.8
```

The following command is recommended for MTP, with the remaining settings the same as above:
```shell
SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1 python -m sglang.launch_server --model-path Qwen/Qwen3-Next-80B-A3B-Instruct --port 30000 --tp-size 4 --context-length 262144 --mem-fraction-static 0.8 --speculative-algo NEXTN --speculative-num-steps 3 --speculative-eagle-topk 1 --speculative-num-draft-tokens 4
```

> [!Note]
> The environment variable `SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1` is required at the moment.

> [!Note]
> The default context length is 256K. Consider reducing the context length to a smaller value, e.g., `32768`, if the server fails to start.

### vLLM

[vLLM](https://github.com/vllm-project/vllm) is a high-throughput and memory-efficient inference and serving engine for LLMs.
vLLM can be used to launch a server with an OpenAI-compatible API service.

vLLM supports Qwen3-Next on its `main` branch, which can be installed from source:
```shell
pip install git+https://github.com/vllm-project/vllm.git
```

The following command can be used to create an API endpoint at `http://localhost:8000/v1` with a maximum context length of 256K tokens, using tensor parallelism on 4 GPUs:
```shell
VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 vllm serve Qwen/Qwen3-Next-80B-A3B-Instruct --port 8000 --tensor-parallel-size 4 --max-model-len 262144
```

The following command is recommended for MTP, with the remaining settings the same as above:
```shell
VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 vllm serve Qwen/Qwen3-Next-80B-A3B-Instruct --port 8000 --tensor-parallel-size 4 --max-model-len 262144 --speculative-config '{"method":"qwen3_next_mtp","num_speculative_tokens":2}'
```

> [!Note]
> The environment variable `VLLM_ALLOW_LONG_MAX_MODEL_LEN=1` is required at the moment.

> [!Note]
> The default context length is 256K. Consider reducing the context length to a smaller value, e.g., `32768`, if the server fails to start.

## Agentic Use

Qwen3 excels in tool-calling capabilities. We recommend using [Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) to make the best use of the agentic abilities of Qwen3. Qwen-Agent encapsulates tool-calling templates and tool-calling parsers internally, greatly reducing coding complexity.

To define the available tools, you can use an MCP configuration file, use the integrated tools of Qwen-Agent, or integrate other tools by yourself.
```python
from qwen_agent.agents import Assistant

# Define LLM
llm_cfg = {
    'model': 'Qwen3-Next-80B-A3B-Instruct',

    # Use a custom endpoint compatible with OpenAI API:
    'model_server': 'http://localhost:8000/v1',  # api_base
    'api_key': 'EMPTY',
}

# Define Tools
tools = [
    {'mcpServers': {  # You can specify the MCP configuration file
        'time': {
            'command': 'uvx',
            'args': ['mcp-server-time', '--local-timezone=Asia/Shanghai']
        },
        "fetch": {
            "command": "uvx",
            "args": ["mcp-server-fetch"]
        }
    }
    },
    'code_interpreter',  # Built-in tools
]

# Define Agent
bot = Assistant(llm=llm_cfg, function_list=tools)

# Streaming generation
messages = [{'role': 'user', 'content': 'https://qwenlm.github.io/blog/ Introduce the latest developments of Qwen'}]
for responses in bot.run(messages=messages):
    pass
print(responses)
```

## Processing Ultra-Long Texts

Qwen3-Next natively supports context lengths of up to 262,144 tokens.
For conversations where the total length (including both input and output) significantly exceeds this limit, we recommend using RoPE scaling techniques to handle long texts effectively.
We have validated the model's performance on context lengths of up to 1 million tokens using the [YaRN](https://arxiv.org/abs/2309.00071) method.

YaRN is currently supported by several inference frameworks, e.g., `transformers`, `vllm` and `sglang`.
In general, there are two approaches to enabling YaRN for supported frameworks:

- Modifying the model files:
  In the `config.json` file, add the `rope_scaling` fields:
  ```json
  {
      ...,
      "rope_scaling": {
          "rope_type": "yarn",
          "factor": 4.0,
          "original_max_position_embeddings": 262144
      }
  }
  ```

- Passing command line arguments:

  For `vllm`, you can use
  ```shell
  VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 vllm serve ... --rope-scaling '{"rope_type":"yarn","factor":4.0,"original_max_position_embeddings":262144}' --max-model-len 1010000
  ```

  For `sglang`, you can use
  ```shell
  SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1 python -m sglang.launch_server ... --json-model-override-args '{"rope_scaling":{"rope_type":"yarn","factor":4.0,"original_max_position_embeddings":262144}}' --context-length 1010000
  ```

> [!NOTE]
> All the notable open-source frameworks implement static YaRN, which means the scaling factor remains constant regardless of input length, **potentially impacting performance on shorter texts.**
> We advise adding the `rope_scaling` configuration only when processing long contexts is required.
> It is also recommended to modify the `factor` as needed: it should cover the target length, e.g., 1,010,000 / 262,144 ≈ 3.85, hence `factor: 4.0` above. If the typical context length for your application is 524,288 tokens, it would be better to set `factor` as 2.0.

### Long-Context Performance

We test the model on a 1M-token version of the [RULER](https://arxiv.org/abs/2404.06654) benchmark.

| Model Name | Acc avg | 4k | 8k | 16k | 32k | 64k | 96k | 128k | 192k | 256k | 384k | 512k | 640k | 768k | 896k | 1000k |
|---------------------------------------------|---------|------|------|------|------|------|------|------|------|------|------|------|------|------|------|-------|
| Qwen3-30B-A3B-Instruct-2507 | 86.8 | 98.0 | 96.7 | 96.9 | 97.2 | 93.4 | 91.0 | 89.1 | 89.8 | 82.5 | 83.6 | 78.4 | 79.7 | 77.6 | 75.7 | 72.8 |
| Qwen3-235B-A22B-Instruct-2507 | 92.5 | 98.5 | 97.6 | 96.9 | 97.3 | 95.8 | 94.9 | 93.9 | 94.5 | 91.0 | 92.2 | 90.9 | 87.8 | 84.8 | 86.5 | 84.5 |
| Qwen3-Next-80B-A3B-Instruct | 91.8 | 98.5 | 99.0 | 98.0 | 98.7 | 97.6 | 95.0 | 96.0 | 94.0 | 93.5 | 91.7 | 86.9 | 85.5 | 81.7 | 80.3 | 80.3 |

* Qwen3-Next is evaluated with YaRN enabled; the Qwen3-2507 models are evaluated with Dual Chunk Attention enabled.
* Since the evaluation is time-consuming, we use 260 samples for each length (13 sub-tasks, 20 samples each).

## Best Practices

To achieve optimal performance, we recommend the following settings:

1. **Sampling Parameters**:
   - We suggest using `Temperature=0.7`, `TopP=0.8`, `TopK=20`, and `MinP=0` (see the sketch after this list).
   - For supported frameworks, you can adjust the `presence_penalty` parameter between 0 and 2 to reduce endless repetitions. However, using a higher value may occasionally result in language mixing and a slight decrease in model performance.

2. **Adequate Output Length**: We recommend using an output length of 16,384 tokens for most queries, which is adequate for instruct models.

3. **Standardize Output Format**: We recommend using prompts to standardize model outputs when benchmarking.
   - **Math Problems**: Include "Please reason step by step, and put your final answer within \boxed{}." in the prompt.
   - **Multiple-Choice Questions**: Add the following JSON structure to the prompt to standardize responses: "Please show your choice in the `answer` field with only the choice letter, e.g., `"answer": "C"`."

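A minimal sketch of these settings with vLLM's offline `SamplingParams`; the model path, the 4-GPU tensor-parallel setup, and the `presence_penalty` value are illustrative assumptions.

```python
# Sketch: the recommended sampling settings via vLLM's offline API.
from vllm import LLM, SamplingParams

sampling = SamplingParams(
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    min_p=0,
    presence_penalty=0.5,  # assumed value in the suggested 0-2 range to curb repetition
    max_tokens=16384,      # adequate output length for most queries
)

llm = LLM(model="cpatonn/Qwen3-Next-80B-A3B-Instruct-AWQ-4bit", tensor_parallel_size=4)
outputs = llm.chat(
    [{"role": "user", "content": "Please reason step by step, and put your final answer within \\boxed{}. Compute 12 * 34."}],
    sampling,
)
print(outputs[0].outputs[0].text)
```
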
### Citation

If you find our work helpful, feel free to cite us.

```
@misc{qwen3technicalreport,
      title={Qwen3 Technical Report},
      author={Qwen Team},
      year={2025},
      eprint={2505.09388},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2505.09388},
}

@article{qwen2.5-1m,
      title={Qwen2.5-1M Technical Report},
      author={An Yang and Bowen Yu and Chengyuan Li and Dayiheng Liu and Fei Huang and Haoyan Huang and Jiandong Jiang and Jianhong Tu and Jianwei Zhang and Jingren Zhou and Junyang Lin and Kai Dang and Kexin Yang and Le Yu and Mei Li and Minmin Sun and Qin Zhu and Rui Men and Tao He and Weijia Xu and Wenbiao Yin and Wenyuan Yu and Xiafei Qiu and Xingzhang Ren and Xinlong Yang and Yong Li and Zhiying Xu and Zipeng Zhang},
      journal={arXiv preprint arXiv:2501.15383},
      year={2025}
}
```
added_tokens.json ADDED
@@ -0,0 +1,28 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
chat_template.jinja ADDED
@@ -0,0 +1,61 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0].role == 'system' %}
        {{- messages[0].content + '\n\n' }}
    {%- endif %}
    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0].role == 'system' %}
        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if message.content is string %}
        {%- set content = message.content %}
    {%- else %}
        {%- set content = '' %}
    {%- endif %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role + '\n' + content }}
        {%- if message.tool_calls %}
            {%- for tool_call in message.tool_calls %}
                {%- if (loop.first and content) or (not loop.first) %}
                    {{- '\n' }}
                {%- endif %}
                {%- if tool_call.function %}
                    {%- set tool_call = tool_call.function %}
                {%- endif %}
                {{- '<tool_call>\n{"name": "' }}
                {{- tool_call.name }}
                {{- '", "arguments": ' }}
                {%- if tool_call.arguments is string %}
                    {{- tool_call.arguments }}
                {%- else %}
                    {{- tool_call.arguments | tojson }}
                {%- endif %}
                {{- '}\n</tool_call>' }}
            {%- endfor %}
        {%- endif %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
config.json ADDED
@@ -0,0 +1,535 @@
{
  "architectures": [
    "Qwen3NextForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "decoder_sparse_step": 1,
  "dtype": "bfloat16",
  "eos_token_id": 151645,
  "full_attention_interval": 4,
  "head_dim": 256,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_types": [
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention",
    "linear_attention", "linear_attention", "linear_attention", "full_attention"
  ],
  "linear_conv_kernel_dim": 4,
  "linear_key_head_dim": 128,
  "linear_num_key_heads": 16,
  "linear_num_value_heads": 32,
  "linear_value_head_dim": 128,
  "max_position_embeddings": 262144,
  "mlp_only_layers": [],
  "model_type": "qwen3_next",
  "moe_intermediate_size": 512,
  "norm_topk_prob": true,
  "num_attention_heads": 16,
  "num_experts": 512,
  "num_experts_per_tok": 10,
  "num_hidden_layers": 48,
  "num_key_value_heads": 2,
  "output_router_logits": false,
  "partial_rotary_factor": 0.25,
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "format": "pack-quantized",
        "input_activations": null,
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": 32,
          "num_bits": 4,
          "observer": "mse",
          "observer_kwargs": {},
          "strategy": "group",
          "symmetric": true,
          "type": "int"
        }
      }
    },
    "format": "pack-quantized",
    "global_compression_ratio": null,
    "ignore": [
      "model.layers.0.linear_attn.in_proj_qkvz", "model.layers.0.linear_attn.in_proj_ba", "model.layers.0.linear_attn.out_proj", "model.layers.0.mlp.gate", "model.layers.0.mlp.shared_expert.gate_proj", "model.layers.0.mlp.shared_expert.up_proj", "model.layers.0.mlp.shared_expert.down_proj", "model.layers.0.mlp.shared_expert_gate",
      "model.layers.1.linear_attn.in_proj_qkvz", "model.layers.1.linear_attn.in_proj_ba", "model.layers.1.linear_attn.out_proj", "model.layers.1.mlp.gate", "model.layers.1.mlp.shared_expert.gate_proj", "model.layers.1.mlp.shared_expert.up_proj", "model.layers.1.mlp.shared_expert.down_proj", "model.layers.1.mlp.shared_expert_gate",
      "model.layers.2.linear_attn.in_proj_qkvz", "model.layers.2.linear_attn.in_proj_ba", "model.layers.2.linear_attn.out_proj", "model.layers.2.mlp.gate", "model.layers.2.mlp.shared_expert.gate_proj", "model.layers.2.mlp.shared_expert.up_proj", "model.layers.2.mlp.shared_expert.down_proj", "model.layers.2.mlp.shared_expert_gate",
      "model.layers.3.self_attn.q_proj", "model.layers.3.self_attn.k_proj", "model.layers.3.self_attn.v_proj", "model.layers.3.self_attn.o_proj", "model.layers.3.mlp.gate", "model.layers.3.mlp.shared_expert.gate_proj", "model.layers.3.mlp.shared_expert.up_proj", "model.layers.3.mlp.shared_expert.down_proj", "model.layers.3.mlp.shared_expert_gate",
      "model.layers.4.linear_attn.in_proj_qkvz", "model.layers.4.linear_attn.in_proj_ba", "model.layers.4.linear_attn.out_proj", "model.layers.4.mlp.gate", "model.layers.4.mlp.shared_expert.gate_proj", "model.layers.4.mlp.shared_expert.up_proj", "model.layers.4.mlp.shared_expert.down_proj", "model.layers.4.mlp.shared_expert_gate",
      "model.layers.5.linear_attn.in_proj_qkvz", "model.layers.5.linear_attn.in_proj_ba", "model.layers.5.linear_attn.out_proj", "model.layers.5.mlp.gate", "model.layers.5.mlp.shared_expert.gate_proj", "model.layers.5.mlp.shared_expert.up_proj", "model.layers.5.mlp.shared_expert.down_proj", "model.layers.5.mlp.shared_expert_gate",
      "model.layers.6.linear_attn.in_proj_qkvz", "model.layers.6.linear_attn.in_proj_ba", "model.layers.6.linear_attn.out_proj", "model.layers.6.mlp.gate", "model.layers.6.mlp.shared_expert.gate_proj", "model.layers.6.mlp.shared_expert.up_proj", "model.layers.6.mlp.shared_expert.down_proj", "model.layers.6.mlp.shared_expert_gate",
      "model.layers.7.self_attn.q_proj", "model.layers.7.self_attn.k_proj", "model.layers.7.self_attn.v_proj", "model.layers.7.self_attn.o_proj", "model.layers.7.mlp.gate", "model.layers.7.mlp.shared_expert.gate_proj", "model.layers.7.mlp.shared_expert.up_proj", "model.layers.7.mlp.shared_expert.down_proj", "model.layers.7.mlp.shared_expert_gate",
      "model.layers.8.linear_attn.in_proj_qkvz", "model.layers.8.linear_attn.in_proj_ba", "model.layers.8.linear_attn.out_proj", "model.layers.8.mlp.gate", "model.layers.8.mlp.shared_expert.gate_proj", "model.layers.8.mlp.shared_expert.up_proj", "model.layers.8.mlp.shared_expert.down_proj", "model.layers.8.mlp.shared_expert_gate",
      "model.layers.9.linear_attn.in_proj_qkvz", "model.layers.9.linear_attn.in_proj_ba", "model.layers.9.linear_attn.out_proj", "model.layers.9.mlp.gate", "model.layers.9.mlp.shared_expert.gate_proj", "model.layers.9.mlp.shared_expert.up_proj", "model.layers.9.mlp.shared_expert.down_proj", "model.layers.9.mlp.shared_expert_gate",
      "model.layers.10.linear_attn.in_proj_qkvz", "model.layers.10.linear_attn.in_proj_ba", "model.layers.10.linear_attn.out_proj", "model.layers.10.mlp.gate", "model.layers.10.mlp.shared_expert.gate_proj", "model.layers.10.mlp.shared_expert.up_proj", "model.layers.10.mlp.shared_expert.down_proj", "model.layers.10.mlp.shared_expert_gate",
      "model.layers.11.self_attn.q_proj", "model.layers.11.self_attn.k_proj", "model.layers.11.self_attn.v_proj", "model.layers.11.self_attn.o_proj", "model.layers.11.mlp.gate", "model.layers.11.mlp.shared_expert.gate_proj", "model.layers.11.mlp.shared_expert.up_proj", "model.layers.11.mlp.shared_expert.down_proj", "model.layers.11.mlp.shared_expert_gate",
      "model.layers.12.linear_attn.in_proj_qkvz", "model.layers.12.linear_attn.in_proj_ba", "model.layers.12.linear_attn.out_proj", "model.layers.12.mlp.gate", "model.layers.12.mlp.shared_expert.gate_proj", "model.layers.12.mlp.shared_expert.up_proj", "model.layers.12.mlp.shared_expert.down_proj", "model.layers.12.mlp.shared_expert_gate",
      "model.layers.13.linear_attn.in_proj_qkvz", "model.layers.13.linear_attn.in_proj_ba", "model.layers.13.linear_attn.out_proj", "model.layers.13.mlp.gate", "model.layers.13.mlp.shared_expert.gate_proj", "model.layers.13.mlp.shared_expert.up_proj", "model.layers.13.mlp.shared_expert.down_proj", "model.layers.13.mlp.shared_expert_gate",
      "model.layers.14.linear_attn.in_proj_qkvz", "model.layers.14.linear_attn.in_proj_ba", "model.layers.14.linear_attn.out_proj", "model.layers.14.mlp.gate", "model.layers.14.mlp.shared_expert.gate_proj", "model.layers.14.mlp.shared_expert.up_proj", "model.layers.14.mlp.shared_expert.down_proj", "model.layers.14.mlp.shared_expert_gate",
      "model.layers.15.self_attn.q_proj", "model.layers.15.self_attn.k_proj", "model.layers.15.self_attn.v_proj", "model.layers.15.self_attn.o_proj", "model.layers.15.mlp.gate", "model.layers.15.mlp.shared_expert.gate_proj", "model.layers.15.mlp.shared_expert.up_proj", "model.layers.15.mlp.shared_expert.down_proj", "model.layers.15.mlp.shared_expert_gate",
      "model.layers.16.linear_attn.in_proj_qkvz", "model.layers.16.linear_attn.in_proj_ba", "model.layers.16.linear_attn.out_proj", "model.layers.16.mlp.gate", "model.layers.16.mlp.shared_expert.gate_proj", "model.layers.16.mlp.shared_expert.up_proj", "model.layers.16.mlp.shared_expert.down_proj", "model.layers.16.mlp.shared_expert_gate",
      "model.layers.17.linear_attn.in_proj_qkvz", "model.layers.17.linear_attn.in_proj_ba", "model.layers.17.linear_attn.out_proj", "model.layers.17.mlp.gate", "model.layers.17.mlp.shared_expert.gate_proj", "model.layers.17.mlp.shared_expert.up_proj", "model.layers.17.mlp.shared_expert.down_proj", "model.layers.17.mlp.shared_expert_gate",
      "model.layers.18.linear_attn.in_proj_qkvz", "model.layers.18.linear_attn.in_proj_ba", "model.layers.18.linear_attn.out_proj", "model.layers.18.mlp.gate", "model.layers.18.mlp.shared_expert.gate_proj", "model.layers.18.mlp.shared_expert.up_proj", "model.layers.18.mlp.shared_expert.down_proj", "model.layers.18.mlp.shared_expert_gate",
      "model.layers.19.self_attn.q_proj", "model.layers.19.self_attn.k_proj", "model.layers.19.self_attn.v_proj", "model.layers.19.self_attn.o_proj", "model.layers.19.mlp.gate", "model.layers.19.mlp.shared_expert.gate_proj", "model.layers.19.mlp.shared_expert.up_proj", "model.layers.19.mlp.shared_expert.down_proj", "model.layers.19.mlp.shared_expert_gate",
      "model.layers.20.linear_attn.in_proj_qkvz", "model.layers.20.linear_attn.in_proj_ba", "model.layers.20.linear_attn.out_proj", "model.layers.20.mlp.gate", "model.layers.20.mlp.shared_expert.gate_proj", "model.layers.20.mlp.shared_expert.up_proj", "model.layers.20.mlp.shared_expert.down_proj", "model.layers.20.mlp.shared_expert_gate",
      "model.layers.21.linear_attn.in_proj_qkvz", "model.layers.21.linear_attn.in_proj_ba", "model.layers.21.linear_attn.out_proj", "model.layers.21.mlp.gate", "model.layers.21.mlp.shared_expert.gate_proj", "model.layers.21.mlp.shared_expert.up_proj", "model.layers.21.mlp.shared_expert.down_proj", "model.layers.21.mlp.shared_expert_gate",
      "model.layers.22.linear_attn.in_proj_qkvz", "model.layers.22.linear_attn.in_proj_ba", "model.layers.22.linear_attn.out_proj", "model.layers.22.mlp.gate", "model.layers.22.mlp.shared_expert.gate_proj", "model.layers.22.mlp.shared_expert.up_proj", "model.layers.22.mlp.shared_expert.down_proj", "model.layers.22.mlp.shared_expert_gate",
      "model.layers.23.self_attn.q_proj", "model.layers.23.self_attn.k_proj", "model.layers.23.self_attn.v_proj", "model.layers.23.self_attn.o_proj", "model.layers.23.mlp.gate", "model.layers.23.mlp.shared_expert.gate_proj", "model.layers.23.mlp.shared_expert.up_proj", "model.layers.23.mlp.shared_expert.down_proj", "model.layers.23.mlp.shared_expert_gate",
      "model.layers.24.linear_attn.in_proj_qkvz", "model.layers.24.linear_attn.in_proj_ba", "model.layers.24.linear_attn.out_proj", "model.layers.24.mlp.gate", "model.layers.24.mlp.shared_expert.gate_proj", "model.layers.24.mlp.shared_expert.up_proj", "model.layers.24.mlp.shared_expert.down_proj", "model.layers.24.mlp.shared_expert_gate",
      "model.layers.25.linear_attn.in_proj_qkvz", "model.layers.25.linear_attn.in_proj_ba", "model.layers.25.linear_attn.out_proj", "model.layers.25.mlp.gate", "model.layers.25.mlp.shared_expert.gate_proj", "model.layers.25.mlp.shared_expert.up_proj", "model.layers.25.mlp.shared_expert.down_proj", "model.layers.25.mlp.shared_expert_gate",
      "model.layers.26.linear_attn.in_proj_qkvz", "model.layers.26.linear_attn.in_proj_ba", "model.layers.26.linear_attn.out_proj", "model.layers.26.mlp.gate", "model.layers.26.mlp.shared_expert.gate_proj", "model.layers.26.mlp.shared_expert.up_proj", "model.layers.26.mlp.shared_expert.down_proj", "model.layers.26.mlp.shared_expert_gate",
      "model.layers.27.self_attn.q_proj", "model.layers.27.self_attn.k_proj", "model.layers.27.self_attn.v_proj", "model.layers.27.self_attn.o_proj", "model.layers.27.mlp.gate", "model.layers.27.mlp.shared_expert.gate_proj", "model.layers.27.mlp.shared_expert.up_proj", "model.layers.27.mlp.shared_expert.down_proj", "model.layers.27.mlp.shared_expert_gate",
      "model.layers.28.linear_attn.in_proj_qkvz", "model.layers.28.linear_attn.in_proj_ba", "model.layers.28.linear_attn.out_proj", "model.layers.28.mlp.gate", "model.layers.28.mlp.shared_expert.gate_proj", "model.layers.28.mlp.shared_expert.up_proj", "model.layers.28.mlp.shared_expert.down_proj", "model.layers.28.mlp.shared_expert_gate",
      "model.layers.29.linear_attn.in_proj_qkvz", "model.layers.29.linear_attn.in_proj_ba", "model.layers.29.linear_attn.out_proj", "model.layers.29.mlp.gate", "model.layers.29.mlp.shared_expert.gate_proj", "model.layers.29.mlp.shared_expert.up_proj", "model.layers.29.mlp.shared_expert.down_proj", "model.layers.29.mlp.shared_expert_gate",
      "model.layers.30.linear_attn.in_proj_qkvz", "model.layers.30.linear_attn.in_proj_ba", "model.layers.30.linear_attn.out_proj", "model.layers.30.mlp.gate", "model.layers.30.mlp.shared_expert.gate_proj", "model.layers.30.mlp.shared_expert.up_proj", "model.layers.30.mlp.shared_expert.down_proj", "model.layers.30.mlp.shared_expert_gate",
      "model.layers.31.self_attn.q_proj", "model.layers.31.self_attn.k_proj", "model.layers.31.self_attn.v_proj", "model.layers.31.self_attn.o_proj", "model.layers.31.mlp.gate", "model.layers.31.mlp.shared_expert.gate_proj", "model.layers.31.mlp.shared_expert.up_proj", "model.layers.31.mlp.shared_expert.down_proj", "model.layers.31.mlp.shared_expert_gate",
      "model.layers.32.linear_attn.in_proj_qkvz", "model.layers.32.linear_attn.in_proj_ba", "model.layers.32.linear_attn.out_proj", "model.layers.32.mlp.gate", "model.layers.32.mlp.shared_expert.gate_proj", "model.layers.32.mlp.shared_expert.up_proj", "model.layers.32.mlp.shared_expert.down_proj", "model.layers.32.mlp.shared_expert_gate",
      "model.layers.33.linear_attn.in_proj_qkvz", "model.layers.33.linear_attn.in_proj_ba", "model.layers.33.linear_attn.out_proj", "model.layers.33.mlp.gate", "model.layers.33.mlp.shared_expert.gate_proj", "model.layers.33.mlp.shared_expert.up_proj", "model.layers.33.mlp.shared_expert.down_proj", "model.layers.33.mlp.shared_expert_gate",
      "model.layers.34.linear_attn.in_proj_qkvz", "model.layers.34.linear_attn.in_proj_ba", "model.layers.34.linear_attn.out_proj", "model.layers.34.mlp.gate", "model.layers.34.mlp.shared_expert.gate_proj", "model.layers.34.mlp.shared_expert.up_proj", "model.layers.34.mlp.shared_expert.down_proj", "model.layers.34.mlp.shared_expert_gate",
      "model.layers.35.self_attn.q_proj", "model.layers.35.self_attn.k_proj", "model.layers.35.self_attn.v_proj", "model.layers.35.self_attn.o_proj", "model.layers.35.mlp.gate", "model.layers.35.mlp.shared_expert.gate_proj", "model.layers.35.mlp.shared_expert.up_proj", "model.layers.35.mlp.shared_expert.down_proj", "model.layers.35.mlp.shared_expert_gate",
      "model.layers.36.linear_attn.in_proj_qkvz", "model.layers.36.linear_attn.in_proj_ba", "model.layers.36.linear_attn.out_proj", "model.layers.36.mlp.gate", "model.layers.36.mlp.shared_expert.gate_proj", "model.layers.36.mlp.shared_expert.up_proj", "model.layers.36.mlp.shared_expert.down_proj", "model.layers.36.mlp.shared_expert_gate",
      "model.layers.37.linear_attn.in_proj_qkvz", "model.layers.37.linear_attn.in_proj_ba", "model.layers.37.linear_attn.out_proj", "model.layers.37.mlp.gate", "model.layers.37.mlp.shared_expert.gate_proj", "model.layers.37.mlp.shared_expert.up_proj", "model.layers.37.mlp.shared_expert.down_proj", "model.layers.37.mlp.shared_expert_gate",
      "model.layers.38.linear_attn.in_proj_qkvz", "model.layers.38.linear_attn.in_proj_ba", "model.layers.38.linear_attn.out_proj", "model.layers.38.mlp.gate", "model.layers.38.mlp.shared_expert.gate_proj", "model.layers.38.mlp.shared_expert.up_proj", "model.layers.38.mlp.shared_expert.down_proj", "model.layers.38.mlp.shared_expert_gate",
      "model.layers.39.self_attn.q_proj", "model.layers.39.self_attn.k_proj", "model.layers.39.self_attn.v_proj", "model.layers.39.self_attn.o_proj", "model.layers.39.mlp.gate", "model.layers.39.mlp.shared_expert.gate_proj", "model.layers.39.mlp.shared_expert.up_proj", "model.layers.39.mlp.shared_expert.down_proj", "model.layers.39.mlp.shared_expert_gate",
      "model.layers.40.linear_attn.in_proj_qkvz", "model.layers.40.linear_attn.in_proj_ba", "model.layers.40.linear_attn.out_proj", "model.layers.40.mlp.gate", "model.layers.40.mlp.shared_expert.gate_proj", "model.layers.40.mlp.shared_expert.up_proj", "model.layers.40.mlp.shared_expert.down_proj", "model.layers.40.mlp.shared_expert_gate",
      "model.layers.41.linear_attn.in_proj_qkvz", "model.layers.41.linear_attn.in_proj_ba", "model.layers.41.linear_attn.out_proj", "model.layers.41.mlp.gate", "model.layers.41.mlp.shared_expert.gate_proj", "model.layers.41.mlp.shared_expert.up_proj", "model.layers.41.mlp.shared_expert.down_proj", "model.layers.41.mlp.shared_expert_gate",
      "model.layers.42.linear_attn.in_proj_qkvz", "model.layers.42.linear_attn.in_proj_ba", "model.layers.42.linear_attn.out_proj", "model.layers.42.mlp.gate", "model.layers.42.mlp.shared_expert.gate_proj", "model.layers.42.mlp.shared_expert.up_proj", "model.layers.42.mlp.shared_expert.down_proj", "model.layers.42.mlp.shared_expert_gate",
      "model.layers.43.self_attn.q_proj", "model.layers.43.self_attn.k_proj", "model.layers.43.self_attn.v_proj", "model.layers.43.self_attn.o_proj", "model.layers.43.mlp.gate", "model.layers.43.mlp.shared_expert.gate_proj", "model.layers.43.mlp.shared_expert.up_proj", "model.layers.43.mlp.shared_expert.down_proj", "model.layers.43.mlp.shared_expert_gate",
      "model.layers.44.linear_attn.in_proj_qkvz", "model.layers.44.linear_attn.in_proj_ba", "model.layers.44.linear_attn.out_proj", "model.layers.44.mlp.gate", "model.layers.44.mlp.shared_expert.gate_proj", "model.layers.44.mlp.shared_expert.up_proj", "model.layers.44.mlp.shared_expert.down_proj", "model.layers.44.mlp.shared_expert_gate",
      "model.layers.45.linear_attn.in_proj_qkvz", "model.layers.45.linear_attn.in_proj_ba", "model.layers.45.linear_attn.out_proj", "model.layers.45.mlp.gate", "model.layers.45.mlp.shared_expert.gate_proj", "model.layers.45.mlp.shared_expert.up_proj", "model.layers.45.mlp.shared_expert.down_proj", "model.layers.45.mlp.shared_expert_gate",
      "model.layers.46.linear_attn.in_proj_qkvz", "model.layers.46.linear_attn.in_proj_ba", "model.layers.46.linear_attn.out_proj", "model.layers.46.mlp.gate", "model.layers.46.mlp.shared_expert.gate_proj", "model.layers.46.mlp.shared_expert.up_proj", "model.layers.46.mlp.shared_expert.down_proj", "model.layers.46.mlp.shared_expert_gate",
      "model.layers.47.self_attn.q_proj", "model.layers.47.self_attn.k_proj", "model.layers.47.self_attn.v_proj", "model.layers.47.self_attn.o_proj", "model.layers.47.mlp.gate", "model.layers.47.mlp.shared_expert.gate_proj", "model.layers.47.mlp.shared_expert.up_proj", "model.layers.47.mlp.shared_expert.down_proj", "model.layers.47.mlp.shared_expert_gate",
      "mtp.fc", "mtp.layers.0.mlp.gate", "mtp.layers.0.mlp.shared_expert.gate_proj", "mtp.layers.0.mlp.shared_expert.up_proj", "mtp.layers.0.mlp.shared_expert.down_proj",
      "mtp.layers.0.self_attn.k_proj", "mtp.layers.0.self_attn.o_proj", "mtp.layers.0.self_attn.q_norm", "mtp.layers.0.self_attn.q_proj", "mtp.layers.0.self_attn.v_proj",
      "lm_head"
    ],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "compressed",
    "sparsity_config": {},
    "transform_config": {},
    "version": "0.12.3.a20251028"
  },
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000000,
  "router_aux_loss_coef": 0.001,
  "shared_expert_intermediate_size": 512,
  "tie_word_embeddings": false,
  "transformers_version": "5.0.0.dev0",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
generation_config.json ADDED
@@ -0,0 +1,13 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "5.0.0.dev0"
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e7235ad62b765b8b63bfefc6373dd76ddfc4b9b0724fb8cd294bc63c64dd56e
size 5371909552
model-00002-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b94de32520ee767398bc63b0e3c245e440dd70039de4b3dd73f980bbb38ff020
size 5371575416
model-00003-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f100214f79c7385c5a97e7db6ff447d37a05761c8ac8eab04c4101ba7a7ce0dc
size 5371658008
model-00004-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:694e387f834e4279844016d43c0187330cd6e22ebdf8da82612a9627ae63cc10
size 5371582416
model-00005-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:debe0aface3bb85b32598990f8e5b971d628c5b22290eddfa90e32d1d2f3bc4e
size 5371657544
model-00006-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2734d9704f0438f7fd3f74606ba6bc0721cd425ad41b0f8fc26e974c1df43f2d
size 5371341976
model-00007-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b431930e171e5f6f3e95a3e6c940147e1e57360fbeb47cb858f8fe625f3a728b
size 5371660192
model-00008-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d56882e60b6824b204c86fa777b691d944b36c8063cc2d00a47952a18c616d10
size 5371225880
model-00009-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c1006169ac480caacf31052fe7709e7d6254ed1ac21c64c7af0b491b7606e52
size 5371882200
model-00010-of-00010.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee34bc2d4894bc5641bf011c1959fcc3ef56f510cf53c115e4c22182564313e7
size 886957728
model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb41a1f0e0547ce3108728fc216a36ecb118e08749616a7921841b02737a3317
size 22002272
recipe.yaml ADDED
@@ -0,0 +1,39 @@
default_stage:
  default_modifiers:
    AWQModifier:
      config_groups:
        group_0:
          targets: [Linear]
          weights:
            num_bits: 4
            type: int
            symmetric: true
            group_size: 32
            strategy: group
            block_structure: null
            dynamic: false
            actorder: null
            observer: mse
            observer_kwargs: {}
          input_activations: null
          output_activations: null
          format: null
      targets: [Linear]
      ignore: [model.embed_tokens, 're:.*input_layernorm$', 're:.*linear_attn.*', 're:.*norm.*',
        're:.*RMSNorm.*', 're:.*rotary.*', 're:.*shared_expert.*', 're:.*shared_expert_gate$',
        're:.*mlp[.]gate$', 're:.*router.*', 're:.*post_attention_layernorm$', 're:.*self_attn.*',
        're:mtp.*', lm_head]
      mappings:
      - smooth_layer: re:.*input_layernorm$
        balance_layers: ['re:.*self_attn[.]q_proj$', 're:.*self_attn[.]k_proj$', 're:.*self_attn[.]v_proj$',
          're:.*linear_attn[.]in_proj_qkvz$', 're:.*linear_attn[.]in_proj_ba$']
      - smooth_layer: re:.*self_attn[.]v_proj$
        balance_layers: ['re:.*self_attn[.]o_proj$']
      - smooth_layer: re:.*post_attention_layernorm$
        balance_layers: ['re:.*gate_proj$', 're:.*up_proj$']
      - smooth_layer: re:.*up_proj$
        balance_layers: ['re:.*down_proj$']
      - smooth_layer: re:.*linear_attn[.]norm$
        balance_layers: ['re:.*linear_attn[.]out_proj$']
      offload_device: !!python/object/apply:torch.device [cpu]
      duo_scaling: true
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151665": {"content": "<tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151666": {"content": "</tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151667": {"content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151668": {"content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1010000,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
vocab.json ADDED
The diff for this file is too large to render. See raw diff