diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..52373fe24473b1aa44333d318f578ae6bf04b49b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..15633276a7c317d4619c7d9283ec3286492e3f8b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,54 @@
+---
+language:
+- en
+license: llama3
+tags:
+- Llama-3.1
+- instruct
+- finetune
+- reasoning
+- hybrid-mode
+- chatml
+- function calling
+- tool use
+- json mode
+- structured outputs
+- atropos
+- dataforge
+- long context
+- roleplaying
+- chat
+- mlx
+base_model: NousResearch/Hermes-4-405B
+library_name: transformers
+widget:
+- example_title: Hermes 4
+ messages:
+ - role: system
+ content: You are Hermes 4, a capable, neutrally-aligned assistant. Prefer concise,
+ correct answers.
+ - role: user
+ content: Explain the difference between BFS and DFS to a new CS student.
+model-index:
+- name: Hermes-4-Llama-3.1-405B
+ results: []
+---
+## 💫 Community Model> Hermes-4-405B by NousResearch
+
+_👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)_.
+
+**Model creator**: [NousResearch](https://huggingface.co/NousResearch)
+**Original model**: [Hermes-4-405B](https://huggingface.co/NousResearch/Hermes-4-405B)
+**MLX quantization**: provided by [LM Studio team](https://x.com/lmstudio) using [mlx_lm](https://github.com/ml-explore/mlx-lm)
+
+## Technical Details
+
+6-bit quantized version of Hermes-4-405B (group size 64), generated with MLX and optimized for Apple Silicon.
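+
+A minimal usage sketch with [mlx_lm](https://github.com/ml-explore/mlx-lm) is shown below; the repo id is illustrative, not a confirmed path.
+
+```python
+# Sketch: load the quantized model and run one chat turn with mlx_lm.
+# The repo id below is a placeholder; substitute this model's actual path.
+from mlx_lm import load, generate
+
+model, tokenizer = load("lmstudio-community/Hermes-4-405B-MLX-6bit")
+
+messages = [
+    {"role": "system", "content": "You are Hermes, created by Nous Research."},
+    {"role": "user", "content": "Explain the difference between BFS and DFS."},
+]
+prompt = tokenizer.apply_chat_template(
+    messages, add_generation_prompt=True, tokenize=False
+)
+
+# generation_config.json ships temperature 0.6 / top_p 0.9 as sampling defaults.
+text = generate(model, tokenizer, prompt=prompt, max_tokens=512, verbose=True)
+```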
+
+## Special thanks
+
+🙏 Special thanks to the [Apple Machine Learning Research](https://github.com/ml-explore) team for creating [MLX](https://github.com/ml-explore/mlx).
+
+## Disclaimers
+
+LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d894f3b5ebdfe9e094f56e79259fe779156ca47a
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,76 @@
+{%- set thinking_prompt = 'You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside <think> </think> tags, and then provide your solution or response to the problem.' %}
+{%- set standard_prompt = 'You are Hermes, created by Nous Research.' %}
+{%- if not thinking is defined %}{% set thinking = false %}{% endif %}
+{%- if not keep_cots is defined %}{% set keep_cots = false %}{% endif %}
+{%- if thinking %}{%- set system_prompt = thinking_prompt %}{%- else %}{%- set system_prompt = standard_prompt %}{%- endif %}
+{%- if tools %}
+ {{- bos_token + '<|start_header_id|>system<|end_header_id|>\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- system_prompt }}
+ {%- endif %}
+    {{- "\n\n# Tools\n\nYou are a function calling AI model. You may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": \"<function-name>\", \"arguments\": <args-json-object>}\n</tool_call><|eot_id|>" }}
+{%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- bos_token + '<|start_header_id|>system<|end_header_id|>\n\n' + messages[0]['content'] + '<|eot_id|>' }}
+ {%- else %}
+ {{- bos_token + '<|start_header_id|>system<|end_header_id|>\n\n' + system_prompt + '<|eot_id|>' }}
+ {%- endif %}
+{%- endif %}
+{%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|start_header_id|>' + message.role + '<|end_header_id|>\n\n' + message.content + '<|eot_id|>' }}
+ {%- elif (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|start_header_id|>' + message.role + '<|end_header_id|>\n' }}
+ {%- if message.content %}
+ {%- set content = message['content'] -%}
+ {%- if thinking %}
+ {%- if not keep_cots %}
+                    {%- set content = '<think></think> ' + content.split('</think>', 1)[1] -%}
+ {%- endif %}
+ {%- endif %}
+ {{- '\n' + content + '<|eot_id|>' }}
+ {%- endif %}
+ {%- elif message.role == "assistant" %}
+ {{- '<|start_header_id|>' + message.role + '<|end_header_id|>\n' }}
+ {%- if message.content %}
+ {%- set content = message['content'] -%}
+ {%- if thinking %}
+ {%- if not keep_cots %}
+                    {%- set content = '<think></think> ' + content.split('</think>', 1)[1] -%}
+ {%- endif %}
+ {%- endif %}
+ {{- '\n' + content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+            {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+            {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|eot_id|>' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|start_header_id|>user<|end_header_id|>\n' }}
+ {%- endif %}
+        {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+        {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|eot_id|>' }}
+ {%- endif %}
+ {%- endif %}
+{%- endfor %}
+{%- if add_generation_prompt %}
+ {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
+{%- endif %}
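
The template reads two optional variables: `thinking` swaps in the deep-reasoning system prompt, and `keep_cots` controls whether earlier `<think>` blocks are kept when re-rendering assistant turns. A minimal sketch of toggling reasoning mode, assuming a standard `transformers` tokenizer (extra `apply_chat_template` kwargs are forwarded to the template):

```python
# Sketch: toggle Hermes 4 reasoning mode via chat-template kwargs.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("NousResearch/Hermes-4-405B")

messages = [{"role": "user", "content": "What is 17 * 23?"}]

# thinking=True selects the deep-thinking system prompt, so the model is
# expected to emit <think>...</think> reasoning before its final answer.
prompt = tok.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False, thinking=True
)

# With keep_cots left at its default (false), prior <think> blocks in
# assistant turns are collapsed to an empty <think></think> on re-render.
```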
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..47b66cb93df0c5ea3cf2ebf76c44533ce7d9c443
--- /dev/null
+++ b/config.json
@@ -0,0 +1,43 @@
+{
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 128000,
+ "eos_token_id": 128001,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 16384,
+ "initializer_range": 0.02,
+ "intermediate_size": 53248,
+ "max_position_embeddings": 131072,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 128,
+ "num_hidden_layers": 126,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "quantization": {
+ "group_size": 64,
+ "bits": 6
+ },
+ "quantization_config": {
+ "group_size": 64,
+ "bits": 6
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 8.0,
+ "high_freq_factor": 4.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.52.4",
+ "use_cache": true,
+ "vocab_size": 128256
+}
\ No newline at end of file
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..27a9e4c5a8b7c8e0394f0e31149ae19d4822dcd3
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 128000,
+ "do_sample": true,
+ "eos_token_id": 128001,
+ "temperature": 0.6,
+ "top_p": 0.9,
+ "transformers_version": "4.52.4"
+}
diff --git a/model-00001-of-00064.safetensors b/model-00001-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..53e38d188465942e20182f3ac4c596b3ce021c57
--- /dev/null
+++ b/model-00001-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45175dd79a49a11b346232d7cb8bd91d276c23065315061eb2c8c69621287f55
+size 4760867066
diff --git a/model-00007-of-00064.safetensors b/model-00007-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..107119e337065081b8819df99f0c76f1ebb0cbc5
--- /dev/null
+++ b/model-00007-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:950c0283879820a19e2d8be07b89132bb57a1a17e80f6c6d2a9c5ffad2f79a7e
+size 5180101854
diff --git a/model-00010-of-00064.safetensors b/model-00010-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c970d03d960b1053c14ab66cb0ea4be908322668
--- /dev/null
+++ b/model-00010-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a70c2af32c00ea05dbe5d8e0d36626966aded63f7e051f670668848fdd6df45f
+size 5180101868
diff --git a/model-00011-of-00064.safetensors b/model-00011-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..36ad75c28506c3431dad4c3006b06be2e7201446
--- /dev/null
+++ b/model-00011-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f1bc5e255141a0932fda981071729862d375e5585a5b442caaa9ed76546dbbf
+size 5180101858
diff --git a/model-00013-of-00064.safetensors b/model-00013-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7ecc3fcdabfd0461e3f6fe35917eeb4d0527556e
--- /dev/null
+++ b/model-00013-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30a1e643c9d5cb45c3125859a08be10d3eaae5040fbd7be8be6cc1c59a16f747
+size 5180101844
diff --git a/model-00014-of-00064.safetensors b/model-00014-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9069017b6e02e68113e431a38ad92a7356cf44b4
--- /dev/null
+++ b/model-00014-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66e41976057a3b176617f66a28633be23d3bb0f320d827180a123e1d61803daf
+size 5180101850
diff --git a/model-00015-of-00064.safetensors b/model-00015-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..74993b44bc4414536cdc05bc2691a21fddf07f68
--- /dev/null
+++ b/model-00015-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0e831db5ece81c277df8c04188b4f7900cbfcb114359e2249f375c606932158
+size 5180101850
diff --git a/model-00017-of-00064.safetensors b/model-00017-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a3a0a9518484ed6012898f3f88f1fdce24a5d27f
--- /dev/null
+++ b/model-00017-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb093219cb46875842ef142270f5eac22d92f61dd1e99ea0e8de96b5df6eb02b
+size 5180101866
diff --git a/model-00018-of-00064.safetensors b/model-00018-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dde0eeb9a8c926f3b2ed80e533ea76ae75e6abfc
--- /dev/null
+++ b/model-00018-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4a14bb3c5c6330ec169d6076bfa7b4242df593bc4525f91a6cdf007efa4dae2
+size 5180101852
diff --git a/model-00019-of-00064.safetensors b/model-00019-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..75bdf0d8a596393ba67050859bd0920664a8f75a
--- /dev/null
+++ b/model-00019-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28e7c5c19b16512e35de3a25463c221e078a2e1ab45c857cdf384a863224cab5
+size 5180101850
diff --git a/model-00023-of-00064.safetensors b/model-00023-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3f4e428cc7f8ec54fe0c641acd89a5e0a6afd402
--- /dev/null
+++ b/model-00023-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc8c4c706dd4671c4a2270c52eedf91869dcdf55093cbc914903dad7384eabf2
+size 5180101844
diff --git a/model-00027-of-00064.safetensors b/model-00027-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c89187d1c3eb9f4ea0cb4d3aa581acef0e371545
--- /dev/null
+++ b/model-00027-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6916b8af7da9482d4a55cfbd016c2a50cd52d4f097235be5837da963c4ab960
+size 5180101866
diff --git a/model-00028-of-00064.safetensors b/model-00028-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4a0942b412780d82e4e303b75df28eaf7a36f967
--- /dev/null
+++ b/model-00028-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32ab483746a0f416a70b88d19e123f395ba516c8b625dfbec73d24d118a420fa
+size 5180101848
diff --git a/model-00029-of-00064.safetensors b/model-00029-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8759d14eb0fc33fdf6c1d45c199b01ed35b35c81
--- /dev/null
+++ b/model-00029-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bd8320ac1d6dae47dd00552526814a6c00f953b23387bf732d508a058be102d
+size 5180101862
diff --git a/model-00030-of-00064.safetensors b/model-00030-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3a92936d17e1f2662c35d34ef7264e2609e8780d
--- /dev/null
+++ b/model-00030-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f31c8da5bf37772a4b36fb14d080b76ae4f4826548e43d418a552909c3a7656
+size 5180101862
diff --git a/model-00032-of-00064.safetensors b/model-00032-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9f115bed005594b4be7ded224dd884c35bbd3615
--- /dev/null
+++ b/model-00032-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:013b4a0a04059223166962c9e2af5bb208aff0a2b5fc6a6efd2d6e0d678ed8c6
+size 5180101848
diff --git a/model-00033-of-00064.safetensors b/model-00033-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..24940bd5ad5a5a7e4530171f5c01fbf9f720c10a
--- /dev/null
+++ b/model-00033-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d50b64f860dd0300faf69c4f5677562c772dd9d1a344fd87714ff0c644b02df
+size 5180101870
diff --git a/model-00035-of-00064.safetensors b/model-00035-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c0c243580c9b3ca0439ec3c4430e8bc061b720a3
--- /dev/null
+++ b/model-00035-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a44342d296cf1cbbe345468547ee6f49251d2d92dd8b805d575f0c3fb14eb5db
+size 5180101860
diff --git a/model-00037-of-00064.safetensors b/model-00037-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2f40485e3b0264dcdbbaf299db73a6439dcee86c
--- /dev/null
+++ b/model-00037-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db61a1c281eb24fe9fb487caa07097630192d11d2a23c2af6672c5551561742d
+size 5180101844
diff --git a/model-00038-of-00064.safetensors b/model-00038-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..da4288a332e4b901995951c2ae0dd376d00f3b06
--- /dev/null
+++ b/model-00038-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41bb8f64f8896d1967bcb6f6f6df87663a9bf73e099c9fa618623f59d701b34d
+size 5180101846
diff --git a/model-00039-of-00064.safetensors b/model-00039-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3e4f2f381247050feec4542f11cb8af60d4839e0
--- /dev/null
+++ b/model-00039-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0ff46e605f27ec44bca9b490bfc561ebcef77c720829553d49b2308496d3b5
+size 5180101852
diff --git a/model-00040-of-00064.safetensors b/model-00040-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ebb8b88091d70ced258332f5c881f01669dae8ba
--- /dev/null
+++ b/model-00040-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1af63339cce11c5e627005e6d91a16ed49a45a909db822f97c5b4d6a488c14fe
+size 5180101862
diff --git a/model-00042-of-00064.safetensors b/model-00042-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9ad9f10667b83a76706bff61fdcd713d3edc598e
--- /dev/null
+++ b/model-00042-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72d529cfed1eb9d90bcfe91a61eee9b03c6d96a514f2489eaa561e279858b10a
+size 5180101820
diff --git a/model-00043-of-00064.safetensors b/model-00043-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f4b61c31ac5afe999cdcaf7580a266e7f389d166
--- /dev/null
+++ b/model-00043-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f9ad32a0c5172576e095a8f399025401a0919b5e8109326be45fe9b47c68c4e
+size 5180101852
diff --git a/model-00045-of-00064.safetensors b/model-00045-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3308128b862da878d556d69d3ad1f954e07644dc
--- /dev/null
+++ b/model-00045-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c72fb459b120d2a89c973495386c5c1e2612c761b14b5e62fd443be81858f9d
+size 5180101840
diff --git a/model-00046-of-00064.safetensors b/model-00046-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..facee6540597a7524ed310cea82a64b08c168024
--- /dev/null
+++ b/model-00046-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc894833cc6953e9a46d77363156da89cd440276f4538c6126e215c6fa8a1a7b
+size 5180101864
diff --git a/model-00048-of-00064.safetensors b/model-00048-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..121c023157795ca048755c83a542927e3b2581cc
--- /dev/null
+++ b/model-00048-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a742228093674ba9d6079462d2d52e2a967d4fde66c738533bc6f77468973c03
+size 5180101846
diff --git a/model-00049-of-00064.safetensors b/model-00049-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..95349bffdc5e968147e818d89d439bd0dacfbe0b
--- /dev/null
+++ b/model-00049-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5abf6f19240284f19707ad87da0c65201dc3c2746f0975063fe76431de8d5fa0
+size 5180101864
diff --git a/model-00051-of-00064.safetensors b/model-00051-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3134529e4d885c8ad028cf5c11d7a87d041447bf
--- /dev/null
+++ b/model-00051-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28625b2effa90ed529eca9ae9b07f0b88acaa2c684a6778777db34de8468db68
+size 5180101887
diff --git a/model-00052-of-00064.safetensors b/model-00052-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1ea5c2ef3e33efa28b95f87c5abc04754eeae037
--- /dev/null
+++ b/model-00052-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b1d50b74f5962da4fe3c95fe629354345cd9639d89051337b86141a814b2441
+size 5180101900
diff --git a/model-00053-of-00064.safetensors b/model-00053-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f4878af61a0ae7dcf02ff24987c49add4a9ef113
--- /dev/null
+++ b/model-00053-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51ae731aa358a975c5bd13c2a8818cdbcbec125a7d2df975f89cc26dd0476e8e
+size 5180101900
diff --git a/model-00054-of-00064.safetensors b/model-00054-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d3be4b1b91845dad550cc929685b6eca9a35a727
--- /dev/null
+++ b/model-00054-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79da7b84f09b258979f4d5804fc28ea54d92b87ff508249534423b38fc31b61d
+size 5180101892
diff --git a/model-00055-of-00064.safetensors b/model-00055-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..04a46ab9c5a48ed8144197839f78240b1f3ba59d
--- /dev/null
+++ b/model-00055-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4359e9f4a7626f482181f8107fdfc57bcfee418766cae597215191f38133ab5c
+size 5180101918
diff --git a/model-00056-of-00064.safetensors b/model-00056-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..970b41ab68c2d5b888d419d5ea6924df644a9f79
--- /dev/null
+++ b/model-00056-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa955017e607fc89e55070e3c5d728ce9b5674a367c913ebe0fc180244573650
+size 5180101898
diff --git a/model-00057-of-00064.safetensors b/model-00057-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0f57011adcb32361b0d35a53363672f1cd24f76a
--- /dev/null
+++ b/model-00057-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:710e962c777872c35d89d0341c387f7feaa8184924bf3ee33ffec23b835acfcc
+size 5180101904
diff --git a/model-00058-of-00064.safetensors b/model-00058-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7c220351f18ebdc3c4389c4ba6a1f210f7519b3f
--- /dev/null
+++ b/model-00058-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a38e13d41c2ec787ffa5b7c801a5f1b3f1347f955e82941cbf9c2169d4451eca
+size 5180101920
diff --git a/model-00059-of-00064.safetensors b/model-00059-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8bfbeeb801b3d089b93037bc5f3eeba8e19043f3
--- /dev/null
+++ b/model-00059-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da211dc9cef404a0603e39a6e9c8e8649ceace417ee3d92bfc12236d43f7d897
+size 5180101914
diff --git a/model-00060-of-00064.safetensors b/model-00060-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0a7c153b772654cd9064d5e75ffd4eb71c512079
--- /dev/null
+++ b/model-00060-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3218c4d3fbdf9b431b9183d45d4159b774d36d2065a61a6b78f30da5411da747
+size 5180101924
diff --git a/model-00061-of-00064.safetensors b/model-00061-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9cf32709181bb9a2d2dfd00b04796d54d4d0f02e
--- /dev/null
+++ b/model-00061-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4df3b325686eb354da35bbb1ac0e8c87063cbdd6ba946b45f9b63e05787f4d10
+size 5180101902
diff --git a/model-00062-of-00064.safetensors b/model-00062-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..67ed90b33a105fae2c35bd02e1142fe490a04ce3
--- /dev/null
+++ b/model-00062-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd266da3cb8a4a13626c9e10cc36f6c050669c883c7e9b0bd7ba6f52e51f07ec
+size 5180101914
diff --git a/model-00063-of-00064.safetensors b/model-00063-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8fc77fb40f97fe96f02bb1825615627ffe047449
--- /dev/null
+++ b/model-00063-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eebdd1461013f0cb05de74323e2270c8a8cef6d42b12459072fef78ce6cfe8fb
+size 5180101898
diff --git a/model-00064-of-00064.safetensors b/model-00064-of-00064.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..556daa1306a137ff087c04562b5ec74c0fc35e0d
--- /dev/null
+++ b/model-00064-of-00064.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7ce09361abc7593205284c247d75d186b1ddc6c90d74fb3888dc2f61ba0f211
+size 3833955981
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..665cf18c9d43b8e83c39d2918b03be759405bbc7
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,2913 @@
+{
+ "metadata": {
+ "total_size": 329760800768,
+ "total_parameters": 405853388800
+ },
+ "weight_map": {
+ "lm_head.biases": "model-00064-of-00064.safetensors",
+ "lm_head.scales": "model-00064-of-00064.safetensors",
+ "lm_head.weight": "model-00064-of-00064.safetensors",
+ "model.embed_tokens.biases": "model-00001-of-00064.safetensors",
+ "model.embed_tokens.scales": "model-00001-of-00064.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.down_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.down_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.gate_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.up_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.up_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.k_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.q_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.v_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.down_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.down_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.gate_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.gate_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.up_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.up_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00002-of-00064.safetensors",
+ "model.layers.1.self_attn.k_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.q_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.v_proj.biases": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00064.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00064.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.down_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.down_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.gate_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.gate_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.up_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.up_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.k_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.k_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.o_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.o_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.q_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.q_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.v_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.v_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.100.input_layernorm.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.down_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.down_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.down_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.gate_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.gate_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.gate_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.up_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.up_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.mlp.up_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.post_attention_layernorm.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.k_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.k_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.k_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.o_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.o_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.o_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.q_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.q_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.q_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.v_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.v_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.100.self_attn.v_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.101.input_layernorm.weight": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.down_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.down_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.down_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.gate_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.gate_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.gate_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.up_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.up_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.101.mlp.up_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.101.post_attention_layernorm.weight": "model-00052-of-00064.safetensors",
+ "model.layers.101.self_attn.k_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.k_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.k_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.o_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.o_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.o_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.q_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.q_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.q_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.v_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.v_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.101.self_attn.v_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.102.input_layernorm.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.down_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.down_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.down_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.gate_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.gate_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.gate_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.up_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.up_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.mlp.up_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.post_attention_layernorm.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.k_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.k_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.k_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.o_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.o_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.o_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.q_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.q_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.q_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.v_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.v_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.102.self_attn.v_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.103.input_layernorm.weight": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.down_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.down_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.down_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.gate_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.gate_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.gate_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.up_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.up_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.103.mlp.up_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.103.post_attention_layernorm.weight": "model-00053-of-00064.safetensors",
+ "model.layers.103.self_attn.k_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.k_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.k_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.o_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.o_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.o_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.q_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.q_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.q_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.v_proj.biases": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.v_proj.scales": "model-00052-of-00064.safetensors",
+ "model.layers.103.self_attn.v_proj.weight": "model-00052-of-00064.safetensors",
+ "model.layers.104.input_layernorm.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.down_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.down_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.down_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.gate_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.gate_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.gate_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.up_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.up_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.mlp.up_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.post_attention_layernorm.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.k_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.k_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.k_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.o_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.o_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.o_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.q_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.q_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.q_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.v_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.v_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.104.self_attn.v_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.105.input_layernorm.weight": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.down_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.down_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.down_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.gate_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.gate_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.gate_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.up_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.up_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.105.mlp.up_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.105.post_attention_layernorm.weight": "model-00054-of-00064.safetensors",
+ "model.layers.105.self_attn.k_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.k_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.k_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.o_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.o_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.o_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.q_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.q_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.q_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.v_proj.biases": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.v_proj.scales": "model-00053-of-00064.safetensors",
+ "model.layers.105.self_attn.v_proj.weight": "model-00053-of-00064.safetensors",
+ "model.layers.106.input_layernorm.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.down_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.down_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.down_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.gate_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.gate_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.gate_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.up_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.up_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.mlp.up_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.post_attention_layernorm.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.k_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.k_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.k_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.o_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.o_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.o_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.q_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.q_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.q_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.v_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.v_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.106.self_attn.v_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.107.input_layernorm.weight": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.down_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.down_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.down_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.gate_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.gate_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.gate_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.up_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.up_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.107.mlp.up_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.107.post_attention_layernorm.weight": "model-00055-of-00064.safetensors",
+ "model.layers.107.self_attn.k_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.k_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.k_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.o_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.o_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.o_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.q_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.q_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.q_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.v_proj.biases": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.v_proj.scales": "model-00054-of-00064.safetensors",
+ "model.layers.107.self_attn.v_proj.weight": "model-00054-of-00064.safetensors",
+ "model.layers.108.input_layernorm.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.down_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.down_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.down_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.gate_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.gate_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.gate_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.up_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.up_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.mlp.up_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.post_attention_layernorm.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.k_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.k_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.k_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.o_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.o_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.o_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.q_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.q_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.q_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.v_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.v_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.108.self_attn.v_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.109.input_layernorm.weight": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.down_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.down_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.down_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.gate_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.gate_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.gate_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.up_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.up_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.109.mlp.up_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.109.post_attention_layernorm.weight": "model-00056-of-00064.safetensors",
+ "model.layers.109.self_attn.k_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.k_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.k_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.o_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.o_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.o_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.q_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.q_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.q_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.v_proj.biases": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.v_proj.scales": "model-00055-of-00064.safetensors",
+ "model.layers.109.self_attn.v_proj.weight": "model-00055-of-00064.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.down_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.down_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.gate_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.gate_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.up_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.up_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00007-of-00064.safetensors",
+ "model.layers.11.self_attn.k_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.k_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.o_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.o_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.q_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.q_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.v_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.v_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.110.input_layernorm.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.down_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.down_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.down_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.gate_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.gate_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.gate_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.up_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.up_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.mlp.up_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.post_attention_layernorm.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.k_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.k_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.k_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.o_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.o_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.o_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.q_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.q_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.q_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.v_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.v_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.110.self_attn.v_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.111.input_layernorm.weight": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.down_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.down_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.down_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.gate_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.gate_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.gate_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.up_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.up_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.111.mlp.up_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.111.post_attention_layernorm.weight": "model-00057-of-00064.safetensors",
+ "model.layers.111.self_attn.k_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.k_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.k_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.o_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.o_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.o_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.q_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.q_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.q_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.v_proj.biases": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.v_proj.scales": "model-00056-of-00064.safetensors",
+ "model.layers.111.self_attn.v_proj.weight": "model-00056-of-00064.safetensors",
+ "model.layers.112.input_layernorm.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.down_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.down_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.down_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.gate_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.gate_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.gate_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.up_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.up_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.mlp.up_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.post_attention_layernorm.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.k_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.k_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.k_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.o_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.o_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.o_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.q_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.q_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.q_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.v_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.v_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.112.self_attn.v_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.113.input_layernorm.weight": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.down_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.down_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.down_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.gate_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.gate_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.gate_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.up_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.up_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.113.mlp.up_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.113.post_attention_layernorm.weight": "model-00058-of-00064.safetensors",
+ "model.layers.113.self_attn.k_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.k_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.k_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.o_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.o_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.o_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.q_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.q_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.q_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.v_proj.biases": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.v_proj.scales": "model-00057-of-00064.safetensors",
+ "model.layers.113.self_attn.v_proj.weight": "model-00057-of-00064.safetensors",
+ "model.layers.114.input_layernorm.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.down_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.down_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.down_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.gate_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.gate_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.gate_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.up_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.up_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.mlp.up_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.post_attention_layernorm.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.k_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.k_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.k_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.o_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.o_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.o_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.q_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.q_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.q_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.v_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.v_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.114.self_attn.v_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.115.input_layernorm.weight": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.down_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.down_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.down_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.gate_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.gate_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.gate_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.up_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.up_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.115.mlp.up_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.115.post_attention_layernorm.weight": "model-00059-of-00064.safetensors",
+ "model.layers.115.self_attn.k_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.k_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.k_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.o_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.o_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.o_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.q_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.q_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.q_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.v_proj.biases": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.v_proj.scales": "model-00058-of-00064.safetensors",
+ "model.layers.115.self_attn.v_proj.weight": "model-00058-of-00064.safetensors",
+ "model.layers.116.input_layernorm.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.down_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.down_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.down_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.gate_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.gate_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.gate_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.up_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.up_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.mlp.up_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.post_attention_layernorm.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.k_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.k_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.k_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.o_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.o_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.o_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.q_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.q_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.q_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.v_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.v_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.116.self_attn.v_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.117.input_layernorm.weight": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.down_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.down_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.down_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.gate_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.gate_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.gate_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.up_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.up_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.117.mlp.up_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.117.post_attention_layernorm.weight": "model-00060-of-00064.safetensors",
+ "model.layers.117.self_attn.k_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.k_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.k_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.o_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.o_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.o_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.q_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.q_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.q_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.v_proj.biases": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.v_proj.scales": "model-00059-of-00064.safetensors",
+ "model.layers.117.self_attn.v_proj.weight": "model-00059-of-00064.safetensors",
+ "model.layers.118.input_layernorm.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.down_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.down_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.down_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.gate_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.gate_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.gate_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.up_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.up_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.mlp.up_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.post_attention_layernorm.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.k_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.k_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.k_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.o_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.o_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.o_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.q_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.q_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.q_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.v_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.v_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.118.self_attn.v_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.119.input_layernorm.weight": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.down_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.down_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.down_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.gate_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.gate_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.gate_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.up_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.up_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.119.mlp.up_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.119.post_attention_layernorm.weight": "model-00061-of-00064.safetensors",
+ "model.layers.119.self_attn.k_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.k_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.k_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.o_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.o_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.o_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.q_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.q_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.q_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.v_proj.biases": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.v_proj.scales": "model-00060-of-00064.safetensors",
+ "model.layers.119.self_attn.v_proj.weight": "model-00060-of-00064.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.down_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.down_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.gate_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.gate_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.up_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.up_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.k_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.k_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.o_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.o_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.q_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.q_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.v_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.v_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.120.input_layernorm.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.down_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.down_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.down_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.gate_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.gate_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.gate_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.up_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.up_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.mlp.up_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.post_attention_layernorm.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.k_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.k_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.k_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.o_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.o_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.o_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.q_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.q_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.q_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.v_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.v_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.120.self_attn.v_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.121.input_layernorm.weight": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.down_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.down_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.down_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.gate_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.gate_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.gate_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.up_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.up_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.121.mlp.up_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.121.post_attention_layernorm.weight": "model-00062-of-00064.safetensors",
+ "model.layers.121.self_attn.k_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.k_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.k_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.o_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.o_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.o_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.q_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.q_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.q_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.v_proj.biases": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.v_proj.scales": "model-00061-of-00064.safetensors",
+ "model.layers.121.self_attn.v_proj.weight": "model-00061-of-00064.safetensors",
+ "model.layers.122.input_layernorm.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.down_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.down_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.down_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.gate_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.gate_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.gate_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.up_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.up_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.mlp.up_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.post_attention_layernorm.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.k_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.k_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.k_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.o_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.o_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.o_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.q_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.q_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.q_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.v_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.v_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.122.self_attn.v_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.123.input_layernorm.weight": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.down_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.down_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.down_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.gate_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.gate_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.gate_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.up_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.up_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.123.mlp.up_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.123.post_attention_layernorm.weight": "model-00063-of-00064.safetensors",
+ "model.layers.123.self_attn.k_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.k_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.k_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.o_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.o_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.o_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.q_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.q_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.q_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.v_proj.biases": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.v_proj.scales": "model-00062-of-00064.safetensors",
+ "model.layers.123.self_attn.v_proj.weight": "model-00062-of-00064.safetensors",
+ "model.layers.124.input_layernorm.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.down_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.down_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.down_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.gate_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.gate_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.gate_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.up_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.up_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.mlp.up_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.post_attention_layernorm.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.k_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.k_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.k_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.o_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.o_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.o_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.q_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.q_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.q_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.v_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.v_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.124.self_attn.v_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.125.input_layernorm.weight": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.down_proj.biases": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.down_proj.scales": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.down_proj.weight": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.gate_proj.biases": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.gate_proj.scales": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.gate_proj.weight": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.up_proj.biases": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.up_proj.scales": "model-00064-of-00064.safetensors",
+ "model.layers.125.mlp.up_proj.weight": "model-00064-of-00064.safetensors",
+ "model.layers.125.post_attention_layernorm.weight": "model-00064-of-00064.safetensors",
+ "model.layers.125.self_attn.k_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.k_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.k_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.o_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.o_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.o_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.q_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.q_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.q_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.v_proj.biases": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.v_proj.scales": "model-00063-of-00064.safetensors",
+ "model.layers.125.self_attn.v_proj.weight": "model-00063-of-00064.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.down_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.down_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.gate_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.gate_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.up_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.up_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00008-of-00064.safetensors",
+ "model.layers.13.self_attn.k_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.k_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.o_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.o_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.q_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.q_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.v_proj.biases": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.v_proj.scales": "model-00007-of-00064.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00007-of-00064.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.down_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.down_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.gate_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.gate_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.up_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.up_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.k_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.k_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.o_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.o_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.q_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.q_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.v_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.v_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.down_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.down_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.gate_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.gate_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.up_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.up_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00009-of-00064.safetensors",
+ "model.layers.15.self_attn.k_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.k_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.o_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.o_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.q_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.q_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.v_proj.biases": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.v_proj.scales": "model-00008-of-00064.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00008-of-00064.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.down_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.down_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.gate_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.gate_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.up_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.up_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.k_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.k_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.o_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.o_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.q_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.q_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.v_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.v_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.down_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.down_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.gate_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.gate_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.up_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.up_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00010-of-00064.safetensors",
+ "model.layers.17.self_attn.k_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.k_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.o_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.o_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.q_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.q_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.v_proj.biases": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.v_proj.scales": "model-00009-of-00064.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00009-of-00064.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.down_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.down_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.gate_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.gate_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.up_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.up_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.k_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.k_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.o_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.o_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.q_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.q_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.v_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.v_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.down_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.down_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.gate_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.gate_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.up_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.up_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00011-of-00064.safetensors",
+ "model.layers.19.self_attn.k_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.k_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.o_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.o_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.q_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.q_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.v_proj.biases": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.v_proj.scales": "model-00010-of-00064.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00010-of-00064.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.down_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.down_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.gate_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.gate_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.up_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.up_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.k_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.k_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.o_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.o_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.q_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.q_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.v_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.v_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.down_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.down_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.gate_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.gate_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.up_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.up_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.k_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.k_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.o_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.o_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.q_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.q_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.v_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.v_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.down_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.down_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.gate_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.gate_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.up_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.up_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00012-of-00064.safetensors",
+ "model.layers.21.self_attn.k_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.k_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.o_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.o_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.q_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.q_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.v_proj.biases": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.v_proj.scales": "model-00011-of-00064.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00011-of-00064.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.down_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.down_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.gate_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.gate_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.up_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.up_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.k_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.k_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.o_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.o_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.q_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.q_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.v_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.v_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.down_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.down_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.gate_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.gate_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.up_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.up_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00013-of-00064.safetensors",
+ "model.layers.23.self_attn.k_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.k_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.o_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.o_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.q_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.q_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.v_proj.biases": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.v_proj.scales": "model-00012-of-00064.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00012-of-00064.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.down_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.down_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.gate_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.gate_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.up_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.up_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.k_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.k_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.o_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.o_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.q_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.q_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.v_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.v_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.down_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.down_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.gate_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.gate_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.up_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.up_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00014-of-00064.safetensors",
+ "model.layers.25.self_attn.k_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.k_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.o_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.o_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.q_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.q_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.v_proj.biases": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.v_proj.scales": "model-00013-of-00064.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00013-of-00064.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.down_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.down_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.gate_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.gate_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.up_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.up_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.k_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.k_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.o_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.o_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.q_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.q_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.v_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.v_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.down_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.down_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.gate_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.gate_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.up_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.up_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00015-of-00064.safetensors",
+ "model.layers.27.self_attn.k_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.k_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.o_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.o_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.q_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.q_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.v_proj.biases": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.v_proj.scales": "model-00014-of-00064.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00014-of-00064.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.down_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.down_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.gate_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.gate_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.up_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.up_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.k_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.k_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.o_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.o_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.q_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.q_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.v_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.v_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.down_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.down_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.gate_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.gate_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.up_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.up_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00016-of-00064.safetensors",
+ "model.layers.29.self_attn.k_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.k_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.o_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.o_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.q_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.q_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.v_proj.biases": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.v_proj.scales": "model-00015-of-00064.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00015-of-00064.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.down_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.down_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.gate_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.gate_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.up_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.up_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00003-of-00064.safetensors",
+ "model.layers.3.self_attn.k_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.k_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.o_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.o_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.q_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.q_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.v_proj.biases": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.v_proj.scales": "model-00002-of-00064.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00002-of-00064.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.down_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.down_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.gate_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.gate_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.up_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.up_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.k_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.k_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.o_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.o_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.q_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.q_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.v_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.v_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.down_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.down_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.gate_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.gate_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.up_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.up_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00017-of-00064.safetensors",
+ "model.layers.31.self_attn.k_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.k_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.o_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.o_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.q_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.q_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.v_proj.biases": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.v_proj.scales": "model-00016-of-00064.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00016-of-00064.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.down_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.down_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.gate_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.gate_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.up_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.up_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.k_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.k_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.o_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.o_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.q_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.q_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.v_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.v_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.down_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.down_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.gate_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.gate_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.up_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.up_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00018-of-00064.safetensors",
+ "model.layers.33.self_attn.k_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.k_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.o_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.o_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.q_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.q_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.v_proj.biases": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.v_proj.scales": "model-00017-of-00064.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00017-of-00064.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.down_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.down_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.gate_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.gate_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.up_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.up_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.k_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.k_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.o_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.o_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.q_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.q_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.v_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.v_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.down_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.down_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.gate_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.gate_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.up_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.up_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00019-of-00064.safetensors",
+ "model.layers.35.self_attn.k_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.k_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.o_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.o_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.q_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.q_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.v_proj.biases": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.v_proj.scales": "model-00018-of-00064.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00018-of-00064.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.down_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.down_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.down_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.gate_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.gate_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.gate_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.up_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.up_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.mlp.up_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.k_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.k_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.o_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.o_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.q_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.q_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.v_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.v_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.down_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.down_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.down_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.gate_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.gate_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.gate_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.up_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.up_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.37.mlp.up_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00020-of-00064.safetensors",
+ "model.layers.37.self_attn.k_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.k_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.o_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.o_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.q_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.q_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.v_proj.biases": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.v_proj.scales": "model-00019-of-00064.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00019-of-00064.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.down_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.down_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.down_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.gate_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.gate_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.gate_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.up_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.up_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.mlp.up_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.k_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.k_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.o_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.o_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.q_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.q_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.v_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.v_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.down_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.down_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.down_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.gate_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.gate_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.gate_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.up_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.up_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.39.mlp.up_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00021-of-00064.safetensors",
+ "model.layers.39.self_attn.k_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.k_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.o_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.o_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.q_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.q_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.v_proj.biases": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.v_proj.scales": "model-00020-of-00064.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00020-of-00064.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.down_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.down_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.gate_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.gate_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.up_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.up_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.k_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.k_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.o_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.o_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.q_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.q_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.v_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.v_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.40.input_layernorm.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.down_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.down_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.down_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.gate_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.gate_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.gate_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.up_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.up_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.mlp.up_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.post_attention_layernorm.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.k_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.k_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.k_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.o_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.o_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.o_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.q_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.q_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.q_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.v_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.v_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.40.self_attn.v_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.41.input_layernorm.weight": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.down_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.down_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.down_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.gate_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.gate_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.gate_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.up_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.up_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.41.mlp.up_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.41.post_attention_layernorm.weight": "model-00022-of-00064.safetensors",
+ "model.layers.41.self_attn.k_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.k_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.k_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.o_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.o_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.o_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.q_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.q_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.q_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.v_proj.biases": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.v_proj.scales": "model-00021-of-00064.safetensors",
+ "model.layers.41.self_attn.v_proj.weight": "model-00021-of-00064.safetensors",
+ "model.layers.42.input_layernorm.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.down_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.down_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.down_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.gate_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.gate_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.gate_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.up_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.up_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.mlp.up_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.post_attention_layernorm.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.k_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.k_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.k_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.o_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.o_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.o_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.q_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.q_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.q_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.v_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.v_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.42.self_attn.v_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.43.input_layernorm.weight": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.down_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.down_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.down_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.gate_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.gate_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.gate_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.up_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.up_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.43.mlp.up_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.43.post_attention_layernorm.weight": "model-00023-of-00064.safetensors",
+ "model.layers.43.self_attn.k_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.k_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.k_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.o_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.o_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.o_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.q_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.q_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.q_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.v_proj.biases": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.v_proj.scales": "model-00022-of-00064.safetensors",
+ "model.layers.43.self_attn.v_proj.weight": "model-00022-of-00064.safetensors",
+ "model.layers.44.input_layernorm.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.down_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.down_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.down_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.gate_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.gate_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.gate_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.up_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.up_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.mlp.up_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.post_attention_layernorm.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.k_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.k_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.k_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.o_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.o_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.o_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.q_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.q_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.q_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.v_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.v_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.44.self_attn.v_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.45.input_layernorm.weight": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.down_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.down_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.down_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.gate_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.gate_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.gate_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.up_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.up_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.45.mlp.up_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.45.post_attention_layernorm.weight": "model-00024-of-00064.safetensors",
+ "model.layers.45.self_attn.k_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.k_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.k_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.o_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.o_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.o_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.q_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.q_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.q_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.v_proj.biases": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.v_proj.scales": "model-00023-of-00064.safetensors",
+ "model.layers.45.self_attn.v_proj.weight": "model-00023-of-00064.safetensors",
+ "model.layers.46.input_layernorm.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.down_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.down_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.down_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.gate_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.gate_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.gate_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.up_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.up_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.mlp.up_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.post_attention_layernorm.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.k_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.k_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.k_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.o_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.o_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.o_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.q_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.q_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.q_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.v_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.v_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.46.self_attn.v_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.47.input_layernorm.weight": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.down_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.down_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.down_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.gate_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.gate_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.gate_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.up_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.up_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.47.mlp.up_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.47.post_attention_layernorm.weight": "model-00025-of-00064.safetensors",
+ "model.layers.47.self_attn.k_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.k_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.k_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.o_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.o_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.o_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.q_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.q_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.q_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.v_proj.biases": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.v_proj.scales": "model-00024-of-00064.safetensors",
+ "model.layers.47.self_attn.v_proj.weight": "model-00024-of-00064.safetensors",
+ "model.layers.48.input_layernorm.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.down_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.down_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.down_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.gate_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.gate_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.gate_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.up_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.up_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.mlp.up_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.post_attention_layernorm.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.k_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.k_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.k_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.o_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.o_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.o_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.q_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.q_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.q_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.v_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.v_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.48.self_attn.v_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.49.input_layernorm.weight": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.down_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.down_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.down_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.gate_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.gate_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.gate_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.up_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.up_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.49.mlp.up_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.49.post_attention_layernorm.weight": "model-00026-of-00064.safetensors",
+ "model.layers.49.self_attn.k_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.k_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.k_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.o_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.o_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.o_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.q_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.q_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.q_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.v_proj.biases": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.v_proj.scales": "model-00025-of-00064.safetensors",
+ "model.layers.49.self_attn.v_proj.weight": "model-00025-of-00064.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.down_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.down_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.gate_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.gate_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.up_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.up_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00004-of-00064.safetensors",
+ "model.layers.5.self_attn.k_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.k_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.o_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.o_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.q_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.q_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.v_proj.biases": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.v_proj.scales": "model-00003-of-00064.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00003-of-00064.safetensors",
+ "model.layers.50.input_layernorm.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.down_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.down_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.down_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.gate_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.gate_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.gate_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.up_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.up_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.mlp.up_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.post_attention_layernorm.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.k_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.k_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.k_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.o_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.o_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.o_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.q_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.q_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.q_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.v_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.v_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.50.self_attn.v_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.51.input_layernorm.weight": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.down_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.down_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.down_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.gate_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.gate_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.gate_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.up_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.up_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.51.mlp.up_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.51.post_attention_layernorm.weight": "model-00027-of-00064.safetensors",
+ "model.layers.51.self_attn.k_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.k_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.k_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.o_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.o_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.o_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.q_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.q_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.q_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.v_proj.biases": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.v_proj.scales": "model-00026-of-00064.safetensors",
+ "model.layers.51.self_attn.v_proj.weight": "model-00026-of-00064.safetensors",
+ "model.layers.52.input_layernorm.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.down_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.down_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.down_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.gate_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.gate_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.gate_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.up_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.up_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.mlp.up_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.post_attention_layernorm.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.k_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.k_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.k_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.o_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.o_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.o_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.q_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.q_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.q_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.v_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.v_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.52.self_attn.v_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.53.input_layernorm.weight": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.down_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.down_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.down_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.gate_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.gate_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.gate_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.up_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.up_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.53.mlp.up_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.53.post_attention_layernorm.weight": "model-00028-of-00064.safetensors",
+ "model.layers.53.self_attn.k_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.k_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.k_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.o_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.o_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.o_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.q_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.q_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.q_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.v_proj.biases": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.v_proj.scales": "model-00027-of-00064.safetensors",
+ "model.layers.53.self_attn.v_proj.weight": "model-00027-of-00064.safetensors",
+ "model.layers.54.input_layernorm.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.down_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.down_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.down_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.gate_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.gate_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.gate_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.up_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.up_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.mlp.up_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.post_attention_layernorm.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.k_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.k_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.k_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.o_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.o_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.o_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.q_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.q_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.q_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.v_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.v_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.54.self_attn.v_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.55.input_layernorm.weight": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.down_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.down_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.down_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.gate_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.gate_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.gate_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.up_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.up_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.55.mlp.up_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.55.post_attention_layernorm.weight": "model-00029-of-00064.safetensors",
+ "model.layers.55.self_attn.k_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.k_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.k_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.o_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.o_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.o_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.q_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.q_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.q_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.v_proj.biases": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.v_proj.scales": "model-00028-of-00064.safetensors",
+ "model.layers.55.self_attn.v_proj.weight": "model-00028-of-00064.safetensors",
+ "model.layers.56.input_layernorm.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.down_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.down_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.down_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.gate_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.gate_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.gate_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.up_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.up_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.mlp.up_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.post_attention_layernorm.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.k_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.k_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.k_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.o_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.o_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.o_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.q_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.q_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.q_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.v_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.v_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.56.self_attn.v_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.57.input_layernorm.weight": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.down_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.down_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.down_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.gate_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.gate_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.gate_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.up_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.up_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.57.mlp.up_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.57.post_attention_layernorm.weight": "model-00030-of-00064.safetensors",
+ "model.layers.57.self_attn.k_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.k_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.k_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.o_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.o_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.o_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.q_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.q_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.q_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.v_proj.biases": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.v_proj.scales": "model-00029-of-00064.safetensors",
+ "model.layers.57.self_attn.v_proj.weight": "model-00029-of-00064.safetensors",
+ "model.layers.58.input_layernorm.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.down_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.down_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.down_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.gate_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.gate_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.gate_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.up_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.up_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.mlp.up_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.post_attention_layernorm.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.k_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.k_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.k_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.o_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.o_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.o_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.q_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.q_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.q_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.v_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.v_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.58.self_attn.v_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.59.input_layernorm.weight": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.down_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.down_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.down_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.gate_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.gate_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.gate_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.up_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.up_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.59.mlp.up_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.59.post_attention_layernorm.weight": "model-00031-of-00064.safetensors",
+ "model.layers.59.self_attn.k_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.k_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.k_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.o_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.o_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.o_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.q_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.q_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.q_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.v_proj.biases": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.v_proj.scales": "model-00030-of-00064.safetensors",
+ "model.layers.59.self_attn.v_proj.weight": "model-00030-of-00064.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.down_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.down_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.gate_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.gate_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.up_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.up_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.k_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.k_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.o_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.o_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.q_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.q_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.v_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.v_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.60.input_layernorm.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.down_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.down_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.down_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.gate_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.gate_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.gate_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.up_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.up_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.mlp.up_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.post_attention_layernorm.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.k_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.k_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.k_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.o_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.o_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.o_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.q_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.q_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.q_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.v_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.v_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.60.self_attn.v_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.61.input_layernorm.weight": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.down_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.down_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.down_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.gate_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.gate_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.gate_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.up_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.up_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.61.mlp.up_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.61.post_attention_layernorm.weight": "model-00032-of-00064.safetensors",
+ "model.layers.61.self_attn.k_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.k_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.k_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.o_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.o_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.o_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.q_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.q_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.q_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.v_proj.biases": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.v_proj.scales": "model-00031-of-00064.safetensors",
+ "model.layers.61.self_attn.v_proj.weight": "model-00031-of-00064.safetensors",
+ "model.layers.62.input_layernorm.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.down_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.down_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.down_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.gate_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.gate_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.gate_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.up_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.up_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.mlp.up_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.post_attention_layernorm.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.k_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.k_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.k_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.o_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.o_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.o_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.q_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.q_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.q_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.v_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.v_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.62.self_attn.v_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.63.input_layernorm.weight": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.down_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.down_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.down_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.gate_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.gate_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.gate_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.up_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.up_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.63.mlp.up_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.63.post_attention_layernorm.weight": "model-00033-of-00064.safetensors",
+ "model.layers.63.self_attn.k_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.k_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.k_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.o_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.o_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.o_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.q_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.q_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.q_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.v_proj.biases": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.v_proj.scales": "model-00032-of-00064.safetensors",
+ "model.layers.63.self_attn.v_proj.weight": "model-00032-of-00064.safetensors",
+ "model.layers.64.input_layernorm.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.down_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.down_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.down_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.gate_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.gate_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.gate_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.up_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.up_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.mlp.up_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.post_attention_layernorm.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.k_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.k_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.k_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.o_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.o_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.o_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.q_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.q_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.q_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.v_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.v_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.64.self_attn.v_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.65.input_layernorm.weight": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.down_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.down_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.down_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.gate_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.gate_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.gate_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.up_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.up_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.65.mlp.up_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.65.post_attention_layernorm.weight": "model-00034-of-00064.safetensors",
+ "model.layers.65.self_attn.k_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.k_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.k_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.o_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.o_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.o_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.q_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.q_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.q_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.v_proj.biases": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.v_proj.scales": "model-00033-of-00064.safetensors",
+ "model.layers.65.self_attn.v_proj.weight": "model-00033-of-00064.safetensors",
+ "model.layers.66.input_layernorm.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.down_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.down_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.down_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.gate_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.gate_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.gate_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.up_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.up_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.mlp.up_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.post_attention_layernorm.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.k_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.k_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.k_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.o_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.o_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.o_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.q_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.q_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.q_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.v_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.v_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.66.self_attn.v_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.67.input_layernorm.weight": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.down_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.down_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.down_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.gate_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.gate_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.gate_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.up_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.up_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.67.mlp.up_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.67.post_attention_layernorm.weight": "model-00035-of-00064.safetensors",
+ "model.layers.67.self_attn.k_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.k_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.k_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.o_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.o_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.o_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.q_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.q_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.q_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.v_proj.biases": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.v_proj.scales": "model-00034-of-00064.safetensors",
+ "model.layers.67.self_attn.v_proj.weight": "model-00034-of-00064.safetensors",
+ "model.layers.68.input_layernorm.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.down_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.down_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.down_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.gate_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.gate_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.gate_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.up_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.up_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.mlp.up_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.post_attention_layernorm.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.k_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.k_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.k_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.o_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.o_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.o_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.q_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.q_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.q_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.v_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.v_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.68.self_attn.v_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.69.input_layernorm.weight": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.down_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.down_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.down_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.gate_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.gate_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.gate_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.up_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.up_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.69.mlp.up_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.69.post_attention_layernorm.weight": "model-00036-of-00064.safetensors",
+ "model.layers.69.self_attn.k_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.k_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.k_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.o_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.o_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.o_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.q_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.q_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.q_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.v_proj.biases": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.v_proj.scales": "model-00035-of-00064.safetensors",
+ "model.layers.69.self_attn.v_proj.weight": "model-00035-of-00064.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.down_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.down_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.gate_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.gate_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.up_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.up_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00005-of-00064.safetensors",
+ "model.layers.7.self_attn.k_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.k_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.o_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.o_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.q_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.q_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.v_proj.biases": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.v_proj.scales": "model-00004-of-00064.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00004-of-00064.safetensors",
+ "model.layers.70.input_layernorm.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.down_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.down_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.down_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.gate_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.gate_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.gate_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.up_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.up_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.mlp.up_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.post_attention_layernorm.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.k_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.k_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.k_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.o_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.o_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.o_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.q_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.q_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.q_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.v_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.v_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.70.self_attn.v_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.71.input_layernorm.weight": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.down_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.down_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.down_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.gate_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.gate_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.gate_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.up_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.up_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.71.mlp.up_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.71.post_attention_layernorm.weight": "model-00037-of-00064.safetensors",
+ "model.layers.71.self_attn.k_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.k_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.k_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.o_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.o_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.o_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.q_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.q_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.q_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.v_proj.biases": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.v_proj.scales": "model-00036-of-00064.safetensors",
+ "model.layers.71.self_attn.v_proj.weight": "model-00036-of-00064.safetensors",
+ "model.layers.72.input_layernorm.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.down_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.down_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.down_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.gate_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.gate_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.gate_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.up_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.up_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.mlp.up_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.post_attention_layernorm.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.k_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.k_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.k_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.o_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.o_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.o_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.q_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.q_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.q_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.v_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.v_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.72.self_attn.v_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.73.input_layernorm.weight": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.down_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.down_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.down_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.gate_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.gate_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.gate_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.up_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.up_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.73.mlp.up_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.73.post_attention_layernorm.weight": "model-00038-of-00064.safetensors",
+ "model.layers.73.self_attn.k_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.k_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.k_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.o_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.o_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.o_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.q_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.q_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.q_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.v_proj.biases": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.v_proj.scales": "model-00037-of-00064.safetensors",
+ "model.layers.73.self_attn.v_proj.weight": "model-00037-of-00064.safetensors",
+ "model.layers.74.input_layernorm.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.down_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.down_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.down_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.gate_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.gate_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.gate_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.up_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.up_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.mlp.up_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.post_attention_layernorm.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.k_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.k_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.k_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.o_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.o_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.o_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.q_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.q_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.q_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.v_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.v_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.74.self_attn.v_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.75.input_layernorm.weight": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.down_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.down_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.down_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.gate_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.gate_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.gate_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.up_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.up_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.75.mlp.up_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.75.post_attention_layernorm.weight": "model-00039-of-00064.safetensors",
+ "model.layers.75.self_attn.k_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.k_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.k_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.o_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.o_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.o_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.q_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.q_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.q_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.v_proj.biases": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.v_proj.scales": "model-00038-of-00064.safetensors",
+ "model.layers.75.self_attn.v_proj.weight": "model-00038-of-00064.safetensors",
+ "model.layers.76.input_layernorm.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.down_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.down_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.down_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.gate_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.gate_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.gate_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.up_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.up_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.mlp.up_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.post_attention_layernorm.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.k_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.k_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.k_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.o_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.o_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.o_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.q_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.q_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.q_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.v_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.v_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.76.self_attn.v_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.77.input_layernorm.weight": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.down_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.down_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.down_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.gate_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.gate_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.gate_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.up_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.up_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.77.mlp.up_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.77.post_attention_layernorm.weight": "model-00040-of-00064.safetensors",
+ "model.layers.77.self_attn.k_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.k_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.k_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.o_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.o_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.o_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.q_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.q_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.q_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.v_proj.biases": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.v_proj.scales": "model-00039-of-00064.safetensors",
+ "model.layers.77.self_attn.v_proj.weight": "model-00039-of-00064.safetensors",
+ "model.layers.78.input_layernorm.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.down_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.down_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.down_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.gate_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.gate_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.gate_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.up_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.up_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.mlp.up_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.post_attention_layernorm.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.k_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.k_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.k_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.o_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.o_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.o_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.q_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.q_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.q_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.v_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.v_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.78.self_attn.v_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.79.input_layernorm.weight": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.down_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.down_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.down_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.gate_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.gate_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.gate_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.up_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.up_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.79.mlp.up_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.79.post_attention_layernorm.weight": "model-00041-of-00064.safetensors",
+ "model.layers.79.self_attn.k_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.k_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.k_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.o_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.o_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.o_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.q_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.q_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.q_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.v_proj.biases": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.v_proj.scales": "model-00040-of-00064.safetensors",
+ "model.layers.79.self_attn.v_proj.weight": "model-00040-of-00064.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.down_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.down_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.gate_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.gate_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.up_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.up_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.k_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.k_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.o_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.o_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.q_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.q_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.v_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.v_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.80.input_layernorm.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.down_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.down_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.down_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.gate_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.gate_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.gate_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.up_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.up_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.mlp.up_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.post_attention_layernorm.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.k_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.k_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.k_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.o_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.o_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.o_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.q_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.q_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.q_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.v_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.v_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.80.self_attn.v_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.81.input_layernorm.weight": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.down_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.down_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.down_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.gate_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.gate_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.gate_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.up_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.up_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.81.mlp.up_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.81.post_attention_layernorm.weight": "model-00042-of-00064.safetensors",
+ "model.layers.81.self_attn.k_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.k_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.k_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.o_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.o_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.o_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.q_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.q_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.q_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.v_proj.biases": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.v_proj.scales": "model-00041-of-00064.safetensors",
+ "model.layers.81.self_attn.v_proj.weight": "model-00041-of-00064.safetensors",
+ "model.layers.82.input_layernorm.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.down_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.down_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.down_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.gate_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.gate_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.gate_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.up_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.up_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.mlp.up_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.post_attention_layernorm.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.k_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.k_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.k_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.o_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.o_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.o_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.q_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.q_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.q_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.v_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.v_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.82.self_attn.v_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.83.input_layernorm.weight": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.down_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.down_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.down_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.gate_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.gate_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.gate_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.up_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.up_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.83.mlp.up_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.83.post_attention_layernorm.weight": "model-00043-of-00064.safetensors",
+ "model.layers.83.self_attn.k_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.k_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.k_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.o_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.o_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.o_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.q_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.q_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.q_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.v_proj.biases": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.v_proj.scales": "model-00042-of-00064.safetensors",
+ "model.layers.83.self_attn.v_proj.weight": "model-00042-of-00064.safetensors",
+ "model.layers.84.input_layernorm.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.down_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.down_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.down_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.gate_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.gate_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.gate_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.up_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.up_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.mlp.up_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.post_attention_layernorm.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.k_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.k_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.k_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.o_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.o_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.o_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.q_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.q_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.q_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.v_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.v_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.84.self_attn.v_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.85.input_layernorm.weight": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.down_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.down_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.down_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.gate_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.gate_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.gate_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.up_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.up_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.85.mlp.up_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.85.post_attention_layernorm.weight": "model-00044-of-00064.safetensors",
+ "model.layers.85.self_attn.k_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.k_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.k_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.o_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.o_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.o_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.q_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.q_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.q_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.v_proj.biases": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.v_proj.scales": "model-00043-of-00064.safetensors",
+ "model.layers.85.self_attn.v_proj.weight": "model-00043-of-00064.safetensors",
+ "model.layers.86.input_layernorm.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.down_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.down_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.down_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.gate_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.gate_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.gate_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.up_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.up_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.mlp.up_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.post_attention_layernorm.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.k_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.k_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.k_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.o_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.o_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.o_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.q_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.q_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.q_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.v_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.v_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.86.self_attn.v_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.87.input_layernorm.weight": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.down_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.down_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.down_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.gate_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.gate_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.gate_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.up_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.up_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.87.mlp.up_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.87.post_attention_layernorm.weight": "model-00045-of-00064.safetensors",
+ "model.layers.87.self_attn.k_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.k_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.k_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.o_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.o_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.o_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.q_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.q_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.q_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.v_proj.biases": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.v_proj.scales": "model-00044-of-00064.safetensors",
+ "model.layers.87.self_attn.v_proj.weight": "model-00044-of-00064.safetensors",
+ "model.layers.88.input_layernorm.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.down_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.down_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.down_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.gate_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.gate_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.gate_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.up_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.up_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.mlp.up_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.post_attention_layernorm.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.k_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.k_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.k_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.o_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.o_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.o_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.q_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.q_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.q_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.v_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.v_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.88.self_attn.v_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.89.input_layernorm.weight": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.down_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.down_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.down_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.gate_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.gate_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.gate_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.up_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.up_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.89.mlp.up_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.89.post_attention_layernorm.weight": "model-00046-of-00064.safetensors",
+ "model.layers.89.self_attn.k_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.k_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.k_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.o_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.o_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.o_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.q_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.q_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.q_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.v_proj.biases": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.v_proj.scales": "model-00045-of-00064.safetensors",
+ "model.layers.89.self_attn.v_proj.weight": "model-00045-of-00064.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.down_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.down_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.gate_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.gate_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.up_proj.biases": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.up_proj.scales": "model-00006-of-00064.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00006-of-00064.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00006-of-00064.safetensors",
+ "model.layers.9.self_attn.k_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.k_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.o_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.o_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.q_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.q_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.v_proj.biases": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.v_proj.scales": "model-00005-of-00064.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00005-of-00064.safetensors",
+ "model.layers.90.input_layernorm.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.down_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.down_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.down_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.gate_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.gate_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.gate_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.up_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.up_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.mlp.up_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.post_attention_layernorm.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.k_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.k_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.k_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.o_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.o_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.o_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.q_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.q_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.q_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.v_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.v_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.90.self_attn.v_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.91.input_layernorm.weight": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.down_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.down_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.down_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.gate_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.gate_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.gate_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.up_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.up_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.91.mlp.up_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.91.post_attention_layernorm.weight": "model-00047-of-00064.safetensors",
+ "model.layers.91.self_attn.k_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.k_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.k_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.o_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.o_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.o_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.q_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.q_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.q_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.v_proj.biases": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.v_proj.scales": "model-00046-of-00064.safetensors",
+ "model.layers.91.self_attn.v_proj.weight": "model-00046-of-00064.safetensors",
+ "model.layers.92.input_layernorm.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.down_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.down_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.down_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.gate_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.gate_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.gate_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.up_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.up_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.mlp.up_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.post_attention_layernorm.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.k_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.k_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.k_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.o_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.o_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.o_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.q_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.q_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.q_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.v_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.v_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.92.self_attn.v_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.93.input_layernorm.weight": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.down_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.down_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.down_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.gate_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.gate_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.gate_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.up_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.up_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.93.mlp.up_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.93.post_attention_layernorm.weight": "model-00048-of-00064.safetensors",
+ "model.layers.93.self_attn.k_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.k_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.k_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.o_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.o_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.o_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.q_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.q_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.q_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.v_proj.biases": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.v_proj.scales": "model-00047-of-00064.safetensors",
+ "model.layers.93.self_attn.v_proj.weight": "model-00047-of-00064.safetensors",
+ "model.layers.94.input_layernorm.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.down_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.down_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.down_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.gate_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.gate_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.gate_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.up_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.up_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.mlp.up_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.post_attention_layernorm.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.k_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.k_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.k_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.o_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.o_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.o_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.q_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.q_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.q_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.v_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.v_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.94.self_attn.v_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.95.input_layernorm.weight": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.down_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.down_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.down_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.gate_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.gate_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.gate_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.up_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.up_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.95.mlp.up_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.95.post_attention_layernorm.weight": "model-00049-of-00064.safetensors",
+ "model.layers.95.self_attn.k_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.k_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.k_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.o_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.o_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.o_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.q_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.q_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.q_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.v_proj.biases": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.v_proj.scales": "model-00048-of-00064.safetensors",
+ "model.layers.95.self_attn.v_proj.weight": "model-00048-of-00064.safetensors",
+ "model.layers.96.input_layernorm.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.down_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.down_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.down_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.gate_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.gate_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.gate_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.up_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.up_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.mlp.up_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.post_attention_layernorm.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.k_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.k_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.k_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.o_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.o_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.o_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.q_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.q_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.q_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.v_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.v_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.96.self_attn.v_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.97.input_layernorm.weight": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.down_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.down_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.down_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.gate_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.gate_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.gate_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.up_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.up_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.97.mlp.up_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.97.post_attention_layernorm.weight": "model-00050-of-00064.safetensors",
+ "model.layers.97.self_attn.k_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.k_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.k_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.o_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.o_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.o_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.q_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.q_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.q_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.v_proj.biases": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.v_proj.scales": "model-00049-of-00064.safetensors",
+ "model.layers.97.self_attn.v_proj.weight": "model-00049-of-00064.safetensors",
+ "model.layers.98.input_layernorm.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.down_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.down_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.down_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.gate_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.gate_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.gate_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.up_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.up_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.mlp.up_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.post_attention_layernorm.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.k_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.k_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.k_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.o_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.o_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.o_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.q_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.q_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.q_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.v_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.v_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.98.self_attn.v_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.99.input_layernorm.weight": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.down_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.down_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.down_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.gate_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.gate_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.gate_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.up_proj.biases": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.up_proj.scales": "model-00051-of-00064.safetensors",
+ "model.layers.99.mlp.up_proj.weight": "model-00051-of-00064.safetensors",
+ "model.layers.99.post_attention_layernorm.weight": "model-00051-of-00064.safetensors",
+ "model.layers.99.self_attn.k_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.k_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.k_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.o_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.o_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.o_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.q_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.q_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.q_proj.weight": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.v_proj.biases": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.v_proj.scales": "model-00050-of-00064.safetensors",
+ "model.layers.99.self_attn.v_proj.weight": "model-00050-of-00064.safetensors",
+ "model.norm.weight": "model-00064-of-00064.safetensors"
+ }
+}
\ No newline at end of file
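
The `weight_map` above closes out the sharded-index file: every tensor name in the checkpoint maps to the shard file that stores it, and because shards are filled by size rather than by layer, a layer's tensors can straddle two files (layer 91's `self_attn.*` tensors stay in shard 46 while its MLP and norms have already moved to shard 47). The `.scales` and `.biases` entries paired with each quantized `weight` are the group-wise affine-quantization parameters MLX stores alongside the packed matrix. A minimal sketch of reading the index, assuming the JSON sits in the working directory (the tensor name is taken from the listing above):

```python
# Minimal sketch: resolve tensors to shards from the index file.
# Assumes model.safetensors.index.json is in the current directory.
import json
from collections import defaultdict

with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]

# Which shard holds a given tensor?
name = "model.layers.90.self_attn.q_proj.scales"
print(name, "->", weight_map[name])  # model-00046-of-00064.safetensors

# Group tensor names by shard so a loader opens each of the 64 files once.
by_shard = defaultdict(list)
for tensor, shard in weight_map.items():
    by_shard[shard].append(tensor)
print(len(by_shard), "shards,", len(weight_map), "tensors")
```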
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..278b7f0f84be865c4687700ee7b3c63d89a51e18
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|eot_id|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
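
This map pins the three roles the tokenizer needs at load time: `<|begin_of_text|>` as BOS; `<|eot_id|>` (the end-of-turn token, rather than `<|end_of_text|>`) as EOS, so chat generation stops at the end of an assistant turn; and `<|end_of_text|>` repurposed as the pad token, giving padded batches an id distinct from EOS. A minimal check, assuming the repo files are in the current directory and `transformers` is installed (the printed ids match the `added_tokens_decoder` table further down):

```python
# Minimal sketch: confirm the special-token roles declared above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")

print(tok.bos_token, tok.bos_token_id)  # <|begin_of_text|>  128000
print(tok.eos_token, tok.eos_token_id)  # <|eot_id|>         128009
print(tok.pad_token, tok.pad_token_id)  # <|end_of_text|>    128001
```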
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..37d7807e935d1462a2f093828ac5ee9a08c628a7
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39e4573a56ca97b0233ddc2dd63f634aeee35b84f352d2ce2aaa5992735cfcad
+size 17209827
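
These three lines are a Git LFS pointer, not the tokenizer itself: the stub records the spec version, the sha256 of the real blob, and its byte size (17,209,827), and `git lfs pull` swaps it for the actual file. A minimal sketch of parsing and verifying such a pointer, using only the standard library (file names are illustrative; the line format is LFS spec v1):

```python
# Minimal sketch: parse an LFS v1 pointer and verify a fetched blob.
import hashlib
from pathlib import Path

def parse_pointer(path):
    # Pointer files are key/value lines: version, oid sha256:<hex>, size <bytes>.
    fields = dict(line.split(" ", 1)
                  for line in Path(path).read_text().splitlines() if line)
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify(blob_path, oid, size):
    # After `git lfs pull`, the same path holds the real blob.
    data = Path(blob_path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

oid, size = parse_pointer("tokenizer.json")  # run on the stub, pre-fetch
print(oid[:12], size)                        # 39e4573a56ca 17209827
```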
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1916c2b20aa070a9deb86df289cd924027e7ac85
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,2065 @@
+{
+ "add_bos_token": true,
+ "added_tokens_decoder": {
+ "128000": {
+ "content": "<|begin_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128001": {
+ "content": "<|end_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128002": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "128003": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "128004": {
+ "content": "<|finetune_right_pad_id|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128005": {
+ "content": "<|reserved_special_token_2|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128006": {
+ "content": "<|start_header_id|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128007": {
+ "content": "<|end_header_id|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128008": {
+ "content": "<|eom_id|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128009": {
+ "content": "<|eot_id|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128010": {
+ "content": "<|python_tag|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128011": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "128012": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "128013": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "128014": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "128015": {
+ "content": "<|reserved_special_token_7|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128016": {
+ "content": "<|reserved_special_token_8|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128017": {
+ "content": "<|reserved_special_token_9|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128018": {
+ "content": "<|reserved_special_token_10|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128019": {
+ "content": "<|reserved_special_token_11|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128020": {
+ "content": "<|reserved_special_token_12|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128021": {
+ "content": "<|reserved_special_token_13|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128022": {
+ "content": "<|reserved_special_token_14|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128023": {
+ "content": "<|reserved_special_token_15|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128024": {
+ "content": "<|reserved_special_token_16|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128025": {
+ "content": "<|reserved_special_token_17|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128026": {
+ "content": "<|reserved_special_token_18|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128027": {
+ "content": "<|reserved_special_token_19|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128028": {
+ "content": "<|reserved_special_token_20|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128029": {
+ "content": "<|reserved_special_token_21|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128030": {
+ "content": "<|reserved_special_token_22|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128031": {
+ "content": "<|reserved_special_token_23|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128032": {
+ "content": "<|reserved_special_token_24|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128033": {
+ "content": "<|reserved_special_token_25|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128034": {
+ "content": "<|reserved_special_token_26|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128035": {
+ "content": "<|reserved_special_token_27|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128036": {
+ "content": "<|reserved_special_token_28|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128037": {
+ "content": "<|reserved_special_token_29|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128038": {
+ "content": "<|reserved_special_token_30|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128039": {
+ "content": "<|reserved_special_token_31|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128040": {
+ "content": "<|reserved_special_token_32|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128041": {
+ "content": "<|reserved_special_token_33|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128042": {
+ "content": "<|reserved_special_token_34|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128043": {
+ "content": "<|reserved_special_token_35|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128044": {
+ "content": "<|reserved_special_token_36|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128045": {
+ "content": "<|reserved_special_token_37|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128046": {
+ "content": "<|reserved_special_token_38|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128047": {
+ "content": "<|reserved_special_token_39|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128048": {
+ "content": "<|reserved_special_token_40|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128049": {
+ "content": "<|reserved_special_token_41|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128050": {
+ "content": "<|reserved_special_token_42|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128051": {
+ "content": "<|reserved_special_token_43|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128052": {
+ "content": "<|reserved_special_token_44|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128053": {
+ "content": "<|reserved_special_token_45|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128054": {
+ "content": "<|reserved_special_token_46|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128055": {
+ "content": "<|reserved_special_token_47|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128056": {
+ "content": "<|reserved_special_token_48|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128057": {
+ "content": "<|reserved_special_token_49|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128058": {
+ "content": "<|reserved_special_token_50|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128059": {
+ "content": "<|reserved_special_token_51|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128060": {
+ "content": "<|reserved_special_token_52|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128061": {
+ "content": "<|reserved_special_token_53|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128062": {
+ "content": "<|reserved_special_token_54|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128063": {
+ "content": "<|reserved_special_token_55|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128064": {
+ "content": "<|reserved_special_token_56|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128065": {
+ "content": "<|reserved_special_token_57|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128066": {
+ "content": "<|reserved_special_token_58|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128067": {
+ "content": "<|reserved_special_token_59|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128068": {
+ "content": "<|reserved_special_token_60|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128069": {
+ "content": "<|reserved_special_token_61|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128070": {
+ "content": "<|reserved_special_token_62|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128071": {
+ "content": "<|reserved_special_token_63|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128072": {
+ "content": "<|reserved_special_token_64|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128073": {
+ "content": "<|reserved_special_token_65|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128074": {
+ "content": "<|reserved_special_token_66|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128075": {
+ "content": "<|reserved_special_token_67|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128076": {
+ "content": "<|reserved_special_token_68|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128077": {
+ "content": "<|reserved_special_token_69|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128078": {
+ "content": "<|reserved_special_token_70|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128079": {
+ "content": "<|reserved_special_token_71|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128080": {
+ "content": "<|reserved_special_token_72|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128081": {
+ "content": "<|reserved_special_token_73|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128082": {
+ "content": "<|reserved_special_token_74|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128083": {
+ "content": "<|reserved_special_token_75|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128084": {
+ "content": "<|reserved_special_token_76|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128085": {
+ "content": "<|reserved_special_token_77|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128086": {
+ "content": "<|reserved_special_token_78|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128087": {
+ "content": "<|reserved_special_token_79|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128088": {
+ "content": "<|reserved_special_token_80|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128089": {
+ "content": "<|reserved_special_token_81|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128090": {
+ "content": "<|reserved_special_token_82|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128091": {
+ "content": "<|reserved_special_token_83|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128092": {
+ "content": "<|reserved_special_token_84|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128093": {
+ "content": "<|reserved_special_token_85|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128094": {
+ "content": "<|reserved_special_token_86|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128095": {
+ "content": "<|reserved_special_token_87|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128096": {
+ "content": "<|reserved_special_token_88|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128097": {
+ "content": "<|reserved_special_token_89|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128098": {
+ "content": "<|reserved_special_token_90|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128099": {
+ "content": "<|reserved_special_token_91|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128100": {
+ "content": "<|reserved_special_token_92|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128101": {
+ "content": "<|reserved_special_token_93|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128102": {
+ "content": "<|reserved_special_token_94|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128103": {
+ "content": "<|reserved_special_token_95|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128104": {
+ "content": "<|reserved_special_token_96|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128105": {
+ "content": "<|reserved_special_token_97|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128106": {
+ "content": "<|reserved_special_token_98|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128107": {
+ "content": "<|reserved_special_token_99|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128108": {
+ "content": "<|reserved_special_token_100|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128109": {
+ "content": "<|reserved_special_token_101|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128110": {
+ "content": "<|reserved_special_token_102|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128111": {
+ "content": "<|reserved_special_token_103|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128112": {
+ "content": "<|reserved_special_token_104|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128113": {
+ "content": "<|reserved_special_token_105|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128114": {
+ "content": "<|reserved_special_token_106|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128115": {
+ "content": "<|reserved_special_token_107|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128116": {
+ "content": "<|reserved_special_token_108|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128117": {
+ "content": "<|reserved_special_token_109|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128118": {
+ "content": "<|reserved_special_token_110|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128119": {
+ "content": "<|reserved_special_token_111|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128120": {
+ "content": "<|reserved_special_token_112|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128121": {
+ "content": "<|reserved_special_token_113|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128122": {
+ "content": "<|reserved_special_token_114|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128123": {
+ "content": "<|reserved_special_token_115|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128124": {
+ "content": "<|reserved_special_token_116|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128125": {
+ "content": "<|reserved_special_token_117|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128126": {
+ "content": "<|reserved_special_token_118|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128127": {
+ "content": "<|reserved_special_token_119|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128128": {
+ "content": "<|reserved_special_token_120|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128129": {
+ "content": "<|reserved_special_token_121|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128130": {
+ "content": "<|reserved_special_token_122|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128131": {
+ "content": "<|reserved_special_token_123|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128132": {
+ "content": "<|reserved_special_token_124|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128133": {
+ "content": "<|reserved_special_token_125|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128134": {
+ "content": "<|reserved_special_token_126|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128135": {
+ "content": "<|reserved_special_token_127|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128136": {
+ "content": "<|reserved_special_token_128|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128137": {
+ "content": "<|reserved_special_token_129|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128138": {
+ "content": "<|reserved_special_token_130|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128139": {
+ "content": "<|reserved_special_token_131|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128140": {
+ "content": "<|reserved_special_token_132|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128141": {
+ "content": "<|reserved_special_token_133|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128142": {
+ "content": "<|reserved_special_token_134|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128143": {
+ "content": "<|reserved_special_token_135|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128144": {
+ "content": "<|reserved_special_token_136|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128145": {
+ "content": "<|reserved_special_token_137|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128146": {
+ "content": "<|reserved_special_token_138|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128147": {
+ "content": "<|reserved_special_token_139|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128148": {
+ "content": "<|reserved_special_token_140|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128149": {
+ "content": "<|reserved_special_token_141|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128150": {
+ "content": "<|reserved_special_token_142|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128151": {
+ "content": "<|reserved_special_token_143|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128152": {
+ "content": "<|reserved_special_token_144|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128153": {
+ "content": "<|reserved_special_token_145|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128154": {
+ "content": "<|reserved_special_token_146|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128155": {
+ "content": "<|reserved_special_token_147|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128156": {
+ "content": "<|reserved_special_token_148|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128157": {
+ "content": "<|reserved_special_token_149|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128158": {
+ "content": "<|reserved_special_token_150|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128159": {
+ "content": "<|reserved_special_token_151|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128160": {
+ "content": "<|reserved_special_token_152|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128161": {
+ "content": "<|reserved_special_token_153|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128162": {
+ "content": "<|reserved_special_token_154|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128163": {
+ "content": "<|reserved_special_token_155|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128164": {
+ "content": "<|reserved_special_token_156|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128165": {
+ "content": "<|reserved_special_token_157|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128166": {
+ "content": "<|reserved_special_token_158|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128167": {
+ "content": "<|reserved_special_token_159|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128168": {
+ "content": "<|reserved_special_token_160|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128169": {
+ "content": "<|reserved_special_token_161|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128170": {
+ "content": "<|reserved_special_token_162|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128171": {
+ "content": "<|reserved_special_token_163|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128172": {
+ "content": "<|reserved_special_token_164|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128173": {
+ "content": "<|reserved_special_token_165|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128174": {
+ "content": "<|reserved_special_token_166|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128175": {
+ "content": "<|reserved_special_token_167|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128176": {
+ "content": "<|reserved_special_token_168|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128177": {
+ "content": "<|reserved_special_token_169|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128178": {
+ "content": "<|reserved_special_token_170|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128179": {
+ "content": "<|reserved_special_token_171|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128180": {
+ "content": "<|reserved_special_token_172|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128181": {
+ "content": "<|reserved_special_token_173|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128182": {
+ "content": "<|reserved_special_token_174|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128183": {
+ "content": "<|reserved_special_token_175|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128184": {
+ "content": "<|reserved_special_token_176|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128185": {
+ "content": "<|reserved_special_token_177|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128186": {
+ "content": "<|reserved_special_token_178|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128187": {
+ "content": "<|reserved_special_token_179|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128188": {
+ "content": "<|reserved_special_token_180|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128189": {
+ "content": "<|reserved_special_token_181|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128190": {
+ "content": "<|reserved_special_token_182|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128191": {
+ "content": "<|reserved_special_token_183|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128192": {
+ "content": "<|reserved_special_token_184|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128193": {
+ "content": "<|reserved_special_token_185|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128194": {
+ "content": "<|reserved_special_token_186|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128195": {
+ "content": "<|reserved_special_token_187|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128196": {
+ "content": "<|reserved_special_token_188|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128197": {
+ "content": "<|reserved_special_token_189|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128198": {
+ "content": "<|reserved_special_token_190|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128199": {
+ "content": "<|reserved_special_token_191|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128200": {
+ "content": "<|reserved_special_token_192|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128201": {
+ "content": "<|reserved_special_token_193|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128202": {
+ "content": "<|reserved_special_token_194|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128203": {
+ "content": "<|reserved_special_token_195|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128204": {
+ "content": "<|reserved_special_token_196|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128205": {
+ "content": "<|reserved_special_token_197|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128206": {
+ "content": "<|reserved_special_token_198|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128207": {
+ "content": "<|reserved_special_token_199|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128208": {
+ "content": "<|reserved_special_token_200|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128209": {
+ "content": "<|reserved_special_token_201|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128210": {
+ "content": "<|reserved_special_token_202|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128211": {
+ "content": "<|reserved_special_token_203|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128212": {
+ "content": "<|reserved_special_token_204|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128213": {
+ "content": "<|reserved_special_token_205|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128214": {
+ "content": "<|reserved_special_token_206|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128215": {
+ "content": "<|reserved_special_token_207|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128216": {
+ "content": "<|reserved_special_token_208|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128217": {
+ "content": "<|reserved_special_token_209|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128218": {
+ "content": "<|reserved_special_token_210|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128219": {
+ "content": "<|reserved_special_token_211|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128220": {
+ "content": "<|reserved_special_token_212|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128221": {
+ "content": "<|reserved_special_token_213|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128222": {
+ "content": "<|reserved_special_token_214|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128223": {
+ "content": "<|reserved_special_token_215|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128224": {
+ "content": "<|reserved_special_token_216|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128225": {
+ "content": "<|reserved_special_token_217|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128226": {
+ "content": "<|reserved_special_token_218|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128227": {
+ "content": "<|reserved_special_token_219|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128228": {
+ "content": "<|reserved_special_token_220|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128229": {
+ "content": "<|reserved_special_token_221|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128230": {
+ "content": "<|reserved_special_token_222|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128231": {
+ "content": "<|reserved_special_token_223|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128232": {
+ "content": "<|reserved_special_token_224|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128233": {
+ "content": "<|reserved_special_token_225|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128234": {
+ "content": "<|reserved_special_token_226|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128235": {
+ "content": "<|reserved_special_token_227|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128236": {
+ "content": "<|reserved_special_token_228|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128237": {
+ "content": "<|reserved_special_token_229|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128238": {
+ "content": "<|reserved_special_token_230|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128239": {
+ "content": "<|reserved_special_token_231|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128240": {
+ "content": "<|reserved_special_token_232|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128241": {
+ "content": "<|reserved_special_token_233|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128242": {
+ "content": "<|reserved_special_token_234|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128243": {
+ "content": "<|reserved_special_token_235|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128244": {
+ "content": "<|reserved_special_token_236|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128245": {
+ "content": "<|reserved_special_token_237|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128246": {
+ "content": "<|reserved_special_token_238|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128247": {
+ "content": "<|reserved_special_token_239|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128248": {
+ "content": "<|reserved_special_token_240|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128249": {
+ "content": "<|reserved_special_token_241|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128250": {
+ "content": "<|reserved_special_token_242|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128251": {
+ "content": "<|reserved_special_token_243|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128252": {
+ "content": "<|reserved_special_token_244|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128253": {
+ "content": "<|reserved_special_token_245|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128254": {
+ "content": "<|reserved_special_token_246|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "128255": {
+ "content": "<|reserved_special_token_247|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<|begin_of_text|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|eot_id|>",
+ "extra_special_tokens": {},
+ "model_input_names": [
+ "input_ids",
+ "attention_mask"
+ ],
+ "model_max_length": 131072,
+ "pad_token": "<|end_of_text|>",
+ "tokenizer_class": "PreTrainedTokenizerFast",
+ "chat_template": "{%- set thinking_prompt = 'You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem.' %}\n{%- set standard_prompt = 'You are Hermes, created by Nous Research.' %}\n{%- if not thinking is defined %}{% set thinking = false %}{% endif %}\n{%- if not keep_cots is defined %}{% set keep_cots = false %}{% endif %}\n{%- if thinking %}{%- set system_prompt = thinking_prompt %}{%- else %}{%- set system_prompt = standard_prompt %}{%- endif %}\n{%- if tools %}\n {{- bos_token + '<|start_header_id|>system<|end_header_id|>\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- system_prompt }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou are a function calling AI model. You may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": \\\"\\\", \\\"arguments\\\": }\\n<|eot_id|>\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- bos_token + '<|start_header_id|>system<|end_header_id|>\\n\\n' + messages[0]['content'] + '<|eot_id|>' }}\n {%- else %}\n {{- bos_token + '<|start_header_id|>system<|end_header_id|>\\n\\n' + system_prompt + '<|eot_id|>' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|start_header_id|>' + message.role + '<|end_header_id|>\\n\\n' + message.content + '<|eot_id|>' }}\n {%- elif (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|start_header_id|>' + message.role + '<|end_header_id|>\\n' }}\n {%- if message.content %}\n {%- set content = message['content'] -%}\n {%- if thinking %}\n {%- if not keep_cots %}\n {%- set content = ' ' + content.split('', 1)[1] -%}\n {%- endif %}\n {%- endif %}\n {{- '\\n' + content + '<|eot_id|>' }}\n {%- endif %}\n {%- elif message.role == \"assistant\" %}\n {{- '<|start_header_id|>' + message.role + '<|end_header_id|>\\n' }}\n {%- if message.content %}\n {%- set content = message['content'] -%}\n {%- if thinking %}\n {%- if not keep_cots %}\n {%- set content = ' ' + content.split('', 1)[1] -%}\n {%- endif %}\n {%- endif %}\n {{- '\\n' + content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|eot_id|>' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|eot_id|>' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n"
+}
\ No newline at end of file
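Note on the chat template above: it drives three behaviors at render time. It injects a default Hermes system prompt when none is supplied, supports an optional `thinking` flag that swaps in a deep-reasoning system prompt (and, unless `keep_cots` is set, strips prior `<think>…</think>` traces from earlier assistant turns), and wraps function calling in Hermes-style `<tool_call>`/`<tool_response>` blocks. Below is a minimal sketch (not part of this repo) of exercising the template with `transformers`; the repo path is a placeholder, and passing `thinking=True` relies on `apply_chat_template` forwarding extra keyword arguments into the Jinja context.

```python
# Minimal sketch, assuming the tokenizer files from this repo are available
# locally or on the Hub; "path/to/this/repo" is a placeholder, not a real id.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")

messages = [
    {"role": "user", "content": "Explain the difference between BFS and DFS."},
]

# Default mode: the template injects the standard Hermes system prompt.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Thinking mode: extra kwargs to apply_chat_template are exposed to the
# template, so this sets the `thinking` variable the template checks.
thinking_prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, thinking=True
)

print(prompt)
print(thinking_prompt)
```

With `add_generation_prompt=True`, both renders end in `<|start_header_id|>assistant<|end_header_id|>`, so generation continues as the assistant turn; the two variants differ only in which system prompt the template selected.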