Update 3 LEAP SDK manifests
- leap/Q4_0.json +2 -1
- leap/Q4_K_M.json +2 -1
- leap/Q8_0.json +2 -1
leap/Q4_0.json
CHANGED
@@ -2,7 +2,8 @@
   "inference_type": "llama.cpp/text-to-text",
   "schema_version": "1.0.0",
   "load_time_parameters": {
-    "model": "../LFM2-700M-Q4_0.gguf"
+    "model": "../LFM2-700M-Q4_0.gguf",
+    "chat_template": "{{- bos_token -}}\n{%- set system_prompt = \"\" -%}\n{%- set ns = namespace(system_prompt=\"\") -%}\n{%- if messages[0][\"role\"] == \"system\" -%}\n\t{%- set ns.system_prompt = messages[0][\"content\"] -%}\n\t{%- set messages = messages[1:] -%}\n{%- endif -%}\n{%- if tools -%}\n\t{%- set ns.system_prompt = ns.system_prompt + (\"\\n\" if ns.system_prompt else \"\") + \"List of tools: <|tool_list_start|>[\" -%}\n\t{%- for tool in tools -%}\n\t\t{%- if tool is not string -%}\n\t\t\t{%- set tool = tool | tojson -%}\n\t\t{%- endif -%}\n\t\t{%- set ns.system_prompt = ns.system_prompt + tool -%}\n\t\t{%- if not loop.last -%}\n\t\t\t{%- set ns.system_prompt = ns.system_prompt + \", \" -%}\n\t\t{%- endif -%}\n\t{%- endfor -%}\n\t{%- set ns.system_prompt = ns.system_prompt + \"]<|tool_list_end|>\" -%}\n{%- endif -%}\n{%- if ns.system_prompt -%}\n\t{{- \"<|im_start|>system\\n\" + ns.system_prompt + \"<|im_end|>\\n\" -}}\n{%- endif -%}\n{%- for message in messages -%}\n\t{{- \"<|im_start|>\" + message[\"role\"] + \"\\n\" -}}\n\t{%- set content = message[\"content\"] -%}\n\t{%- if content is not string -%}\n\t\t{%- set content = content | tojson -%}\n\t{%- endif -%}\n\t{%- if message[\"role\"] == \"tool\" -%}\n\t\t{%- set content = \"<|tool_response_start|>\" + content + \"<|tool_response_end|>\" -%}\n\t{%- endif -%}\n\t{{- content + \"<|im_end|>\\n\" -}}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n\t{{- \"<|im_start|>assistant\\n\" -}}\n{%- endif -%}\n"
   },
   "generation_time_parameters": {
     "sampling_parameters": {
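The added chat_template is a Hugging Face-style Jinja2 chat template (ChatML-like <|im_start|>/<|im_end|> markers, with optional tool-list and tool-response wrapping); the same string is added to all three manifests. As a minimal sketch of what it produces, it can be rendered outside the LEAP SDK with plain Jinja2; the bos_token value below is a placeholder, not something taken from this commit:

# Illustrative only: render the chat_template from leap/Q4_0.json with plain
# Jinja2 to inspect the prompt it produces. The bos_token value is a placeholder;
# the real token comes from the model's tokenizer.
import json
from jinja2 import Environment

with open("leap/Q4_0.json") as f:
    template_src = json.load(f)["load_time_parameters"]["chat_template"]

prompt = Environment().from_string(template_src).render(
    bos_token="<BOS>",   # placeholder BOS token
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ],
    tools=None,          # no tool list -> no <|tool_list_start|> block
    add_generation_prompt=True,
)
print(prompt)
# <BOS><|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# What is the capital of France?<|im_end|>
# <|im_start|>assistant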
leap/Q4_K_M.json
CHANGED
@@ -2,7 +2,8 @@
   "inference_type": "llama.cpp/text-to-text",
   "schema_version": "1.0.0",
   "load_time_parameters": {
-    "model": "../LFM2-700M-Q4_K_M.gguf"
+    "model": "../LFM2-700M-Q4_K_M.gguf",
+    "chat_template": "{{- bos_token -}}\n{%- set system_prompt = \"\" -%}\n{%- set ns = namespace(system_prompt=\"\") -%}\n{%- if messages[0][\"role\"] == \"system\" -%}\n\t{%- set ns.system_prompt = messages[0][\"content\"] -%}\n\t{%- set messages = messages[1:] -%}\n{%- endif -%}\n{%- if tools -%}\n\t{%- set ns.system_prompt = ns.system_prompt + (\"\\n\" if ns.system_prompt else \"\") + \"List of tools: <|tool_list_start|>[\" -%}\n\t{%- for tool in tools -%}\n\t\t{%- if tool is not string -%}\n\t\t\t{%- set tool = tool | tojson -%}\n\t\t{%- endif -%}\n\t\t{%- set ns.system_prompt = ns.system_prompt + tool -%}\n\t\t{%- if not loop.last -%}\n\t\t\t{%- set ns.system_prompt = ns.system_prompt + \", \" -%}\n\t\t{%- endif -%}\n\t{%- endfor -%}\n\t{%- set ns.system_prompt = ns.system_prompt + \"]<|tool_list_end|>\" -%}\n{%- endif -%}\n{%- if ns.system_prompt -%}\n\t{{- \"<|im_start|>system\\n\" + ns.system_prompt + \"<|im_end|>\\n\" -}}\n{%- endif -%}\n{%- for message in messages -%}\n\t{{- \"<|im_start|>\" + message[\"role\"] + \"\\n\" -}}\n\t{%- set content = message[\"content\"] -%}\n\t{%- if content is not string -%}\n\t\t{%- set content = content | tojson -%}\n\t{%- endif -%}\n\t{%- if message[\"role\"] == \"tool\" -%}\n\t\t{%- set content = \"<|tool_response_start|>\" + content + \"<|tool_response_end|>\" -%}\n\t{%- endif -%}\n\t{{- content + \"<|im_end|>\\n\" -}}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n\t{{- \"<|im_start|>assistant\\n\" -}}\n{%- endif -%}\n"
   },
   "generation_time_parameters": {
     "sampling_parameters": {
leap/Q8_0.json
CHANGED
@@ -2,7 +2,8 @@
   "inference_type": "llama.cpp/text-to-text",
   "schema_version": "1.0.0",
   "load_time_parameters": {
-    "model": "../LFM2-700M-Q8_0.gguf"
+    "model": "../LFM2-700M-Q8_0.gguf",
+    "chat_template": "{{- bos_token -}}\n{%- set system_prompt = \"\" -%}\n{%- set ns = namespace(system_prompt=\"\") -%}\n{%- if messages[0][\"role\"] == \"system\" -%}\n\t{%- set ns.system_prompt = messages[0][\"content\"] -%}\n\t{%- set messages = messages[1:] -%}\n{%- endif -%}\n{%- if tools -%}\n\t{%- set ns.system_prompt = ns.system_prompt + (\"\\n\" if ns.system_prompt else \"\") + \"List of tools: <|tool_list_start|>[\" -%}\n\t{%- for tool in tools -%}\n\t\t{%- if tool is not string -%}\n\t\t\t{%- set tool = tool | tojson -%}\n\t\t{%- endif -%}\n\t\t{%- set ns.system_prompt = ns.system_prompt + tool -%}\n\t\t{%- if not loop.last -%}\n\t\t\t{%- set ns.system_prompt = ns.system_prompt + \", \" -%}\n\t\t{%- endif -%}\n\t{%- endfor -%}\n\t{%- set ns.system_prompt = ns.system_prompt + \"]<|tool_list_end|>\" -%}\n{%- endif -%}\n{%- if ns.system_prompt -%}\n\t{{- \"<|im_start|>system\\n\" + ns.system_prompt + \"<|im_end|>\\n\" -}}\n{%- endif -%}\n{%- for message in messages -%}\n\t{{- \"<|im_start|>\" + message[\"role\"] + \"\\n\" -}}\n\t{%- set content = message[\"content\"] -%}\n\t{%- if content is not string -%}\n\t\t{%- set content = content | tojson -%}\n\t{%- endif -%}\n\t{%- if message[\"role\"] == \"tool\" -%}\n\t\t{%- set content = \"<|tool_response_start|>\" + content + \"<|tool_response_end|>\" -%}\n\t{%- endif -%}\n\t{{- content + \"<|im_end|>\\n\" -}}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n\t{{- \"<|im_start|>assistant\\n\" -}}\n{%- endif -%}\n"
   },
   "generation_time_parameters": {
     "sampling_parameters": {
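A quick sanity check, illustrative and not part of this commit, that all three updated manifests still parse as JSON and now carry the new key:

# Illustrative check: each updated manifest parses as JSON, points at its GGUF
# file, and now carries a chat_template under load_time_parameters.
import json

for name in ("Q4_0", "Q4_K_M", "Q8_0"):
    with open(f"leap/{name}.json") as f:
        manifest = json.load(f)
    params = manifest["load_time_parameters"]
    assert params["model"] == f"../LFM2-700M-{name}.gguf"
    assert "chat_template" in params
    print(name, "OK")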