chat template
tokenizer_config.json CHANGED (+1 -1)

@@ -143,7 +143,7 @@
     "<|im_end|>"
   ],
   "bos_token": "<|im_start|>",
-  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "model_max_length": 2048,
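Effect of the change: when the incoming conversation has no system message, the template now injects a default SmolLM system prompt before the first turn; conversations that already start with a system message are left untouched. Below is a minimal sketch, not part of the commit, that renders the new template directly with jinja2 to show this behavior (transformers renders chat templates through a sandboxed Jinja environment, but a plain Template is enough here).

# Minimal sketch: render the updated chat_template with plain jinja2 to show
# that a default system prompt is prepended when none is supplied.
from jinja2 import Template

chat_template = (
    "{% for message in messages %}"
    "{% if loop.first and messages[0]['role'] != 'system' %}"
    "{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, "
    "trained by Hugging Face<|im_end|>\n' }}"
    "{% endif %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

# A conversation with no system message: the new guard fires and injects one.
messages = [{"role": "user", "content": "Hello!"}]

prompt = Template(chat_template).render(messages=messages, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant

In practice you would not render the template by hand: tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) reads the chat_template field from tokenizer_config.json and produces the same prompt string.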