End of training
Files changed:
- README.md: +61 -10
- special_tokens_map.json: +7 -0
- tokenizer.json: added (diff too large to render)
- tokenizer_config.json: +56 -0
- vocab.txt: added (diff too large to render)
README.md (CHANGED)
@@ -1,14 +1,65 @@
 ---
+library_name: transformers
 license: apache-2.0
+base_model: google-bert/bert-base-uncased
+tags:
+- generated_from_trainer
 metrics:
 - f1
-[old lines 5-14: content not rendered in this view]
+model-index:
+- name: llm-router
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# llm-router
+
+This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.2865
+- F1: 0.9301
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 32
+- eval_batch_size: 16
+- seed: 42
+- optimizer: adamw_torch_fused with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
+- lr_scheduler_type: linear
+- num_epochs: 5
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | F1     |
+|:-------------:|:-----:|:----:|:---------------:|:------:|
+| 1.6264        | 1.0   | 313  | 1.2031          | 0.7960 |
+| 0.524         | 2.0   | 626  | 0.4904          | 0.8985 |
+| 0.2594        | 3.0   | 939  | 0.3444          | 0.9189 |
+| 0.1366        | 4.0   | 1252 | 0.2952          | 0.9284 |
+| 0.0749        | 5.0   | 1565 | 0.2865          | 0.9301 |
+
+
+### Framework versions
+
+- Transformers 4.48.0.dev0
+- Pytorch 2.4.1+cu121
+- Datasets 3.1.0
+- Tokenizers 0.21.0
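For orientation, here is a minimal sketch of how the hyperparameters in the card could map onto a `transformers` `TrainingArguments` setup, and of inference with the finished checkpoint. The label count, dataset wiring, and repo ids are assumptions; the card does not state them.

```python
# A minimal sketch, not the author's training script: only the hyperparameters
# listed in the card come from the source; everything else is an assumption.
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "google-bert/bert-base-uncased",
    num_labels=2,  # assumption: the card does not say how many routing classes there are
)

args = TrainingArguments(
    output_dir="llm-router",
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=16,
    seed=42,
    optim="adamw_torch_fused",  # AdamW with betas=(0.9, 0.999) and eps=1e-8 by default
    lr_scheduler_type="linear",
    num_train_epochs=5,
    eval_strategy="epoch",  # the card reports validation loss and F1 once per epoch
)
# Trainer(model=model, args=args, train_dataset=..., eval_dataset=...,
# compute_metrics=...) would complete the loop; the dataset is unknown.
```

Once pushed to the Hub, inference could look like this (the repo id is hypothetical):

```python
from transformers import pipeline

router = pipeline("text-classification", model="your-username/llm-router")
print(router("Summarize this 40-page contract."))  # returns a label and a score
```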
    	
special_tokens_map.json (ADDED)
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
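These are the five stock BERT special tokens, unchanged from `bert-base-uncased`. As a quick sanity check, the saved map can be read back through the tokenizer; the local path below is a placeholder for wherever this checkpoint lives.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./llm-router")  # placeholder path to this checkpoint
print(tok.special_tokens_map)
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#  'cls_token': '[CLS]', 'mask_token': '[MASK]'}
```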
    	
tokenizer.json (ADDED)

The diff for this file is too large to render; see the raw diff.
    	
tokenizer_config.json (ADDED)
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
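The configuration pins the usual `bert-base-uncased` behavior: the classic special-token ids (0, 100, 101, 102, 103), lowercasing on input, and a 512-token limit. A short check against those settings, with the same placeholder path as above:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./llm-router")  # placeholder path

# do_lower_case=true: text is lowercased before WordPiece splitting
ids = tok("Route THIS Query")["input_ids"]
print(tok.convert_ids_to_tokens(ids))
# ['[CLS]', 'route', 'this', 'query', '[SEP]'] -- wrapped in ids 101 and 102

# added_tokens_decoder assigns the classic BERT ids to the special tokens
print(tok.convert_tokens_to_ids(["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]))
# [0, 100, 101, 102, 103]

print(tok.model_max_length)  # 512 -- longer inputs must be truncated
```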
    	
vocab.txt (ADDED)

The diff for this file is too large to render; see the raw diff.
