Upload folder using huggingface_hub
Changed files:
- gliner_config.json (+3 -1)
- pytorch_model.bin (+1 -1)
- tokenizer_config.json (+0 -1)
 
    	
gliner_config.json CHANGED
@@ -43,6 +43,7 @@
     "max_length": 20,
     "max_position_embeddings": 2048,
     "min_length": 0,
+    "mlp_bias": false,
     "model_type": "llama",
     "no_repeat_ngram_size": 0,
     "num_attention_heads": 32,
@@ -88,6 +89,7 @@
   "fine_tune": true,
   "freeze_token_rep": false,
   "fuse_layers": false,
+  "has_post_transformer": false,
   "has_rnn": true,
   "hidden_size": 2048,
   "label_smoothing": 0.0001,
@@ -122,7 +124,7 @@
   "subtoken_pooling": "first",
   "train_batch_size": 2,
   "train_data": "data.json",
-  "transformers_version": "4.
+  "transformers_version": "4.41.0",
   "val_data_dir": "none",
   "vocab_size": 32002,
   "warmup_ratio": 0.1,
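This commit adds two GLiNER-level fields ("has_post_transformer" and a recorded "transformers_version") plus an "mlp_bias" field in the nested llama backbone config. A minimal sketch, assuming a local checkout of the repo, to confirm the new fields landed after pulling; it uses only the standard json module and searches recursively so the nested key is found regardless of where it sits:

import json

# Assumes a local checkout of this repo containing the updated file.
with open("gliner_config.json") as f:
    cfg = json.load(f)

def find_key(obj, key):
    # Recursively look for `key` anywhere in the (possibly nested) config.
    if isinstance(obj, dict):
        if key in obj:
            return obj[key]
        for v in obj.values():
            hit = find_key(v, key)
            if hit is not None:
                return hit
    elif isinstance(obj, list):
        for v in obj:
            hit = find_key(v, key)
            if hit is not None:
                return hit
    return None

# Fields touched by this commit.
for key in ("mlp_bias", "has_post_transformer", "transformers_version"):
    print(key, "->", find_key(cfg, key))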
    	
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6bb965e7336d4b283244315f21ad34fcaaa50d0c46cb1f4005c9489d1a2465a1
 size 4843077658
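Only the Git LFS pointer changes here: a new sha256 oid for a checkpoint of the same 4,843,077,658-byte size. A hedged sketch for verifying a locally downloaded copy of the weights against the new pointer; the filename is assumed to be the resolved binary, not the pointer file itself:

import hashlib

EXPECTED_OID = "6bb965e7336d4b283244315f21ad34fcaaa50d0c46cb1f4005c9489d1a2465a1"
EXPECTED_SIZE = 4843077658

h = hashlib.sha256()
size = 0
# Stream in 1 MiB chunks so the ~4.8 GB file is never held in memory at once.
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

print("sha256 matches pointer:", h.hexdigest() == EXPECTED_OID)
print("size matches pointer:", size == EXPECTED_SIZE)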
    	
tokenizer_config.json CHANGED
@@ -54,7 +54,6 @@
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
-  "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
   "padding_side": "right",
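With "legacy": false removed from the config, the loaded tokenizer falls back to the transformers library default for that flag rather than an explicit setting. A small sketch to inspect what actually gets instantiated; the local repo path is assumed, and the exact default behavior depends on the installed transformers version:

from transformers import AutoTokenizer

# Assumes the tokenizer files sit in a local clone of this repo; the path is illustrative.
tok = AutoTokenizer.from_pretrained("./")

print(type(tok).__name__)
print("bos:", tok.bos_token, "eos:", tok.eos_token, "pad:", tok.pad_token)
print("model_max_length:", tok.model_max_length)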