Commit 94cbe14
Parent(s): 69bc4a0
Training in progress epoch 0
Files changed:
- README.md +54 -0
- config.json +123 -0
- special_tokens_map.json +7 -0
- tf_model.h5 +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +55 -0
- vocab.txt +0 -0
README.md (ADDED)

@@ -0,0 +1,54 @@
+---
+license: apache-2.0
+base_model: bert-base-cased
+tags:
+- generated_from_keras_callback
+model-index:
+- name: HamzaSidhu786/bert-finetuned-pos
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information Keras had access to. You should
+probably proofread and complete it, then remove this comment. -->
+
+# HamzaSidhu786/bert-finetuned-pos
+
+This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Train Loss: 0.5212
+- Validation Loss: 0.3078
+- Epoch: 0
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 2634, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
+- training_precision: float32
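The raw optimizer dump above is hard to read; its shape matches what transformers' TF helper `create_optimizer` builds (AdamWeightDecay with a linear PolynomialDecay to 0). A minimal sketch reconstructing it, assuming `num_warmup_steps=0` since the dump shows no warmup phase:

```python
# Sketch only: rebuilds the AdamWeightDecay + PolynomialDecay setup dumped
# above via transformers' TF helper. Requires TensorFlow to be installed.
from transformers import create_optimizer

optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,          # initial_learning_rate in the dump
    num_train_steps=2634,  # decay_steps of the PolynomialDecay
    num_warmup_steps=0,    # assumed: no warmup appears in the dump
    weight_decay_rate=0.01,
)
```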
+
+### Training results
+
+| Train Loss | Validation Loss | Epoch |
+|:----------:|:---------------:|:-----:|
+| 0.5212     | 0.3078          | 0     |
+
+
+### Framework versions
+
+- Transformers 4.41.2
+- TensorFlow 2.15.0
+- Datasets 2.20.0
+- Tokenizers 0.19.1
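The card stops short of a usage example. A minimal inference sketch, assuming the checkpoint loads with the standard transformers TF auto classes (the input sentence is illustrative):

```python
# Minimal sketch: load the TF checkpoint added in this commit and tag a
# sentence. Assumes transformers and tensorflow are installed.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

repo = "HamzaSidhu786/bert-finetuned-pos"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFAutoModelForTokenClassification.from_pretrained(repo)

inputs = tokenizer("HuggingFace is based in New York City.", return_tensors="tf")
logits = model(**inputs).logits
pred_ids = tf.argmax(logits, axis=-1)[0].numpy()

# id2label comes from the config.json below; note that predictions are
# per subword token, not per word.
token_ids = inputs["input_ids"][0].numpy().tolist()
for token, pred in zip(tokenizer.convert_ids_to_tokens(token_ids), pred_ids):
    print(f"{token}\t{model.config.id2label[int(pred)]}")
```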
    	
config.json (ADDED)

@@ -0,0 +1,123 @@
+{
+  "_name_or_path": "bert-base-cased",
+  "architectures": [
+    "BertForTokenClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "\"",
+    "1": "''",
+    "2": "#",
+    "3": "$",
+    "4": "(",
+    "5": ")",
+    "6": ",",
+    "7": ".",
+    "8": ":",
+    "9": "``",
+    "10": "CC",
+    "11": "CD",
+    "12": "DT",
+    "13": "EX",
+    "14": "FW",
+    "15": "IN",
+    "16": "JJ",
+    "17": "JJR",
+    "18": "JJS",
+    "19": "LS",
+    "20": "MD",
+    "21": "NN",
+    "22": "NNP",
+    "23": "NNPS",
+    "24": "NNS",
+    "25": "NN|SYM",
+    "26": "PDT",
+    "27": "POS",
+    "28": "PRP",
+    "29": "PRP$",
+    "30": "RB",
+    "31": "RBR",
+    "32": "RBS",
+    "33": "RP",
+    "34": "SYM",
+    "35": "TO",
+    "36": "UH",
+    "37": "VB",
+    "38": "VBD",
+    "39": "VBG",
+    "40": "VBN",
+    "41": "VBP",
+    "42": "VBZ",
+    "43": "WDT",
+    "44": "WP",
+    "45": "WP$",
+    "46": "WRB"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "\"": 0,
+    "#": 2,
+    "$": 3,
+    "''": 1,
+    "(": 4,
+    ")": 5,
+    ",": 6,
+    ".": 7,
+    ":": 8,
+    "CC": 10,
+    "CD": 11,
+    "DT": 12,
+    "EX": 13,
+    "FW": 14,
+    "IN": 15,
+    "JJ": 16,
+    "JJR": 17,
+    "JJS": 18,
+    "LS": 19,
+    "MD": 20,
+    "NN": 21,
+    "NNP": 22,
+    "NNPS": 23,
+    "NNS": 24,
+    "NN|SYM": 25,
+    "PDT": 26,
+    "POS": 27,
+    "PRP": 28,
+    "PRP$": 29,
+    "RB": 30,
+    "RBR": 31,
+    "RBS": 32,
+    "RP": 33,
+    "SYM": 34,
+    "TO": 35,
+    "UH": 36,
+    "VB": 37,
+    "VBD": 38,
+    "VBG": 39,
+    "VBN": 40,
+    "VBP": 41,
+    "VBZ": 42,
+    "WDT": 43,
+    "WP": 44,
+    "WP$": 45,
+    "WRB": 46,
+    "``": 9
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.41.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 28996
+}
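The 47 labels above are Penn Treebank-style POS tags (the NN|SYM entry is characteristic of the CoNLL-2003 POS column, though the card lists the dataset as unknown). A sketch for inspecting the label map without loading the full model:

```python
# Sketch: the id2label map above is what turns class indices into tag
# strings at inference time.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("HamzaSidhu786/bert-finetuned-pos")
print(config.num_labels)        # 47
print(config.id2label[21])      # "NN"
print(config.label2id["NNP"])   # 22
```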
    	
special_tokens_map.json (ADDED)

@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
    	
tf_model.h5 (ADDED)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aead35b3f85955296627a98b0f12a30198585a74c8f79e0fce057093eee716c
+size 431296644
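What this commit stores is a Git LFS pointer, not the ~431 MB weights file itself. A sketch for fetching the real file and checking it against the pointer's oid, assuming huggingface_hub is installed:

```python
# Sketch: download the actual weights and verify them against the
# sha256 oid recorded in the LFS pointer above.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("HamzaSidhu786/bert-finetuned-pos", "tf_model.h5")

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == (
    "8aead35b3f85955296627a98b0f12a30198585a74c8f79e0fce057093eee716c"
)
```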
    	
tokenizer.json (ADDED)

The diff for this file is too large to render. See raw diff.
    	
tokenizer_config.json (ADDED)

@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
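Note `"do_lower_case": false`: the tokenizer preserves casing, which matters for tags like NNP/NNPS that lean on capitalization. A quick illustrative check:

```python
# Sketch: the cased tokenizer configured above keeps capitalization,
# which the POS head relies on for proper-noun tags.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("HamzaSidhu786/bert-finetuned-pos")
print(tok.tokenize("Berlin is a city."))  # casing preserved
print(tok.model_max_length)               # 512, from tokenizer_config.json
```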
    	
vocab.txt (ADDED)

The diff for this file is too large to render. See raw diff.