Commit ff02fc7 (verified) · Parent(s): 76ab014
robinhad committed

Model save
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 tokenizer.json filter=lfs diff=lfs merge=lfs -text
+final/tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: transformers
-license: apache-2.0
-base_model: Snowflake/snowflake-arctic-embed-m-v1.5
+license: mit
+base_model: intfloat/multilingual-e5-base
 tags:
 - generated_from_trainer
 metrics:
@@ -18,13 +18,13 @@ should probably proofread and complete it, then remove this comment. -->
 
 # alignment-score-model
 
-This model is a fine-tuned version of [Snowflake/snowflake-arctic-embed-m-v1.5](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5) on the None dataset.
+This model is a fine-tuned version of [intfloat/multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1019
-- Precision: 0.9234
-- Recall: 0.9136
-- F1 Macro: 0.9143
-- Accuracy: 0.9149
+- Loss: 0.0969
+- Precision: 0.9388
+- Recall: 0.9275
+- F1 Macro: 0.9283
+- Accuracy: 0.9289
 
 ## Model description
 
@@ -47,23 +47,30 @@ The following hyperparameters were used during training:
 - train_batch_size: 256
 - eval_batch_size: 128
 - seed: 0
+- distributed_type: multi-GPU
+- num_devices: 8
+- total_train_batch_size: 2048
+- total_eval_batch_size: 1024
 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
-- num_epochs: 3
+- num_epochs: 6
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 Macro | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:--------:|:--------:|
-| No log        | 0     | 0    | 0.5032          | 0.2550    | 0.5    | 0.3377   | 0.5100   |
-| No log        | 1.0   | 31   | 0.1634          | 0.8494    | 0.7842 | 0.7770   | 0.7884   |
-| No log        | 2.0   | 62   | 0.1102          | 0.9267    | 0.9185 | 0.9193   | 0.9198   |
-| No log        | 3.0   | 93   | 0.1019          | 0.9234    | 0.9136 | 0.9143   | 0.9149   |
+| No log        | 0     | 0    | 0.4846          | 0.2550    | 0.5    | 0.3377   | 0.5100   |
+| No log        | 1.0   | 4    | 0.2132          | 0.7608    | 0.6895 | 0.6704   | 0.6947   |
+| No log        | 2.0   | 8    | 0.1678          | 0.8813    | 0.8469 | 0.8458   | 0.8498   |
+| No log        | 3.0   | 12   | 0.1488          | 0.8297    | 0.7313 | 0.7137   | 0.7367   |
+| No log        | 4.0   | 16   | 0.1257          | 0.8693    | 0.8159 | 0.8121   | 0.8196   |
+| No log        | 5.0   | 20   | 0.1047          | 0.9183    | 0.8984 | 0.8989   | 0.9004   |
+| No log        | 6.0   | 24   | 0.0969          | 0.9388    | 0.9275 | 0.9283   | 0.9289   |
 
 
 ### Framework versions
 
 - Transformers 4.56.1
-- Pytorch 2.7.1+cu126
+- Pytorch 2.6.0a0+ecf3bae40a.nv25.01
 - Datasets 4.0.0
 - Tokenizers 0.22.0
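The auto-generated card above lacks a usage example, so here is a minimal inference sketch. The repo id below is an assumption inferred from the committer and model name; substitute the real Hub id or a local path such as `final/`. Note that the new base model, intfloat/multilingual-e5-base, was pretrained with "query: "/"passage: " text prefixes, and this diff does not show whether the fine-tune expects them.

```python
# Minimal inference sketch for the fine-tuned regression head.
# ASSUMPTION: the repo id is hypothetical (committer + model name);
# replace it with the actual Hub id or a local checkpoint path.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "robinhad/alignment-score-model"  # hypothetical id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

# config.json sets problem_type="regression" with a single label,
# so the model emits one unnormalized score per input.
inputs = tokenizer(
    "example text to score",  # unclear from the diff whether an e5 "query: " prefix is needed
    return_tensors="pt",
    truncation=True,
    max_length=512,
)
with torch.no_grad():
    score = model(**inputs).logits.squeeze(-1).item()
print(f"alignment score: {score:.4f}")
```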
final/config.json CHANGED
@@ -1,11 +1,12 @@
 {
   "architectures": [
-    "BertForSequenceClassification"
+    "XLMRobertaForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
   "classifier_dropout": 0.0,
   "dtype": "float32",
-  "gradient_checkpointing": false,
+  "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
@@ -17,19 +18,17 @@
   "label2id": {
     "LABEL_0": 0
   },
-  "layer_norm_eps": 1e-12,
-  "matryoshka_dimensions": [
-    256
-  ],
-  "max_position_embeddings": 512,
-  "model_type": "bert",
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 0,
+  "output_past": true,
+  "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "regression",
   "transformers_version": "4.56.1",
-  "type_vocab_size": 2,
+  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 30522
+  "vocab_size": 250002
 }
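The diff above swaps the architecture from BERT to XLM-RoBERTa, which is easy to verify programmatically. A quick sanity-check sketch, assuming the `final/` directory from this commit is available locally:

```python
# Sanity check: the saved config should now match the XLM-R base
# (intfloat/multilingual-e5-base), not the previous BERT one.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("final")  # local checkout of this commit
base = AutoConfig.from_pretrained("intfloat/multilingual-e5-base")

assert cfg.model_type == base.model_type == "xlm-roberta"
assert cfg.vocab_size == base.vocab_size == 250002
assert cfg.max_position_embeddings == base.max_position_embeddings == 514
assert cfg.problem_type == "regression"  # single-score head, per this diff
print("config matches the new base model")
```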
final/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:04d78a7f400e135cc29992a8a458b1c6468ef1e41314bf9f7a2bd0bf40c2dbfb
-size 437955572
+oid sha256:f2d871c1c9db3bf703b69015678e7daae94d8a6464b6fe788cc481f9665748a2
+size 1112201932
final/special_tokens_map.json CHANGED
@@ -1,34 +1,48 @@
 {
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "cls_token": {
-    "content": "[CLS]",
+    "content": "<s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "mask_token": {
-    "content": "[MASK]",
+  "eos_token": {
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
   "pad_token": {
-    "content": "[PAD]",
+    "content": "<pad>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "sep_token": {
-    "content": "[SEP]",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "unk_token": {
-    "content": "[UNK]",
+    "content": "<unk>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
final/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
final/tokenizer_config.json CHANGED
@@ -1,63 +1,55 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "[PAD]",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "100": {
-      "content": "[UNK]",
+    "1": {
+      "content": "<pad>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "101": {
-      "content": "[CLS]",
+    "2": {
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "102": {
-      "content": "[SEP]",
+    "3": {
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "103": {
-      "content": "[MASK]",
-      "lstrip": false,
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "cls_token": "[CLS]",
-  "do_lower_case": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
   "extra_special_tokens": {},
-  "mask_token": "[MASK]",
-  "max_length": 512,
+  "mask_token": "<mask>",
   "model_max_length": 512,
-  "pad_to_multiple_of": null,
-  "pad_token": "[PAD]",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
-  "sep_token": "[SEP]",
-  "stride": 0,
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
-  "unk_token": "[UNK]"
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
 }
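The remapping above, from BERT's `[CLS]`/`[SEP]` family to XLM-R's `<s>`/`</s>` family, is the kind of change that fails silently if the tokenizer files get out of sync, so a small check is worthwhile. A sketch under the same local-checkout assumption as above:

```python
# Verify the retokenized checkpoint exposes the XLM-R special tokens
# declared in tokenizer_config.json, including the high-id <mask>.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("final")  # local checkout of this commit
assert tok.cls_token == tok.bos_token == "<s>"
assert tok.sep_token == tok.eos_token == "</s>"
assert tok.pad_token == "<pad>" and tok.pad_token_id == 1
assert tok.unk_token == "<unk>"
assert tok.mask_token == "<mask>" and tok.mask_token_id == 250001
assert tok.model_max_length == 512
print("tokenizer special tokens are consistent")
```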
final/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e5f6d5af216d17edfe1467da470e957780b3ac40160bc38ba9df9956fa7f181
-size 5841
+oid sha256:5685c8d944a8138d54ea8d9ac9bd7d9f7858cfa5fba16bf86c8fb5115d681ebd
+size 5432
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5faca50eebc292cd4b2ba5c0e8868f3dc759f1fa3aafbcd8ff46da3d945f12ef
+oid sha256:f2d871c1c9db3bf703b69015678e7daae94d8a6464b6fe788cc481f9665748a2
 size 1112201932
runs/Sep14_16-45-42_fs-cai-002/events.out.tfevents.1757868347.fs-cai-002.3596035.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2d927eb9933e4cb7f123aa3be027ce63758e0da4d9c757fde5cf1e782588a56f
-size 7920
+oid sha256:804be0591084ad58dd747c4f662291c0b1f1bce9f94fd6eab7070a41ea9858d1
+size 8737