Model save
Browse files
- README.md +0 -15
- all_results.json +5 -5
- model-00001-of-00003.safetensors +1 -1
- model-00002-of-00003.safetensors +1 -1
- model-00003-of-00003.safetensors +1 -1
- runs/Jun12_11-28-08_gilbreth-j001.rcac.purdue.edu/events.out.tfevents.1718206233.gilbreth-j001.rcac.purdue.edu.142436.0 +3 -0
- train_results.json +5 -5
- trainer_state.json +47 -487
- training_args.bin +1 -1
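
This commit updates a checkpoint stored as three safetensors shards plus the usual Trainer artifacts (metrics JSON files, trainer state, a TensorBoard event log, and the serialized training arguments). A minimal loading sketch, assuming the repo also carries the tokenizer/config files from earlier commits; the path `./spin-v-iter0-v2` is a placeholder for the actual repo id or local clone:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the sharded safetensors checkpoint updated by this commit.
# "./spin-v-iter0-v2" is a placeholder; substitute the Hub repo id or a local clone path.
model = AutoModelForCausalLM.from_pretrained("./spin-v-iter0-v2", torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained("./spin-v-iter0-v2")
```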
README.md
CHANGED
@@ -14,16 +14,6 @@ should probably proofread and complete it, then remove this comment. -->
 # spin-v-iter0-v2
 
 This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the None dataset.
-It achieves the following results on the evaluation set:
-- Loss: 0.0054
-- Rewards/real: -1.6431
-- Rewards/generated: -16.4195
-- Rewards/accuracies: 1.0
-- Rewards/margins: 14.7764
-- Logps/generated: -257.4886
-- Logps/real: -151.0967
-- Logits/generated: -2.5437
-- Logits/real: -2.1952
 
 ## Model description
 
@@ -57,11 +47,6 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Rewards/real | Rewards/generated | Rewards/accuracies | Rewards/margins | Logps/generated | Logps/real | Logits/generated | Logits/real |
-|:-------------:|:-----:|:----:|:---------------:|:------------:|:-----------------:|:------------------:|:---------------:|:---------------:|:----------:|:----------------:|:-----------:|
-| 0.0106 | 0.64 | 100 | 0.0084 | -0.8594 | -11.5552 | 0.9992 | 10.6958 | -208.8447 | -143.2594 | -2.7086 | -2.4488 |
-| 0.002 | 1.27 | 200 | 0.0057 | -0.9375 | -14.1180 | 1.0 | 13.1805 | -234.4735 | -144.0411 | -2.6005 | -2.2709 |
-| 0.0007 | 1.91 | 300 | 0.0054 | -1.6431 | -16.4195 | 1.0 | 14.7764 | -257.4886 | -151.0967 | -2.5437 | -2.1952 |
 
 
 ### Framework versions
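
The evaluation bullets and the training-results table removed above follow the DPO/SPIN-style logging convention, in which the reward margin is the gap between the reward assigned to the real completion and the reward assigned to the generated one. A quick consistency check with the step-300 evaluation row (variable names are only for illustration):

```python
# Step-300 eval metrics removed from the card above.
rewards_real = -1.6431
rewards_generated = -16.4195
rewards_margins = 14.7764

# Margin = reward(real) - reward(generated), per the usual DPO/SPIN convention.
assert abs((rewards_real - rewards_generated) - rewards_margins) < 1e-3
```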
all_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 2.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_loss": 0.07844111844315194,
+    "train_runtime": 153.525,
+    "train_samples": 500,
+    "train_samples_per_second": 6.514,
+    "train_steps_per_second": 0.208
 }
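
The refreshed numbers are internally consistent: 500 samples over 2.0 epochs in 153.525 s gives about 6.51 samples/s, and the 32 optimizer steps recorded in trainer_state.json give about 0.208 steps/s. A small sketch recomputing them from the JSON (the file path is assumed relative to the repo root):

```python
import json

with open("all_results.json") as f:
    results = json.load(f)

# 500 samples x 2.0 epochs / 153.525 s  ->  ~6.514 samples/s
samples_per_s = results["train_samples"] * results["epoch"] / results["train_runtime"]
# 32 steps (global_step in trainer_state.json) / 153.525 s  ->  ~0.208 steps/s
steps_per_s = 32 / results["train_runtime"]

print(round(samples_per_s, 3), round(steps_per_s, 3))
```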
model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:03631b4ae686edda3177d16c4c3bf9bb423f686ec6898cba8eb6c6620d110265
 size 4943162336
model-00002-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6e853594d2166f0abe7b77feaa554c9a90748d9816c2f9a0ff392e8f3d50d4b1
 size 4999819336
model-00003-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c873ed19c3cb466622ed14889d7a83e1b034eb9c4266181a3bd272143b9ddc80
 size 4540516344
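
The three entries above are Git LFS pointer files rather than the weights themselves: each records only the SHA-256 (`oid`) and byte size of the actual shard. A sketch for verifying a downloaded shard against its pointer; the local file path is an assumption:

```python
import hashlib

# Compare a downloaded shard against the oid recorded in its LFS pointer.
expected = "03631b4ae686edda3177d16c4c3bf9bb423f686ec6898cba8eb6c6620d110265"

h = hashlib.sha256()
with open("model-00001-of-00003.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

print(h.hexdigest() == expected)
```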
runs/Jun12_11-28-08_gilbreth-j001.rcac.purdue.edu/events.out.tfevents.1718206233.gilbreth-j001.rcac.purdue.edu.142436.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ace26979a5a571c47bbaf167a27d640bf0aac226b44d25bfa5808c98bdc1946
+size 7396
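
The added file under `runs/` is the TensorBoard event log for this run (also stored through LFS). If tensorboard is installed, the scalars can be read back with the event accumulator; the tag names below follow the Trainer's usual `train/...` convention and may differ in this particular log:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the committed run directory and list the logged scalar tags.
acc = EventAccumulator("runs/Jun12_11-28-08_gilbreth-j001.rcac.purdue.edu")
acc.Reload()
print(acc.Tags()["scalars"])

# The tag name is an assumption; pick one of the tags printed above.
for event in acc.Scalars("train/loss"):
    print(event.step, event.value)
```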
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 2.0,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples":
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_loss": 0.07844111844315194,
+    "train_runtime": 153.525,
+    "train_samples": 500,
+    "train_samples_per_second": 6.514,
+    "train_steps_per_second": 0.208
 }
trainer_state.json
CHANGED
@@ -2,19 +2,19 @@
   "best_metric": null,
   "best_model_checkpoint": null,
   "epoch": 2.0,
-  "eval_steps":
-  "global_step":
+  "eval_steps": 50,
+  "global_step": 32,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.
-      "learning_rate": 1.
-      "logits/generated": -
-      "logits/real": -2.
-      "logps/generated": -
-      "logps/real": -
+      "epoch": 0.06,
+      "learning_rate": 1.25e-07,
+      "logits/generated": -2.9780185222625732,
+      "logits/real": -2.891969919204712,
+      "logps/generated": -75.28430938720703,
+      "logps/real": -112.39949035644531,
       "loss": 0.6931,
       "rewards/accuracies": 0.0,
       "rewards/generated": 0.0,
@@ -23,502 +23,62 @@
       "step": 1
     },
     {
-      "epoch": 0.
-      "learning_rate":
-      "logits/generated": -3.
-      "logits/real": -2.
-      "logps/generated": -
-      "logps/real": -
-      "loss": 0.
-      "rewards/accuracies": 0.
-      "rewards/generated": -
-      "rewards/margins":
-      "rewards/real": 0.
+      "epoch": 0.62,
+      "learning_rate": 3.928571428571428e-07,
+      "logits/generated": -3.0109641551971436,
+      "logits/real": -2.735833168029785,
+      "logps/generated": -129.67689514160156,
+      "logps/real": -122.11038970947266,
+      "loss": 0.1661,
+      "rewards/accuracies": 0.875,
+      "rewards/generated": -2.9385464191436768,
+      "rewards/margins": 3.736133098602295,
+      "rewards/real": 0.7975864410400391,
       "step": 10
     },
     {
-      "epoch":
-      "learning_rate":
-      "logits/generated": -
-      "logits/real": -2.
-      "logps/generated": -
-      "logps/real": -
-      "loss": 0.
+      "epoch": 1.25,
+      "learning_rate": 2.1428571428571426e-07,
+      "logits/generated": -2.9212605953216553,
+      "logits/real": -2.739665985107422,
+      "logps/generated": -155.23023986816406,
+      "logps/real": -139.42665100097656,
+      "loss": 0.0224,
       "rewards/accuracies": 1.0,
-      "rewards/generated": -
-      "rewards/margins":
-      "rewards/real":
+      "rewards/generated": -5.471407890319824,
+      "rewards/margins": 6.653175354003906,
+      "rewards/real": 1.1817679405212402,
       "step": 20
     },
     {
-      "epoch":
-      "learning_rate":
-      "logits/generated": -2.
-      "logits/real": -2.
-      "logps/generated": -
-      "logps/real": -
-      "loss": 0.
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -3.964935302734375,
-      "rewards/margins": 4.9714460372924805,
-      "rewards/real": 1.0065107345581055,
-      "step": 30
-    },
-    {
-      "epoch": 0.25,
-      "learning_rate": 4.858156028368794e-07,
-      "logits/generated": -2.896350145339966,
-      "logits/real": -2.632850408554077,
-      "logps/generated": -149.0387420654297,
-      "logps/real": -131.4845733642578,
-      "loss": 0.0209,
-      "rewards/accuracies": 0.987500011920929,
-      "rewards/generated": -5.3386735916137695,
-      "rewards/margins": 6.378259658813477,
-      "rewards/real": 1.0395863056182861,
-      "step": 40
-    },
-    {
-      "epoch": 0.32,
-      "learning_rate": 4.6808510638297873e-07,
-      "logits/generated": -2.864506244659424,
-      "logits/real": -2.635793685913086,
-      "logps/generated": -161.58311462402344,
-      "logps/real": -133.5462646484375,
-      "loss": 0.0157,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -6.72866678237915,
-      "rewards/margins": 7.655924320220947,
-      "rewards/real": 0.9272577166557312,
-      "step": 50
-    },
-    {
-      "epoch": 0.38,
-      "learning_rate": 4.50354609929078e-07,
-      "logits/generated": -2.8477931022644043,
-      "logits/real": -2.5890610218048096,
-      "logps/generated": -163.30792236328125,
-      "logps/real": -120.96208190917969,
-      "loss": 0.016,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -7.271246433258057,
-      "rewards/margins": 7.847572326660156,
-      "rewards/real": 0.5763252973556519,
-      "step": 60
-    },
-    {
-      "epoch": 0.45,
-      "learning_rate": 4.326241134751773e-07,
-      "logits/generated": -2.817225694656372,
-      "logits/real": -2.618878126144409,
-      "logps/generated": -178.3530731201172,
-      "logps/real": -133.58116149902344,
-      "loss": 0.0134,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -8.513948440551758,
-      "rewards/margins": 9.06941032409668,
-      "rewards/real": 0.5554608702659607,
-      "step": 70
-    },
-    {
-      "epoch": 0.51,
-      "learning_rate": 4.148936170212766e-07,
-      "logits/generated": -2.743182420730591,
-      "logits/real": -2.5085911750793457,
-      "logps/generated": -182.21102905273438,
-      "logps/real": -141.52438354492188,
-      "loss": 0.0132,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -9.143477439880371,
-      "rewards/margins": 8.894549369812012,
-      "rewards/real": -0.24892878532409668,
-      "step": 80
-    },
-    {
-      "epoch": 0.57,
-      "learning_rate": 3.971631205673759e-07,
-      "logits/generated": -2.667142152786255,
-      "logits/real": -2.3680872917175293,
-      "logps/generated": -221.4196014404297,
-      "logps/real": -156.9970703125,
-      "loss": 0.0073,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -11.756684303283691,
-      "rewards/margins": 11.24407958984375,
-      "rewards/real": -0.5126041769981384,
-      "step": 90
-    },
-    {
-      "epoch": 0.64,
-      "learning_rate": 3.7943262411347514e-07,
-      "logits/generated": -2.716907024383545,
-      "logits/real": -2.384260654449463,
-      "logps/generated": -195.7281951904297,
-      "logps/real": -141.03294372558594,
-      "loss": 0.0106,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -10.495864868164062,
-      "rewards/margins": 10.102469444274902,
-      "rewards/real": -0.39339518547058105,
-      "step": 100
-    },
-    {
-      "epoch": 0.64,
-      "eval_logits/generated": -2.708615779876709,
-      "eval_logits/real": -2.4488255977630615,
-      "eval_logps/generated": -208.8447265625,
-      "eval_logps/real": -143.2593536376953,
-      "eval_loss": 0.008355233818292618,
-      "eval_rewards/accuracies": 0.9992038011550903,
-      "eval_rewards/generated": -11.555154800415039,
-      "eval_rewards/margins": 10.695781707763672,
-      "eval_rewards/real": -0.8593728542327881,
-      "eval_runtime": 338.0923,
-      "eval_samples_per_second": 14.789,
-      "eval_steps_per_second": 0.464,
-      "step": 100
-    },
-    {
-      "epoch": 0.7,
-      "learning_rate": 3.617021276595745e-07,
-      "logits/generated": -2.6799185276031494,
-      "logits/real": -2.4362854957580566,
-      "logps/generated": -229.9918975830078,
-      "logps/real": -144.79042053222656,
-      "loss": 0.0055,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -13.137945175170898,
-      "rewards/margins": 11.87834358215332,
-      "rewards/real": -1.259602427482605,
-      "step": 110
-    },
-    {
-      "epoch": 0.76,
-      "learning_rate": 3.4397163120567375e-07,
-      "logits/generated": -2.70621919631958,
-      "logits/real": -2.4253954887390137,
-      "logps/generated": -228.62405395507812,
-      "logps/real": -158.47494506835938,
-      "loss": 0.0066,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -13.207649230957031,
-      "rewards/margins": 12.149269104003906,
-      "rewards/real": -1.0583809614181519,
-      "step": 120
-    },
-    {
-      "epoch": 0.83,
-      "learning_rate": 3.2624113475177305e-07,
-      "logits/generated": -2.662909746170044,
-      "logits/real": -2.3800911903381348,
-      "logps/generated": -227.56619262695312,
-      "logps/real": -156.76431274414062,
-      "loss": 0.0066,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -13.599740982055664,
-      "rewards/margins": 12.402183532714844,
-      "rewards/real": -1.1975574493408203,
-      "step": 130
-    },
-    {
-      "epoch": 0.89,
-      "learning_rate": 3.085106382978723e-07,
-      "logits/generated": -2.69126033782959,
-      "logits/real": -2.4004807472229004,
-      "logps/generated": -229.597900390625,
-      "logps/real": -146.2201385498047,
-      "loss": 0.0055,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -13.650607109069824,
-      "rewards/margins": 12.299604415893555,
-      "rewards/real": -1.3510032892227173,
-      "step": 140
-    },
-    {
-      "epoch": 0.96,
-      "learning_rate": 2.907801418439716e-07,
-      "logits/generated": -2.6775763034820557,
-      "logits/real": -2.324918508529663,
-      "logps/generated": -218.3529510498047,
-      "logps/real": -140.95547485351562,
-      "loss": 0.0138,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -12.812429428100586,
-      "rewards/margins": 12.05975341796875,
-      "rewards/real": -0.752677321434021,
-      "step": 150
-    },
-    {
-      "epoch": 1.02,
-      "learning_rate": 2.730496453900709e-07,
-      "logits/generated": -2.6712429523468018,
-      "logits/real": -2.2760770320892334,
-      "logps/generated": -196.49375915527344,
-      "logps/real": -121.7732925415039,
-      "loss": 0.0069,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -10.348185539245605,
-      "rewards/margins": 9.94708251953125,
-      "rewards/real": -0.40110310912132263,
-      "step": 160
-    },
-    {
-      "epoch": 1.08,
-      "learning_rate": 2.5531914893617016e-07,
-      "logits/generated": -2.6528658866882324,
-      "logits/real": -2.1584651470184326,
-      "logps/generated": -213.1004638671875,
-      "logps/real": -121.10084533691406,
-      "loss": 0.003,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -11.93848991394043,
-      "rewards/margins": 11.793214797973633,
-      "rewards/real": -0.14527548849582672,
-      "step": 170
-    },
-    {
-      "epoch": 1.15,
-      "learning_rate": 2.375886524822695e-07,
-      "logits/generated": -2.6257143020629883,
-      "logits/real": -2.2728638648986816,
-      "logps/generated": -228.39248657226562,
-      "logps/real": -152.61045837402344,
-      "loss": 0.0032,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -13.100919723510742,
-      "rewards/margins": 12.766759872436523,
-      "rewards/real": -0.3341600298881531,
-      "step": 180
-    },
-    {
-      "epoch": 1.21,
-      "learning_rate": 2.198581560283688e-07,
-      "logits/generated": -2.609994411468506,
-      "logits/real": -2.2576498985290527,
-      "logps/generated": -222.8015594482422,
-      "logps/real": -145.87921142578125,
-      "loss": 0.0036,
+      "epoch": 1.88,
+      "learning_rate": 3.571428571428571e-08,
+      "logits/generated": -2.919872760772705,
+      "logits/real": -2.64644455909729,
+      "logps/generated": -149.81588745117188,
+      "logps/real": -110.94522857666016,
+      "loss": 0.0084,
       "rewards/accuracies": 0.987500011920929,
-      "rewards/generated": -
-      "rewards/margins":
-      "rewards/real":
-      "step":
-    },
-    {
-      "epoch": 1.27,
-      "learning_rate": 2.0212765957446807e-07,
-      "logits/generated": -2.600148916244507,
-      "logits/real": -2.1764094829559326,
-      "logps/generated": -236.9789581298828,
-      "logps/real": -137.09503173828125,
-      "loss": 0.002,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -14.19157886505127,
-      "rewards/margins": 13.38233470916748,
-      "rewards/real": -0.8092441558837891,
-      "step": 200
-    },
-    {
-      "epoch": 1.27,
-      "eval_logits/generated": -2.600484609603882,
-      "eval_logits/real": -2.2709195613861084,
-      "eval_logps/generated": -234.47348022460938,
-      "eval_logps/real": -144.0410919189453,
-      "eval_loss": 0.0056638033129274845,
-      "eval_rewards/accuracies": 1.0,
-      "eval_rewards/generated": -14.118030548095703,
-      "eval_rewards/margins": 13.180484771728516,
-      "eval_rewards/real": -0.9375430345535278,
-      "eval_runtime": 334.9223,
-      "eval_samples_per_second": 14.929,
-      "eval_steps_per_second": 0.469,
-      "step": 200
-    },
-    {
-      "epoch": 1.34,
-      "learning_rate": 1.8439716312056735e-07,
-      "logits/generated": -2.617143154144287,
-      "logits/real": -2.263977289199829,
-      "logps/generated": -242.97866821289062,
-      "logps/real": -152.24502563476562,
-      "loss": 0.0025,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -14.723902702331543,
-      "rewards/margins": 13.91853141784668,
-      "rewards/real": -0.8053719401359558,
-      "step": 210
-    },
-    {
-      "epoch": 1.4,
-      "learning_rate": 1.6666666666666665e-07,
-      "logits/generated": -2.5576019287109375,
-      "logits/real": -2.3169806003570557,
-      "logps/generated": -233.3745880126953,
-      "logps/real": -145.44570922851562,
-      "loss": 0.0023,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -14.311078071594238,
-      "rewards/margins": 13.486970901489258,
-      "rewards/real": -0.8241075277328491,
-      "step": 220
-    },
-    {
-      "epoch": 1.46,
-      "learning_rate": 1.4893617021276595e-07,
-      "logits/generated": -2.553480386734009,
-      "logits/real": -2.315396785736084,
-      "logps/generated": -243.16702270507812,
-      "logps/real": -168.13119506835938,
-      "loss": 0.0013,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -14.701271057128906,
-      "rewards/margins": 13.865699768066406,
-      "rewards/real": -0.8355696797370911,
-      "step": 230
-    },
-    {
-      "epoch": 1.53,
-      "learning_rate": 1.3120567375886523e-07,
-      "logits/generated": -2.6132516860961914,
-      "logits/real": -2.2840161323547363,
-      "logps/generated": -246.56961059570312,
-      "logps/real": -152.701171875,
-      "loss": 0.0008,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -14.995321273803711,
-      "rewards/margins": 14.276300430297852,
-      "rewards/real": -0.7190229892730713,
-      "step": 240
-    },
-    {
-      "epoch": 1.59,
-      "learning_rate": 1.1347517730496453e-07,
-      "logits/generated": -2.591722249984741,
-      "logits/real": -2.277804374694824,
-      "logps/generated": -263.14129638671875,
-      "logps/real": -158.726806640625,
-      "loss": 0.0038,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -16.25307273864746,
-      "rewards/margins": 15.087217330932617,
-      "rewards/real": -1.1658554077148438,
-      "step": 250
-    },
-    {
-      "epoch": 1.66,
-      "learning_rate": 9.574468085106382e-08,
-      "logits/generated": -2.569918155670166,
-      "logits/real": -2.1422371864318848,
-      "logps/generated": -244.6776123046875,
-      "logps/real": -145.0582733154297,
-      "loss": 0.0019,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -15.619791984558105,
-      "rewards/margins": 14.159820556640625,
-      "rewards/real": -1.4599710702896118,
-      "step": 260
-    },
-    {
-      "epoch": 1.72,
-      "learning_rate": 7.801418439716311e-08,
-      "logits/generated": -2.539750099182129,
-      "logits/real": -2.137476682662964,
-      "logps/generated": -265.46429443359375,
-      "logps/real": -163.37588500976562,
-      "loss": 0.0013,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -16.41511344909668,
-      "rewards/margins": 14.795074462890625,
-      "rewards/real": -1.6200393438339233,
-      "step": 270
-    },
-    {
-      "epoch": 1.78,
-      "learning_rate": 6.02836879432624e-08,
-      "logits/generated": -2.574856996536255,
-      "logits/real": -2.0835378170013428,
-      "logps/generated": -262.6173095703125,
-      "logps/real": -142.27984619140625,
-      "loss": 0.0005,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -16.557262420654297,
-      "rewards/margins": 15.236516952514648,
-      "rewards/real": -1.3207473754882812,
-      "step": 280
-    },
-    {
-      "epoch": 1.85,
-      "learning_rate": 4.25531914893617e-08,
-      "logits/generated": -2.5489261150360107,
-      "logits/real": -2.2142765522003174,
-      "logps/generated": -268.306396484375,
-      "logps/real": -147.05963134765625,
-      "loss": 0.0016,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -17.089282989501953,
-      "rewards/margins": 15.543319702148438,
-      "rewards/real": -1.5459634065628052,
-      "step": 290
-    },
-    {
-      "epoch": 1.91,
-      "learning_rate": 2.4822695035460993e-08,
-      "logits/generated": -2.556087017059326,
-      "logits/real": -2.1201999187469482,
-      "logps/generated": -249.31875610351562,
-      "logps/real": -138.080322265625,
-      "loss": 0.0007,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -15.890460014343262,
-      "rewards/margins": 14.16200065612793,
-      "rewards/real": -1.728456735610962,
-      "step": 300
-    },
-    {
-      "epoch": 1.91,
-      "eval_logits/generated": -2.5437142848968506,
-      "eval_logits/real": -2.19522762298584,
-      "eval_logps/generated": -257.48858642578125,
-      "eval_logps/real": -151.0966796875,
-      "eval_loss": 0.005383754149079323,
-      "eval_rewards/accuracies": 1.0,
-      "eval_rewards/generated": -16.419538497924805,
-      "eval_rewards/margins": 14.776435852050781,
-      "eval_rewards/real": -1.6431050300598145,
-      "eval_runtime": 335.2989,
-      "eval_samples_per_second": 14.912,
-      "eval_steps_per_second": 0.468,
-      "step": 300
-    },
-    {
-      "epoch": 1.97,
-      "learning_rate": 7.092198581560283e-09,
-      "logits/generated": -2.5656914710998535,
-      "logits/real": -2.300166368484497,
-      "logps/generated": -260.5529479980469,
-      "logps/real": -153.91053771972656,
-      "loss": 0.0008,
-      "rewards/accuracies": 1.0,
-      "rewards/generated": -16.434940338134766,
-      "rewards/margins": 14.854705810546875,
-      "rewards/real": -1.5802339315414429,
-      "step": 310
+      "rewards/generated": -5.761648178100586,
+      "rewards/margins": 7.198714256286621,
+      "rewards/real": 1.4370663166046143,
+      "step": 30
     },
     {
       "epoch": 2.0,
-      "step":
+      "step": 32,
       "total_flos": 0.0,
-      "train_loss": 0.
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 0.
+      "train_loss": 0.07844111844315194,
+      "train_runtime": 153.525,
+      "train_samples_per_second": 6.514,
+      "train_steps_per_second": 0.208
     }
   ],
   "logging_steps": 10,
-  "max_steps":
+  "max_steps": 32,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
-  "save_steps":
+  "save_steps": 50,
   "total_flos": 0.0,
   "train_batch_size": 8,
   "trial_name": null,
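
The rewritten trainer_state.json keeps the Trainer's usual layout: one record per logging step in `log_history`, followed by run-level fields such as `max_steps` and `save_steps`. A short sketch that pulls the reward margin out of each logged training step:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training records carry "rewards/margins"; the final summary record (train_loss,
# train_runtime, ...) does not, so filter on the key.
for record in state["log_history"]:
    if "rewards/margins" in record:
        print(record["step"], record["epoch"], record["rewards/margins"])
```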
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c2053b17e1e63daf7a440393b29324ccb1103d71a0c880c3a1c46d2c9f72f053
 size 5880
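
training_args.bin is the TrainingArguments object the Trainer pickles next to the checkpoint. It can be inspected with torch.load, assuming transformers is importable so the pickled class can be resolved; recent torch versions additionally require `weights_only=False`:

```python
import torch

# Deserialize the saved TrainingArguments and print a few fields.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```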