{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9964071856287425,
  "eval_steps": 500,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0,
      "loss": 3.02,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0,
      "loss": 2.8946,
      "step": 2
    },
    {
      "epoch": 0.03,
      "learning_rate": 5e-05,
      "loss": 2.9989,
      "step": 3
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001,
      "loss": 3.0185,
      "step": 4
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.9948,
      "step": 5
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 2.1701,
      "step": 6
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019995065603657316,
      "loss": 2.1174,
      "step": 7
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019980267284282717,
      "loss": 2.067,
      "step": 8
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019955619646030802,
      "loss": 1.9723,
      "step": 9
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001992114701314478,
      "loss": 1.9417,
      "step": 10
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019876883405951377,
      "loss": 1.9781,
      "step": 11
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001982287250728689,
      "loss": 1.8071,
      "step": 12
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019759167619387476,
      "loss": 1.8272,
      "step": 13
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001968583161128631,
      "loss": 1.6803,
      "step": 14
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001960293685676943,
      "loss": 1.726,
      "step": 15
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019510565162951537,
      "loss": 1.7271,
      "step": 16
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019408807689542257,
      "loss": 1.7266,
      "step": 17
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019297764858882514,
      "loss": 1.7272,
      "step": 18
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019177546256839812,
      "loss": 1.6868,
      "step": 19
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019048270524660196,
      "loss": 1.5478,
      "step": 20
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001891006524188368,
      "loss": 1.5898,
      "step": 21
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018763066800438636,
      "loss": 1.6766,
      "step": 22
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001860742027003944,
      "loss": 1.5721,
      "step": 23
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018443279255020152,
      "loss": 1.5771,
      "step": 24
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018270805742745617,
      "loss": 1.5692,
      "step": 25
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.5286,
      "step": 26
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00017901550123756906,
      "loss": 1.4898,
      "step": 27
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00017705132427757895,
      "loss": 1.5101,
      "step": 28
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00017501110696304596,
      "loss": 1.5265,
      "step": 29
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00017289686274214118,
      "loss": 1.4373,
      "step": 30
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.3691,
      "step": 31
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00016845471059286887,
      "loss": 1.5147,
      "step": 32
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00016613118653236518,
      "loss": 1.4216,
      "step": 33
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.000163742398974869,
      "loss": 1.3968,
      "step": 34
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00016129070536529766,
      "loss": 1.4057,
      "step": 35
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00015877852522924732,
      "loss": 1.4042,
      "step": 36
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00015620833778521307,
      "loss": 1.3287,
      "step": 37
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00015358267949789966,
      "loss": 1.2654,
      "step": 38
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00015090414157503714,
      "loss": 1.3084,
      "step": 39
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00014817536741017152,
      "loss": 1.2634,
      "step": 40
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00014539904997395468,
      "loss": 1.2646,
      "step": 41
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00014257792915650728,
      "loss": 1.29,
      "step": 42
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00013971478906347806,
      "loss": 1.3563,
      "step": 43
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00013681245526846783,
      "loss": 1.272,
      "step": 44
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00013387379202452917,
      "loss": 1.299,
      "step": 45
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00013090169943749476,
      "loss": 1.3304,
      "step": 46
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00012789911060392294,
      "loss": 1.2768,
      "step": 47
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001248689887164855,
      "loss": 1.301,
      "step": 48
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00012181432413965428,
      "loss": 1.1637,
      "step": 49
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00011873813145857249,
      "loss": 1.2437,
      "step": 50
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001156434465040231,
      "loss": 1.2699,
      "step": 51
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00011253332335643043,
      "loss": 1.2035,
      "step": 52
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00010941083133185146,
      "loss": 1.2354,
      "step": 53
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00010627905195293135,
      "loss": 1.3551,
      "step": 54
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00010314107590781284,
      "loss": 1.1651,
      "step": 55
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001,
      "loss": 1.1803,
      "step": 56
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.685892409218717e-05,
      "loss": 1.1379,
      "step": 57
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.372094804706867e-05,
      "loss": 1.1446,
      "step": 58
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.058916866814858e-05,
      "loss": 1.2089,
      "step": 59
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.746667664356956e-05,
      "loss": 1.1623,
      "step": 60
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.435655349597689e-05,
      "loss": 1.0943,
      "step": 61
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.126186854142752e-05,
      "loss": 1.2074,
      "step": 62
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.818567586034577e-05,
      "loss": 1.1739,
      "step": 63
    },
    {
      "epoch": 0.61,
      "learning_rate": 7.513101128351454e-05,
      "loss": 1.2538,
      "step": 64
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.210088939607708e-05,
      "loss": 1.288,
      "step": 65
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.909830056250527e-05,
      "loss": 1.1144,
      "step": 66
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.612620797547087e-05,
      "loss": 1.2695,
      "step": 67
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.318754473153221e-05,
      "loss": 1.205,
      "step": 68
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.0285210936521955e-05,
      "loss": 1.1551,
      "step": 69
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.7422070843492734e-05,
      "loss": 1.1511,
      "step": 70
    },
    {
      "epoch": 0.68,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 1.1414,
      "step": 71
    },
    {
      "epoch": 0.69,
      "learning_rate": 5.182463258982846e-05,
      "loss": 1.1637,
      "step": 72
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.909585842496287e-05,
      "loss": 1.1339,
      "step": 73
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.6417320502100316e-05,
      "loss": 1.1118,
      "step": 74
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.379166221478697e-05,
      "loss": 1.1822,
      "step": 75
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.12214747707527e-05,
      "loss": 1.2311,
      "step": 76
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.8709294634702376e-05,
      "loss": 1.1172,
      "step": 77
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.6257601025131026e-05,
      "loss": 1.0924,
      "step": 78
    },
    {
      "epoch": 0.76,
      "learning_rate": 3.386881346763483e-05,
      "loss": 1.1534,
      "step": 79
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.154528940713113e-05,
      "loss": 1.2601,
      "step": 80
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.0743,
      "step": 81
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.7103137257858868e-05,
      "loss": 1.1419,
      "step": 82
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.4988893036954043e-05,
      "loss": 1.0558,
      "step": 83
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.2948675722421086e-05,
      "loss": 1.1382,
      "step": 84
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.098449876243096e-05,
      "loss": 1.1775,
      "step": 85
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 1.1861,
      "step": 86
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.7291942572543807e-05,
      "loss": 1.1331,
      "step": 87
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.5567207449798515e-05,
      "loss": 1.142,
      "step": 88
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.3925797299605647e-05,
      "loss": 1.1482,
      "step": 89
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.2369331995613665e-05,
      "loss": 1.0885,
      "step": 90
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 1.2005,
      "step": 91
    },
    {
      "epoch": 0.88,
      "learning_rate": 9.517294753398064e-06,
      "loss": 1.0784,
      "step": 92
    },
    {
      "epoch": 0.89,
      "learning_rate": 8.224537431601886e-06,
      "loss": 1.133,
      "step": 93
    },
    {
      "epoch": 0.9,
      "learning_rate": 7.022351411174866e-06,
      "loss": 1.1096,
      "step": 94
    },
    {
      "epoch": 0.91,
      "learning_rate": 5.911923104577455e-06,
      "loss": 1.108,
      "step": 95
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.9645,
      "step": 96
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.970631432305694e-06,
      "loss": 1.1942,
      "step": 97
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.1416838871368924e-06,
      "loss": 1.181,
      "step": 98
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.4083238061252567e-06,
      "loss": 1.0978,
      "step": 99
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.771274927131139e-06,
      "loss": 0.9967,
      "step": 100
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.231165940486234e-06,
      "loss": 1.2266,
      "step": 101
    },
    {
      "epoch": 0.98,
      "learning_rate": 7.885298685522235e-07,
      "loss": 1.216,
      "step": 102
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.438035396920004e-07,
      "loss": 1.2119,
      "step": 103
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.973271571728441e-07,
      "loss": 1.1436,
      "step": 104
    },
    {
      "epoch": 1.0,
      "step": 104,
      "total_flos": 1.0478460476588032e+16,
      "train_loss": 1.4066595842058842,
      "train_runtime": 3219.9547,
      "train_samples_per_second": 0.259,
      "train_steps_per_second": 0.032
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 104,
  "total_flos": 1.0478460476588032e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}