{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 804,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018656716417910446,
      "grad_norm": 11.399012167837801,
      "learning_rate": 4.8780487804878055e-06,
      "loss": 1.6662,
      "step": 5
    },
    {
      "epoch": 0.03731343283582089,
      "grad_norm": 2.7376566823470316,
      "learning_rate": 1.0975609756097562e-05,
      "loss": 1.2958,
      "step": 10
    },
    {
      "epoch": 0.055970149253731345,
      "grad_norm": 0.7789979993380964,
      "learning_rate": 1.707317073170732e-05,
      "loss": 0.9493,
      "step": 15
    },
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 0.5128569616334213,
      "learning_rate": 2.3170731707317075e-05,
      "loss": 0.7989,
      "step": 20
    },
    {
      "epoch": 0.09328358208955224,
      "grad_norm": 0.2952286751449961,
      "learning_rate": 2.926829268292683e-05,
      "loss": 0.7254,
      "step": 25
    },
    {
      "epoch": 0.11194029850746269,
      "grad_norm": 0.2341313903311179,
      "learning_rate": 3.5365853658536584e-05,
      "loss": 0.6846,
      "step": 30
    },
    {
      "epoch": 0.13059701492537312,
      "grad_norm": 0.19391262472859572,
      "learning_rate": 4.146341463414634e-05,
      "loss": 0.6538,
      "step": 35
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 0.16560726663004716,
      "learning_rate": 4.75609756097561e-05,
      "loss": 0.6315,
      "step": 40
    },
    {
      "epoch": 0.16791044776119404,
      "grad_norm": 0.13927004882169236,
      "learning_rate": 4.999828351434079e-05,
      "loss": 0.6127,
      "step": 45
    },
    {
      "epoch": 0.1865671641791045,
      "grad_norm": 0.1455738320729682,
      "learning_rate": 4.998779482816942e-05,
      "loss": 0.6095,
      "step": 50
    },
    {
      "epoch": 0.20522388059701493,
      "grad_norm": 0.12130331155218448,
      "learning_rate": 4.996777549883426e-05,
      "loss": 0.5824,
      "step": 55
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 0.11003515209479892,
      "learning_rate": 4.9938234010808136e-05,
      "loss": 0.5861,
      "step": 60
    },
    {
      "epoch": 0.24253731343283583,
      "grad_norm": 0.11060375781263691,
      "learning_rate": 4.989918288418841e-05,
      "loss": 0.5754,
      "step": 65
    },
    {
      "epoch": 0.26119402985074625,
      "grad_norm": 0.1099882855423578,
      "learning_rate": 4.9850638669390816e-05,
      "loss": 0.5724,
      "step": 70
    },
    {
      "epoch": 0.2798507462686567,
      "grad_norm": 0.10766352810165353,
      "learning_rate": 4.97926219401351e-05,
      "loss": 0.5608,
      "step": 75
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.10140806004752735,
      "learning_rate": 4.9725157284725665e-05,
      "loss": 0.5623,
      "step": 80
    },
    {
      "epoch": 0.31716417910447764,
      "grad_norm": 0.10445356548807731,
      "learning_rate": 4.964827329563061e-05,
      "loss": 0.5605,
      "step": 85
    },
    {
      "epoch": 0.3358208955223881,
      "grad_norm": 0.09473190185241998,
      "learning_rate": 4.956200255736394e-05,
      "loss": 0.5492,
      "step": 90
    },
    {
      "epoch": 0.35447761194029853,
      "grad_norm": 0.1104055037751782,
      "learning_rate": 4.9466381632675714e-05,
      "loss": 0.5452,
      "step": 95
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 0.11728567466154544,
      "learning_rate": 4.936145104705629e-05,
      "loss": 0.5431,
      "step": 100
    },
    {
      "epoch": 0.3917910447761194,
      "grad_norm": 0.12324393210746246,
      "learning_rate": 4.9247255271560994e-05,
      "loss": 0.542,
      "step": 105
    },
    {
      "epoch": 0.41044776119402987,
      "grad_norm": 0.10189522170485131,
      "learning_rate": 4.9123842703962754e-05,
      "loss": 0.5376,
      "step": 110
    },
    {
      "epoch": 0.4291044776119403,
      "grad_norm": 0.09928359820416677,
      "learning_rate": 4.899126564824033e-05,
      "loss": 0.5386,
      "step": 115
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 0.08893348279887758,
      "learning_rate": 4.884958029241127e-05,
      "loss": 0.5364,
      "step": 120
    },
    {
      "epoch": 0.4664179104477612,
      "grad_norm": 0.11606708496242767,
      "learning_rate": 4.869884668471853e-05,
      "loss": 0.5352,
      "step": 125
    },
    {
      "epoch": 0.48507462686567165,
      "grad_norm": 0.10286612666181358,
      "learning_rate": 4.8539128708181276e-05,
      "loss": 0.528,
      "step": 130
    },
    {
      "epoch": 0.503731343283582,
      "grad_norm": 0.11732114273017366,
      "learning_rate": 4.8370494053520316e-05,
      "loss": 0.5365,
      "step": 135
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 0.11109874771027929,
      "learning_rate": 4.8193014190469815e-05,
      "loss": 0.5304,
      "step": 140
    },
    {
      "epoch": 0.5410447761194029,
      "grad_norm": 0.10629574732330986,
      "learning_rate": 4.800676433748746e-05,
      "loss": 0.5176,
      "step": 145
    },
    {
      "epoch": 0.5597014925373134,
      "grad_norm": 0.1225754806353354,
      "learning_rate": 4.781182342987577e-05,
      "loss": 0.5244,
      "step": 150
    },
    {
      "epoch": 0.5783582089552238,
      "grad_norm": 0.11545798098173879,
      "learning_rate": 4.7608274086328275e-05,
      "loss": 0.5197,
      "step": 155
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.10984434834330462,
      "learning_rate": 4.739620257391446e-05,
      "loss": 0.5273,
      "step": 160
    },
    {
      "epoch": 0.6156716417910447,
      "grad_norm": 0.09384895592282082,
      "learning_rate": 4.7175698771518656e-05,
      "loss": 0.5193,
      "step": 165
    },
    {
      "epoch": 0.6343283582089553,
      "grad_norm": 0.13242720525922766,
      "learning_rate": 4.6946856131748076e-05,
      "loss": 0.5193,
      "step": 170
    },
    {
      "epoch": 0.6529850746268657,
      "grad_norm": 0.10950233032891146,
      "learning_rate": 4.6709771641326244e-05,
      "loss": 0.5145,
      "step": 175
    },
    {
      "epoch": 0.6716417910447762,
      "grad_norm": 0.1069644733597589,
      "learning_rate": 4.6464545779988757e-05,
      "loss": 0.5167,
      "step": 180
    },
    {
      "epoch": 0.6902985074626866,
      "grad_norm": 0.12573511591329042,
      "learning_rate": 4.621128247789846e-05,
      "loss": 0.5141,
      "step": 185
    },
    {
      "epoch": 0.7089552238805971,
      "grad_norm": 0.10595364884016444,
      "learning_rate": 4.595008907159847e-05,
      "loss": 0.5081,
      "step": 190
    },
    {
      "epoch": 0.7276119402985075,
      "grad_norm": 0.11269588046403602,
      "learning_rate": 4.568107625852136e-05,
      "loss": 0.503,
      "step": 195
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 0.09406398116395229,
      "learning_rate": 4.5404358050074115e-05,
      "loss": 0.5075,
      "step": 200
    },
    {
      "epoch": 0.7649253731343284,
      "grad_norm": 0.09993211084306963,
      "learning_rate": 4.512005172331842e-05,
      "loss": 0.5107,
      "step": 205
    },
    {
      "epoch": 0.7835820895522388,
      "grad_norm": 0.11331241080632606,
      "learning_rate": 4.482827777126706e-05,
      "loss": 0.507,
      "step": 210
    },
    {
      "epoch": 0.8022388059701493,
      "grad_norm": 0.11221212086046371,
      "learning_rate": 4.4529159851817255e-05,
      "loss": 0.5041,
      "step": 215
    },
    {
      "epoch": 0.8208955223880597,
      "grad_norm": 0.11707450296579959,
      "learning_rate": 4.422282473534271e-05,
      "loss": 0.4989,
      "step": 220
    },
    {
      "epoch": 0.8395522388059702,
      "grad_norm": 0.11427168269212384,
      "learning_rate": 4.3909402250966534e-05,
      "loss": 0.5151,
      "step": 225
    },
    {
      "epoch": 0.8582089552238806,
      "grad_norm": 0.10434919827802294,
      "learning_rate": 4.358902523153791e-05,
      "loss": 0.5003,
      "step": 230
    },
    {
      "epoch": 0.8768656716417911,
      "grad_norm": 0.09522718572477468,
      "learning_rate": 4.326182945733555e-05,
      "loss": 0.5083,
      "step": 235
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 0.10303101606186355,
      "learning_rate": 4.292795359852221e-05,
      "loss": 0.5,
      "step": 240
    },
    {
      "epoch": 0.914179104477612,
      "grad_norm": 0.11645760699383664,
      "learning_rate": 4.2587539156374295e-05,
      "loss": 0.5078,
      "step": 245
    },
    {
      "epoch": 0.9328358208955224,
      "grad_norm": 0.11868485154529083,
      "learning_rate": 4.2240730403311586e-05,
      "loss": 0.5005,
      "step": 250
    },
    {
      "epoch": 0.9514925373134329,
      "grad_norm": 0.1135775018055789,
      "learning_rate": 4.188767432175263e-05,
      "loss": 0.501,
      "step": 255
    },
    {
      "epoch": 0.9701492537313433,
      "grad_norm": 0.10759776645567355,
      "learning_rate": 4.1528520541821506e-05,
      "loss": 0.4954,
      "step": 260
    },
    {
      "epoch": 0.9888059701492538,
      "grad_norm": 0.11164466025815273,
      "learning_rate": 4.116342127793245e-05,
      "loss": 0.5027,
      "step": 265
    },
    {
      "epoch": 1.007462686567164,
      "grad_norm": 0.1134164118788949,
      "learning_rate": 4.0792531264279285e-05,
      "loss": 0.4952,
      "step": 270
    },
    {
      "epoch": 1.0261194029850746,
      "grad_norm": 0.1224166510766843,
      "learning_rate": 4.041600768925687e-05,
      "loss": 0.4755,
      "step": 275
    },
    {
      "epoch": 1.044776119402985,
      "grad_norm": 0.10342344313016362,
      "learning_rate": 4.0034010128842484e-05,
      "loss": 0.4812,
      "step": 280
    },
    {
      "epoch": 1.0634328358208955,
      "grad_norm": 0.09619486513423638,
      "learning_rate": 3.964670047896525e-05,
      "loss": 0.4832,
      "step": 285
    },
    {
      "epoch": 1.0820895522388059,
      "grad_norm": 0.1148240507474565,
      "learning_rate": 3.925424288689239e-05,
      "loss": 0.4764,
      "step": 290
    },
    {
      "epoch": 1.1007462686567164,
      "grad_norm": 0.09558761238740061,
      "learning_rate": 3.8856803681661296e-05,
      "loss": 0.4822,
      "step": 295
    },
    {
      "epoch": 1.1194029850746268,
      "grad_norm": 0.10240411198398687,
      "learning_rate": 3.8454551303586964e-05,
      "loss": 0.4808,
      "step": 300
    },
    {
      "epoch": 1.1380597014925373,
      "grad_norm": 0.11127995228587867,
      "learning_rate": 3.8047656232874624e-05,
      "loss": 0.484,
      "step": 305
    },
    {
      "epoch": 1.1567164179104479,
      "grad_norm": 0.09882015115635996,
      "learning_rate": 3.763629091736781e-05,
      "loss": 0.4768,
      "step": 310
    },
    {
      "epoch": 1.1753731343283582,
      "grad_norm": 0.09479435890268799,
      "learning_rate": 3.722062969946254e-05,
      "loss": 0.483,
      "step": 315
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.09215846819136608,
      "learning_rate": 3.6800848742218644e-05,
      "loss": 0.4798,
      "step": 320
    },
    {
      "epoch": 1.212686567164179,
      "grad_norm": 0.09072929789796806,
      "learning_rate": 3.6377125954699254e-05,
      "loss": 0.4847,
      "step": 325
    },
    {
      "epoch": 1.2313432835820897,
      "grad_norm": 0.09912376706624307,
      "learning_rate": 3.5949640916570566e-05,
      "loss": 0.4788,
      "step": 330
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.09761642906266785,
      "learning_rate": 3.551857480199336e-05,
      "loss": 0.4807,
      "step": 335
    },
    {
      "epoch": 1.2686567164179103,
      "grad_norm": 0.09469785325303005,
      "learning_rate": 3.5084110302838916e-05,
      "loss": 0.4773,
      "step": 340
    },
    {
      "epoch": 1.287313432835821,
      "grad_norm": 0.09477509976342904,
      "learning_rate": 3.464643155126162e-05,
      "loss": 0.4731,
      "step": 345
    },
    {
      "epoch": 1.3059701492537314,
      "grad_norm": 0.10492492806033109,
      "learning_rate": 3.4205724041661135e-05,
      "loss": 0.4748,
      "step": 350
    },
    {
      "epoch": 1.3246268656716418,
      "grad_norm": 0.09366312938984638,
      "learning_rate": 3.376217455206732e-05,
      "loss": 0.4742,
      "step": 355
    },
    {
      "epoch": 1.3432835820895521,
      "grad_norm": 0.08024810662631264,
      "learning_rate": 3.3315971064981025e-05,
      "loss": 0.4764,
      "step": 360
    },
    {
      "epoch": 1.3619402985074627,
      "grad_norm": 0.09717797386193038,
      "learning_rate": 3.286730268770452e-05,
      "loss": 0.4785,
      "step": 365
    },
    {
      "epoch": 1.3805970149253732,
      "grad_norm": 0.0939200638469148,
      "learning_rate": 3.2416359572195155e-05,
      "loss": 0.4844,
      "step": 370
    },
    {
      "epoch": 1.3992537313432836,
      "grad_norm": 0.09294770035243786,
      "learning_rate": 3.1963332834476247e-05,
      "loss": 0.4775,
      "step": 375
    },
    {
      "epoch": 1.417910447761194,
      "grad_norm": 0.08413530596038701,
      "learning_rate": 3.150841447363948e-05,
      "loss": 0.4803,
      "step": 380
    },
    {
      "epoch": 1.4365671641791045,
      "grad_norm": 0.0865856672794535,
      "learning_rate": 3.1051797290472966e-05,
      "loss": 0.4721,
      "step": 385
    },
    {
      "epoch": 1.455223880597015,
      "grad_norm": 0.08763506569892904,
      "learning_rate": 3.059367480574958e-05,
      "loss": 0.4742,
      "step": 390
    },
    {
      "epoch": 1.4738805970149254,
      "grad_norm": 0.09707220296579894,
      "learning_rate": 3.0134241178210103e-05,
      "loss": 0.4703,
      "step": 395
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 0.09647084631774662,
      "learning_rate": 2.9673691122276086e-05,
      "loss": 0.4716,
      "step": 400
    },
    {
      "epoch": 1.5111940298507462,
      "grad_norm": 0.10414671302567495,
      "learning_rate": 2.9212219825527075e-05,
      "loss": 0.4741,
      "step": 405
    },
    {
      "epoch": 1.5298507462686568,
      "grad_norm": 0.0964804559345882,
      "learning_rate": 2.8750022865977443e-05,
      "loss": 0.4773,
      "step": 410
    },
    {
      "epoch": 1.5485074626865671,
      "grad_norm": 0.08300671453326233,
      "learning_rate": 2.82872961291876e-05,
      "loss": 0.4647,
      "step": 415
    },
    {
      "epoch": 1.5671641791044775,
      "grad_norm": 0.09370690516566732,
      "learning_rate": 2.7824235725245042e-05,
      "loss": 0.4664,
      "step": 420
    },
    {
      "epoch": 1.585820895522388,
      "grad_norm": 0.09401332348942805,
      "learning_rate": 2.7361037905650032e-05,
      "loss": 0.4697,
      "step": 425
    },
    {
      "epoch": 1.6044776119402986,
      "grad_norm": 0.08965158350497503,
      "learning_rate": 2.689789898014155e-05,
      "loss": 0.4683,
      "step": 430
    },
    {
      "epoch": 1.623134328358209,
      "grad_norm": 0.08803489279640653,
      "learning_rate": 2.6435015233498443e-05,
      "loss": 0.4721,
      "step": 435
    },
    {
      "epoch": 1.6417910447761193,
      "grad_norm": 0.08576868029341782,
      "learning_rate": 2.5972582842351156e-05,
      "loss": 0.4664,
      "step": 440
    },
    {
      "epoch": 1.6604477611940298,
      "grad_norm": 0.08790658081775465,
      "learning_rate": 2.551079779203932e-05,
      "loss": 0.4666,
      "step": 445
    },
    {
      "epoch": 1.6791044776119404,
      "grad_norm": 0.09761982809729165,
      "learning_rate": 2.504985579355047e-05,
      "loss": 0.4708,
      "step": 450
    },
    {
      "epoch": 1.6977611940298507,
      "grad_norm": 0.08600266340290913,
      "learning_rate": 2.458995220057491e-05,
      "loss": 0.4691,
      "step": 455
    },
    {
      "epoch": 1.716417910447761,
      "grad_norm": 0.08117488167122497,
      "learning_rate": 2.4131281926712146e-05,
      "loss": 0.4735,
      "step": 460
    },
    {
      "epoch": 1.7350746268656716,
      "grad_norm": 0.08203479577414656,
      "learning_rate": 2.3674039362863687e-05,
      "loss": 0.4687,
      "step": 465
    },
    {
      "epoch": 1.7537313432835822,
      "grad_norm": 0.07800658853978873,
      "learning_rate": 2.3218418294847517e-05,
      "loss": 0.4683,
      "step": 470
    },
    {
      "epoch": 1.7723880597014925,
      "grad_norm": 0.09056596240340838,
      "learning_rate": 2.2764611821268918e-05,
      "loss": 0.4648,
      "step": 475
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 0.0964354324970012,
      "learning_rate": 2.231281227168257e-05,
      "loss": 0.4733,
      "step": 480
    },
    {
      "epoch": 1.8097014925373134,
      "grad_norm": 0.08758293289634715,
      "learning_rate": 2.18632111250806e-05,
      "loss": 0.475,
      "step": 485
    },
    {
      "epoch": 1.828358208955224,
      "grad_norm": 0.09150711680284437,
      "learning_rate": 2.141599892874107e-05,
      "loss": 0.4692,
      "step": 490
    },
    {
      "epoch": 1.8470149253731343,
      "grad_norm": 0.08550519308452513,
      "learning_rate": 2.09713652174714e-05,
      "loss": 0.4652,
      "step": 495
    },
    {
      "epoch": 1.8656716417910446,
      "grad_norm": 0.0871164016396725,
      "learning_rate": 2.0529498433280807e-05,
      "loss": 0.4674,
      "step": 500
    },
    {
      "epoch": 1.8843283582089554,
      "grad_norm": 0.09480068977033189,
      "learning_rate": 2.0090585845516012e-05,
      "loss": 0.4708,
      "step": 505
    },
    {
      "epoch": 1.9029850746268657,
      "grad_norm": 0.09417802812416548,
      "learning_rate": 1.965481347149376e-05,
      "loss": 0.4695,
      "step": 510
    },
    {
      "epoch": 1.921641791044776,
      "grad_norm": 0.08791315041891376,
      "learning_rate": 1.9222365997664165e-05,
      "loss": 0.4676,
      "step": 515
    },
    {
      "epoch": 1.9402985074626866,
      "grad_norm": 0.0821183823198808,
      "learning_rate": 1.8793426701337947e-05,
      "loss": 0.4648,
      "step": 520
    },
    {
      "epoch": 1.9589552238805972,
      "grad_norm": 0.0744561938489921,
      "learning_rate": 1.8368177373010954e-05,
      "loss": 0.4732,
      "step": 525
    },
    {
      "epoch": 1.9776119402985075,
      "grad_norm": 0.07817933947596517,
      "learning_rate": 1.7946798239318775e-05,
      "loss": 0.4664,
      "step": 530
    },
    {
      "epoch": 1.9962686567164178,
      "grad_norm": 0.08379421148696033,
      "learning_rate": 1.75294678866542e-05,
      "loss": 0.4679,
      "step": 535
    },
    {
      "epoch": 2.014925373134328,
      "grad_norm": 0.07984701953647755,
      "learning_rate": 1.7116363185479754e-05,
      "loss": 0.4602,
      "step": 540
    },
    {
      "epoch": 2.033582089552239,
      "grad_norm": 0.08449268753051078,
      "learning_rate": 1.670765921536755e-05,
      "loss": 0.4569,
      "step": 545
    },
    {
      "epoch": 2.0522388059701493,
      "grad_norm": 0.08095757523241218,
      "learning_rate": 1.6303529190798088e-05,
      "loss": 0.4545,
      "step": 550
    },
    {
      "epoch": 2.0708955223880596,
      "grad_norm": 0.08363343932299715,
      "learning_rate": 1.590414438774954e-05,
      "loss": 0.4518,
      "step": 555
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 0.07463565896489559,
      "learning_rate": 1.550967407110856e-05,
      "loss": 0.4489,
      "step": 560
    },
    {
      "epoch": 2.1082089552238807,
      "grad_norm": 0.08433265404002505,
      "learning_rate": 1.5120285422933478e-05,
      "loss": 0.4519,
      "step": 565
    },
    {
      "epoch": 2.126865671641791,
      "grad_norm": 0.07395268788672169,
      "learning_rate": 1.4736143471600173e-05,
      "loss": 0.4491,
      "step": 570
    },
    {
      "epoch": 2.1455223880597014,
      "grad_norm": 0.08342447109766311,
      "learning_rate": 1.4357411021860773e-05,
      "loss": 0.4544,
      "step": 575
    },
    {
      "epoch": 2.1641791044776117,
      "grad_norm": 0.07519841036516427,
      "learning_rate": 1.3984248585844645e-05,
      "loss": 0.4602,
      "step": 580
    },
    {
      "epoch": 2.1828358208955225,
      "grad_norm": 0.07504816361245825,
      "learning_rate": 1.3616814315031146e-05,
      "loss": 0.4531,
      "step": 585
    },
    {
      "epoch": 2.201492537313433,
      "grad_norm": 0.08212433520197311,
      "learning_rate": 1.3255263933222833e-05,
      "loss": 0.4555,
      "step": 590
    },
    {
      "epoch": 2.220149253731343,
      "grad_norm": 0.08163078572745201,
      "learning_rate": 1.2899750670547473e-05,
      "loss": 0.4521,
      "step": 595
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 0.07590759096220778,
      "learning_rate": 1.2550425198516973e-05,
      "loss": 0.4495,
      "step": 600
    },
    {
      "epoch": 2.2574626865671643,
      "grad_norm": 0.06904365467338676,
      "learning_rate": 1.2207435566170722e-05,
      "loss": 0.4491,
      "step": 605
    },
    {
      "epoch": 2.2761194029850746,
      "grad_norm": 0.07342202054906058,
      "learning_rate": 1.1870927137330267e-05,
      "loss": 0.4558,
      "step": 610
    },
    {
      "epoch": 2.294776119402985,
      "grad_norm": 0.08175407881974946,
      "learning_rate": 1.1541042528992152e-05,
      "loss": 0.4505,
      "step": 615
    },
    {
      "epoch": 2.3134328358208958,
      "grad_norm": 0.07091832226253358,
      "learning_rate": 1.1217921550884774e-05,
      "loss": 0.4563,
      "step": 620
    },
    {
      "epoch": 2.332089552238806,
      "grad_norm": 0.0735381212475432,
      "learning_rate": 1.0901701146215085e-05,
      "loss": 0.4503,
      "step": 625
    },
    {
      "epoch": 2.3507462686567164,
      "grad_norm": 0.07454458223706102,
      "learning_rate": 1.0592515333630128e-05,
      "loss": 0.4471,
      "step": 630
    },
    {
      "epoch": 2.3694029850746268,
      "grad_norm": 0.06918781672394263,
      "learning_rate": 1.029049515041808e-05,
      "loss": 0.4459,
      "step": 635
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 0.07042349253392675,
      "learning_rate": 9.99576859697277e-06,
      "loss": 0.4527,
      "step": 640
    },
    {
      "epoch": 2.406716417910448,
      "grad_norm": 0.06990000224390586,
      "learning_rate": 9.708460582545337e-06,
      "loss": 0.4542,
      "step": 645
    },
    {
      "epoch": 2.425373134328358,
      "grad_norm": 0.07589637943114948,
      "learning_rate": 9.428692872305925e-06,
      "loss": 0.4486,
      "step": 650
    },
    {
      "epoch": 2.4440298507462686,
      "grad_norm": 0.07305499937497431,
      "learning_rate": 9.15658403573792e-06,
      "loss": 0.4613,
      "step": 655
    },
    {
      "epoch": 2.4626865671641793,
      "grad_norm": 0.06847095800068362,
      "learning_rate": 8.892249396386513e-06,
      "loss": 0.4489,
      "step": 660
    },
    {
      "epoch": 2.4813432835820897,
      "grad_norm": 0.07099198770234746,
      "learning_rate": 8.635800982982958e-06,
      "loss": 0.457,
      "step": 665
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.06902375733484567,
      "learning_rate": 8.387347481965244e-06,
      "loss": 0.4475,
      "step": 670
    },
    {
      "epoch": 2.5186567164179103,
      "grad_norm": 0.07032630308928171,
      "learning_rate": 8.14699419141525e-06,
      "loss": 0.4553,
      "step": 675
    },
    {
      "epoch": 2.5373134328358207,
      "grad_norm": 0.07177818923129327,
      "learning_rate": 7.914842976431932e-06,
      "loss": 0.4533,
      "step": 680
    },
    {
      "epoch": 2.5559701492537314,
      "grad_norm": 0.07394610672019591,
      "learning_rate": 7.690992225959465e-06,
      "loss": 0.453,
      "step": 685
    },
    {
      "epoch": 2.574626865671642,
      "grad_norm": 0.06821411644108501,
      "learning_rate": 7.4755368110886366e-06,
      "loss": 0.4515,
      "step": 690
    },
    {
      "epoch": 2.593283582089552,
      "grad_norm": 0.0722058283703491,
      "learning_rate": 7.268568044849132e-06,
      "loss": 0.4594,
      "step": 695
    },
    {
      "epoch": 2.611940298507463,
      "grad_norm": 0.0704583684801643,
      "learning_rate": 7.0701736435098155e-06,
      "loss": 0.4524,
      "step": 700
    },
    {
      "epoch": 2.6305970149253732,
      "grad_norm": 0.06988823803890779,
      "learning_rate": 6.880437689403316e-06,
      "loss": 0.4526,
      "step": 705
    },
    {
      "epoch": 2.6492537313432836,
      "grad_norm": 0.07259674165968212,
      "learning_rate": 6.699440595290754e-06,
      "loss": 0.4515,
      "step": 710
    },
    {
      "epoch": 2.667910447761194,
      "grad_norm": 0.06745674865987111,
      "learning_rate": 6.527259070281722e-06,
      "loss": 0.4562,
      "step": 715
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 0.09708148259376258,
      "learning_rate": 6.363966087323844e-06,
      "loss": 0.4544,
      "step": 720
    },
    {
      "epoch": 2.705223880597015,
      "grad_norm": 0.0696892401955707,
      "learning_rate": 6.209630852275836e-06,
      "loss": 0.4459,
      "step": 725
    },
    {
      "epoch": 2.7238805970149254,
      "grad_norm": 0.07185576991588484,
      "learning_rate": 6.06431877457709e-06,
      "loss": 0.4503,
      "step": 730
    },
    {
      "epoch": 2.7425373134328357,
      "grad_norm": 0.07116338247532543,
      "learning_rate": 5.928091439526226e-06,
      "loss": 0.4472,
      "step": 735
    },
    {
      "epoch": 2.7611940298507465,
      "grad_norm": 0.07012522976921934,
      "learning_rate": 5.801006582180398e-06,
      "loss": 0.4505,
      "step": 740
    },
    {
      "epoch": 2.779850746268657,
      "grad_norm": 0.06996189785913512,
      "learning_rate": 5.683118062886346e-06,
      "loss": 0.4536,
      "step": 745
    },
    {
      "epoch": 2.798507462686567,
      "grad_norm": 0.07136894351410067,
      "learning_rate": 5.574475844453634e-06,
      "loss": 0.4505,
      "step": 750
    },
    {
      "epoch": 2.8171641791044775,
      "grad_norm": 0.07002535300791056,
      "learning_rate": 5.475125970979702e-06,
      "loss": 0.4515,
      "step": 755
    },
    {
      "epoch": 2.835820895522388,
      "grad_norm": 0.07149166693243972,
      "learning_rate": 5.385110548335753e-06,
      "loss": 0.4568,
      "step": 760
    },
    {
      "epoch": 2.8544776119402986,
      "grad_norm": 0.07020725124001985,
      "learning_rate": 5.30446772632166e-06,
      "loss": 0.4555,
      "step": 765
    },
    {
      "epoch": 2.873134328358209,
      "grad_norm": 0.07259046375882247,
      "learning_rate": 5.233231682497572e-06,
      "loss": 0.4481,
      "step": 770
    },
    {
      "epoch": 2.8917910447761193,
      "grad_norm": 0.06964811084889956,
      "learning_rate": 5.171432607698975e-06,
      "loss": 0.4478,
      "step": 775
    },
    {
      "epoch": 2.91044776119403,
      "grad_norm": 0.06725150018988699,
      "learning_rate": 5.119096693241395e-06,
      "loss": 0.4524,
      "step": 780
    },
    {
      "epoch": 2.9291044776119404,
      "grad_norm": 0.06616781886399527,
      "learning_rate": 5.07624611982014e-06,
      "loss": 0.4471,
      "step": 785
    },
    {
      "epoch": 2.9477611940298507,
      "grad_norm": 0.06597264304324678,
      "learning_rate": 5.0428990481098275e-06,
      "loss": 0.4476,
      "step": 790
    },
    {
      "epoch": 2.966417910447761,
      "grad_norm": 0.06838206427405408,
      "learning_rate": 5.01906961106762e-06,
      "loss": 0.4472,
      "step": 795
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 0.06614701992126826,
      "learning_rate": 5.004767907943488e-06,
      "loss": 0.4458,
      "step": 800
    },
    {
      "epoch": 3.0,
      "step": 804,
      "total_flos": 1465863748190208.0,
      "train_loss": 0.5050300278177309,
      "train_runtime": 14495.1223,
      "train_samples_per_second": 7.098,
      "train_steps_per_second": 0.055
    }
  ],
  "logging_steps": 5,
  "max_steps": 804,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1465863748190208.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}