{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9990828492815653,
  "eval_steps": 500,
  "global_step": 817,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012228676245796392,
      "grad_norm": 1.6473579833541931,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 1.3725,
      "step": 1
    },
    {
      "epoch": 0.006114338122898196,
      "grad_norm": 1.527729602912934,
      "learning_rate": 6.097560975609756e-05,
      "loss": 1.3787,
      "step": 5
    },
    {
      "epoch": 0.012228676245796393,
      "grad_norm": 1.5736263638617907,
      "learning_rate": 0.00012195121951219512,
      "loss": 1.3231,
      "step": 10
    },
    {
      "epoch": 0.01834301436869459,
      "grad_norm": 0.5769364715702556,
      "learning_rate": 0.00018292682926829268,
      "loss": 1.2224,
      "step": 15
    },
    {
      "epoch": 0.024457352491592785,
      "grad_norm": 0.4139930056267442,
      "learning_rate": 0.00024390243902439024,
      "loss": 1.1395,
      "step": 20
    },
    {
      "epoch": 0.03057169061449098,
      "grad_norm": 0.262099130716011,
      "learning_rate": 0.0003048780487804878,
      "loss": 1.0913,
      "step": 25
    },
    {
      "epoch": 0.03668602873738918,
      "grad_norm": 0.15253051567021142,
      "learning_rate": 0.00036585365853658537,
      "loss": 1.0795,
      "step": 30
    },
    {
      "epoch": 0.042800366860287375,
      "grad_norm": 0.1464835767250418,
      "learning_rate": 0.0004268292682926829,
      "loss": 1.0587,
      "step": 35
    },
    {
      "epoch": 0.04891470498318557,
      "grad_norm": 0.12891302801103366,
      "learning_rate": 0.0004878048780487805,
      "loss": 1.0502,
      "step": 40
    },
    {
      "epoch": 0.055029043106083766,
      "grad_norm": 0.1085934848109559,
      "learning_rate": 0.0005487804878048781,
      "loss": 1.0451,
      "step": 45
    },
    {
      "epoch": 0.06114338122898196,
      "grad_norm": 0.1226751965745002,
      "learning_rate": 0.0006097560975609756,
      "loss": 1.0181,
      "step": 50
    },
    {
      "epoch": 0.06725771935188016,
      "grad_norm": 0.1203151628753708,
      "learning_rate": 0.0006707317073170732,
      "loss": 1.0252,
      "step": 55
    },
    {
      "epoch": 0.07337205747477836,
      "grad_norm": 0.13789018529599276,
      "learning_rate": 0.0007317073170731707,
      "loss": 1.0093,
      "step": 60
    },
    {
      "epoch": 0.07948639559767655,
      "grad_norm": 0.12826204349422166,
      "learning_rate": 0.0007926829268292683,
      "loss": 1.0131,
      "step": 65
    },
    {
      "epoch": 0.08560073372057475,
      "grad_norm": 0.1364465428872305,
      "learning_rate": 0.0008536585365853659,
      "loss": 0.9985,
      "step": 70
    },
    {
      "epoch": 0.09171507184347294,
      "grad_norm": 0.14196429462926946,
      "learning_rate": 0.0009146341463414635,
      "loss": 0.9959,
      "step": 75
    },
    {
      "epoch": 0.09782940996637114,
      "grad_norm": 0.12106099937186893,
      "learning_rate": 0.000975609756097561,
      "loss": 1.0092,
      "step": 80
    },
    {
      "epoch": 0.10394374808926933,
      "grad_norm": 0.18450188337900664,
      "learning_rate": 0.0009999588943391596,
      "loss": 0.9995,
      "step": 85
    },
    {
      "epoch": 0.11005808621216753,
      "grad_norm": 0.14339050170878104,
      "learning_rate": 0.0009997077175540067,
      "loss": 0.9967,
      "step": 90
    },
    {
      "epoch": 0.11617242433506574,
      "grad_norm": 0.14026732631424904,
      "learning_rate": 0.0009992283150399447,
      "loss": 0.9929,
      "step": 95
    },
    {
      "epoch": 0.12228676245796392,
      "grad_norm": 0.1432651360969384,
      "learning_rate": 0.000998520905748941,
      "loss": 0.9849,
      "step": 100
    },
    {
      "epoch": 0.12840110058086213,
      "grad_norm": 0.13147108970650698,
      "learning_rate": 0.0009975858127678633,
      "loss": 0.9883,
      "step": 105
    },
    {
      "epoch": 0.13451543870376031,
      "grad_norm": 0.12146848904095599,
      "learning_rate": 0.0009964234631709187,
      "loss": 0.985,
      "step": 110
    },
    {
      "epoch": 0.1406297768266585,
      "grad_norm": 0.15932664420891876,
      "learning_rate": 0.0009950343878246009,
      "loss": 0.983,
      "step": 115
    },
    {
      "epoch": 0.14674411494955672,
      "grad_norm": 0.13873375461481144,
      "learning_rate": 0.0009934192211452344,
      "loss": 0.98,
      "step": 120
    },
    {
      "epoch": 0.1528584530724549,
      "grad_norm": 0.1538004585641361,
      "learning_rate": 0.0009915787008092246,
      "loss": 0.9721,
      "step": 125
    },
    {
      "epoch": 0.1589727911953531,
      "grad_norm": 0.1382569798418763,
      "learning_rate": 0.0009895136674161465,
      "loss": 0.9741,
      "step": 130
    },
    {
      "epoch": 0.16508712931825129,
      "grad_norm": 0.15833788706187227,
      "learning_rate": 0.0009872250641048289,
      "loss": 0.9781,
      "step": 135
    },
    {
      "epoch": 0.1712014674411495,
      "grad_norm": 0.14959772029321822,
      "learning_rate": 0.0009847139361226047,
      "loss": 0.9804,
      "step": 140
    },
    {
      "epoch": 0.1773158055640477,
      "grad_norm": 0.15126558344190147,
      "learning_rate": 0.0009819814303479266,
      "loss": 0.9753,
      "step": 145
    },
    {
      "epoch": 0.18343014368694588,
      "grad_norm": 0.12558000003386655,
      "learning_rate": 0.0009790287947665682,
      "loss": 0.9753,
      "step": 150
    },
    {
      "epoch": 0.1895444818098441,
      "grad_norm": 0.15082368481634573,
      "learning_rate": 0.0009758573779016438,
      "loss": 0.9738,
      "step": 155
    },
    {
      "epoch": 0.19565881993274228,
      "grad_norm": 0.1436091644716082,
      "learning_rate": 0.0009724686281977146,
      "loss": 0.9721,
      "step": 160
    },
    {
      "epoch": 0.20177315805564047,
      "grad_norm": 0.13376422992258633,
      "learning_rate": 0.0009688640933592572,
      "loss": 0.9588,
      "step": 165
    },
    {
      "epoch": 0.20788749617853866,
      "grad_norm": 0.172820103639061,
      "learning_rate": 0.0009650454196437975,
      "loss": 0.9618,
      "step": 170
    },
    {
      "epoch": 0.21400183430143688,
      "grad_norm": 0.11027045474496103,
      "learning_rate": 0.0009610143511100353,
      "loss": 0.9663,
      "step": 175
    },
    {
      "epoch": 0.22011617242433507,
      "grad_norm": 0.12032276235435292,
      "learning_rate": 0.0009567727288213005,
      "loss": 0.9537,
      "step": 180
    },
    {
      "epoch": 0.22623051054723325,
      "grad_norm": 0.11615954231864511,
      "learning_rate": 0.0009523224900047051,
      "loss": 0.958,
      "step": 185
    },
    {
      "epoch": 0.23234484867013147,
      "grad_norm": 0.10504587748815435,
      "learning_rate": 0.0009476656671663766,
      "loss": 0.9604,
      "step": 190
    },
    {
      "epoch": 0.23845918679302966,
      "grad_norm": 0.13568715189393674,
      "learning_rate": 0.0009428043871631739,
      "loss": 0.9531,
      "step": 195
    },
    {
      "epoch": 0.24457352491592785,
      "grad_norm": 0.1447001714428606,
      "learning_rate": 0.0009377408702313137,
      "loss": 0.9462,
      "step": 200
    },
    {
      "epoch": 0.25068786303882606,
      "grad_norm": 0.1241647609635287,
      "learning_rate": 0.0009324774289723468,
      "loss": 0.9443,
      "step": 205
    },
    {
      "epoch": 0.25680220116172425,
      "grad_norm": 0.1355644783905131,
      "learning_rate": 0.0009270164672969508,
      "loss": 0.9457,
      "step": 210
    },
    {
      "epoch": 0.26291653928462244,
      "grad_norm": 0.14124706011650312,
      "learning_rate": 0.0009213604793270196,
      "loss": 0.9492,
      "step": 215
    },
    {
      "epoch": 0.26903087740752063,
      "grad_norm": 0.14975483262187333,
      "learning_rate": 0.000915512048256552,
      "loss": 0.9364,
      "step": 220
    },
    {
      "epoch": 0.2751452155304188,
      "grad_norm": 0.1078100835280595,
      "learning_rate": 0.0009094738451718594,
      "loss": 0.9435,
      "step": 225
    },
    {
      "epoch": 0.281259553653317,
      "grad_norm": 0.12309862917298761,
      "learning_rate": 0.0009032486278316315,
      "loss": 0.9356,
      "step": 230
    },
    {
      "epoch": 0.28737389177621525,
      "grad_norm": 0.12723435610628442,
      "learning_rate": 0.0008968392394074163,
      "loss": 0.9378,
      "step": 235
    },
    {
      "epoch": 0.29348822989911344,
      "grad_norm": 0.10462231095819303,
      "learning_rate": 0.0008902486071850926,
      "loss": 0.9442,
      "step": 240
    },
    {
      "epoch": 0.29960256802201163,
      "grad_norm": 0.13925829008530255,
      "learning_rate": 0.0008834797412279236,
      "loss": 0.9334,
      "step": 245
    },
    {
      "epoch": 0.3057169061449098,
      "grad_norm": 0.1273487577497673,
      "learning_rate": 0.0008765357330018055,
      "loss": 0.931,
      "step": 250
    },
    {
      "epoch": 0.311831244267808,
      "grad_norm": 0.12116196314846202,
      "learning_rate": 0.0008694197539633384,
      "loss": 0.9383,
      "step": 255
    },
    {
      "epoch": 0.3179455823907062,
      "grad_norm": 0.1250843203236674,
      "learning_rate": 0.0008621350541113637,
      "loss": 0.9319,
      "step": 260
    },
    {
      "epoch": 0.3240599205136044,
      "grad_norm": 0.12080788537731575,
      "learning_rate": 0.0008546849605026289,
      "loss": 0.9285,
      "step": 265
    },
    {
      "epoch": 0.33017425863650257,
      "grad_norm": 0.17314003785998686,
      "learning_rate": 0.0008470728757322603,
      "loss": 0.9332,
      "step": 270
    },
    {
      "epoch": 0.3362885967594008,
      "grad_norm": 0.1431625193541668,
      "learning_rate": 0.0008393022763797346,
      "loss": 0.9318,
      "step": 275
    },
    {
      "epoch": 0.342402934882299,
      "grad_norm": 0.16735418777113265,
      "learning_rate": 0.0008313767114210615,
      "loss": 0.9323,
      "step": 280
    },
    {
      "epoch": 0.3485172730051972,
      "grad_norm": 0.1363188019161297,
      "learning_rate": 0.0008232998006078997,
      "loss": 0.935,
      "step": 285
    },
    {
      "epoch": 0.3546316111280954,
      "grad_norm": 0.11610423464025475,
      "learning_rate": 0.0008150752328143514,
      "loss": 0.9174,
      "step": 290
    },
    {
      "epoch": 0.36074594925099357,
      "grad_norm": 0.10924803560485702,
      "learning_rate": 0.0008067067643521834,
      "loss": 0.9118,
      "step": 295
    },
    {
      "epoch": 0.36686028737389176,
      "grad_norm": 0.10253606792051606,
      "learning_rate": 0.0007981982172552517,
      "loss": 0.9151,
      "step": 300
    },
    {
      "epoch": 0.37297462549678995,
      "grad_norm": 0.13982974888872052,
      "learning_rate": 0.0007895534775339084,
      "loss": 0.9255,
      "step": 305
    },
    {
      "epoch": 0.3790889636196882,
      "grad_norm": 0.09882318142435366,
      "learning_rate": 0.0007807764934001874,
      "loss": 0.9177,
      "step": 310
    },
    {
      "epoch": 0.3852033017425864,
      "grad_norm": 0.10319437561877241,
      "learning_rate": 0.000771871273464585,
      "loss": 0.9094,
      "step": 315
    },
    {
      "epoch": 0.39131763986548457,
      "grad_norm": 0.11417378850578246,
      "learning_rate": 0.0007628418849052523,
      "loss": 0.9096,
      "step": 320
    },
    {
      "epoch": 0.39743197798838276,
      "grad_norm": 0.12295800667011886,
      "learning_rate": 0.0007536924516104411,
      "loss": 0.9095,
      "step": 325
    },
    {
      "epoch": 0.40354631611128094,
      "grad_norm": 0.11159190643662502,
      "learning_rate": 0.0007444271522950469,
      "loss": 0.9021,
      "step": 330
    },
    {
      "epoch": 0.40966065423417913,
      "grad_norm": 0.1252629153454932,
      "learning_rate": 0.0007350502185921132,
      "loss": 0.9076,
      "step": 335
    },
    {
      "epoch": 0.4157749923570773,
      "grad_norm": 0.11464728685850215,
      "learning_rate": 0.0007255659331201672,
      "loss": 0.9113,
      "step": 340
    },
    {
      "epoch": 0.42188933047997557,
      "grad_norm": 0.13306027654586686,
      "learning_rate": 0.0007159786275272686,
      "loss": 0.9104,
      "step": 345
    },
    {
      "epoch": 0.42800366860287375,
      "grad_norm": 0.1187107905375174,
      "learning_rate": 0.0007062926805126653,
      "loss": 0.9164,
      "step": 350
    },
    {
      "epoch": 0.43411800672577194,
      "grad_norm": 0.12255410907403633,
      "learning_rate": 0.0006965125158269618,
      "loss": 0.8986,
      "step": 355
    },
    {
      "epoch": 0.44023234484867013,
      "grad_norm": 0.09613158587511622,
      "learning_rate": 0.0006866426002517105,
      "loss": 0.9009,
      "step": 360
    },
    {
      "epoch": 0.4463466829715683,
      "grad_norm": 0.1169754004784448,
      "learning_rate": 0.0006766874415593496,
      "loss": 0.909,
      "step": 365
    },
    {
      "epoch": 0.4524610210944665,
      "grad_norm": 0.11351098683885749,
      "learning_rate": 0.0006666515864544209,
      "loss": 0.9019,
      "step": 370
    },
    {
      "epoch": 0.4585753592173647,
      "grad_norm": 0.12147663137303133,
      "learning_rate": 0.0006565396184970059,
      "loss": 0.9042,
      "step": 375
    },
    {
      "epoch": 0.46468969734026294,
      "grad_norm": 0.1114287532695913,
      "learning_rate": 0.0006463561560093292,
      "loss": 0.9007,
      "step": 380
    },
    {
      "epoch": 0.47080403546316113,
      "grad_norm": 0.10708526766281388,
      "learning_rate": 0.0006361058499664855,
      "loss": 0.8949,
      "step": 385
    },
    {
      "epoch": 0.4769183735860593,
      "grad_norm": 0.10655002270489923,
      "learning_rate": 0.0006257933818722543,
      "loss": 0.8998,
      "step": 390
    },
    {
      "epoch": 0.4830327117089575,
      "grad_norm": 0.1016499106218396,
      "learning_rate": 0.0006154234616209693,
      "loss": 0.887,
      "step": 395
    },
    {
      "epoch": 0.4891470498318557,
      "grad_norm": 0.10775455001043291,
      "learning_rate": 0.0006050008253464246,
      "loss": 0.8892,
      "step": 400
    },
    {
      "epoch": 0.4952613879547539,
      "grad_norm": 0.09438287053968446,
      "learning_rate": 0.0005945302332587938,
      "loss": 0.886,
      "step": 405
    },
    {
      "epoch": 0.5013757260776521,
      "grad_norm": 0.10561263991422343,
      "learning_rate": 0.0005840164674705543,
      "loss": 0.8904,
      "step": 410
    },
    {
      "epoch": 0.5074900642005503,
      "grad_norm": 0.15358893461343107,
      "learning_rate": 0.000573464329812409,
      "loss": 0.8992,
      "step": 415
    },
    {
      "epoch": 0.5136044023234485,
      "grad_norm": 0.10760823616000371,
      "learning_rate": 0.0005628786396402013,
      "loss": 0.8919,
      "step": 420
    },
    {
      "epoch": 0.5197187404463467,
      "grad_norm": 0.0983532065047366,
      "learning_rate": 0.0005522642316338268,
      "loss": 0.8873,
      "step": 425
    },
    {
      "epoch": 0.5258330785692449,
      "grad_norm": 0.10566645548622697,
      "learning_rate": 0.0005416259535891447,
      "loss": 0.8985,
      "step": 430
    },
    {
      "epoch": 0.5319474166921431,
      "grad_norm": 0.09400617437059618,
      "learning_rate": 0.0005309686642039016,
      "loss": 0.8855,
      "step": 435
    },
    {
      "epoch": 0.5380617548150413,
      "grad_norm": 0.10458511844644205,
      "learning_rate": 0.0005202972308586735,
      "loss": 0.8805,
      "step": 440
    },
    {
      "epoch": 0.5441760929379394,
      "grad_norm": 0.1069055482540605,
      "learning_rate": 0.0005096165273938436,
      "loss": 0.8743,
      "step": 445
    },
    {
      "epoch": 0.5502904310608376,
      "grad_norm": 0.09403124483054168,
      "learning_rate": 0.0004989314318836302,
      "loss": 0.8794,
      "step": 450
    },
    {
      "epoch": 0.5564047691837358,
      "grad_norm": 0.10699257109922651,
      "learning_rate": 0.00048824682440817927,
      "loss": 0.8806,
      "step": 455
    },
    {
      "epoch": 0.562519107306634,
      "grad_norm": 0.09985948385062245,
      "learning_rate": 0.0004775675848247427,
      "loss": 0.8813,
      "step": 460
    },
    {
      "epoch": 0.5686334454295322,
      "grad_norm": 0.105036623631725,
      "learning_rate": 0.0004668985905389563,
      "loss": 0.887,
      "step": 465
    },
    {
      "epoch": 0.5747477835524305,
      "grad_norm": 0.11080094841828096,
      "learning_rate": 0.0004562447142772404,
      "loss": 0.8744,
      "step": 470
    },
    {
      "epoch": 0.5808621216753287,
      "grad_norm": 0.1250806107949013,
      "learning_rate": 0.0004456108218613346,
      "loss": 0.8698,
      "step": 475
    },
    {
      "epoch": 0.5869764597982269,
      "grad_norm": 0.0972143382654124,
      "learning_rate": 0.00043500176998598775,
      "loss": 0.8805,
      "step": 480
    },
    {
      "epoch": 0.5930907979211251,
      "grad_norm": 0.11219773661945918,
      "learning_rate": 0.0004244224040008156,
      "loss": 0.8801,
      "step": 485
    },
    {
      "epoch": 0.5992051360440233,
      "grad_norm": 0.14071498565703944,
      "learning_rate": 0.00041387755569734057,
      "loss": 0.8807,
      "step": 490
    },
    {
      "epoch": 0.6053194741669214,
      "grad_norm": 0.10783176136023623,
      "learning_rate": 0.0004033720411022235,
      "loss": 0.8697,
      "step": 495
    },
    {
      "epoch": 0.6114338122898196,
      "grad_norm": 0.09520310912796215,
      "learning_rate": 0.00039291065827769484,
      "loss": 0.8777,
      "step": 500
    },
    {
      "epoch": 0.6175481504127178,
      "grad_norm": 0.09620451010405635,
      "learning_rate": 0.0003824981851301924,
      "loss": 0.8723,
      "step": 505
    },
    {
      "epoch": 0.623662488535616,
      "grad_norm": 0.08307849142839015,
      "learning_rate": 0.0003721393772282022,
      "loss": 0.8678,
      "step": 510
    },
    {
      "epoch": 0.6297768266585142,
      "grad_norm": 0.09253756305585324,
      "learning_rate": 0.00036183896563030295,
      "loss": 0.8799,
      "step": 515
    },
    {
      "epoch": 0.6358911647814124,
      "grad_norm": 0.08933396229258163,
      "learning_rate": 0.0003516016547244047,
      "loss": 0.8705,
      "step": 520
    },
    {
      "epoch": 0.6420055029043106,
      "grad_norm": 0.10619698692809078,
      "learning_rate": 0.00034143212007916793,
      "loss": 0.8619,
      "step": 525
    },
    {
      "epoch": 0.6481198410272088,
      "grad_norm": 0.09349371215537762,
      "learning_rate": 0.00033133500630858504,
      "loss": 0.881,
      "step": 530
    },
    {
      "epoch": 0.654234179150107,
      "grad_norm": 0.09046629782340958,
      "learning_rate": 0.0003213149249506997,
      "loss": 0.8696,
      "step": 535
    },
    {
      "epoch": 0.6603485172730051,
      "grad_norm": 0.09293695191807286,
      "learning_rate": 0.00031137645236143204,
      "loss": 0.872,
      "step": 540
    },
    {
      "epoch": 0.6664628553959034,
      "grad_norm": 0.09042774606106192,
      "learning_rate": 0.0003015241276244729,
      "loss": 0.866,
      "step": 545
    },
    {
      "epoch": 0.6725771935188016,
      "grad_norm": 0.08943853887177049,
      "learning_rate": 0.00029176245047820063,
      "loss": 0.8663,
      "step": 550
    },
    {
      "epoch": 0.6786915316416998,
      "grad_norm": 0.10703770717270873,
      "learning_rate": 0.0002820958792605669,
      "loss": 0.8676,
      "step": 555
    },
    {
      "epoch": 0.684805869764598,
      "grad_norm": 0.08492021626424746,
      "learning_rate": 0.00027252882887289287,
      "loss": 0.8554,
      "step": 560
    },
    {
      "epoch": 0.6909202078874962,
      "grad_norm": 0.10407953654970142,
      "learning_rate": 0.0002630656687635007,
      "loss": 0.8583,
      "step": 565
    },
    {
      "epoch": 0.6970345460103944,
      "grad_norm": 0.0824461160121573,
      "learning_rate": 0.0002537107209321074,
      "loss": 0.8628,
      "step": 570
    },
    {
      "epoch": 0.7031488841332926,
      "grad_norm": 0.10428889280009748,
      "learning_rate": 0.0002444682579558872,
      "loss": 0.8535,
      "step": 575
    },
    {
      "epoch": 0.7092632222561908,
      "grad_norm": 0.07965447039376655,
      "learning_rate": 0.00023534250103810628,
      "loss": 0.8545,
      "step": 580
    },
    {
      "epoch": 0.715377560379089,
      "grad_norm": 0.0928006027867118,
      "learning_rate": 0.00022633761808022273,
      "loss": 0.8524,
      "step": 585
    },
    {
      "epoch": 0.7214918985019871,
      "grad_norm": 0.08872667637386111,
      "learning_rate": 0.00021745772177832756,
      "loss": 0.8512,
      "step": 590
    },
    {
      "epoch": 0.7276062366248853,
      "grad_norm": 0.1079467742979241,
      "learning_rate": 0.00020870686774480197,
      "loss": 0.8599,
      "step": 595
    },
    {
      "epoch": 0.7337205747477835,
      "grad_norm": 0.09888079366817214,
      "learning_rate": 0.00020008905265604316,
      "loss": 0.858,
      "step": 600
    },
    {
      "epoch": 0.7398349128706817,
      "grad_norm": 0.1058830999608511,
      "learning_rate": 0.00019160821242710958,
      "loss": 0.8602,
      "step": 605
    },
    {
      "epoch": 0.7459492509935799,
      "grad_norm": 0.08962748847918732,
      "learning_rate": 0.00018326822041411523,
      "loss": 0.8671,
      "step": 610
    },
    {
      "epoch": 0.7520635891164782,
      "grad_norm": 0.09399010413130322,
      "learning_rate": 0.00017507288564519647,
      "loss": 0.8526,
      "step": 615
    },
    {
      "epoch": 0.7581779272393764,
      "grad_norm": 0.08270084851060709,
      "learning_rate": 0.00016702595108085945,
      "loss": 0.8624,
      "step": 620
    },
    {
      "epoch": 0.7642922653622746,
      "grad_norm": 0.09980027261787928,
      "learning_rate": 0.0001591310919045003,
      "loss": 0.8612,
      "step": 625
    },
    {
      "epoch": 0.7704066034851728,
      "grad_norm": 0.09445752377399474,
      "learning_rate": 0.00015139191384388095,
      "loss": 0.8619,
      "step": 630
    },
    {
      "epoch": 0.776520941608071,
      "grad_norm": 0.09149316315284103,
      "learning_rate": 0.00014381195152432768,
      "loss": 0.8569,
      "step": 635
    },
    {
      "epoch": 0.7826352797309691,
      "grad_norm": 0.10490270600046576,
      "learning_rate": 0.00013639466685440134,
      "loss": 0.8603,
      "step": 640
    },
    {
      "epoch": 0.7887496178538673,
      "grad_norm": 0.08352199090543881,
      "learning_rate": 0.00012914344744478112,
      "loss": 0.863,
      "step": 645
    },
    {
      "epoch": 0.7948639559767655,
      "grad_norm": 0.1037013152834401,
      "learning_rate": 0.0001220616050610791,
      "loss": 0.8465,
      "step": 650
    },
    {
      "epoch": 0.8009782940996637,
      "grad_norm": 0.08763593791016736,
      "learning_rate": 0.00011515237411129698,
      "loss": 0.8504,
      "step": 655
    },
    {
      "epoch": 0.8070926322225619,
      "grad_norm": 0.08445574275325193,
      "learning_rate": 0.00010841891016861154,
      "loss": 0.8623,
      "step": 660
    },
    {
      "epoch": 0.8132069703454601,
      "grad_norm": 0.08168853426345901,
      "learning_rate": 0.00010186428853016605,
      "loss": 0.8615,
      "step": 665
    },
    {
      "epoch": 0.8193213084683583,
      "grad_norm": 0.09147819158262574,
      "learning_rate": 9.549150281252633e-05,
      "loss": 0.8567,
      "step": 670
    },
    {
      "epoch": 0.8254356465912565,
      "grad_norm": 0.10248465777136823,
      "learning_rate": 8.930346358443952e-05,
      "loss": 0.8619,
      "step": 675
    },
    {
      "epoch": 0.8315499847141546,
      "grad_norm": 0.08649839904042995,
      "learning_rate": 8.330299703752498e-05,
      "loss": 0.8465,
      "step": 680
    },
    {
      "epoch": 0.8376643228370529,
      "grad_norm": 0.0952129204342795,
      "learning_rate": 7.749284369549953e-05,
      "loss": 0.8524,
      "step": 685
    },
    {
      "epoch": 0.8437786609599511,
      "grad_norm": 0.08211788402495825,
      "learning_rate": 7.187565716252992e-05,
      "loss": 0.8509,
      "step": 690
    },
    {
      "epoch": 0.8498929990828493,
      "grad_norm": 0.08571973496189347,
      "learning_rate": 6.645400291128356e-05,
      "loss": 0.8577,
      "step": 695
    },
    {
      "epoch": 0.8560073372057475,
      "grad_norm": 0.08082502620292945,
      "learning_rate": 6.123035711122859e-05,
      "loss": 0.8476,
      "step": 700
    },
    {
      "epoch": 0.8621216753286457,
      "grad_norm": 0.07815053165759356,
      "learning_rate": 5.6207105497722956e-05,
      "loss": 0.8363,
      "step": 705
    },
    {
      "epoch": 0.8682360134515439,
      "grad_norm": 0.08165596121429411,
      "learning_rate": 5.138654228240425e-05,
      "loss": 0.838,
      "step": 710
    },
    {
      "epoch": 0.8743503515744421,
      "grad_norm": 0.08037559269668637,
      "learning_rate": 4.677086910538092e-05,
      "loss": 0.8466,
      "step": 715
    },
    {
      "epoch": 0.8804646896973403,
      "grad_norm": 0.08019657403458592,
      "learning_rate": 4.236219402970326e-05,
      "loss": 0.858,
      "step": 720
    },
    {
      "epoch": 0.8865790278202385,
      "grad_norm": 0.08867793444411678,
      "learning_rate": 3.816253057857144e-05,
      "loss": 0.8427,
      "step": 725
    },
    {
      "epoch": 0.8926933659431366,
      "grad_norm": 0.08359801409970409,
      "learning_rate": 3.417379681572297e-05,
      "loss": 0.8602,
      "step": 730
    },
    {
      "epoch": 0.8988077040660348,
      "grad_norm": 0.07709654354498045,
      "learning_rate": 3.0397814469416973e-05,
      "loss": 0.8478,
      "step": 735
    },
    {
      "epoch": 0.904922042188933,
      "grad_norm": 0.08504934863051679,
      "learning_rate": 2.683630810041787e-05,
      "loss": 0.8522,
      "step": 740
    },
    {
      "epoch": 0.9110363803118312,
      "grad_norm": 0.08097550153449702,
      "learning_rate": 2.349090431435641e-05,
      "loss": 0.855,
      "step": 745
    },
    {
      "epoch": 0.9171507184347294,
      "grad_norm": 0.07983590236151124,
      "learning_rate": 2.0363131018828753e-05,
      "loss": 0.8503,
      "step": 750
    },
    {
      "epoch": 0.9232650565576276,
      "grad_norm": 0.08750545593776848,
      "learning_rate": 1.7454416725573353e-05,
      "loss": 0.8439,
      "step": 755
    },
    {
      "epoch": 0.9293793946805259,
      "grad_norm": 0.08281156948532688,
      "learning_rate": 1.4766089898042678e-05,
      "loss": 0.8457,
      "step": 760
    },
    {
      "epoch": 0.9354937328034241,
      "grad_norm": 0.08034480195198145,
      "learning_rate": 1.2299378344669988e-05,
      "loss": 0.8474,
      "step": 765
    },
    {
      "epoch": 0.9416080709263223,
      "grad_norm": 0.07959517933222772,
      "learning_rate": 1.0055408658106447e-05,
      "loss": 0.8493,
      "step": 770
    },
    {
      "epoch": 0.9477224090492204,
      "grad_norm": 0.07800509939986551,
      "learning_rate": 8.035205700685167e-06,
      "loss": 0.8397,
      "step": 775
    },
    {
      "epoch": 0.9538367471721186,
      "grad_norm": 0.0826449021240808,
      "learning_rate": 6.239692136348285e-06,
      "loss": 0.8467,
      "step": 780
    },
    {
      "epoch": 0.9599510852950168,
      "grad_norm": 0.07635093172829255,
      "learning_rate": 4.669688009248607e-06,
      "loss": 0.8435,
      "step": 785
    },
    {
      "epoch": 0.966065423417915,
      "grad_norm": 0.0793118068228326,
      "learning_rate": 3.325910369220975e-06,
      "loss": 0.8465,
      "step": 790
    },
    {
      "epoch": 0.9721797615408132,
      "grad_norm": 0.07689348809541706,
      "learning_rate": 2.20897294429212e-06,
      "loss": 0.8475,
      "step": 795
    },
    {
      "epoch": 0.9782940996637114,
      "grad_norm": 0.07630687024952551,
      "learning_rate": 1.3193858603794961e-06,
      "loss": 0.8565,
      "step": 800
    },
    {
      "epoch": 0.9844084377866096,
      "grad_norm": 0.08756903255033648,
      "learning_rate": 6.575554083078084e-07,
      "loss": 0.86,
      "step": 805
    },
    {
      "epoch": 0.9905227759095078,
      "grad_norm": 0.08038874760992833,
      "learning_rate": 2.2378385824833868e-07,
      "loss": 0.8346,
      "step": 810
    },
    {
      "epoch": 0.996637114032406,
      "grad_norm": 0.09307333259332833,
      "learning_rate": 1.8269321666375404e-08,
      "loss": 0.8459,
      "step": 815
    },
    {
      "epoch": 0.9990828492815653,
      "eval_loss": 1.2038620710372925,
      "eval_runtime": 112.7414,
      "eval_samples_per_second": 185.832,
      "eval_steps_per_second": 5.81,
      "step": 817
    },
    {
      "epoch": 0.9990828492815653,
      "step": 817,
      "total_flos": 80161328332800.0,
      "train_loss": 0.9149785230034276,
      "train_runtime": 1861.2659,
      "train_samples_per_second": 56.226,
      "train_steps_per_second": 0.439
    }
  ],
  "logging_steps": 5,
  "max_steps": 817,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 80161328332800.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}