{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.715596330275229,
  "eval_steps": 500,
  "global_step": 864,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01834862385321101,
      "grad_norm": 0.04378490149974823,
      "learning_rate": 4.999989423013716e-05,
      "loss": 0.6713,
      "num_input_tokens_seen": 44136,
      "step": 1
    },
    {
      "epoch": 0.03669724770642202,
      "grad_norm": 0.040646992623806,
      "learning_rate": 4.999957692144361e-05,
      "loss": 0.533,
      "num_input_tokens_seen": 83096,
      "step": 2
    },
    {
      "epoch": 0.05504587155963303,
      "grad_norm": 0.04658753052353859,
      "learning_rate": 4.999904807660428e-05,
      "loss": 0.6048,
      "num_input_tokens_seen": 122112,
      "step": 3
    },
    {
      "epoch": 0.07339449541284404,
      "grad_norm": 0.04322144016623497,
      "learning_rate": 4.999830770009406e-05,
      "loss": 0.4948,
      "num_input_tokens_seen": 163064,
      "step": 4
    },
    {
      "epoch": 0.09174311926605505,
      "grad_norm": 0.06536195427179337,
      "learning_rate": 4.999735579817769e-05,
      "loss": 0.6607,
      "num_input_tokens_seen": 203808,
      "step": 5
    },
    {
      "epoch": 0.11009174311926606,
      "grad_norm": 0.059904925525188446,
      "learning_rate": 4.9996192378909786e-05,
      "loss": 0.5802,
      "num_input_tokens_seen": 241824,
      "step": 6
    },
    {
      "epoch": 0.12844036697247707,
      "grad_norm": 0.19818365573883057,
      "learning_rate": 4.999481745213471e-05,
      "loss": 0.5148,
      "num_input_tokens_seen": 287608,
      "step": 7
    },
    {
      "epoch": 0.14678899082568808,
      "grad_norm": 0.05985472351312637,
      "learning_rate": 4.9993231029486544e-05,
      "loss": 0.5714,
      "num_input_tokens_seen": 325320,
      "step": 8
    },
    {
      "epoch": 0.1651376146788991,
      "grad_norm": 0.061375778168439865,
      "learning_rate": 4.999143312438893e-05,
      "loss": 0.6812,
      "num_input_tokens_seen": 369848,
      "step": 9
    },
    {
      "epoch": 0.1834862385321101,
      "grad_norm": 0.06196414306759834,
      "learning_rate": 4.998942375205502e-05,
      "loss": 0.5358,
      "num_input_tokens_seen": 415104,
      "step": 10
    },
    {
      "epoch": 0.2018348623853211,
      "grad_norm": 0.07861393690109253,
      "learning_rate": 4.9987202929487275e-05,
      "loss": 0.6527,
      "num_input_tokens_seen": 467224,
      "step": 11
    },
    {
      "epoch": 0.22018348623853212,
      "grad_norm": 0.05596446990966797,
      "learning_rate": 4.99847706754774e-05,
      "loss": 0.5354,
      "num_input_tokens_seen": 502824,
      "step": 12
    },
    {
      "epoch": 0.23853211009174313,
      "grad_norm": 0.05289844051003456,
      "learning_rate": 4.998212701060612e-05,
      "loss": 0.5263,
      "num_input_tokens_seen": 544744,
      "step": 13
    },
    {
      "epoch": 0.25688073394495414,
      "grad_norm": 0.04996591433882713,
      "learning_rate": 4.997927195724303e-05,
      "loss": 0.5536,
      "num_input_tokens_seen": 591136,
      "step": 14
    },
    {
      "epoch": 0.27522935779816515,
      "grad_norm": 0.05822828412055969,
      "learning_rate": 4.997620553954645e-05,
      "loss": 0.6106,
      "num_input_tokens_seen": 629664,
      "step": 15
    },
    {
      "epoch": 0.29357798165137616,
      "grad_norm": 0.06353770196437836,
      "learning_rate": 4.997292778346312e-05,
      "loss": 0.5129,
      "num_input_tokens_seen": 663392,
      "step": 16
    },
    {
      "epoch": 0.3119266055045872,
      "grad_norm": 0.07256966829299927,
      "learning_rate": 4.996943871672807e-05,
      "loss": 0.6377,
      "num_input_tokens_seen": 698360,
      "step": 17
    },
    {
      "epoch": 0.3302752293577982,
      "grad_norm": 0.055458713322877884,
      "learning_rate": 4.996573836886435e-05,
      "loss": 0.4083,
      "num_input_tokens_seen": 737520,
      "step": 18
    },
    {
      "epoch": 0.3486238532110092,
      "grad_norm": 0.07792335003614426,
      "learning_rate": 4.9961826771182784e-05,
      "loss": 0.6086,
      "num_input_tokens_seen": 768056,
      "step": 19
    },
    {
      "epoch": 0.3669724770642202,
      "grad_norm": 0.06627275049686432,
      "learning_rate": 4.995770395678171e-05,
      "loss": 0.4591,
      "num_input_tokens_seen": 806256,
      "step": 20
    },
    {
      "epoch": 0.3853211009174312,
      "grad_norm": 0.05830290913581848,
      "learning_rate": 4.9953369960546676e-05,
      "loss": 0.3731,
      "num_input_tokens_seen": 842336,
      "step": 21
    },
    {
      "epoch": 0.4036697247706422,
      "grad_norm": 0.07277437299489975,
      "learning_rate": 4.9948824819150185e-05,
      "loss": 0.6243,
      "num_input_tokens_seen": 876672,
      "step": 22
    },
    {
      "epoch": 0.42201834862385323,
      "grad_norm": 0.07477546483278275,
      "learning_rate": 4.994406857105136e-05,
      "loss": 0.5788,
      "num_input_tokens_seen": 915192,
      "step": 23
    },
    {
      "epoch": 0.44036697247706424,
      "grad_norm": 0.06912907212972641,
      "learning_rate": 4.993910125649561e-05,
      "loss": 0.4753,
      "num_input_tokens_seen": 951904,
      "step": 24
    },
    {
      "epoch": 0.45871559633027525,
      "grad_norm": 0.0655476376414299,
      "learning_rate": 4.993392291751431e-05,
      "loss": 0.4518,
      "num_input_tokens_seen": 1001816,
      "step": 25
    },
    {
      "epoch": 0.47706422018348627,
      "grad_norm": 0.06466512382030487,
      "learning_rate": 4.992853359792444e-05,
      "loss": 0.5638,
      "num_input_tokens_seen": 1053064,
      "step": 26
    },
    {
      "epoch": 0.4954128440366973,
      "grad_norm": 0.0645688995718956,
      "learning_rate": 4.99229333433282e-05,
      "loss": 0.4644,
      "num_input_tokens_seen": 1086688,
      "step": 27
    },
    {
      "epoch": 0.5137614678899083,
      "grad_norm": 0.07181251049041748,
      "learning_rate": 4.9917122201112656e-05,
      "loss": 0.6191,
      "num_input_tokens_seen": 1134824,
      "step": 28
    },
    {
      "epoch": 0.5321100917431193,
      "grad_norm": 0.07322589308023453,
      "learning_rate": 4.9911100220449293e-05,
      "loss": 0.6752,
      "num_input_tokens_seen": 1172072,
      "step": 29
    },
    {
      "epoch": 0.5504587155963303,
      "grad_norm": 0.06396070122718811,
      "learning_rate": 4.990486745229364e-05,
      "loss": 0.3587,
      "num_input_tokens_seen": 1211096,
      "step": 30
    },
    {
      "epoch": 0.5688073394495413,
      "grad_norm": 0.07803395390510559,
      "learning_rate": 4.989842394938482e-05,
      "loss": 0.459,
      "num_input_tokens_seen": 1259456,
      "step": 31
    },
    {
      "epoch": 0.5871559633027523,
      "grad_norm": 0.05974648892879486,
      "learning_rate": 4.989176976624511e-05,
      "loss": 0.4148,
      "num_input_tokens_seen": 1306944,
      "step": 32
    },
    {
      "epoch": 0.6055045871559633,
      "grad_norm": 0.09784268587827682,
      "learning_rate": 4.988490495917947e-05,
      "loss": 0.539,
      "num_input_tokens_seen": 1353744,
      "step": 33
    },
    {
      "epoch": 0.6238532110091743,
      "grad_norm": 0.09906516224145889,
      "learning_rate": 4.987782958627508e-05,
      "loss": 0.5453,
      "num_input_tokens_seen": 1394736,
      "step": 34
    },
    {
      "epoch": 0.6422018348623854,
      "grad_norm": 0.08984062820672989,
      "learning_rate": 4.987054370740083e-05,
      "loss": 0.468,
      "num_input_tokens_seen": 1442048,
      "step": 35
    },
    {
      "epoch": 0.6605504587155964,
      "grad_norm": 0.08672655373811722,
      "learning_rate": 4.9863047384206835e-05,
      "loss": 0.4078,
      "num_input_tokens_seen": 1478440,
      "step": 36
    },
    {
      "epoch": 0.6788990825688074,
      "grad_norm": 0.1327345073223114,
      "learning_rate": 4.9855340680123905e-05,
      "loss": 0.5299,
      "num_input_tokens_seen": 1525992,
      "step": 37
    },
    {
      "epoch": 0.6972477064220184,
      "grad_norm": 0.09178602695465088,
      "learning_rate": 4.9847423660363e-05,
      "loss": 0.439,
      "num_input_tokens_seen": 1555608,
      "step": 38
    },
    {
      "epoch": 0.7155963302752294,
      "grad_norm": 0.09418320655822754,
      "learning_rate": 4.983929639191469e-05,
      "loss": 0.5337,
      "num_input_tokens_seen": 1597392,
      "step": 39
    },
    {
      "epoch": 0.7339449541284404,
      "grad_norm": 0.08294719457626343,
      "learning_rate": 4.983095894354858e-05,
      "loss": 0.4536,
      "num_input_tokens_seen": 1649656,
      "step": 40
    },
    {
      "epoch": 0.7522935779816514,
      "grad_norm": 0.09774205833673477,
      "learning_rate": 4.982241138581273e-05,
      "loss": 0.5221,
      "num_input_tokens_seen": 1695952,
      "step": 41
    },
    {
      "epoch": 0.7706422018348624,
      "grad_norm": 0.09319107979536057,
      "learning_rate": 4.9813653791033057e-05,
      "loss": 0.4279,
      "num_input_tokens_seen": 1737224,
      "step": 42
    },
    {
      "epoch": 0.7889908256880734,
      "grad_norm": 0.09561405330896378,
      "learning_rate": 4.980468623331273e-05,
      "loss": 0.5121,
      "num_input_tokens_seen": 1772320,
      "step": 43
    },
    {
      "epoch": 0.8073394495412844,
      "grad_norm": 0.08274025470018387,
      "learning_rate": 4.979550878853154e-05,
      "loss": 0.54,
      "num_input_tokens_seen": 1823888,
      "step": 44
    },
    {
      "epoch": 0.8256880733944955,
      "grad_norm": 0.08728913217782974,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 0.4488,
      "num_input_tokens_seen": 1872488,
      "step": 45
    },
    {
      "epoch": 0.8440366972477065,
      "grad_norm": 0.0787016749382019,
      "learning_rate": 4.9776524550184965e-05,
      "loss": 0.4353,
      "num_input_tokens_seen": 1924744,
      "step": 46
    },
    {
      "epoch": 0.8623853211009175,
      "grad_norm": 0.10952188074588776,
      "learning_rate": 4.97667179172564e-05,
      "loss": 0.4784,
      "num_input_tokens_seen": 1959936,
      "step": 47
    },
    {
      "epoch": 0.8807339449541285,
      "grad_norm": 0.08525826781988144,
      "learning_rate": 4.975670171853926e-05,
      "loss": 0.3586,
      "num_input_tokens_seen": 2003896,
      "step": 48
    },
    {
      "epoch": 0.8990825688073395,
      "grad_norm": 0.10409987717866898,
      "learning_rate": 4.9746476038786496e-05,
      "loss": 0.4451,
      "num_input_tokens_seen": 2047632,
      "step": 49
    },
    {
      "epoch": 0.9174311926605505,
      "grad_norm": 0.0782993957400322,
      "learning_rate": 4.973604096452361e-05,
      "loss": 0.3591,
      "num_input_tokens_seen": 2096928,
      "step": 50
    },
    {
      "epoch": 0.9357798165137615,
      "grad_norm": 0.09829951077699661,
      "learning_rate": 4.9725396584047925e-05,
      "loss": 0.3415,
      "num_input_tokens_seen": 2129536,
      "step": 51
    },
    {
      "epoch": 0.9541284403669725,
      "grad_norm": 0.10606162995100021,
      "learning_rate": 4.971454298742779e-05,
      "loss": 0.3758,
      "num_input_tokens_seen": 2169144,
      "step": 52
    },
    {
      "epoch": 0.9724770642201835,
      "grad_norm": 0.09280356764793396,
      "learning_rate": 4.97034802665019e-05,
      "loss": 0.485,
      "num_input_tokens_seen": 2207720,
      "step": 53
    },
    {
      "epoch": 0.9908256880733946,
      "grad_norm": 0.11888203024864197,
      "learning_rate": 4.9692208514878444e-05,
      "loss": 0.3469,
      "num_input_tokens_seen": 2236392,
      "step": 54
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.13222463428974152,
      "learning_rate": 4.9680727827934354e-05,
      "loss": 0.4284,
      "num_input_tokens_seen": 2259088,
      "step": 55
    },
    {
      "epoch": 1.018348623853211,
      "grad_norm": 0.10572745651006699,
      "learning_rate": 4.966903830281449e-05,
      "loss": 0.4186,
      "num_input_tokens_seen": 2298496,
      "step": 56
    },
    {
      "epoch": 1.036697247706422,
      "grad_norm": 0.11462350189685822,
      "learning_rate": 4.965714003843079e-05,
      "loss": 0.4696,
      "num_input_tokens_seen": 2333016,
      "step": 57
    },
    {
      "epoch": 1.0550458715596331,
      "grad_norm": 0.11215240508317947,
      "learning_rate": 4.9645033135461494e-05,
      "loss": 0.3905,
      "num_input_tokens_seen": 2367992,
      "step": 58
    },
    {
      "epoch": 1.073394495412844,
      "grad_norm": 0.0973561555147171,
      "learning_rate": 4.963271769635024e-05,
      "loss": 0.3588,
      "num_input_tokens_seen": 2415328,
      "step": 59
    },
    {
      "epoch": 1.091743119266055,
      "grad_norm": 0.10240709036588669,
      "learning_rate": 4.962019382530521e-05,
      "loss": 0.5532,
      "num_input_tokens_seen": 2454792,
      "step": 60
    },
    {
      "epoch": 1.110091743119266,
      "grad_norm": 0.0959337130188942,
      "learning_rate": 4.9607461628298244e-05,
      "loss": 0.331,
      "num_input_tokens_seen": 2503072,
      "step": 61
    },
    {
      "epoch": 1.1284403669724772,
      "grad_norm": 0.10228750854730606,
      "learning_rate": 4.9594521213063974e-05,
      "loss": 0.3728,
      "num_input_tokens_seen": 2546960,
      "step": 62
    },
    {
      "epoch": 1.146788990825688,
      "grad_norm": 0.09403488785028458,
      "learning_rate": 4.958137268909887e-05,
      "loss": 0.4695,
      "num_input_tokens_seen": 2595432,
      "step": 63
    },
    {
      "epoch": 1.165137614678899,
      "grad_norm": 0.11396344751119614,
      "learning_rate": 4.9568016167660334e-05,
      "loss": 0.3653,
      "num_input_tokens_seen": 2633912,
      "step": 64
    },
    {
      "epoch": 1.18348623853211,
      "grad_norm": 0.09487481415271759,
      "learning_rate": 4.9554451761765766e-05,
      "loss": 0.3498,
      "num_input_tokens_seen": 2680792,
      "step": 65
    },
    {
      "epoch": 1.2018348623853212,
      "grad_norm": 0.1249895691871643,
      "learning_rate": 4.9540679586191605e-05,
      "loss": 0.4053,
      "num_input_tokens_seen": 2716584,
      "step": 66
    },
    {
      "epoch": 1.2201834862385321,
      "grad_norm": 0.12268221378326416,
      "learning_rate": 4.952669975747232e-05,
      "loss": 0.4189,
      "num_input_tokens_seen": 2757088,
      "step": 67
    },
    {
      "epoch": 1.238532110091743,
      "grad_norm": 0.12126032263040543,
      "learning_rate": 4.951251239389948e-05,
      "loss": 0.4994,
      "num_input_tokens_seen": 2795664,
      "step": 68
    },
    {
      "epoch": 1.2568807339449541,
      "grad_norm": 0.1069057360291481,
      "learning_rate": 4.949811761552074e-05,
      "loss": 0.3275,
      "num_input_tokens_seen": 2840936,
      "step": 69
    },
    {
      "epoch": 1.2752293577981653,
      "grad_norm": 0.10893313586711884,
      "learning_rate": 4.948351554413879e-05,
      "loss": 0.4366,
      "num_input_tokens_seen": 2886768,
      "step": 70
    },
    {
      "epoch": 1.2935779816513762,
      "grad_norm": 0.12898756563663483,
      "learning_rate": 4.9468706303310355e-05,
      "loss": 0.3916,
      "num_input_tokens_seen": 2919328,
      "step": 71
    },
    {
      "epoch": 1.311926605504587,
      "grad_norm": 0.12405356019735336,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 0.3249,
      "num_input_tokens_seen": 2966744,
      "step": 72
    },
    {
      "epoch": 1.3302752293577982,
      "grad_norm": 0.13137595355510712,
      "learning_rate": 4.943846681630479e-05,
      "loss": 0.3956,
      "num_input_tokens_seen": 3007248,
      "step": 73
    },
    {
      "epoch": 1.3486238532110093,
      "grad_norm": 0.13920250535011292,
      "learning_rate": 4.942303682600178e-05,
      "loss": 0.3956,
      "num_input_tokens_seen": 3050960,
      "step": 74
    },
    {
      "epoch": 1.3669724770642202,
      "grad_norm": 0.1255589872598648,
      "learning_rate": 4.940740017799833e-05,
      "loss": 0.3773,
      "num_input_tokens_seen": 3088592,
      "step": 75
    },
    {
      "epoch": 1.385321100917431,
      "grad_norm": 0.10222747176885605,
      "learning_rate": 4.939155700460536e-05,
      "loss": 0.4,
      "num_input_tokens_seen": 3153520,
      "step": 76
    },
    {
      "epoch": 1.4036697247706422,
      "grad_norm": 0.13205283880233765,
      "learning_rate": 4.9375507439881266e-05,
      "loss": 0.4343,
      "num_input_tokens_seen": 3199272,
      "step": 77
    },
    {
      "epoch": 1.4220183486238533,
      "grad_norm": 0.11005694419145584,
      "learning_rate": 4.9359251619630886e-05,
      "loss": 0.3881,
      "num_input_tokens_seen": 3247128,
      "step": 78
    },
    {
      "epoch": 1.4403669724770642,
      "grad_norm": 0.14799247682094574,
      "learning_rate": 4.9342789681404275e-05,
      "loss": 0.3972,
      "num_input_tokens_seen": 3294192,
      "step": 79
    },
    {
      "epoch": 1.4587155963302751,
      "grad_norm": 0.1279418021440506,
      "learning_rate": 4.9326121764495596e-05,
      "loss": 0.3438,
      "num_input_tokens_seen": 3329736,
      "step": 80
    },
    {
      "epoch": 1.4770642201834863,
      "grad_norm": 0.11036807298660278,
      "learning_rate": 4.9309248009941914e-05,
      "loss": 0.3189,
      "num_input_tokens_seen": 3371376,
      "step": 81
    },
    {
      "epoch": 1.4954128440366974,
      "grad_norm": 0.11855707317590714,
      "learning_rate": 4.9292168560522014e-05,
      "loss": 0.401,
      "num_input_tokens_seen": 3412368,
      "step": 82
    },
    {
      "epoch": 1.5137614678899083,
      "grad_norm": 0.13195356726646423,
      "learning_rate": 4.9274883560755156e-05,
      "loss": 0.4973,
      "num_input_tokens_seen": 3455000,
      "step": 83
    },
    {
      "epoch": 1.5321100917431192,
      "grad_norm": 0.1462787538766861,
      "learning_rate": 4.925739315689991e-05,
      "loss": 0.3768,
      "num_input_tokens_seen": 3488960,
      "step": 84
    },
    {
      "epoch": 1.5504587155963303,
      "grad_norm": 0.13765974342823029,
      "learning_rate": 4.92396974969529e-05,
      "loss": 0.2999,
      "num_input_tokens_seen": 3521320,
      "step": 85
    },
    {
      "epoch": 1.5688073394495414,
      "grad_norm": 0.15276113152503967,
      "learning_rate": 4.9221796730647516e-05,
      "loss": 0.3638,
      "num_input_tokens_seen": 3559464,
      "step": 86
    },
    {
      "epoch": 1.5871559633027523,
      "grad_norm": 0.1441674381494522,
      "learning_rate": 4.92036910094527e-05,
      "loss": 0.3919,
      "num_input_tokens_seen": 3598080,
      "step": 87
    },
    {
      "epoch": 1.6055045871559632,
      "grad_norm": 0.1780252456665039,
      "learning_rate": 4.9185380486571595e-05,
      "loss": 0.3626,
      "num_input_tokens_seen": 3630064,
      "step": 88
    },
    {
      "epoch": 1.6238532110091743,
      "grad_norm": 0.16947726905345917,
      "learning_rate": 4.916686531694035e-05,
      "loss": 0.3439,
      "num_input_tokens_seen": 3661408,
      "step": 89
    },
    {
      "epoch": 1.6422018348623855,
      "grad_norm": 0.1552971601486206,
      "learning_rate": 4.914814565722671e-05,
      "loss": 0.3236,
      "num_input_tokens_seen": 3695480,
      "step": 90
    },
    {
      "epoch": 1.6605504587155964,
      "grad_norm": 0.14925938844680786,
      "learning_rate": 4.912922166582874e-05,
      "loss": 0.4255,
      "num_input_tokens_seen": 3734560,
      "step": 91
    },
    {
      "epoch": 1.6788990825688073,
      "grad_norm": 0.1332874596118927,
      "learning_rate": 4.9110093502873476e-05,
      "loss": 0.3061,
      "num_input_tokens_seen": 3773112,
      "step": 92
    },
    {
      "epoch": 1.6972477064220184,
      "grad_norm": 0.15471243858337402,
      "learning_rate": 4.909076133021557e-05,
      "loss": 0.3275,
      "num_input_tokens_seen": 3813392,
      "step": 93
    },
    {
      "epoch": 1.7155963302752295,
      "grad_norm": 0.16010524332523346,
      "learning_rate": 4.907122531143594e-05,
      "loss": 0.4179,
      "num_input_tokens_seen": 3856416,
      "step": 94
    },
    {
      "epoch": 1.7339449541284404,
      "grad_norm": 0.13423003256320953,
      "learning_rate": 4.905148561184033e-05,
      "loss": 0.3593,
      "num_input_tokens_seen": 3899472,
      "step": 95
    },
    {
      "epoch": 1.7522935779816513,
      "grad_norm": 0.14900773763656616,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 0.5007,
      "num_input_tokens_seen": 3962976,
      "step": 96
    },
    {
      "epoch": 1.7706422018348624,
      "grad_norm": 0.15728624165058136,
      "learning_rate": 4.9011395840040144e-05,
      "loss": 0.3484,
      "num_input_tokens_seen": 4000696,
      "step": 97
    },
    {
      "epoch": 1.7889908256880735,
      "grad_norm": 0.11092367768287659,
      "learning_rate": 4.8991046107058735e-05,
      "loss": 0.2889,
      "num_input_tokens_seen": 4045256,
      "step": 98
    },
    {
      "epoch": 1.8073394495412844,
      "grad_norm": 0.1289113610982895,
      "learning_rate": 4.8970493371704826e-05,
      "loss": 0.2203,
      "num_input_tokens_seen": 4076800,
      "step": 99
    },
    {
      "epoch": 1.8256880733944953,
      "grad_norm": 0.18886639177799225,
      "learning_rate": 4.894973780788722e-05,
      "loss": 0.3966,
      "num_input_tokens_seen": 4119840,
      "step": 100
    },
    {
      "epoch": 1.8440366972477065,
      "grad_norm": 0.1563039869070053,
      "learning_rate": 4.892877959123097e-05,
      "loss": 0.4417,
      "num_input_tokens_seen": 4165848,
      "step": 101
    },
    {
      "epoch": 1.8623853211009176,
      "grad_norm": 0.16883380711078644,
      "learning_rate": 4.890761889907589e-05,
      "loss": 0.4258,
      "num_input_tokens_seen": 4202824,
      "step": 102
    },
    {
      "epoch": 1.8807339449541285,
      "grad_norm": 0.18241995573043823,
      "learning_rate": 4.8886255910475054e-05,
      "loss": 0.3952,
      "num_input_tokens_seen": 4233888,
      "step": 103
    },
    {
      "epoch": 1.8990825688073394,
      "grad_norm": 0.19913265109062195,
      "learning_rate": 4.88646908061933e-05,
      "loss": 0.3241,
      "num_input_tokens_seen": 4267064,
      "step": 104
    },
    {
      "epoch": 1.9174311926605505,
      "grad_norm": 0.18295545876026154,
      "learning_rate": 4.884292376870567e-05,
      "loss": 0.4239,
      "num_input_tokens_seen": 4312536,
      "step": 105
    },
    {
      "epoch": 1.9357798165137616,
      "grad_norm": 0.16657495498657227,
      "learning_rate": 4.8820954982195905e-05,
      "loss": 0.2579,
      "num_input_tokens_seen": 4356656,
      "step": 106
    },
    {
      "epoch": 1.9541284403669725,
      "grad_norm": 0.18504932522773743,
      "learning_rate": 4.879878463255483e-05,
      "loss": 0.44,
      "num_input_tokens_seen": 4400216,
      "step": 107
    },
    {
      "epoch": 1.9724770642201834,
      "grad_norm": 0.1923118382692337,
      "learning_rate": 4.877641290737884e-05,
      "loss": 0.2662,
      "num_input_tokens_seen": 4436968,
      "step": 108
    },
    {
      "epoch": 1.9908256880733946,
      "grad_norm": 0.19636788964271545,
      "learning_rate": 4.875383999596828e-05,
      "loss": 0.4211,
      "num_input_tokens_seen": 4488232,
      "step": 109
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.3168099820613861,
      "learning_rate": 4.873106608932585e-05,
      "loss": 0.2499,
      "num_input_tokens_seen": 4518176,
      "step": 110
    },
    {
      "epoch": 2.018348623853211,
      "grad_norm": 0.14410308003425598,
      "learning_rate": 4.8708091380154984e-05,
      "loss": 0.2722,
      "num_input_tokens_seen": 4570896,
      "step": 111
    },
    {
      "epoch": 2.036697247706422,
      "grad_norm": 0.17840909957885742,
      "learning_rate": 4.868491606285823e-05,
      "loss": 0.2758,
      "num_input_tokens_seen": 4613576,
      "step": 112
    },
    {
      "epoch": 2.055045871559633,
      "grad_norm": 0.178523987531662,
      "learning_rate": 4.866154033353561e-05,
      "loss": 0.3361,
      "num_input_tokens_seen": 4652896,
      "step": 113
    },
    {
      "epoch": 2.073394495412844,
      "grad_norm": 0.17396725714206696,
      "learning_rate": 4.8637964389982926e-05,
      "loss": 0.2667,
      "num_input_tokens_seen": 4694256,
      "step": 114
    },
    {
      "epoch": 2.091743119266055,
      "grad_norm": 0.19471587240695953,
      "learning_rate": 4.8614188431690125e-05,
      "loss": 0.3628,
      "num_input_tokens_seen": 4747552,
      "step": 115
    },
    {
      "epoch": 2.1100917431192663,
      "grad_norm": 0.1722450852394104,
      "learning_rate": 4.859021265983959e-05,
      "loss": 0.3599,
      "num_input_tokens_seen": 4794080,
      "step": 116
    },
    {
      "epoch": 2.128440366972477,
      "grad_norm": 0.20137006044387817,
      "learning_rate": 4.856603727730447e-05,
      "loss": 0.4262,
      "num_input_tokens_seen": 4847912,
      "step": 117
    },
    {
      "epoch": 2.146788990825688,
      "grad_norm": 0.19395771622657776,
      "learning_rate": 4.854166248864689e-05,
      "loss": 0.3118,
      "num_input_tokens_seen": 4885480,
      "step": 118
    },
    {
      "epoch": 2.165137614678899,
      "grad_norm": 0.209548681974411,
      "learning_rate": 4.85170885001163e-05,
      "loss": 0.3725,
      "num_input_tokens_seen": 4921240,
      "step": 119
    },
    {
      "epoch": 2.18348623853211,
      "grad_norm": 0.18228279054164886,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.3816,
      "num_input_tokens_seen": 4960224,
      "step": 120
    },
    {
      "epoch": 2.2018348623853212,
      "grad_norm": 0.24349354207515717,
      "learning_rate": 4.846734375685989e-05,
      "loss": 0.3383,
      "num_input_tokens_seen": 4990536,
      "step": 121
    },
    {
      "epoch": 2.220183486238532,
      "grad_norm": 0.17600344121456146,
      "learning_rate": 4.844217342305363e-05,
      "loss": 0.3011,
      "num_input_tokens_seen": 5044296,
      "step": 122
    },
    {
      "epoch": 2.238532110091743,
      "grad_norm": 0.18766675889492035,
      "learning_rate": 4.8416804731209945e-05,
      "loss": 0.4458,
      "num_input_tokens_seen": 5088368,
      "step": 123
    },
    {
      "epoch": 2.2568807339449544,
      "grad_norm": 0.17657820880413055,
      "learning_rate": 4.839123789598829e-05,
      "loss": 0.2564,
      "num_input_tokens_seen": 5133472,
      "step": 124
    },
    {
      "epoch": 2.2752293577981653,
      "grad_norm": 0.20606014132499695,
      "learning_rate": 4.836547313372471e-05,
      "loss": 0.313,
      "num_input_tokens_seen": 5167768,
      "step": 125
    },
    {
      "epoch": 2.293577981651376,
      "grad_norm": 0.23511061072349548,
      "learning_rate": 4.8339510662430046e-05,
      "loss": 0.2963,
      "num_input_tokens_seen": 5209400,
      "step": 126
    },
    {
      "epoch": 2.311926605504587,
      "grad_norm": 0.18234293162822723,
      "learning_rate": 4.8313350701788054e-05,
      "loss": 0.2566,
      "num_input_tokens_seen": 5249360,
      "step": 127
    },
    {
      "epoch": 2.330275229357798,
      "grad_norm": 0.2223992496728897,
      "learning_rate": 4.828699347315356e-05,
      "loss": 0.2833,
      "num_input_tokens_seen": 5300808,
      "step": 128
    },
    {
      "epoch": 2.3486238532110093,
      "grad_norm": 0.23101739585399628,
      "learning_rate": 4.826043919955062e-05,
      "loss": 0.3099,
      "num_input_tokens_seen": 5332960,
      "step": 129
    },
    {
      "epoch": 2.36697247706422,
      "grad_norm": 0.26640889048576355,
      "learning_rate": 4.823368810567056e-05,
      "loss": 0.3238,
      "num_input_tokens_seen": 5365008,
      "step": 130
    },
    {
      "epoch": 2.385321100917431,
      "grad_norm": 0.2374572902917862,
      "learning_rate": 4.820674041787017e-05,
      "loss": 0.3153,
      "num_input_tokens_seen": 5400184,
      "step": 131
    },
    {
      "epoch": 2.4036697247706424,
      "grad_norm": 0.22812288999557495,
      "learning_rate": 4.817959636416969e-05,
      "loss": 0.2997,
      "num_input_tokens_seen": 5440320,
      "step": 132
    },
    {
      "epoch": 2.4220183486238533,
      "grad_norm": 0.20079149305820465,
      "learning_rate": 4.815225617425095e-05,
      "loss": 0.2373,
      "num_input_tokens_seen": 5480832,
      "step": 133
    },
    {
      "epoch": 2.4403669724770642,
      "grad_norm": 0.196709543466568,
      "learning_rate": 4.81247200794554e-05,
      "loss": 0.2456,
      "num_input_tokens_seen": 5526936,
      "step": 134
    },
    {
      "epoch": 2.458715596330275,
      "grad_norm": 0.17305873334407806,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.2099,
      "num_input_tokens_seen": 5566384,
      "step": 135
    },
    {
      "epoch": 2.477064220183486,
      "grad_norm": 3.584635019302368,
      "learning_rate": 4.806906110888606e-05,
      "loss": 0.3485,
      "num_input_tokens_seen": 5629896,
      "step": 136
    },
    {
      "epoch": 2.4954128440366974,
      "grad_norm": 0.23481500148773193,
      "learning_rate": 4.80409387040756e-05,
      "loss": 0.2231,
      "num_input_tokens_seen": 5674504,
      "step": 137
    },
    {
      "epoch": 2.5137614678899083,
      "grad_norm": 0.27899855375289917,
      "learning_rate": 4.8012621336311016e-05,
      "loss": 0.4285,
      "num_input_tokens_seen": 5714000,
      "step": 138
    },
    {
      "epoch": 2.532110091743119,
      "grad_norm": 0.24404938519001007,
      "learning_rate": 4.798410924520223e-05,
      "loss": 0.3343,
      "num_input_tokens_seen": 5756856,
      "step": 139
    },
    {
      "epoch": 2.5504587155963305,
      "grad_norm": 0.26869162917137146,
      "learning_rate": 4.7955402672006854e-05,
      "loss": 0.2497,
      "num_input_tokens_seen": 5781192,
      "step": 140
    },
    {
      "epoch": 2.5688073394495414,
      "grad_norm": 0.2057972550392151,
      "learning_rate": 4.79265018596281e-05,
      "loss": 0.2991,
      "num_input_tokens_seen": 5824024,
      "step": 141
    },
    {
      "epoch": 2.5871559633027523,
      "grad_norm": 0.2184937596321106,
      "learning_rate": 4.789740705261278e-05,
      "loss": 0.2406,
      "num_input_tokens_seen": 5862584,
      "step": 142
    },
    {
      "epoch": 2.6055045871559632,
      "grad_norm": 0.23603741824626923,
      "learning_rate": 4.786811849714918e-05,
      "loss": 0.2722,
      "num_input_tokens_seen": 5897344,
      "step": 143
    },
    {
      "epoch": 2.623853211009174,
      "grad_norm": 0.22983981668949127,
      "learning_rate": 4.783863644106502e-05,
      "loss": 0.374,
      "num_input_tokens_seen": 5931736,
      "step": 144
    },
    {
      "epoch": 2.6422018348623855,
      "grad_norm": 0.2825419306755066,
      "learning_rate": 4.780896113382536e-05,
      "loss": 0.3386,
      "num_input_tokens_seen": 5972784,
      "step": 145
    },
    {
      "epoch": 2.6605504587155964,
      "grad_norm": 0.4502134621143341,
      "learning_rate": 4.777909282653042e-05,
      "loss": 0.2289,
      "num_input_tokens_seen": 6018968,
      "step": 146
    },
    {
      "epoch": 2.6788990825688073,
      "grad_norm": 0.2428288459777832,
      "learning_rate": 4.7749031771913584e-05,
      "loss": 0.4061,
      "num_input_tokens_seen": 6062520,
      "step": 147
    },
    {
      "epoch": 2.6972477064220186,
      "grad_norm": 0.2685629725456238,
      "learning_rate": 4.771877822433911e-05,
      "loss": 0.2198,
      "num_input_tokens_seen": 6087928,
      "step": 148
    },
    {
      "epoch": 2.7155963302752295,
      "grad_norm": 0.24021446704864502,
      "learning_rate": 4.7688332439800096e-05,
      "loss": 0.34,
      "num_input_tokens_seen": 6134792,
      "step": 149
    },
    {
      "epoch": 2.7339449541284404,
      "grad_norm": 0.2568534314632416,
      "learning_rate": 4.765769467591625e-05,
      "loss": 0.3292,
      "num_input_tokens_seen": 6183296,
      "step": 150
    },
    {
      "epoch": 2.7522935779816513,
      "grad_norm": 0.20823974907398224,
      "learning_rate": 4.762686519193175e-05,
      "loss": 0.2539,
      "num_input_tokens_seen": 6225840,
      "step": 151
    },
    {
      "epoch": 2.770642201834862,
      "grad_norm": 0.23333317041397095,
      "learning_rate": 4.759584424871302e-05,
      "loss": 0.3571,
      "num_input_tokens_seen": 6274760,
      "step": 152
    },
    {
      "epoch": 2.7889908256880735,
      "grad_norm": 0.20232398808002472,
      "learning_rate": 4.756463210874652e-05,
      "loss": 0.2783,
      "num_input_tokens_seen": 6326168,
      "step": 153
    },
    {
      "epoch": 2.8073394495412844,
      "grad_norm": 0.3479433059692383,
      "learning_rate": 4.7533229036136553e-05,
      "loss": 0.2925,
      "num_input_tokens_seen": 6360312,
      "step": 154
    },
    {
      "epoch": 2.8256880733944953,
      "grad_norm": 0.2659524083137512,
      "learning_rate": 4.750163529660303e-05,
      "loss": 0.2606,
      "num_input_tokens_seen": 6395496,
      "step": 155
    },
    {
      "epoch": 2.8440366972477067,
      "grad_norm": 0.24823158979415894,
      "learning_rate": 4.7469851157479177e-05,
      "loss": 0.3721,
      "num_input_tokens_seen": 6437064,
      "step": 156
    },
    {
      "epoch": 2.8623853211009176,
      "grad_norm": 0.32034072279930115,
      "learning_rate": 4.743787688770932e-05,
      "loss": 0.3931,
      "num_input_tokens_seen": 6477616,
      "step": 157
    },
    {
      "epoch": 2.8807339449541285,
      "grad_norm": 0.23295725882053375,
      "learning_rate": 4.740571275784659e-05,
      "loss": 0.2201,
      "num_input_tokens_seen": 6518680,
      "step": 158
    },
    {
      "epoch": 2.8990825688073394,
      "grad_norm": 0.2758423984050751,
      "learning_rate": 4.737335904005063e-05,
      "loss": 0.2579,
      "num_input_tokens_seen": 6549768,
      "step": 159
    },
    {
      "epoch": 2.9174311926605503,
      "grad_norm": 0.26690953969955444,
      "learning_rate": 4.734081600808531e-05,
      "loss": 0.2575,
      "num_input_tokens_seen": 6581000,
      "step": 160
    },
    {
      "epoch": 2.9357798165137616,
      "grad_norm": 0.26657482981681824,
      "learning_rate": 4.730808393731639e-05,
      "loss": 0.2597,
      "num_input_tokens_seen": 6612632,
      "step": 161
    },
    {
      "epoch": 2.9541284403669725,
      "grad_norm": 0.22647295892238617,
      "learning_rate": 4.72751631047092e-05,
      "loss": 0.3335,
      "num_input_tokens_seen": 6654288,
      "step": 162
    },
    {
      "epoch": 2.9724770642201834,
      "grad_norm": 0.2863366901874542,
      "learning_rate": 4.72420537888263e-05,
      "loss": 0.374,
      "num_input_tokens_seen": 6707208,
      "step": 163
    },
    {
      "epoch": 2.9908256880733948,
      "grad_norm": 0.2606408894062042,
      "learning_rate": 4.7208756269825104e-05,
      "loss": 0.3477,
      "num_input_tokens_seen": 6748448,
      "step": 164
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.440924733877182,
      "learning_rate": 4.717527082945554e-05,
      "loss": 0.3214,
      "num_input_tokens_seen": 6777264,
      "step": 165
    },
    {
      "epoch": 3.018348623853211,
      "grad_norm": 0.27583903074264526,
      "learning_rate": 4.714159775105765e-05,
      "loss": 0.2681,
      "num_input_tokens_seen": 6809456,
      "step": 166
    },
    {
      "epoch": 3.036697247706422,
      "grad_norm": 0.2995987832546234,
      "learning_rate": 4.7107737319559176e-05,
      "loss": 0.2633,
      "num_input_tokens_seen": 6845768,
      "step": 167
    },
    {
      "epoch": 3.055045871559633,
      "grad_norm": 0.23999951779842377,
      "learning_rate": 4.707368982147318e-05,
      "loss": 0.1961,
      "num_input_tokens_seen": 6893056,
      "step": 168
    },
    {
      "epoch": 3.073394495412844,
      "grad_norm": 0.23356525599956512,
      "learning_rate": 4.703945554489558e-05,
      "loss": 0.2836,
      "num_input_tokens_seen": 6932480,
      "step": 169
    },
    {
      "epoch": 3.091743119266055,
      "grad_norm": 0.29919493198394775,
      "learning_rate": 4.700503477950278e-05,
      "loss": 0.2838,
      "num_input_tokens_seen": 6975992,
      "step": 170
    },
    {
      "epoch": 3.1100917431192663,
      "grad_norm": 0.3350690007209778,
      "learning_rate": 4.697042781654913e-05,
      "loss": 0.3489,
      "num_input_tokens_seen": 7021840,
      "step": 171
    },
    {
      "epoch": 3.128440366972477,
      "grad_norm": 0.2837466895580292,
      "learning_rate": 4.693563494886455e-05,
      "loss": 0.3797,
      "num_input_tokens_seen": 7065192,
      "step": 172
    },
    {
      "epoch": 3.146788990825688,
      "grad_norm": 0.24601787328720093,
      "learning_rate": 4.6900656470851964e-05,
      "loss": 0.2046,
      "num_input_tokens_seen": 7114544,
      "step": 173
    },
    {
      "epoch": 3.165137614678899,
      "grad_norm": 0.32290250062942505,
      "learning_rate": 4.6865492678484895e-05,
      "loss": 0.2596,
      "num_input_tokens_seen": 7152736,
      "step": 174
    },
    {
      "epoch": 3.18348623853211,
      "grad_norm": 0.33591920137405396,
      "learning_rate": 4.68301438693049e-05,
      "loss": 0.3045,
      "num_input_tokens_seen": 7207464,
      "step": 175
    },
    {
      "epoch": 3.2018348623853212,
      "grad_norm": 0.25471043586730957,
      "learning_rate": 4.679461034241906e-05,
      "loss": 0.2096,
      "num_input_tokens_seen": 7238640,
      "step": 176
    },
    {
      "epoch": 3.220183486238532,
      "grad_norm": 0.31238994002342224,
      "learning_rate": 4.6758892398497494e-05,
      "loss": 0.2226,
      "num_input_tokens_seen": 7279112,
      "step": 177
    },
    {
      "epoch": 3.238532110091743,
      "grad_norm": 0.35679712891578674,
      "learning_rate": 4.672299033977076e-05,
      "loss": 0.2403,
      "num_input_tokens_seen": 7311632,
      "step": 178
    },
    {
      "epoch": 3.2568807339449544,
      "grad_norm": 0.326914519071579,
      "learning_rate": 4.6686904470027316e-05,
      "loss": 0.2156,
      "num_input_tokens_seen": 7344864,
      "step": 179
    },
    {
      "epoch": 3.2752293577981653,
      "grad_norm": 0.3293381929397583,
      "learning_rate": 4.665063509461097e-05,
      "loss": 0.238,
      "num_input_tokens_seen": 7389944,
      "step": 180
    },
    {
      "epoch": 3.293577981651376,
      "grad_norm": 0.3313307762145996,
      "learning_rate": 4.661418252041827e-05,
      "loss": 0.2251,
      "num_input_tokens_seen": 7423672,
      "step": 181
    },
    {
      "epoch": 3.311926605504587,
      "grad_norm": 0.3328595459461212,
      "learning_rate": 4.657754705589591e-05,
      "loss": 0.2922,
      "num_input_tokens_seen": 7459576,
      "step": 182
    },
    {
      "epoch": 3.330275229357798,
      "grad_norm": 0.2721710801124573,
      "learning_rate": 4.6540729011038146e-05,
      "loss": 0.2698,
      "num_input_tokens_seen": 7511736,
      "step": 183
    },
    {
      "epoch": 3.3486238532110093,
      "grad_norm": 0.2488890290260315,
      "learning_rate": 4.650372869738414e-05,
      "loss": 0.173,
      "num_input_tokens_seen": 7558552,
      "step": 184
    },
    {
      "epoch": 3.36697247706422,
      "grad_norm": 0.3800615668296814,
      "learning_rate": 4.6466546428015336e-05,
      "loss": 0.32,
      "num_input_tokens_seen": 7599040,
      "step": 185
    },
    {
      "epoch": 3.385321100917431,
      "grad_norm": 0.3377014100551605,
      "learning_rate": 4.642918251755281e-05,
      "loss": 0.3058,
      "num_input_tokens_seen": 7653264,
      "step": 186
    },
    {
      "epoch": 3.4036697247706424,
      "grad_norm": 0.25239789485931396,
      "learning_rate": 4.639163728215463e-05,
      "loss": 0.1896,
      "num_input_tokens_seen": 7694272,
      "step": 187
    },
    {
      "epoch": 3.4220183486238533,
      "grad_norm": 0.34607502818107605,
      "learning_rate": 4.6353911039513145e-05,
      "loss": 0.2933,
      "num_input_tokens_seen": 7730848,
      "step": 188
    },
    {
      "epoch": 3.4403669724770642,
      "grad_norm": 0.30653324723243713,
      "learning_rate": 4.6316004108852305e-05,
      "loss": 0.2625,
      "num_input_tokens_seen": 7781200,
      "step": 189
    },
    {
      "epoch": 3.458715596330275,
      "grad_norm": 0.2943236231803894,
      "learning_rate": 4.627791681092499e-05,
      "loss": 0.3372,
      "num_input_tokens_seen": 7825032,
      "step": 190
    },
    {
      "epoch": 3.477064220183486,
      "grad_norm": 0.30080685019493103,
      "learning_rate": 4.623964946801027e-05,
      "loss": 0.2229,
      "num_input_tokens_seen": 7855840,
      "step": 191
    },
    {
      "epoch": 3.4954128440366974,
      "grad_norm": 0.3511403799057007,
      "learning_rate": 4.620120240391065e-05,
      "loss": 0.3967,
      "num_input_tokens_seen": 7905928,
      "step": 192
    },
    {
      "epoch": 3.5137614678899083,
      "grad_norm": 0.273583322763443,
      "learning_rate": 4.61625759439494e-05,
      "loss": 0.2254,
      "num_input_tokens_seen": 7955992,
      "step": 193
    },
    {
      "epoch": 3.532110091743119,
      "grad_norm": 0.3457902669906616,
      "learning_rate": 4.612377041496776e-05,
      "loss": 0.2553,
      "num_input_tokens_seen": 7998024,
      "step": 194
    },
    {
      "epoch": 3.5504587155963305,
      "grad_norm": 0.31968954205513,
      "learning_rate": 4.608478614532215e-05,
      "loss": 0.2197,
      "num_input_tokens_seen": 8055672,
      "step": 195
    },
    {
      "epoch": 3.5688073394495414,
      "grad_norm": 0.34753403067588806,
      "learning_rate": 4.604562346488144e-05,
      "loss": 0.2507,
      "num_input_tokens_seen": 8090848,
      "step": 196
    },
    {
      "epoch": 3.5871559633027523,
      "grad_norm": 0.3808669149875641,
      "learning_rate": 4.6006282705024144e-05,
      "loss": 0.2422,
      "num_input_tokens_seen": 8136680,
      "step": 197
    },
    {
      "epoch": 3.6055045871559632,
      "grad_norm": 0.3004499673843384,
      "learning_rate": 4.5966764198635606e-05,
      "loss": 0.2107,
      "num_input_tokens_seen": 8187472,
      "step": 198
    },
    {
      "epoch": 3.623853211009174,
      "grad_norm": 0.30718186497688293,
      "learning_rate": 4.592706828010518e-05,
      "loss": 0.1854,
      "num_input_tokens_seen": 8225216,
      "step": 199
    },
    {
      "epoch": 3.6422018348623855,
      "grad_norm": 0.23112858831882477,
      "learning_rate": 4.588719528532342e-05,
      "loss": 0.1687,
      "num_input_tokens_seen": 8274456,
      "step": 200
    },
    {
      "epoch": 3.6605504587155964,
      "grad_norm": 0.25966888666152954,
      "learning_rate": 4.5847145551679206e-05,
      "loss": 0.2549,
      "num_input_tokens_seen": 8317016,
      "step": 201
    },
    {
      "epoch": 3.6788990825688073,
      "grad_norm": 0.25600987672805786,
      "learning_rate": 4.580691941805695e-05,
      "loss": 0.1602,
      "num_input_tokens_seen": 8361856,
      "step": 202
    },
    {
      "epoch": 3.6972477064220186,
      "grad_norm": 0.33986184000968933,
      "learning_rate": 4.5766517224833637e-05,
      "loss": 0.2495,
      "num_input_tokens_seen": 8410696,
      "step": 203
    },
    {
      "epoch": 3.7155963302752295,
      "grad_norm": 0.36899781227111816,
      "learning_rate": 4.572593931387604e-05,
      "loss": 0.2012,
      "num_input_tokens_seen": 8441872,
      "step": 204
    },
    {
      "epoch": 3.7339449541284404,
      "grad_norm": 0.42072632908821106,
      "learning_rate": 4.568518602853776e-05,
      "loss": 0.2373,
      "num_input_tokens_seen": 8482544,
      "step": 205
    },
    {
      "epoch": 3.7522935779816513,
      "grad_norm": 0.40593233704566956,
      "learning_rate": 4.5644257713656356e-05,
      "loss": 0.233,
      "num_input_tokens_seen": 8519856,
      "step": 206
    },
    {
      "epoch": 3.770642201834862,
      "grad_norm": 0.38003161549568176,
      "learning_rate": 4.5603154715550386e-05,
      "loss": 0.225,
      "num_input_tokens_seen": 8551392,
      "step": 207
    },
    {
      "epoch": 3.7889908256880735,
      "grad_norm": 0.2564244568347931,
      "learning_rate": 4.556187738201656e-05,
      "loss": 0.2975,
      "num_input_tokens_seen": 8599472,
      "step": 208
    },
    {
      "epoch": 3.8073394495412844,
      "grad_norm": 0.29023391008377075,
      "learning_rate": 4.552042606232668e-05,
      "loss": 0.2033,
      "num_input_tokens_seen": 8631880,
      "step": 209
    },
    {
      "epoch": 3.8256880733944953,
      "grad_norm": 0.32886001467704773,
      "learning_rate": 4.54788011072248e-05,
      "loss": 0.2024,
      "num_input_tokens_seen": 8675016,
      "step": 210
    },
    {
      "epoch": 3.8440366972477067,
      "grad_norm": 0.36749884486198425,
      "learning_rate": 4.5437002868924166e-05,
      "loss": 0.2304,
      "num_input_tokens_seen": 8713248,
      "step": 211
    },
    {
      "epoch": 3.8623853211009176,
      "grad_norm": 0.3055097758769989,
      "learning_rate": 4.539503170110431e-05,
      "loss": 0.2928,
      "num_input_tokens_seen": 8748800,
      "step": 212
    },
    {
      "epoch": 3.8807339449541285,
      "grad_norm": 0.38436686992645264,
      "learning_rate": 4.535288795890798e-05,
      "loss": 0.2214,
      "num_input_tokens_seen": 8787832,
      "step": 213
    },
    {
      "epoch": 3.8990825688073394,
      "grad_norm": 0.44330883026123047,
      "learning_rate": 4.531057199893824e-05,
      "loss": 0.2168,
      "num_input_tokens_seen": 8819616,
      "step": 214
    },
    {
      "epoch": 3.9174311926605503,
      "grad_norm": 0.28318527340888977,
      "learning_rate": 4.526808417925531e-05,
      "loss": 0.279,
      "num_input_tokens_seen": 8860744,
      "step": 215
    },
    {
      "epoch": 3.9357798165137616,
      "grad_norm": 0.3287319839000702,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.2597,
      "num_input_tokens_seen": 8906432,
      "step": 216
    },
    {
      "epoch": 3.9541284403669725,
      "grad_norm": 0.35815751552581787,
      "learning_rate": 4.5182594400259e-05,
      "loss": 0.241,
      "num_input_tokens_seen": 8955104,
      "step": 217
    },
    {
      "epoch": 3.9724770642201834,
      "grad_norm": 0.3299608528614044,
      "learning_rate": 4.5139593164324986e-05,
      "loss": 0.2157,
      "num_input_tokens_seen": 8990200,
      "step": 218
    },
    {
      "epoch": 3.9908256880733948,
      "grad_norm": 0.2916093170642853,
      "learning_rate": 4.509642151543043e-05,
      "loss": 0.2046,
      "num_input_tokens_seen": 9020760,
      "step": 219
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.40076637268066406,
      "learning_rate": 4.50530798188761e-05,
      "loss": 0.1714,
      "num_input_tokens_seen": 9036352,
      "step": 220
    },
    {
      "epoch": 4.018348623853211,
      "grad_norm": 0.40515249967575073,
      "learning_rate": 4.50095684414016e-05,
      "loss": 0.1811,
      "num_input_tokens_seen": 9091776,
      "step": 221
    },
    {
      "epoch": 4.036697247706422,
      "grad_norm": 0.32984718680381775,
      "learning_rate": 4.496588775118232e-05,
      "loss": 0.2101,
      "num_input_tokens_seen": 9134080,
      "step": 222
    },
    {
      "epoch": 4.055045871559633,
      "grad_norm": 0.27288541197776794,
      "learning_rate": 4.4922038117826334e-05,
      "loss": 0.1444,
      "num_input_tokens_seen": 9172720,
      "step": 223
    },
    {
      "epoch": 4.073394495412844,
      "grad_norm": 0.5168021321296692,
      "learning_rate": 4.48780199123712e-05,
      "loss": 0.2342,
      "num_input_tokens_seen": 9213664,
      "step": 224
    },
    {
      "epoch": 4.091743119266055,
      "grad_norm": 0.3986498713493347,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 0.1676,
      "num_input_tokens_seen": 9261768,
      "step": 225
    },
    {
      "epoch": 4.110091743119266,
      "grad_norm": 0.4472793936729431,
      "learning_rate": 4.478947927644258e-05,
      "loss": 0.295,
      "num_input_tokens_seen": 9300928,
      "step": 226
    },
    {
      "epoch": 4.128440366972477,
      "grad_norm": 0.39240705966949463,
      "learning_rate": 4.474495759516358e-05,
      "loss": 0.17,
      "num_input_tokens_seen": 9329472,
      "step": 227
    },
    {
      "epoch": 4.146788990825688,
      "grad_norm": 0.354526549577713,
      "learning_rate": 4.4700268840168045e-05,
      "loss": 0.1759,
      "num_input_tokens_seen": 9365640,
      "step": 228
    },
    {
      "epoch": 4.165137614678899,
      "grad_norm": 0.3216766119003296,
      "learning_rate": 4.4655413389593856e-05,
      "loss": 0.1878,
      "num_input_tokens_seen": 9410552,
      "step": 229
    },
    {
      "epoch": 4.18348623853211,
      "grad_norm": 0.30976617336273193,
      "learning_rate": 4.4610391622989396e-05,
      "loss": 0.1637,
      "num_input_tokens_seen": 9452416,
      "step": 230
    },
    {
      "epoch": 4.201834862385321,
      "grad_norm": 0.385437935590744,
      "learning_rate": 4.456520392131035e-05,
      "loss": 0.2748,
      "num_input_tokens_seen": 9503528,
      "step": 231
    },
    {
      "epoch": 4.220183486238533,
      "grad_norm": 0.37948480248451233,
      "learning_rate": 4.4519850666916484e-05,
      "loss": 0.2635,
      "num_input_tokens_seen": 9541592,
      "step": 232
    },
    {
      "epoch": 4.238532110091743,
      "grad_norm": 0.36141568422317505,
      "learning_rate": 4.447433224356839e-05,
      "loss": 0.2027,
      "num_input_tokens_seen": 9586064,
      "step": 233
    },
    {
      "epoch": 4.256880733944954,
      "grad_norm": 0.4549350440502167,
      "learning_rate": 4.442864903642428e-05,
      "loss": 0.2107,
      "num_input_tokens_seen": 9641688,
      "step": 234
    },
    {
      "epoch": 4.275229357798165,
      "grad_norm": 0.3979765474796295,
      "learning_rate": 4.438280143203665e-05,
      "loss": 0.2879,
      "num_input_tokens_seen": 9686240,
      "step": 235
    },
    {
      "epoch": 4.293577981651376,
      "grad_norm": 0.35011065006256104,
      "learning_rate": 4.43367898183491e-05,
      "loss": 0.2594,
      "num_input_tokens_seen": 9735632,
      "step": 236
    },
    {
      "epoch": 4.3119266055045875,
      "grad_norm": 0.3999224007129669,
      "learning_rate": 4.4290614584693004e-05,
      "loss": 0.1907,
      "num_input_tokens_seen": 9766536,
      "step": 237
    },
    {
      "epoch": 4.330275229357798,
      "grad_norm": 0.39629611372947693,
      "learning_rate": 4.4244276121784195e-05,
      "loss": 0.1805,
      "num_input_tokens_seen": 9796400,
      "step": 238
    },
    {
      "epoch": 4.348623853211009,
      "grad_norm": 0.36784592270851135,
      "learning_rate": 4.4197774821719714e-05,
      "loss": 0.1824,
      "num_input_tokens_seen": 9831992,
      "step": 239
    },
    {
      "epoch": 4.36697247706422,
      "grad_norm": 0.3408430516719818,
      "learning_rate": 4.415111107797445e-05,
      "loss": 0.1721,
      "num_input_tokens_seen": 9875640,
      "step": 240
    },
    {
      "epoch": 4.385321100917431,
      "grad_norm": 0.3232553005218506,
      "learning_rate": 4.410428528539783e-05,
      "loss": 0.275,
      "num_input_tokens_seen": 9916816,
      "step": 241
    },
    {
      "epoch": 4.4036697247706424,
      "grad_norm": 0.38150206208229065,
      "learning_rate": 4.405729784021046e-05,
      "loss": 0.1963,
      "num_input_tokens_seen": 9962928,
      "step": 242
    },
    {
      "epoch": 4.422018348623853,
      "grad_norm": 0.4176963269710541,
      "learning_rate": 4.401014914000078e-05,
      "loss": 0.1626,
      "num_input_tokens_seen": 9997224,
      "step": 243
    },
    {
      "epoch": 4.440366972477064,
      "grad_norm": 0.38855600357055664,
      "learning_rate": 4.396283958372173e-05,
      "loss": 0.1733,
      "num_input_tokens_seen": 10036248,
      "step": 244
    },
    {
      "epoch": 4.458715596330276,
      "grad_norm": 0.3860638737678528,
      "learning_rate": 4.391536957168733e-05,
      "loss": 0.1936,
      "num_input_tokens_seen": 10070312,
      "step": 245
    },
    {
      "epoch": 4.477064220183486,
      "grad_norm": 0.31510865688323975,
      "learning_rate": 4.386773950556931e-05,
      "loss": 0.1847,
      "num_input_tokens_seen": 10114568,
      "step": 246
    },
    {
      "epoch": 4.495412844036697,
      "grad_norm": 0.3280925154685974,
      "learning_rate": 4.381994978839371e-05,
      "loss": 0.1981,
      "num_input_tokens_seen": 10150280,
      "step": 247
    },
    {
      "epoch": 4.513761467889909,
      "grad_norm": 0.33136090636253357,
      "learning_rate": 4.377200082453749e-05,
      "loss": 0.1681,
      "num_input_tokens_seen": 10194000,
      "step": 248
    },
    {
      "epoch": 4.532110091743119,
      "grad_norm": 0.43955501914024353,
      "learning_rate": 4.372389301972506e-05,
      "loss": 0.2111,
      "num_input_tokens_seen": 10232264,
      "step": 249
    },
    {
      "epoch": 4.5504587155963305,
      "grad_norm": 0.28938886523246765,
      "learning_rate": 4.36756267810249e-05,
      "loss": 0.2307,
      "num_input_tokens_seen": 10271880,
      "step": 250
    },
    {
      "epoch": 4.568807339449541,
      "grad_norm": 0.37232765555381775,
      "learning_rate": 4.36272025168461e-05,
      "loss": 0.1609,
      "num_input_tokens_seen": 10317720,
      "step": 251
    },
    {
      "epoch": 4.587155963302752,
      "grad_norm": 0.6248548030853271,
      "learning_rate": 4.357862063693486e-05,
      "loss": 0.2456,
      "num_input_tokens_seen": 10362168,
      "step": 252
    },
    {
      "epoch": 4.605504587155964,
      "grad_norm": 0.32828131318092346,
      "learning_rate": 4.3529881552371096e-05,
      "loss": 0.3159,
      "num_input_tokens_seen": 10414312,
      "step": 253
    },
    {
      "epoch": 4.623853211009174,
      "grad_norm": 0.4158332049846649,
      "learning_rate": 4.34809856755649e-05,
      "loss": 0.2194,
      "num_input_tokens_seen": 10451800,
      "step": 254
    },
    {
      "epoch": 4.6422018348623855,
      "grad_norm": 0.3648194670677185,
      "learning_rate": 4.34319334202531e-05,
      "loss": 0.1846,
      "num_input_tokens_seen": 10496224,
      "step": 255
    },
    {
      "epoch": 4.660550458715596,
      "grad_norm": 0.36835575103759766,
      "learning_rate": 4.3382725201495723e-05,
      "loss": 0.1906,
      "num_input_tokens_seen": 10536392,
      "step": 256
    },
    {
      "epoch": 4.678899082568807,
      "grad_norm": 0.3501613140106201,
      "learning_rate": 4.333336143567247e-05,
      "loss": 0.1793,
      "num_input_tokens_seen": 10577640,
      "step": 257
    },
    {
      "epoch": 4.697247706422019,
      "grad_norm": 0.3431616425514221,
      "learning_rate": 4.3283842540479264e-05,
      "loss": 0.1576,
      "num_input_tokens_seen": 10613376,
      "step": 258
    },
    {
      "epoch": 4.715596330275229,
      "grad_norm": 0.3290237784385681,
      "learning_rate": 4.3234168934924636e-05,
      "loss": 0.1447,
      "num_input_tokens_seen": 10647232,
      "step": 259
    },
    {
      "epoch": 4.73394495412844,
      "grad_norm": 0.40264153480529785,
      "learning_rate": 4.318434103932622e-05,
      "loss": 0.1488,
      "num_input_tokens_seen": 10696024,
      "step": 260
    },
    {
      "epoch": 4.752293577981652,
      "grad_norm": 0.3453703820705414,
      "learning_rate": 4.313435927530719e-05,
      "loss": 0.1597,
      "num_input_tokens_seen": 10730408,
      "step": 261
    },
    {
      "epoch": 4.770642201834862,
      "grad_norm": 0.3993653655052185,
      "learning_rate": 4.30842240657927e-05,
      "loss": 0.2266,
      "num_input_tokens_seen": 10764776,
      "step": 262
    },
    {
      "epoch": 4.7889908256880735,
      "grad_norm": 0.4080798923969269,
      "learning_rate": 4.303393583500628e-05,
      "loss": 0.1562,
      "num_input_tokens_seen": 10792272,
      "step": 263
    },
    {
      "epoch": 4.807339449541285,
      "grad_norm": 0.3028413951396942,
      "learning_rate": 4.2983495008466276e-05,
      "loss": 0.1504,
      "num_input_tokens_seen": 10825240,
      "step": 264
    },
    {
      "epoch": 4.825688073394495,
      "grad_norm": 0.3980019688606262,
      "learning_rate": 4.293290201298223e-05,
      "loss": 0.1648,
      "num_input_tokens_seen": 10883592,
      "step": 265
    },
    {
      "epoch": 4.844036697247707,
      "grad_norm": 0.42035797238349915,
      "learning_rate": 4.288215727665129e-05,
      "loss": 0.1652,
      "num_input_tokens_seen": 10922640,
      "step": 266
    },
    {
      "epoch": 4.862385321100917,
      "grad_norm": 0.37766003608703613,
      "learning_rate": 4.2831261228854544e-05,
      "loss": 0.1817,
      "num_input_tokens_seen": 10967288,
      "step": 267
    },
    {
      "epoch": 4.8807339449541285,
      "grad_norm": 0.3495088815689087,
      "learning_rate": 4.278021430025343e-05,
      "loss": 0.2066,
      "num_input_tokens_seen": 11011152,
      "step": 268
    },
    {
      "epoch": 4.89908256880734,
      "grad_norm": 0.34071093797683716,
      "learning_rate": 4.272901692278609e-05,
      "loss": 0.1522,
      "num_input_tokens_seen": 11055608,
      "step": 269
    },
    {
      "epoch": 4.91743119266055,
      "grad_norm": 0.33437398076057434,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.2201,
      "num_input_tokens_seen": 11101992,
      "step": 270
    },
    {
      "epoch": 4.935779816513762,
      "grad_norm": 0.3340584337711334,
      "learning_rate": 4.262617255536676e-05,
      "loss": 0.2777,
      "num_input_tokens_seen": 11141408,
      "step": 271
    },
    {
      "epoch": 4.954128440366972,
      "grad_norm": 0.348679780960083,
      "learning_rate": 4.257452643564155e-05,
      "loss": 0.1857,
      "num_input_tokens_seen": 11185344,
      "step": 272
    },
    {
      "epoch": 4.972477064220183,
      "grad_norm": 0.3691309094429016,
      "learning_rate": 4.2522731607496275e-05,
      "loss": 0.1653,
      "num_input_tokens_seen": 11216160,
      "step": 273
    },
    {
      "epoch": 4.990825688073395,
      "grad_norm": 0.4692390561103821,
      "learning_rate": 4.24707885091975e-05,
      "loss": 0.1761,
      "num_input_tokens_seen": 11276200,
      "step": 274
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.4963766932487488,
      "learning_rate": 4.241869758026638e-05,
      "loss": 0.1582,
      "num_input_tokens_seen": 11295440,
      "step": 275
    },
    {
      "epoch": 5.018348623853211,
      "grad_norm": 0.37284544110298157,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 0.1538,
      "num_input_tokens_seen": 11336400,
      "step": 276
    },
    {
      "epoch": 5.036697247706422,
      "grad_norm": 0.2949577867984772,
      "learning_rate": 4.231407399484236e-05,
      "loss": 0.1319,
      "num_input_tokens_seen": 11374056,
      "step": 277
    },
    {
      "epoch": 5.055045871559633,
      "grad_norm": 0.32850027084350586,
      "learning_rate": 4.226154222363124e-05,
      "loss": 0.174,
| "num_input_tokens_seen": 11414776, | |
| "step": 278 | |
| }, | |
| { | |
| "epoch": 5.073394495412844, | |
| "grad_norm": 0.3812579810619354, | |
| "learning_rate": 4.220886439234385e-05, | |
| "loss": 0.1831, | |
| "num_input_tokens_seen": 11465456, | |
| "step": 279 | |
| }, | |
| { | |
| "epoch": 5.091743119266055, | |
| "grad_norm": 0.3396337032318115, | |
| "learning_rate": 4.215604094671835e-05, | |
| "loss": 0.1515, | |
| "num_input_tokens_seen": 11500184, | |
| "step": 280 | |
| }, | |
| { | |
| "epoch": 5.110091743119266, | |
| "grad_norm": 0.35079169273376465, | |
| "learning_rate": 4.2103072333725e-05, | |
| "loss": 0.1295, | |
| "num_input_tokens_seen": 11537112, | |
| "step": 281 | |
| }, | |
| { | |
| "epoch": 5.128440366972477, | |
| "grad_norm": 0.3811327815055847, | |
| "learning_rate": 4.2049959001562464e-05, | |
| "loss": 0.1339, | |
| "num_input_tokens_seen": 11579440, | |
| "step": 282 | |
| }, | |
| { | |
| "epoch": 5.146788990825688, | |
| "grad_norm": 0.3935602009296417, | |
| "learning_rate": 4.199670139965393e-05, | |
| "loss": 0.1909, | |
| "num_input_tokens_seen": 11643272, | |
| "step": 283 | |
| }, | |
| { | |
| "epoch": 5.165137614678899, | |
| "grad_norm": 0.3952607810497284, | |
| "learning_rate": 4.194329997864331e-05, | |
| "loss": 0.2334, | |
| "num_input_tokens_seen": 11677528, | |
| "step": 284 | |
| }, | |
| { | |
| "epoch": 5.18348623853211, | |
| "grad_norm": 0.3687472939491272, | |
| "learning_rate": 4.188975519039151e-05, | |
| "loss": 0.1406, | |
| "num_input_tokens_seen": 11727944, | |
| "step": 285 | |
| }, | |
| { | |
| "epoch": 5.201834862385321, | |
| "grad_norm": 0.3518407344818115, | |
| "learning_rate": 4.183606748797251e-05, | |
| "loss": 0.138, | |
| "num_input_tokens_seen": 11779568, | |
| "step": 286 | |
| }, | |
| { | |
| "epoch": 5.220183486238533, | |
| "grad_norm": 0.4072832763195038, | |
| "learning_rate": 4.1782237325669595e-05, | |
| "loss": 0.159, | |
| "num_input_tokens_seen": 11824600, | |
| "step": 287 | |
| }, | |
| { | |
| "epoch": 5.238532110091743, | |
| "grad_norm": 0.386280357837677, | |
| "learning_rate": 4.172826515897146e-05, | |
| "loss": 0.2517, | |
| "num_input_tokens_seen": 11873736, | |
| "step": 288 | |
| }, | |
| { | |
| "epoch": 5.256880733944954, | |
| "grad_norm": 0.3576860725879669, | |
| "learning_rate": 4.167415144456841e-05, | |
| "loss": 0.1349, | |
| "num_input_tokens_seen": 11909608, | |
| "step": 289 | |
| }, | |
| { | |
| "epoch": 5.275229357798165, | |
| "grad_norm": 0.3952435851097107, | |
| "learning_rate": 4.1619896640348445e-05, | |
| "loss": 0.1348, | |
| "num_input_tokens_seen": 11945440, | |
| "step": 290 | |
| }, | |
| { | |
| "epoch": 5.293577981651376, | |
| "grad_norm": 0.3565181493759155, | |
| "learning_rate": 4.1565501205393445e-05, | |
| "loss": 0.1331, | |
| "num_input_tokens_seen": 11985568, | |
| "step": 291 | |
| }, | |
| { | |
| "epoch": 5.3119266055045875, | |
| "grad_norm": 0.40558820962905884, | |
| "learning_rate": 4.1510965599975196e-05, | |
| "loss": 0.2337, | |
| "num_input_tokens_seen": 12034320, | |
| "step": 292 | |
| }, | |
| { | |
| "epoch": 5.330275229357798, | |
| "grad_norm": 0.36426106095314026, | |
| "learning_rate": 4.1456290285551596e-05, | |
| "loss": 0.1299, | |
| "num_input_tokens_seen": 12070184, | |
| "step": 293 | |
| }, | |
| { | |
| "epoch": 5.348623853211009, | |
| "grad_norm": 0.33881404995918274, | |
| "learning_rate": 4.140147572476268e-05, | |
| "loss": 0.1239, | |
| "num_input_tokens_seen": 12111512, | |
| "step": 294 | |
| }, | |
| { | |
| "epoch": 5.36697247706422, | |
| "grad_norm": 0.38019630312919617, | |
| "learning_rate": 4.1346522381426744e-05, | |
| "loss": 0.133, | |
| "num_input_tokens_seen": 12156792, | |
| "step": 295 | |
| }, | |
| { | |
| "epoch": 5.385321100917431, | |
| "grad_norm": 0.38179296255111694, | |
| "learning_rate": 4.129143072053638e-05, | |
| "loss": 0.1301, | |
| "num_input_tokens_seen": 12185168, | |
| "step": 296 | |
| }, | |
| { | |
| "epoch": 5.4036697247706424, | |
| "grad_norm": 0.35054150223731995, | |
| "learning_rate": 4.123620120825459e-05, | |
| "loss": 0.1298, | |
| "num_input_tokens_seen": 12222256, | |
| "step": 297 | |
| }, | |
| { | |
| "epoch": 5.422018348623853, | |
| "grad_norm": 0.3463946580886841, | |
| "learning_rate": 4.118083431191081e-05, | |
| "loss": 0.2088, | |
| "num_input_tokens_seen": 12257536, | |
| "step": 298 | |
| }, | |
| { | |
| "epoch": 5.440366972477064, | |
| "grad_norm": 0.42584434151649475, | |
| "learning_rate": 4.112533049999696e-05, | |
| "loss": 0.1062, | |
| "num_input_tokens_seen": 12290576, | |
| "step": 299 | |
| }, | |
| { | |
| "epoch": 5.458715596330276, | |
| "grad_norm": 0.4341515600681305, | |
| "learning_rate": 4.1069690242163484e-05, | |
| "loss": 0.1989, | |
| "num_input_tokens_seen": 12323416, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 5.477064220183486, | |
| "grad_norm": 0.4299938380718231, | |
| "learning_rate": 4.101391400921538e-05, | |
| "loss": 0.1243, | |
| "num_input_tokens_seen": 12370264, | |
| "step": 301 | |
| }, | |
| { | |
| "epoch": 5.495412844036697, | |
| "grad_norm": 0.4070415198802948, | |
| "learning_rate": 4.095800227310821e-05, | |
| "loss": 0.2281, | |
| "num_input_tokens_seen": 12410568, | |
| "step": 302 | |
| }, | |
| { | |
| "epoch": 5.513761467889909, | |
| "grad_norm": 0.4446506202220917, | |
| "learning_rate": 4.09019555069441e-05, | |
| "loss": 0.1462, | |
| "num_input_tokens_seen": 12442880, | |
| "step": 303 | |
| }, | |
| { | |
| "epoch": 5.532110091743119, | |
| "grad_norm": 0.36110538244247437, | |
| "learning_rate": 4.0845774184967754e-05, | |
| "loss": 0.1497, | |
| "num_input_tokens_seen": 12487016, | |
| "step": 304 | |
| }, | |
| { | |
| "epoch": 5.5504587155963305, | |
| "grad_norm": 0.42756903171539307, | |
| "learning_rate": 4.078945878256244e-05, | |
| "loss": 0.2082, | |
| "num_input_tokens_seen": 12525072, | |
| "step": 305 | |
| }, | |
| { | |
| "epoch": 5.568807339449541, | |
| "grad_norm": 0.360649049282074, | |
| "learning_rate": 4.073300977624594e-05, | |
| "loss": 0.1214, | |
| "num_input_tokens_seen": 12555792, | |
| "step": 306 | |
| }, | |
| { | |
| "epoch": 5.587155963302752, | |
| "grad_norm": 0.3768391013145447, | |
| "learning_rate": 4.067642764366654e-05, | |
| "loss": 0.1278, | |
| "num_input_tokens_seen": 12601616, | |
| "step": 307 | |
| }, | |
| { | |
| "epoch": 5.605504587155964, | |
| "grad_norm": 0.3882285952568054, | |
| "learning_rate": 4.0619712863599e-05, | |
| "loss": 0.1485, | |
| "num_input_tokens_seen": 12634360, | |
| "step": 308 | |
| }, | |
| { | |
| "epoch": 5.623853211009174, | |
| "grad_norm": 0.38816162943840027, | |
| "learning_rate": 4.0562865915940496e-05, | |
| "loss": 0.1221, | |
| "num_input_tokens_seen": 12674808, | |
| "step": 309 | |
| }, | |
| { | |
| "epoch": 5.6422018348623855, | |
| "grad_norm": 0.41803887486457825, | |
| "learning_rate": 4.05058872817065e-05, | |
| "loss": 0.1388, | |
| "num_input_tokens_seen": 12710864, | |
| "step": 310 | |
| }, | |
| { | |
| "epoch": 5.660550458715596, | |
| "grad_norm": 0.3802741467952728, | |
| "learning_rate": 4.044877744302683e-05, | |
| "loss": 0.1349, | |
| "num_input_tokens_seen": 12750920, | |
| "step": 311 | |
| }, | |
| { | |
| "epoch": 5.678899082568807, | |
| "grad_norm": 0.48202404379844666, | |
| "learning_rate": 4.039153688314145e-05, | |
| "loss": 0.1555, | |
| "num_input_tokens_seen": 12789488, | |
| "step": 312 | |
| }, | |
| { | |
| "epoch": 5.697247706422019, | |
| "grad_norm": 0.3168826103210449, | |
| "learning_rate": 4.0334166086396484e-05, | |
| "loss": 0.1063, | |
| "num_input_tokens_seen": 12831408, | |
| "step": 313 | |
| }, | |
| { | |
| "epoch": 5.715596330275229, | |
| "grad_norm": 0.43414828181266785, | |
| "learning_rate": 4.0276665538239996e-05, | |
| "loss": 0.127, | |
| "num_input_tokens_seen": 12872584, | |
| "step": 314 | |
| }, | |
| { | |
| "epoch": 5.73394495412844, | |
| "grad_norm": 0.47761547565460205, | |
| "learning_rate": 4.021903572521802e-05, | |
| "loss": 0.1428, | |
| "num_input_tokens_seen": 12910528, | |
| "step": 315 | |
| }, | |
| { | |
| "epoch": 5.752293577981652, | |
| "grad_norm": 0.3542017936706543, | |
| "learning_rate": 4.0161277134970345e-05, | |
| "loss": 0.1279, | |
| "num_input_tokens_seen": 12942800, | |
| "step": 316 | |
| }, | |
| { | |
| "epoch": 5.770642201834862, | |
| "grad_norm": 0.31866851449012756, | |
| "learning_rate": 4.010339025622641e-05, | |
| "loss": 0.1459, | |
| "num_input_tokens_seen": 12989136, | |
| "step": 317 | |
| }, | |
| { | |
| "epoch": 5.7889908256880735, | |
| "grad_norm": 0.44918256998062134, | |
| "learning_rate": 4.0045375578801214e-05, | |
| "loss": 0.1429, | |
| "num_input_tokens_seen": 13035472, | |
| "step": 318 | |
| }, | |
| { | |
| "epoch": 5.807339449541285, | |
| "grad_norm": 0.32426726818084717, | |
| "learning_rate": 3.99872335935911e-05, | |
| "loss": 0.1257, | |
| "num_input_tokens_seen": 13074952, | |
| "step": 319 | |
| }, | |
| { | |
| "epoch": 5.825688073394495, | |
| "grad_norm": 0.6903991103172302, | |
| "learning_rate": 3.9928964792569655e-05, | |
| "loss": 0.1807, | |
| "num_input_tokens_seen": 13124624, | |
| "step": 320 | |
| }, | |
| { | |
| "epoch": 5.844036697247707, | |
| "grad_norm": 0.3665274679660797, | |
| "learning_rate": 3.9870569668783536e-05, | |
| "loss": 0.1853, | |
| "num_input_tokens_seen": 13171464, | |
| "step": 321 | |
| }, | |
| { | |
| "epoch": 5.862385321100917, | |
| "grad_norm": 0.41457998752593994, | |
| "learning_rate": 3.981204871634827e-05, | |
| "loss": 0.214, | |
| "num_input_tokens_seen": 13225240, | |
| "step": 322 | |
| }, | |
| { | |
| "epoch": 5.8807339449541285, | |
| "grad_norm": 0.4047159254550934, | |
| "learning_rate": 3.9753402430444116e-05, | |
| "loss": 0.1907, | |
| "num_input_tokens_seen": 13275848, | |
| "step": 323 | |
| }, | |
| { | |
| "epoch": 5.89908256880734, | |
| "grad_norm": 0.4578211307525635, | |
| "learning_rate": 3.969463130731183e-05, | |
| "loss": 0.27, | |
| "num_input_tokens_seen": 13311096, | |
| "step": 324 | |
| }, | |
| { | |
| "epoch": 5.91743119266055, | |
| "grad_norm": 0.42030438780784607, | |
| "learning_rate": 3.963573584424852e-05, | |
| "loss": 0.1775, | |
| "num_input_tokens_seen": 13375064, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 5.935779816513762, | |
| "grad_norm": 0.3531726598739624, | |
| "learning_rate": 3.957671653960337e-05, | |
| "loss": 0.1206, | |
| "num_input_tokens_seen": 13427072, | |
| "step": 326 | |
| }, | |
| { | |
| "epoch": 5.954128440366972, | |
| "grad_norm": 0.41528916358947754, | |
| "learning_rate": 3.9517573892773494e-05, | |
| "loss": 0.1232, | |
| "num_input_tokens_seen": 13454088, | |
| "step": 327 | |
| }, | |
| { | |
| "epoch": 5.972477064220183, | |
| "grad_norm": 0.5357607007026672, | |
| "learning_rate": 3.945830840419966e-05, | |
| "loss": 0.1696, | |
| "num_input_tokens_seen": 13486504, | |
| "step": 328 | |
| }, | |
| { | |
| "epoch": 5.990825688073395, | |
| "grad_norm": 0.32135313749313354, | |
| "learning_rate": 3.9398920575362086e-05, | |
| "loss": 0.1055, | |
| "num_input_tokens_seen": 13523736, | |
| "step": 329 | |
| }, | |
| { | |
| "epoch": 6.0, | |
| "grad_norm": 0.5267924666404724, | |
| "learning_rate": 3.933941090877615e-05, | |
| "loss": 0.2192, | |
| "num_input_tokens_seen": 13554528, | |
| "step": 330 | |
| }, | |
| { | |
| "epoch": 6.018348623853211, | |
| "grad_norm": 0.38375183939933777, | |
| "learning_rate": 3.9279779907988215e-05, | |
| "loss": 0.1318, | |
| "num_input_tokens_seen": 13587424, | |
| "step": 331 | |
| }, | |
| { | |
| "epoch": 6.036697247706422, | |
| "grad_norm": 0.38274672627449036, | |
| "learning_rate": 3.9220028077571295e-05, | |
| "loss": 0.1136, | |
| "num_input_tokens_seen": 13624376, | |
| "step": 332 | |
| }, | |
| { | |
| "epoch": 6.055045871559633, | |
| "grad_norm": 0.3571849465370178, | |
| "learning_rate": 3.916015592312082e-05, | |
| "loss": 0.0891, | |
| "num_input_tokens_seen": 13652848, | |
| "step": 333 | |
| }, | |
| { | |
| "epoch": 6.073394495412844, | |
| "grad_norm": 0.3958500921726227, | |
| "learning_rate": 3.910016395125037e-05, | |
| "loss": 0.1477, | |
| "num_input_tokens_seen": 13719880, | |
| "step": 334 | |
| }, | |
| { | |
| "epoch": 6.091743119266055, | |
| "grad_norm": 0.3360799551010132, | |
| "learning_rate": 3.9040052669587325e-05, | |
| "loss": 0.0943, | |
| "num_input_tokens_seen": 13764864, | |
| "step": 335 | |
| }, | |
| { | |
| "epoch": 6.110091743119266, | |
| "grad_norm": 0.4332111179828644, | |
| "learning_rate": 3.897982258676867e-05, | |
| "loss": 0.1039, | |
| "num_input_tokens_seen": 13795048, | |
| "step": 336 | |
| }, | |
| { | |
| "epoch": 6.128440366972477, | |
| "grad_norm": 0.4539923071861267, | |
| "learning_rate": 3.891947421243661e-05, | |
| "loss": 0.1294, | |
| "num_input_tokens_seen": 13830064, | |
| "step": 337 | |
| }, | |
| { | |
| "epoch": 6.146788990825688, | |
| "grad_norm": 0.43116849660873413, | |
| "learning_rate": 3.885900805723429e-05, | |
| "loss": 0.1233, | |
| "num_input_tokens_seen": 13870392, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 6.165137614678899, | |
| "grad_norm": 0.40204253792762756, | |
| "learning_rate": 3.879842463280145e-05, | |
| "loss": 0.1252, | |
| "num_input_tokens_seen": 13913144, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 6.18348623853211, | |
| "grad_norm": 0.4300351142883301, | |
| "learning_rate": 3.873772445177015e-05, | |
| "loss": 0.1191, | |
| "num_input_tokens_seen": 13950384, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 6.201834862385321, | |
| "grad_norm": 0.3162730038166046, | |
| "learning_rate": 3.8676908027760364e-05, | |
| "loss": 0.1028, | |
| "num_input_tokens_seen": 13990464, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 6.220183486238533, | |
| "grad_norm": 0.4316507875919342, | |
| "learning_rate": 3.861597587537568e-05, | |
| "loss": 0.0996, | |
| "num_input_tokens_seen": 14029080, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 6.238532110091743, | |
| "grad_norm": 0.34346097707748413, | |
| "learning_rate": 3.855492851019893e-05, | |
| "loss": 0.172, | |
| "num_input_tokens_seen": 14069824, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 6.256880733944954, | |
| "grad_norm": 0.41037800908088684, | |
| "learning_rate": 3.8493766448787825e-05, | |
| "loss": 0.1244, | |
| "num_input_tokens_seen": 14107616, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 6.275229357798165, | |
| "grad_norm": 0.5019925236701965, | |
| "learning_rate": 3.84324902086706e-05, | |
| "loss": 0.1584, | |
| "num_input_tokens_seen": 14146224, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 6.293577981651376, | |
| "grad_norm": 0.40119829773902893, | |
| "learning_rate": 3.837110030834161e-05, | |
| "loss": 0.1154, | |
| "num_input_tokens_seen": 14183880, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 6.3119266055045875, | |
| "grad_norm": 0.37845325469970703, | |
| "learning_rate": 3.830959726725697e-05, | |
| "loss": 0.098, | |
| "num_input_tokens_seen": 14214632, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 6.330275229357798, | |
| "grad_norm": 0.338489294052124, | |
| "learning_rate": 3.824798160583012e-05, | |
| "loss": 0.2212, | |
| "num_input_tokens_seen": 14263168, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 6.348623853211009, | |
| "grad_norm": 0.4595409631729126, | |
| "learning_rate": 3.81862538454275e-05, | |
| "loss": 0.1423, | |
| "num_input_tokens_seen": 14303680, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 6.36697247706422, | |
| "grad_norm": 0.38874781131744385, | |
| "learning_rate": 3.8124414508364e-05, | |
| "loss": 0.1119, | |
| "num_input_tokens_seen": 14347672, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 6.385321100917431, | |
| "grad_norm": 0.3326081931591034, | |
| "learning_rate": 3.8062464117898724e-05, | |
| "loss": 0.1068, | |
| "num_input_tokens_seen": 14394848, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 6.4036697247706424, | |
| "grad_norm": 0.40956059098243713, | |
| "learning_rate": 3.8000403198230387e-05, | |
| "loss": 0.1051, | |
| "num_input_tokens_seen": 14438856, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 6.422018348623853, | |
| "grad_norm": 0.353677362203598, | |
| "learning_rate": 3.7938232274493e-05, | |
| "loss": 0.2208, | |
| "num_input_tokens_seen": 14498032, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 6.440366972477064, | |
| "grad_norm": 0.45626017451286316, | |
| "learning_rate": 3.787595187275136e-05, | |
| "loss": 0.1131, | |
| "num_input_tokens_seen": 14534256, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 6.458715596330276, | |
| "grad_norm": 0.4512569308280945, | |
| "learning_rate": 3.781356251999663e-05, | |
| "loss": 0.1207, | |
| "num_input_tokens_seen": 14575016, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 6.477064220183486, | |
| "grad_norm": 0.3183128237724304, | |
| "learning_rate": 3.775106474414188e-05, | |
| "loss": 0.0887, | |
| "num_input_tokens_seen": 14627272, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 6.495412844036697, | |
| "grad_norm": 0.36176785826683044, | |
| "learning_rate": 3.7688459074017606e-05, | |
| "loss": 0.1022, | |
| "num_input_tokens_seen": 14658696, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 6.513761467889909, | |
| "grad_norm": 0.362131804227829, | |
| "learning_rate": 3.762574603936725e-05, | |
| "loss": 0.1582, | |
| "num_input_tokens_seen": 14702208, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 6.532110091743119, | |
| "grad_norm": 0.27560174465179443, | |
| "learning_rate": 3.756292617084275e-05, | |
| "loss": 0.0855, | |
| "num_input_tokens_seen": 14757376, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 6.5504587155963305, | |
| "grad_norm": 0.4014551043510437, | |
| "learning_rate": 3.7500000000000003e-05, | |
| "loss": 0.1923, | |
| "num_input_tokens_seen": 14796592, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 6.568807339449541, | |
| "grad_norm": 0.38982701301574707, | |
| "learning_rate": 3.7436968059294414e-05, | |
| "loss": 0.1125, | |
| "num_input_tokens_seen": 14834656, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 6.587155963302752, | |
| "grad_norm": 0.3973703384399414, | |
| "learning_rate": 3.7373830882076354e-05, | |
| "loss": 0.1947, | |
| "num_input_tokens_seen": 14869528, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 6.605504587155964, | |
| "grad_norm": 0.43253588676452637, | |
| "learning_rate": 3.731058900258668e-05, | |
| "loss": 0.1173, | |
| "num_input_tokens_seen": 14909720, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 6.623853211009174, | |
| "grad_norm": 0.4936161935329437, | |
| "learning_rate": 3.7247242955952175e-05, | |
| "loss": 0.1523, | |
| "num_input_tokens_seen": 14952704, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 6.6422018348623855, | |
| "grad_norm": 0.43221941590309143, | |
| "learning_rate": 3.718379327818106e-05, | |
| "loss": 0.1313, | |
| "num_input_tokens_seen": 14989640, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 6.660550458715596, | |
| "grad_norm": 0.3976401686668396, | |
| "learning_rate": 3.712024050615843e-05, | |
| "loss": 0.0985, | |
| "num_input_tokens_seen": 15028200, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 6.678899082568807, | |
| "grad_norm": 0.3843483030796051, | |
| "learning_rate": 3.705658517764172e-05, | |
| "loss": 0.1905, | |
| "num_input_tokens_seen": 15075960, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 6.697247706422019, | |
| "grad_norm": 0.45637550950050354, | |
| "learning_rate": 3.699282783125616e-05, | |
| "loss": 0.1166, | |
| "num_input_tokens_seen": 15113096, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 6.715596330275229, | |
| "grad_norm": 0.3767227232456207, | |
| "learning_rate": 3.692896900649021e-05, | |
| "loss": 0.138, | |
| "num_input_tokens_seen": 15162432, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 6.73394495412844, | |
| "grad_norm": 0.3972814977169037, | |
| "learning_rate": 3.686500924369101e-05, | |
| "loss": 0.1612, | |
| "num_input_tokens_seen": 15203656, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 6.752293577981652, | |
| "grad_norm": 0.39401331543922424, | |
| "learning_rate": 3.680094908405978e-05, | |
| "loss": 0.1131, | |
| "num_input_tokens_seen": 15251056, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 6.770642201834862, | |
| "grad_norm": 0.5119125247001648, | |
| "learning_rate": 3.673678906964727e-05, | |
| "loss": 0.1264, | |
| "num_input_tokens_seen": 15283552, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 6.7889908256880735, | |
| "grad_norm": 0.38780996203422546, | |
| "learning_rate": 3.6672529743349146e-05, | |
| "loss": 0.1167, | |
| "num_input_tokens_seen": 15338944, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 6.807339449541285, | |
| "grad_norm": 0.45605945587158203, | |
| "learning_rate": 3.660817164890143e-05, | |
| "loss": 0.131, | |
| "num_input_tokens_seen": 15369936, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 6.825688073394495, | |
| "grad_norm": 0.4043484926223755, | |
| "learning_rate": 3.654371533087586e-05, | |
| "loss": 0.095, | |
| "num_input_tokens_seen": 15411576, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 6.844036697247707, | |
| "grad_norm": 0.38716065883636475, | |
| "learning_rate": 3.6479161334675296e-05, | |
| "loss": 0.0937, | |
| "num_input_tokens_seen": 15457520, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 6.862385321100917, | |
| "grad_norm": 0.38785362243652344, | |
| "learning_rate": 3.641451020652914e-05, | |
| "loss": 0.1017, | |
| "num_input_tokens_seen": 15485536, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 6.8807339449541285, | |
| "grad_norm": 0.3736408054828644, | |
| "learning_rate": 3.634976249348867e-05, | |
| "loss": 0.1075, | |
| "num_input_tokens_seen": 15526704, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 6.89908256880734, | |
| "grad_norm": 0.5013216733932495, | |
| "learning_rate": 3.6284918743422425e-05, | |
| "loss": 0.132, | |
| "num_input_tokens_seen": 15567344, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 6.91743119266055, | |
| "grad_norm": 0.5258253216743469, | |
| "learning_rate": 3.621997950501156e-05, | |
| "loss": 0.1435, | |
| "num_input_tokens_seen": 15608408, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 6.935779816513762, | |
| "grad_norm": 0.3486093282699585, | |
| "learning_rate": 3.615494532774522e-05, | |
| "loss": 0.0958, | |
| "num_input_tokens_seen": 15667496, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 6.954128440366972, | |
| "grad_norm": 0.39199239015579224, | |
| "learning_rate": 3.6089816761915906e-05, | |
| "loss": 0.1465, | |
| "num_input_tokens_seen": 15715496, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 6.972477064220183, | |
| "grad_norm": 0.3722994923591614, | |
| "learning_rate": 3.602459435861475e-05, | |
| "loss": 0.1111, | |
| "num_input_tokens_seen": 15755688, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 6.990825688073395, | |
| "grad_norm": 0.41041186451911926, | |
| "learning_rate": 3.5959278669726935e-05, | |
| "loss": 0.1084, | |
| "num_input_tokens_seen": 15798304, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 7.0, | |
| "grad_norm": 0.6807722449302673, | |
| "learning_rate": 3.589387024792699e-05, | |
| "loss": 0.1234, | |
| "num_input_tokens_seen": 15813616, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 7.018348623853211, | |
| "grad_norm": 0.3617340922355652, | |
| "learning_rate": 3.582836964667408e-05, | |
| "loss": 0.1167, | |
| "num_input_tokens_seen": 15855256, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 7.036697247706422, | |
| "grad_norm": 0.36471137404441833, | |
| "learning_rate": 3.576277742020738e-05, | |
| "loss": 0.0829, | |
| "num_input_tokens_seen": 15887512, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 7.055045871559633, | |
| "grad_norm": 0.8828345537185669, | |
| "learning_rate": 3.569709412354136e-05, | |
| "loss": 0.1213, | |
| "num_input_tokens_seen": 15934400, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 7.073394495412844, | |
| "grad_norm": 0.4129248261451721, | |
| "learning_rate": 3.563132031246108e-05, | |
| "loss": 0.0914, | |
| "num_input_tokens_seen": 15969904, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 7.091743119266055, | |
| "grad_norm": 0.38659873604774475, | |
| "learning_rate": 3.556545654351749e-05, | |
| "loss": 0.09, | |
| "num_input_tokens_seen": 16010760, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 7.110091743119266, | |
| "grad_norm": 0.46532171964645386, | |
| "learning_rate": 3.549950337402274e-05, | |
| "loss": 0.0832, | |
| "num_input_tokens_seen": 16059680, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 7.128440366972477, | |
| "grad_norm": 0.3775540888309479, | |
| "learning_rate": 3.543346136204545e-05, | |
| "loss": 0.079, | |
| "num_input_tokens_seen": 16095992, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 7.146788990825688, | |
| "grad_norm": 0.5437320470809937, | |
| "learning_rate": 3.536733106640598e-05, | |
| "loss": 0.1, | |
| "num_input_tokens_seen": 16135568, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 7.165137614678899, | |
| "grad_norm": 0.3908531963825226, | |
| "learning_rate": 3.5301113046671714e-05, | |
| "loss": 0.0962, | |
| "num_input_tokens_seen": 16165016, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 7.18348623853211, | |
| "grad_norm": 0.40789636969566345, | |
| "learning_rate": 3.523480786315231e-05, | |
| "loss": 0.0923, | |
| "num_input_tokens_seen": 16208344, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 7.201834862385321, | |
| "grad_norm": 0.4682011902332306, | |
| "learning_rate": 3.516841607689501e-05, | |
| "loss": 0.101, | |
| "num_input_tokens_seen": 16244312, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 7.220183486238533, | |
| "grad_norm": 0.5206179618835449, | |
| "learning_rate": 3.5101938249679794e-05, | |
| "loss": 0.1124, | |
| "num_input_tokens_seen": 16283848, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 7.238532110091743, | |
| "grad_norm": 0.5803564786911011, | |
| "learning_rate": 3.503537494401473e-05, | |
| "loss": 0.1054, | |
| "num_input_tokens_seen": 16336288, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 7.256880733944954, | |
| "grad_norm": 0.44892576336860657, | |
| "learning_rate": 3.496872672313116e-05, | |
| "loss": 0.1781, | |
| "num_input_tokens_seen": 16375448, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 7.275229357798165, | |
| "grad_norm": 0.42554035782814026, | |
| "learning_rate": 3.490199415097892e-05, | |
| "loss": 0.0935, | |
| "num_input_tokens_seen": 16412608, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 7.293577981651376, | |
| "grad_norm": 0.3525730073451996, | |
| "learning_rate": 3.483517779222163e-05, | |
| "loss": 0.0944, | |
| "num_input_tokens_seen": 16455904, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 7.3119266055045875, | |
| "grad_norm": 0.3382134735584259, | |
| "learning_rate": 3.476827821223184e-05, | |
| "loss": 0.0878, | |
| "num_input_tokens_seen": 16496824, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 7.330275229357798, | |
| "grad_norm": 0.38020089268684387, | |
| "learning_rate": 3.4701295977086324e-05, | |
| "loss": 0.0911, | |
| "num_input_tokens_seen": 16529344, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 7.348623853211009, | |
| "grad_norm": 0.44909772276878357, | |
| "learning_rate": 3.463423165356121e-05, | |
| "loss": 0.1047, | |
| "num_input_tokens_seen": 16563664, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 7.36697247706422, | |
| "grad_norm": 0.3612752854824066, | |
| "learning_rate": 3.456708580912725e-05, | |
| "loss": 0.1228, | |
| "num_input_tokens_seen": 16607272, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 7.385321100917431, | |
| "grad_norm": 0.5135468244552612, | |
| "learning_rate": 3.449985901194498e-05, | |
| "loss": 0.1801, | |
| "num_input_tokens_seen": 16646040, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 7.4036697247706424, | |
| "grad_norm": 0.33949241042137146, | |
| "learning_rate": 3.443255183085993e-05, | |
| "loss": 0.0883, | |
| "num_input_tokens_seen": 16693544, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 7.422018348623853, | |
| "grad_norm": 0.3560762405395508, | |
| "learning_rate": 3.436516483539781e-05, | |
| "loss": 0.083, | |
| "num_input_tokens_seen": 16742128, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 7.440366972477064, | |
| "grad_norm": 0.32824087142944336, | |
| "learning_rate": 3.4297698595759664e-05, | |
| "loss": 0.1247, | |
| "num_input_tokens_seen": 16792000, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 7.458715596330276, | |
| "grad_norm": 0.44831863045692444, | |
| "learning_rate": 3.423015368281711e-05, | |
| "loss": 0.0948, | |
| "num_input_tokens_seen": 16824312, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 7.477064220183486, | |
| "grad_norm": 0.4942893981933594, | |
| "learning_rate": 3.4162530668107434e-05, | |
| "loss": 0.0993, | |
| "num_input_tokens_seen": 16856344, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 7.495412844036697, | |
| "grad_norm": 0.3783268332481384, | |
| "learning_rate": 3.409483012382879e-05, | |
| "loss": 0.1273, | |
| "num_input_tokens_seen": 16898256, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 7.513761467889909, | |
| "grad_norm": 0.5532792210578918, | |
| "learning_rate": 3.402705262283537e-05, | |
| "loss": 0.1171, | |
| "num_input_tokens_seen": 16939304, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 7.532110091743119, | |
| "grad_norm": 0.3667624592781067, | |
| "learning_rate": 3.39591987386325e-05, | |
| "loss": 0.1753, | |
| "num_input_tokens_seen": 16978632, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 7.5504587155963305, | |
| "grad_norm": 0.42965346574783325, | |
| "learning_rate": 3.389126904537192e-05, | |
| "loss": 0.1567, | |
| "num_input_tokens_seen": 17016552, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 7.568807339449541, | |
| "grad_norm": 0.41699931025505066, | |
| "learning_rate": 3.382326411784672e-05, | |
| "loss": 0.0941, | |
| "num_input_tokens_seen": 17067688, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 7.587155963302752, | |
| "grad_norm": 0.3647185266017914, | |
| "learning_rate": 3.375518453148669e-05, | |
| "loss": 0.0961, | |
| "num_input_tokens_seen": 17126008, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 7.605504587155964, | |
| "grad_norm": 0.5344395041465759, | |
| "learning_rate": 3.3687030862353286e-05, | |
| "loss": 0.1974, | |
| "num_input_tokens_seen": 17165800, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 7.623853211009174, | |
| "grad_norm": 0.38556504249572754, | |
| "learning_rate": 3.361880368713486e-05, | |
| "loss": 0.094, | |
| "num_input_tokens_seen": 17199832, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 7.6422018348623855, | |
| "grad_norm": 0.3512709438800812, | |
| "learning_rate": 3.355050358314172e-05, | |
| "loss": 0.0717, | |
| "num_input_tokens_seen": 17237952, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 7.660550458715596, | |
| "grad_norm": 0.35488539934158325, | |
| "learning_rate": 3.348213112830128e-05, | |
| "loss": 0.0943, | |
| "num_input_tokens_seen": 17278384, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 7.678899082568807, | |
| "grad_norm": 0.4220011234283447, | |
| "learning_rate": 3.3413686901153165e-05, | |
| "loss": 0.0814, | |
| "num_input_tokens_seen": 17309544, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 7.697247706422019, | |
| "grad_norm": 0.42773446440696716, | |
| "learning_rate": 3.3345171480844275e-05, | |
| "loss": 0.1192, | |
| "num_input_tokens_seen": 17344896, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 7.715596330275229, | |
| "grad_norm": 0.30265432596206665, | |
| "learning_rate": 3.327658544712395e-05, | |
| "loss": 0.104, | |
| "num_input_tokens_seen": 17413176, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 7.73394495412844, | |
| "grad_norm": 0.38354116678237915, | |
| "learning_rate": 3.3207929380339034e-05, | |
| "loss": 0.0965, | |
| "num_input_tokens_seen": 17450416, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 7.752293577981652, | |
| "grad_norm": 0.33634060621261597, | |
| "learning_rate": 3.313920386142892e-05, | |
| "loss": 0.0775, | |
| "num_input_tokens_seen": 17491256, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 7.770642201834862, | |
| "grad_norm": 0.4931448698043823, | |
| "learning_rate": 3.3070409471920726e-05, | |
| "loss": 0.1651, | |
| "num_input_tokens_seen": 17521600, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 7.7889908256880735, | |
| "grad_norm": 0.4020994007587433, | |
| "learning_rate": 3.3001546793924285e-05, | |
| "loss": 0.1071, | |
| "num_input_tokens_seen": 17564288, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 7.807339449541285, | |
| "grad_norm": 0.47053155303001404, | |
| "learning_rate": 3.293261641012731e-05, | |
| "loss": 0.0987, | |
| "num_input_tokens_seen": 17604304, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 7.825688073394495, | |
| "grad_norm": 0.6638731956481934, | |
| "learning_rate": 3.2863618903790346e-05, | |
| "loss": 0.1743, | |
| "num_input_tokens_seen": 17641384, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 7.844036697247707, | |
| "grad_norm": 0.4213615357875824, | |
| "learning_rate": 3.279455485874195e-05, | |
| "loss": 0.0854, | |
| "num_input_tokens_seen": 17680560, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 7.862385321100917, | |
| "grad_norm": 0.47264209389686584, | |
| "learning_rate": 3.272542485937369e-05, | |
| "loss": 0.0949, | |
| "num_input_tokens_seen": 17715312, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 7.8807339449541285, | |
| "grad_norm": 0.3375228941440582, | |
| "learning_rate": 3.26562294906352e-05, | |
| "loss": 0.1377, | |
| "num_input_tokens_seen": 17766056, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 7.89908256880734, | |
| "grad_norm": 0.4427188038825989, | |
| "learning_rate": 3.2586969338029274e-05, | |
| "loss": 0.1035, | |
| "num_input_tokens_seen": 17809040, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 7.91743119266055, | |
| "grad_norm": 0.3992091119289398, | |
| "learning_rate": 3.251764498760683e-05, | |
| "loss": 0.0844, | |
| "num_input_tokens_seen": 17856896, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 7.935779816513762, | |
| "grad_norm": 0.4064525365829468, | |
| "learning_rate": 3.244825702596205e-05, | |
| "loss": 0.1295, | |
| "num_input_tokens_seen": 17917200, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 7.954128440366972, | |
| "grad_norm": 0.33894112706184387, | |
| "learning_rate": 3.237880604022735e-05, | |
| "loss": 0.0662, | |
| "num_input_tokens_seen": 17967840, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 7.972477064220183, | |
| "grad_norm": 0.3915395140647888, | |
| "learning_rate": 3.230929261806842e-05, | |
| "loss": 0.087, | |
| "num_input_tokens_seen": 18001712, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 7.990825688073395, | |
| "grad_norm": 0.4242343008518219, | |
| "learning_rate": 3.223971734767928e-05, | |
| "loss": 0.0808, | |
| "num_input_tokens_seen": 18044712, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 8.0, | |
| "grad_norm": 0.5116259455680847, | |
| "learning_rate": 3.217008081777726e-05, | |
| "loss": 0.09, | |
| "num_input_tokens_seen": 18072704, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 8.01834862385321, | |
| "grad_norm": 0.3611031174659729, | |
| "learning_rate": 3.210038361759807e-05, | |
| "loss": 0.0775, | |
| "num_input_tokens_seen": 18103680, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 8.036697247706423, | |
| "grad_norm": 0.2993970513343811, | |
| "learning_rate": 3.203062633689077e-05, | |
| "loss": 0.0667, | |
| "num_input_tokens_seen": 18133248, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 8.055045871559633, | |
| "grad_norm": 0.3495657444000244, | |
| "learning_rate": 3.1960809565912794e-05, | |
| "loss": 0.0616, | |
| "num_input_tokens_seen": 18185672, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 8.073394495412844, | |
| "grad_norm": 0.41082751750946045, | |
| "learning_rate": 3.1890933895424976e-05, | |
| "loss": 0.0909, | |
| "num_input_tokens_seen": 18228240, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 8.091743119266056, | |
| "grad_norm": 0.3840062618255615, | |
| "learning_rate": 3.182099991668653e-05, | |
| "loss": 0.0688, | |
| "num_input_tokens_seen": 18271416, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 8.110091743119266, | |
| "grad_norm": 0.385258287191391, | |
| "learning_rate": 3.1751008221450025e-05, | |
| "loss": 0.1181, | |
| "num_input_tokens_seen": 18314208, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 8.128440366972477, | |
| "grad_norm": 0.3149656057357788, | |
| "learning_rate": 3.168095940195642e-05, | |
| "loss": 0.1408, | |
| "num_input_tokens_seen": 18372552, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 8.146788990825687, | |
| "grad_norm": 0.3338538408279419, | |
| "learning_rate": 3.161085405093006e-05, | |
| "loss": 0.0613, | |
| "num_input_tokens_seen": 18418048, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 8.1651376146789, | |
| "grad_norm": 0.35740089416503906, | |
| "learning_rate": 3.154069276157359e-05, | |
| "loss": 0.071, | |
| "num_input_tokens_seen": 18459656, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 8.18348623853211, | |
| "grad_norm": 0.44650277495384216, | |
| "learning_rate": 3.147047612756302e-05, | |
| "loss": 0.0781, | |
| "num_input_tokens_seen": 18489056, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 8.20183486238532, | |
| "grad_norm": 0.38402098417282104, | |
| "learning_rate": 3.140020474304265e-05, | |
| "loss": 0.1724, | |
| "num_input_tokens_seen": 18536296, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 8.220183486238533, | |
| "grad_norm": 0.3836778700351715, | |
| "learning_rate": 3.132987920262005e-05, | |
| "loss": 0.072, | |
| "num_input_tokens_seen": 18569984, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 8.238532110091743, | |
| "grad_norm": 0.47142115235328674, | |
| "learning_rate": 3.125950010136104e-05, | |
| "loss": 0.0831, | |
| "num_input_tokens_seen": 18600496, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 8.256880733944953, | |
| "grad_norm": 0.3867137134075165, | |
| "learning_rate": 3.118906803478465e-05, | |
| "loss": 0.0763, | |
| "num_input_tokens_seen": 18644784, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 8.275229357798166, | |
| "grad_norm": 0.344103068113327, | |
| "learning_rate": 3.11185835988581e-05, | |
| "loss": 0.0693, | |
| "num_input_tokens_seen": 18682928, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 8.293577981651376, | |
| "grad_norm": 0.3809478282928467, | |
| "learning_rate": 3.104804738999169e-05, | |
| "loss": 0.069, | |
| "num_input_tokens_seen": 18735736, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 8.311926605504587, | |
| "grad_norm": 0.433005690574646, | |
| "learning_rate": 3.097746000503386e-05, | |
| "loss": 0.0716, | |
| "num_input_tokens_seen": 18779416, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 8.330275229357799, | |
| "grad_norm": 0.4366289973258972, | |
| "learning_rate": 3.090682204126604e-05, | |
| "loss": 0.081, | |
| "num_input_tokens_seen": 18813088, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 8.34862385321101, | |
| "grad_norm": 0.44695258140563965, | |
| "learning_rate": 3.083613409639764e-05, | |
| "loss": 0.088, | |
| "num_input_tokens_seen": 18838800, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 8.36697247706422, | |
| "grad_norm": 0.3648586571216583, | |
| "learning_rate": 3.076539676856101e-05, | |
| "loss": 0.0616, | |
| "num_input_tokens_seen": 18882128, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 8.385321100917432, | |
| "grad_norm": 0.35021987557411194, | |
| "learning_rate": 3.069461065630634e-05, | |
| "loss": 0.0685, | |
| "num_input_tokens_seen": 18927120, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 8.403669724770642, | |
| "grad_norm": 0.4578837752342224, | |
| "learning_rate": 3.062377635859663e-05, | |
| "loss": 0.1019, | |
| "num_input_tokens_seen": 18968240, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 8.422018348623853, | |
| "grad_norm": 0.39133012294769287, | |
| "learning_rate": 3.0552894474802584e-05, | |
| "loss": 0.1115, | |
| "num_input_tokens_seen": 19013248, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 8.440366972477065, | |
| "grad_norm": 0.3527860939502716, | |
| "learning_rate": 3.048196560469758e-05, | |
| "loss": 0.0648, | |
| "num_input_tokens_seen": 19058904, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 8.458715596330276, | |
| "grad_norm": 0.3016138970851898, | |
| "learning_rate": 3.0410990348452573e-05, | |
| "loss": 0.0678, | |
| "num_input_tokens_seen": 19103128, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 8.477064220183486, | |
| "grad_norm": 0.4393775761127472, | |
| "learning_rate": 3.0339969306631005e-05, | |
| "loss": 0.1322, | |
| "num_input_tokens_seen": 19144648, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 8.495412844036696, | |
| "grad_norm": 0.4702647626399994, | |
| "learning_rate": 3.0268903080183743e-05, | |
| "loss": 0.0935, | |
| "num_input_tokens_seen": 19182568, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 8.513761467889909, | |
| "grad_norm": 0.44502392411231995, | |
| "learning_rate": 3.0197792270443982e-05, | |
| "loss": 0.1373, | |
| "num_input_tokens_seen": 19218336, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 8.53211009174312, | |
| "grad_norm": 0.4069594144821167, | |
| "learning_rate": 3.0126637479122193e-05, | |
| "loss": 0.1189, | |
| "num_input_tokens_seen": 19258632, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 8.55045871559633, | |
| "grad_norm": 0.3544933795928955, | |
| "learning_rate": 3.0055439308300952e-05, | |
| "loss": 0.0683, | |
| "num_input_tokens_seen": 19296928, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 8.568807339449542, | |
| "grad_norm": 0.4445359408855438, | |
| "learning_rate": 2.9984198360429932e-05, | |
| "loss": 0.0766, | |
| "num_input_tokens_seen": 19338416, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 8.587155963302752, | |
| "grad_norm": 0.3744385838508606, | |
| "learning_rate": 2.9912915238320754e-05, | |
| "loss": 0.1101, | |
| "num_input_tokens_seen": 19392040, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 8.605504587155963, | |
| "grad_norm": 0.4125937521457672, | |
| "learning_rate": 2.9841590545141906e-05, | |
| "loss": 0.0811, | |
| "num_input_tokens_seen": 19430360, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 8.623853211009175, | |
| "grad_norm": 0.33389994502067566, | |
| "learning_rate": 2.9770224884413623e-05, | |
| "loss": 0.0665, | |
| "num_input_tokens_seen": 19487640, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 8.642201834862385, | |
| "grad_norm": 0.393798291683197, | |
| "learning_rate": 2.9698818860002797e-05, | |
| "loss": 0.0856, | |
| "num_input_tokens_seen": 19520352, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 8.660550458715596, | |
| "grad_norm": 0.379146933555603, | |
| "learning_rate": 2.9627373076117863e-05, | |
| "loss": 0.0709, | |
| "num_input_tokens_seen": 19570472, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 8.678899082568808, | |
| "grad_norm": 0.4116472005844116, | |
| "learning_rate": 2.9555888137303695e-05, | |
| "loss": 0.0774, | |
| "num_input_tokens_seen": 19600648, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 8.697247706422019, | |
| "grad_norm": 0.3610764741897583, | |
| "learning_rate": 2.9484364648436437e-05, | |
| "loss": 0.0758, | |
| "num_input_tokens_seen": 19649200, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 8.715596330275229, | |
| "grad_norm": 0.3889961540699005, | |
| "learning_rate": 2.941280321471848e-05, | |
| "loss": 0.0833, | |
| "num_input_tokens_seen": 19680328, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 8.73394495412844, | |
| "grad_norm": 0.3711479902267456, | |
| "learning_rate": 2.9341204441673266e-05, | |
| "loss": 0.0763, | |
| "num_input_tokens_seen": 19728296, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 8.752293577981652, | |
| "grad_norm": 0.5468257665634155, | |
| "learning_rate": 2.9269568935140174e-05, | |
| "loss": 0.1141, | |
| "num_input_tokens_seen": 19764920, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 8.770642201834862, | |
| "grad_norm": 0.4884963631629944, | |
| "learning_rate": 2.9197897301269435e-05, | |
| "loss": 0.137, | |
| "num_input_tokens_seen": 19806568, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 8.788990825688073, | |
| "grad_norm": 0.28893986344337463, | |
| "learning_rate": 2.9126190146516942e-05, | |
| "loss": 0.059, | |
| "num_input_tokens_seen": 19851352, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 8.807339449541285, | |
| "grad_norm": 0.3916391432285309, | |
| "learning_rate": 2.905444807763919e-05, | |
| "loss": 0.0993, | |
| "num_input_tokens_seen": 19897640, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 8.825688073394495, | |
| "grad_norm": 0.3886198103427887, | |
| "learning_rate": 2.898267170168807e-05, | |
| "loss": 0.0721, | |
| "num_input_tokens_seen": 19936472, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 8.844036697247706, | |
| "grad_norm": 0.4273735582828522, | |
| "learning_rate": 2.8910861626005776e-05, | |
| "loss": 0.0875, | |
| "num_input_tokens_seen": 19967128, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 8.862385321100918, | |
| "grad_norm": 0.3653796315193176, | |
| "learning_rate": 2.8839018458219653e-05, | |
| "loss": 0.0784, | |
| "num_input_tokens_seen": 20010472, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 8.880733944954128, | |
| "grad_norm": 0.4664580225944519, | |
| "learning_rate": 2.876714280623708e-05, | |
| "loss": 0.0949, | |
| "num_input_tokens_seen": 20056984, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 8.899082568807339, | |
| "grad_norm": 0.37970954179763794, | |
| "learning_rate": 2.8695235278240272e-05, | |
| "loss": 0.0718, | |
| "num_input_tokens_seen": 20105712, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 8.917431192660551, | |
| "grad_norm": 0.5023646354675293, | |
| "learning_rate": 2.8623296482681166e-05, | |
| "loss": 0.0904, | |
| "num_input_tokens_seen": 20147136, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 8.935779816513762, | |
| "grad_norm": 0.4095667004585266, | |
| "learning_rate": 2.8551327028276314e-05, | |
| "loss": 0.0652, | |
| "num_input_tokens_seen": 20190008, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 8.954128440366972, | |
| "grad_norm": 0.37509602308273315, | |
| "learning_rate": 2.8479327524001636e-05, | |
| "loss": 0.1347, | |
| "num_input_tokens_seen": 20235552, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 8.972477064220184, | |
| "grad_norm": 0.3403775095939636, | |
| "learning_rate": 2.8407298579087365e-05, | |
| "loss": 0.0642, | |
| "num_input_tokens_seen": 20278504, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 8.990825688073395, | |
| "grad_norm": 0.3543892204761505, | |
| "learning_rate": 2.833524080301282e-05, | |
| "loss": 0.0689, | |
| "num_input_tokens_seen": 20314024, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 9.0, | |
| "grad_norm": 0.5081737637519836, | |
| "learning_rate": 2.8263154805501297e-05, | |
| "loss": 0.0735, | |
| "num_input_tokens_seen": 20331792, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 9.01834862385321, | |
| "grad_norm": 0.39798879623413086, | |
| "learning_rate": 2.8191041196514873e-05, | |
| "loss": 0.0695, | |
| "num_input_tokens_seen": 20359504, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 9.036697247706423, | |
| "grad_norm": 0.4060456454753876, | |
| "learning_rate": 2.8118900586249263e-05, | |
| "loss": 0.1075, | |
| "num_input_tokens_seen": 20393544, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 9.055045871559633, | |
| "grad_norm": 0.3525485694408417, | |
| "learning_rate": 2.8046733585128687e-05, | |
| "loss": 0.0658, | |
| "num_input_tokens_seen": 20419560, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 9.073394495412844, | |
| "grad_norm": 0.3256167471408844, | |
| "learning_rate": 2.7974540803800637e-05, | |
| "loss": 0.0581, | |
| "num_input_tokens_seen": 20462936, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 9.091743119266056, | |
| "grad_norm": 0.5703425407409668, | |
| "learning_rate": 2.7902322853130757e-05, | |
| "loss": 0.0845, | |
| "num_input_tokens_seen": 20494032, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 9.110091743119266, | |
| "grad_norm": 0.415742963552475, | |
| "learning_rate": 2.7830080344197674e-05, | |
| "loss": 0.0543, | |
| "num_input_tokens_seen": 20553072, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 9.128440366972477, | |
| "grad_norm": 0.48353156447410583, | |
| "learning_rate": 2.7757813888287798e-05, | |
| "loss": 0.2288, | |
| "num_input_tokens_seen": 20605784, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 9.146788990825687, | |
| "grad_norm": 0.38978058099746704, | |
| "learning_rate": 2.7685524096890185e-05, | |
| "loss": 0.0636, | |
| "num_input_tokens_seen": 20651856, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 9.1651376146789, | |
| "grad_norm": 0.3198350965976715, | |
| "learning_rate": 2.761321158169134e-05, | |
| "loss": 0.0524, | |
| "num_input_tokens_seen": 20696944, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 9.18348623853211, | |
| "grad_norm": 0.39467597007751465, | |
| "learning_rate": 2.7540876954570048e-05, | |
| "loss": 0.0652, | |
| "num_input_tokens_seen": 20728072, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 9.20183486238532, | |
| "grad_norm": 0.29402676224708557, | |
| "learning_rate": 2.7468520827592197e-05, | |
| "loss": 0.05, | |
| "num_input_tokens_seen": 20765264, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 9.220183486238533, | |
| "grad_norm": 0.432449609041214, | |
| "learning_rate": 2.7396143813005602e-05, | |
| "loss": 0.0763, | |
| "num_input_tokens_seen": 20803144, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 9.238532110091743, | |
| "grad_norm": 0.3020356595516205, | |
| "learning_rate": 2.732374652323481e-05, | |
| "loss": 0.0495, | |
| "num_input_tokens_seen": 20857912, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 9.256880733944953, | |
| "grad_norm": 0.33522287011146545, | |
| "learning_rate": 2.7251329570875934e-05, | |
| "loss": 0.0518, | |
| "num_input_tokens_seen": 20901536, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 9.275229357798166, | |
| "grad_norm": 0.3918211758136749, | |
| "learning_rate": 2.717889356869146e-05, | |
| "loss": 0.0605, | |
| "num_input_tokens_seen": 20944592, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 9.293577981651376, | |
| "grad_norm": 0.32528579235076904, | |
| "learning_rate": 2.7106439129605072e-05, | |
| "loss": 0.056, | |
| "num_input_tokens_seen": 20981640, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 9.311926605504587, | |
| "grad_norm": 0.45761916041374207, | |
| "learning_rate": 2.7033966866696457e-05, | |
| "loss": 0.0661, | |
| "num_input_tokens_seen": 21011280, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 9.330275229357799, | |
| "grad_norm": 0.39354491233825684, | |
| "learning_rate": 2.6961477393196126e-05, | |
| "loss": 0.0735, | |
| "num_input_tokens_seen": 21056896, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 9.34862385321101, | |
| "grad_norm": 0.37278589606285095, | |
| "learning_rate": 2.6888971322480205e-05, | |
| "loss": 0.0541, | |
| "num_input_tokens_seen": 21096440, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 9.36697247706422, | |
| "grad_norm": 0.547535240650177, | |
| "learning_rate": 2.681644926806527e-05, | |
| "loss": 0.1135, | |
| "num_input_tokens_seen": 21136776, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 9.385321100917432, | |
| "grad_norm": 0.3297383785247803, | |
| "learning_rate": 2.674391184360313e-05, | |
| "loss": 0.0572, | |
| "num_input_tokens_seen": 21177016, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 9.403669724770642, | |
| "grad_norm": 0.34031882882118225, | |
| "learning_rate": 2.6671359662875684e-05, | |
| "loss": 0.0465, | |
| "num_input_tokens_seen": 21216216, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 9.422018348623853, | |
| "grad_norm": 0.36236777901649475, | |
| "learning_rate": 2.659879333978964e-05, | |
| "loss": 0.0596, | |
| "num_input_tokens_seen": 21278344, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 9.440366972477065, | |
| "grad_norm": 0.3755556344985962, | |
| "learning_rate": 2.6526213488371427e-05, | |
| "loss": 0.1131, | |
| "num_input_tokens_seen": 21330704, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 9.458715596330276, | |
| "grad_norm": 0.38200709223747253, | |
| "learning_rate": 2.6453620722761896e-05, | |
| "loss": 0.0587, | |
| "num_input_tokens_seen": 21377568, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 9.477064220183486, | |
| "grad_norm": 0.3465506136417389, | |
| "learning_rate": 2.6381015657211215e-05, | |
| "loss": 0.0529, | |
| "num_input_tokens_seen": 21421808, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 9.495412844036696, | |
| "grad_norm": 0.3480084538459778, | |
| "learning_rate": 2.63083989060736e-05, | |
| "loss": 0.0592, | |
| "num_input_tokens_seen": 21465904, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 9.513761467889909, | |
| "grad_norm": 0.3983739912509918, | |
| "learning_rate": 2.623577108380215e-05, | |
| "loss": 0.0952, | |
| "num_input_tokens_seen": 21507888, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 9.53211009174312, | |
| "grad_norm": 0.43550482392311096, | |
| "learning_rate": 2.6163132804943675e-05, | |
| "loss": 0.0649, | |
| "num_input_tokens_seen": 21541544, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 9.55045871559633, | |
| "grad_norm": 0.43490955233573914, | |
| "learning_rate": 2.6090484684133404e-05, | |
| "loss": 0.0557, | |
| "num_input_tokens_seen": 21576072, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 9.568807339449542, | |
| "grad_norm": 0.29154589772224426, | |
| "learning_rate": 2.60178273360899e-05, | |
| "loss": 0.1018, | |
| "num_input_tokens_seen": 21623464, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 9.587155963302752, | |
| "grad_norm": 0.41565775871276855, | |
| "learning_rate": 2.5945161375609778e-05, | |
| "loss": 0.0629, | |
| "num_input_tokens_seen": 21656096, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 9.605504587155963, | |
| "grad_norm": 0.3849506080150604, | |
| "learning_rate": 2.587248741756253e-05, | |
| "loss": 0.0725, | |
| "num_input_tokens_seen": 21685872, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 9.623853211009175, | |
| "grad_norm": 0.425530344247818, | |
| "learning_rate": 2.5799806076885326e-05, | |
| "loss": 0.0605, | |
| "num_input_tokens_seen": 21740704, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 9.642201834862385, | |
| "grad_norm": 0.47837600111961365, | |
| "learning_rate": 2.5727117968577784e-05, | |
| "loss": 0.0821, | |
| "num_input_tokens_seen": 21775920, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 9.660550458715596, | |
| "grad_norm": 0.404117226600647, | |
| "learning_rate": 2.5654423707696833e-05, | |
| "loss": 0.0622, | |
| "num_input_tokens_seen": 21822360, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 9.678899082568808, | |
| "grad_norm": 0.3648509979248047, | |
| "learning_rate": 2.5581723909351406e-05, | |
| "loss": 0.066, | |
| "num_input_tokens_seen": 21872168, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 9.697247706422019, | |
| "grad_norm": 0.3271898329257965, | |
| "learning_rate": 2.5509019188697343e-05, | |
| "loss": 0.0555, | |
| "num_input_tokens_seen": 21914080, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 9.715596330275229, | |
| "grad_norm": 0.35200148820877075, | |
| "learning_rate": 2.5436310160932092e-05, | |
| "loss": 0.0605, | |
| "num_input_tokens_seen": 21959648, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 9.73394495412844, | |
| "grad_norm": 0.35883811116218567, | |
| "learning_rate": 2.536359744128957e-05, | |
| "loss": 0.0622, | |
| "num_input_tokens_seen": 22000280, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 9.752293577981652, | |
| "grad_norm": 0.3236100673675537, | |
| "learning_rate": 2.5290881645034932e-05, | |
| "loss": 0.1505, | |
| "num_input_tokens_seen": 22058744, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 9.770642201834862, | |
| "grad_norm": 0.39779284596443176, | |
| "learning_rate": 2.521816338745935e-05, | |
| "loss": 0.0939, | |
| "num_input_tokens_seen": 22102616, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 9.788990825688073, | |
| "grad_norm": 0.3473663926124573, | |
| "learning_rate": 2.5145443283874848e-05, | |
| "loss": 0.0507, | |
| "num_input_tokens_seen": 22141096, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 9.807339449541285, | |
| "grad_norm": 0.3695826530456543, | |
| "learning_rate": 2.5072721949609053e-05, | |
| "loss": 0.0659, | |
| "num_input_tokens_seen": 22179672, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 9.825688073394495, | |
| "grad_norm": 0.41754114627838135, | |
| "learning_rate": 2.5e-05, | |
| "loss": 0.0683, | |
| "num_input_tokens_seen": 22214744, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 9.844036697247706, | |
| "grad_norm": 0.397276371717453, | |
| "learning_rate": 2.4927278050390956e-05, | |
| "loss": 0.0633, | |
| "num_input_tokens_seen": 22256776, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 9.862385321100918, | |
| "grad_norm": 0.569098711013794, | |
| "learning_rate": 2.485455671612515e-05, | |
| "loss": 0.1099, | |
| "num_input_tokens_seen": 22300168, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 9.880733944954128, | |
| "grad_norm": 0.3297954797744751, | |
| "learning_rate": 2.4781836612540657e-05, | |
| "loss": 0.0571, | |
| "num_input_tokens_seen": 22345376, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 9.899082568807339, | |
| "grad_norm": 0.38287127017974854, | |
| "learning_rate": 2.470911835496508e-05, | |
| "loss": 0.0552, | |
| "num_input_tokens_seen": 22374072, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 9.917431192660551, | |
| "grad_norm": 0.3791472613811493, | |
| "learning_rate": 2.4636402558710432e-05, | |
| "loss": 0.1122, | |
| "num_input_tokens_seen": 22415592, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 9.935779816513762, | |
| "grad_norm": 0.3753291368484497, | |
| "learning_rate": 2.4563689839067913e-05, | |
| "loss": 0.0643, | |
| "num_input_tokens_seen": 22458208, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 9.954128440366972, | |
| "grad_norm": 0.3445354104042053, | |
| "learning_rate": 2.4490980811302656e-05, | |
| "loss": 0.0535, | |
| "num_input_tokens_seen": 22497376, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 9.972477064220184, | |
| "grad_norm": 0.46824514865875244, | |
| "learning_rate": 2.4418276090648596e-05, | |
| "loss": 0.1055, | |
| "num_input_tokens_seen": 22534016, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 9.990825688073395, | |
| "grad_norm": 0.3591933846473694, | |
| "learning_rate": 2.4345576292303176e-05, | |
| "loss": 0.0527, | |
| "num_input_tokens_seen": 22573896, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 10.0, | |
| "grad_norm": 0.35627591609954834, | |
| "learning_rate": 2.4272882031422215e-05, | |
| "loss": 0.0589, | |
| "num_input_tokens_seen": 22590880, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 10.01834862385321, | |
| "grad_norm": 0.36426064372062683, | |
| "learning_rate": 2.4200193923114683e-05, | |
| "loss": 0.0605, | |
| "num_input_tokens_seen": 22629088, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 10.036697247706423, | |
| "grad_norm": 0.39051762223243713, | |
| "learning_rate": 2.4127512582437485e-05, | |
| "loss": 0.0537, | |
| "num_input_tokens_seen": 22668208, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 10.055045871559633, | |
| "grad_norm": 0.2818565368652344, | |
| "learning_rate": 2.405483862439023e-05, | |
| "loss": 0.0458, | |
| "num_input_tokens_seen": 22706992, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 10.073394495412844, | |
| "grad_norm": 0.3922857344150543, | |
| "learning_rate": 2.3982172663910108e-05, | |
| "loss": 0.057, | |
| "num_input_tokens_seen": 22740280, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 10.091743119266056, | |
| "grad_norm": 0.3783634305000305, | |
| "learning_rate": 2.3909515315866605e-05, | |
| "loss": 0.1281, | |
| "num_input_tokens_seen": 22779464, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 10.110091743119266, | |
| "grad_norm": 0.3341287076473236, | |
| "learning_rate": 2.3836867195056335e-05, | |
| "loss": 0.0505, | |
| "num_input_tokens_seen": 22819232, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 10.128440366972477, | |
| "grad_norm": 0.353397399187088, | |
| "learning_rate": 2.3764228916197855e-05, | |
| "loss": 0.0533, | |
| "num_input_tokens_seen": 22860296, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 10.146788990825687, | |
| "grad_norm": 0.5371742248535156, | |
| "learning_rate": 2.3691601093926404e-05, | |
| "loss": 0.0657, | |
| "num_input_tokens_seen": 22895040, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 10.1651376146789, | |
| "grad_norm": 0.2916604280471802, | |
| "learning_rate": 2.361898434278879e-05, | |
| "loss": 0.0448, | |
| "num_input_tokens_seen": 22939848, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 10.18348623853211, | |
| "grad_norm": 0.3729498088359833, | |
| "learning_rate": 2.3546379277238107e-05, | |
| "loss": 0.0511, | |
| "num_input_tokens_seen": 22990280, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 10.20183486238532, | |
| "grad_norm": 0.39111775159835815, | |
| "learning_rate": 2.3473786511628575e-05, | |
| "loss": 0.0557, | |
| "num_input_tokens_seen": 23034808, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 10.220183486238533, | |
| "grad_norm": 0.3621370196342468, | |
| "learning_rate": 2.3401206660210363e-05, | |
| "loss": 0.0481, | |
| "num_input_tokens_seen": 23082600, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 10.238532110091743, | |
| "grad_norm": 0.4028044641017914, | |
| "learning_rate": 2.3328640337124326e-05, | |
| "loss": 0.0408, | |
| "num_input_tokens_seen": 23125248, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 10.256880733944953, | |
| "grad_norm": 0.7029250264167786, | |
| "learning_rate": 2.3256088156396868e-05, | |
| "loss": 0.1393, | |
| "num_input_tokens_seen": 23169104, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 10.275229357798166, | |
| "grad_norm": 0.34606480598449707, | |
| "learning_rate": 2.3183550731934735e-05, | |
| "loss": 0.0445, | |
| "num_input_tokens_seen": 23213000, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 10.293577981651376, | |
| "grad_norm": 0.3310895562171936, | |
| "learning_rate": 2.3111028677519804e-05, | |
| "loss": 0.0491, | |
| "num_input_tokens_seen": 23252584, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 10.311926605504587, | |
| "grad_norm": 0.3020009398460388, | |
| "learning_rate": 2.303852260680388e-05, | |
| "loss": 0.0438, | |
| "num_input_tokens_seen": 23294424, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 10.330275229357799, | |
| "grad_norm": 0.41868507862091064, | |
| "learning_rate": 2.2966033133303545e-05, | |
| "loss": 0.0818, | |
| "num_input_tokens_seen": 23331696, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 10.34862385321101, | |
| "grad_norm": 0.4141708016395569, | |
| "learning_rate": 2.289356087039493e-05, | |
| "loss": 0.0733, | |
| "num_input_tokens_seen": 23370592, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 10.36697247706422, | |
| "grad_norm": 0.39040666818618774, | |
| "learning_rate": 2.2821106431308544e-05, | |
| "loss": 0.0576, | |
| "num_input_tokens_seen": 23406728, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 10.385321100917432, | |
| "grad_norm": 0.29972052574157715, | |
| "learning_rate": 2.274867042912408e-05, | |
| "loss": 0.0399, | |
| "num_input_tokens_seen": 23456880, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 10.403669724770642, | |
| "grad_norm": 0.4595007002353668, | |
| "learning_rate": 2.2676253476765196e-05, | |
| "loss": 0.1006, | |
| "num_input_tokens_seen": 23501632, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 10.422018348623853, | |
| "grad_norm": 0.3600683808326721, | |
| "learning_rate": 2.26038561869944e-05, | |
| "loss": 0.048, | |
| "num_input_tokens_seen": 23546696, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 10.440366972477065, | |
| "grad_norm": 0.33539336919784546, | |
| "learning_rate": 2.2531479172407805e-05, | |
| "loss": 0.038, | |
| "num_input_tokens_seen": 23590976, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 10.458715596330276, | |
| "grad_norm": 0.37528175115585327, | |
| "learning_rate": 2.2459123045429954e-05, | |
| "loss": 0.0475, | |
| "num_input_tokens_seen": 23632096, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 10.477064220183486, | |
| "grad_norm": 0.4022611379623413, | |
| "learning_rate": 2.238678841830867e-05, | |
| "loss": 0.0385, | |
| "num_input_tokens_seen": 23661968, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 10.495412844036696, | |
| "grad_norm": 0.5055795907974243, | |
| "learning_rate": 2.2314475903109825e-05, | |
| "loss": 0.0602, | |
| "num_input_tokens_seen": 23709568, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 10.513761467889909, | |
| "grad_norm": 0.3705989122390747, | |
| "learning_rate": 2.2242186111712208e-05, | |
| "loss": 0.0812, | |
| "num_input_tokens_seen": 23749216, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 10.53211009174312, | |
| "grad_norm": 0.46322381496429443, | |
| "learning_rate": 2.2169919655802335e-05, | |
| "loss": 0.1103, | |
| "num_input_tokens_seen": 23786848, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 10.55045871559633, | |
| "grad_norm": 0.40934255719184875, | |
| "learning_rate": 2.2097677146869242e-05, | |
| "loss": 0.048, | |
| "num_input_tokens_seen": 23827336, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 10.568807339449542, | |
| "grad_norm": 0.7357956767082214, | |
| "learning_rate": 2.202545919619937e-05, | |
| "loss": 0.1118, | |
| "num_input_tokens_seen": 23880272, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 10.587155963302752, | |
| "grad_norm": 0.4258132576942444, | |
| "learning_rate": 2.195326641487132e-05, | |
| "loss": 0.0512, | |
| "num_input_tokens_seen": 23923208, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 10.605504587155963, | |
| "grad_norm": 0.3786598742008209, | |
| "learning_rate": 2.1881099413750733e-05, | |
| "loss": 0.0809, | |
| "num_input_tokens_seen": 23961424, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 10.623853211009175, | |
| "grad_norm": 0.37131381034851074, | |
| "learning_rate": 2.1808958803485136e-05, | |
| "loss": 0.0446, | |
| "num_input_tokens_seen": 24000184, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 10.642201834862385, | |
| "grad_norm": 0.3253346383571625, | |
| "learning_rate": 2.173684519449872e-05, | |
| "loss": 0.0472, | |
| "num_input_tokens_seen": 24049488, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 10.660550458715596, | |
| "grad_norm": 0.4104836881160736, | |
| "learning_rate": 2.1664759196987182e-05, | |
| "loss": 0.0522, | |
| "num_input_tokens_seen": 24085104, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 10.678899082568808, | |
| "grad_norm": 0.5492331385612488, | |
| "learning_rate": 2.1592701420912644e-05, | |
| "loss": 0.1182, | |
| "num_input_tokens_seen": 24124768, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 10.697247706422019, | |
| "grad_norm": 0.315057635307312, | |
| "learning_rate": 2.1520672475998373e-05, | |
| "loss": 0.0452, | |
| "num_input_tokens_seen": 24167872, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 10.715596330275229, | |
| "grad_norm": 0.3825458586215973, | |
| "learning_rate": 2.144867297172369e-05, | |
| "loss": 0.0586, | |
| "num_input_tokens_seen": 24198824, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 10.73394495412844, | |
| "grad_norm": 0.3713642656803131, | |
| "learning_rate": 2.1376703517318837e-05, | |
| "loss": 0.0547, | |
| "num_input_tokens_seen": 24249320, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 10.752293577981652, | |
| "grad_norm": 0.4260292947292328, | |
| "learning_rate": 2.1304764721759733e-05, | |
| "loss": 0.0562, | |
| "num_input_tokens_seen": 24292592, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 10.770642201834862, | |
| "grad_norm": 0.4098452627658844, | |
| "learning_rate": 2.1232857193762924e-05, | |
| "loss": 0.056, | |
| "num_input_tokens_seen": 24331648, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 10.788990825688073, | |
| "grad_norm": 0.4303808808326721, | |
| "learning_rate": 2.116098154178035e-05, | |
| "loss": 0.0538, | |
| "num_input_tokens_seen": 24386904, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 10.807339449541285, | |
| "grad_norm": 0.41580283641815186, | |
| "learning_rate": 2.1089138373994223e-05, | |
| "loss": 0.0924, | |
| "num_input_tokens_seen": 24435360, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 10.825688073394495, | |
| "grad_norm": 0.3460528254508972, | |
| "learning_rate": 2.101732829831194e-05, | |
| "loss": 0.0468, | |
| "num_input_tokens_seen": 24465400, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 10.844036697247706, | |
| "grad_norm": 0.4775545299053192, | |
| "learning_rate": 2.0945551922360818e-05, | |
| "loss": 0.057, | |
| "num_input_tokens_seen": 24497984, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 10.862385321100918, | |
| "grad_norm": 0.36002540588378906, | |
| "learning_rate": 2.087380985348306e-05, | |
| "loss": 0.0472, | |
| "num_input_tokens_seen": 24534576, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 10.880733944954128, | |
| "grad_norm": 0.38081276416778564, | |
| "learning_rate": 2.0802102698730574e-05, | |
| "loss": 0.0592, | |
| "num_input_tokens_seen": 24572160, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 10.899082568807339, | |
| "grad_norm": 0.3270852863788605, | |
| "learning_rate": 2.0730431064859836e-05, | |
| "loss": 0.0392, | |
| "num_input_tokens_seen": 24619744, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 10.917431192660551, | |
| "grad_norm": 0.36131611466407776, | |
| "learning_rate": 2.0658795558326743e-05, | |
| "loss": 0.048, | |
| "num_input_tokens_seen": 24671048, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 10.935779816513762, | |
| "grad_norm": 0.40473103523254395, | |
| "learning_rate": 2.0587196785281525e-05, | |
| "loss": 0.0512, | |
| "num_input_tokens_seen": 24710152, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 10.954128440366972, | |
| "grad_norm": 0.35128137469291687, | |
| "learning_rate": 2.0515635351563565e-05, | |
| "loss": 0.0466, | |
| "num_input_tokens_seen": 24744584, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 10.972477064220184, | |
| "grad_norm": 0.6386734843254089, | |
| "learning_rate": 2.0444111862696314e-05, | |
| "loss": 0.0911, | |
| "num_input_tokens_seen": 24788584, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 10.990825688073395, | |
| "grad_norm": 0.4696199297904968, | |
| "learning_rate": 2.037262692388214e-05, | |
| "loss": 0.0516, | |
| "num_input_tokens_seen": 24823152, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 11.0, | |
| "grad_norm": 0.4481979310512543, | |
| "learning_rate": 2.0301181139997205e-05, | |
| "loss": 0.1007, | |
| "num_input_tokens_seen": 24849968, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 11.01834862385321, | |
| "grad_norm": 0.3045099079608917, | |
| "learning_rate": 2.022977511558638e-05, | |
| "loss": 0.0425, | |
| "num_input_tokens_seen": 24892784, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 11.036697247706423, | |
| "grad_norm": 0.2954024374485016, | |
| "learning_rate": 2.0158409454858103e-05, | |
| "loss": 0.0887, | |
| "num_input_tokens_seen": 24937192, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 11.055045871559633, | |
| "grad_norm": 0.302165150642395, | |
| "learning_rate": 2.0087084761679245e-05, | |
| "loss": 0.0425, | |
| "num_input_tokens_seen": 24980608, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 11.073394495412844, | |
| "grad_norm": 0.36207833886146545, | |
| "learning_rate": 2.0015801639570074e-05, | |
| "loss": 0.0788, | |
| "num_input_tokens_seen": 25029016, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 11.091743119266056, | |
| "grad_norm": 0.3311103582382202, | |
| "learning_rate": 1.9944560691699057e-05, | |
| "loss": 0.0387, | |
| "num_input_tokens_seen": 25071880, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 11.110091743119266, | |
| "grad_norm": 0.3376132547855377, | |
| "learning_rate": 1.9873362520877813e-05, | |
| "loss": 0.0534, | |
| "num_input_tokens_seen": 25117112, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 11.128440366972477, | |
| "grad_norm": 0.3412828743457794, | |
| "learning_rate": 1.980220772955602e-05, | |
| "loss": 0.0509, | |
| "num_input_tokens_seen": 25154456, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 11.146788990825687, | |
| "grad_norm": 0.40508487820625305, | |
| "learning_rate": 1.973109691981627e-05, | |
| "loss": 0.0436, | |
| "num_input_tokens_seen": 25193264, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 11.1651376146789, | |
| "grad_norm": 0.3498331606388092, | |
| "learning_rate": 1.9660030693369004e-05, | |
| "loss": 0.04, | |
| "num_input_tokens_seen": 25231416, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 11.18348623853211, | |
| "grad_norm": 0.36831074953079224, | |
| "learning_rate": 1.958900965154743e-05, | |
| "loss": 0.0371, | |
| "num_input_tokens_seen": 25268136, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 11.20183486238532, | |
| "grad_norm": 0.4536377787590027, | |
| "learning_rate": 1.9518034395302414e-05, | |
| "loss": 0.0986, | |
| "num_input_tokens_seen": 25316600, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 11.220183486238533, | |
| "grad_norm": 0.3427221477031708, | |
| "learning_rate": 1.9447105525197425e-05, | |
| "loss": 0.0391, | |
| "num_input_tokens_seen": 25358424, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 11.238532110091743, | |
| "grad_norm": 0.3086804151535034, | |
| "learning_rate": 1.937622364140338e-05, | |
| "loss": 0.0378, | |
| "num_input_tokens_seen": 25403744, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 11.256880733944953, | |
| "grad_norm": 0.37512463331222534, | |
| "learning_rate": 1.9305389343693664e-05, | |
| "loss": 0.0412, | |
| "num_input_tokens_seen": 25436496, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 11.275229357798166, | |
| "grad_norm": 0.48066776990890503, | |
| "learning_rate": 1.9234603231438995e-05, | |
| "loss": 0.0473, | |
| "num_input_tokens_seen": 25476544, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 11.293577981651376, | |
| "grad_norm": 0.3921424448490143, | |
| "learning_rate": 1.9163865903602374e-05, | |
| "loss": 0.0874, | |
| "num_input_tokens_seen": 25514768, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 11.311926605504587, | |
| "grad_norm": 0.36180391907691956, | |
| "learning_rate": 1.9093177958733966e-05, | |
| "loss": 0.0377, | |
| "num_input_tokens_seen": 25562136, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 11.330275229357799, | |
| "grad_norm": 0.3051501512527466, | |
| "learning_rate": 1.9022539994966147e-05, | |
| "loss": 0.0338, | |
| "num_input_tokens_seen": 25599784, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 11.34862385321101, | |
| "grad_norm": 0.3278719186782837, | |
| "learning_rate": 1.895195261000831e-05, | |
| "loss": 0.0368, | |
| "num_input_tokens_seen": 25633664, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 11.36697247706422, | |
| "grad_norm": 0.31365832686424255, | |
| "learning_rate": 1.8881416401141904e-05, | |
| "loss": 0.0817, | |
| "num_input_tokens_seen": 25686344, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 11.385321100917432, | |
| "grad_norm": 0.31113532185554504, | |
| "learning_rate": 1.8810931965215356e-05, | |
| "loss": 0.042, | |
| "num_input_tokens_seen": 25731184, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 11.403669724770642, | |
| "grad_norm": 0.6552867889404297, | |
| "learning_rate": 1.874049989863896e-05, | |
| "loss": 0.0803, | |
| "num_input_tokens_seen": 25773632, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 11.422018348623853, | |
| "grad_norm": 0.3581204116344452, | |
| "learning_rate": 1.8670120797379958e-05, | |
| "loss": 0.043, | |
| "num_input_tokens_seen": 25803376, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 11.440366972477065, | |
| "grad_norm": 0.308876097202301, | |
| "learning_rate": 1.859979525695736e-05, | |
| "loss": 0.0389, | |
| "num_input_tokens_seen": 25855248, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 11.458715596330276, | |
| "grad_norm": 0.3815799951553345, | |
| "learning_rate": 1.852952387243698e-05, | |
| "loss": 0.0429, | |
| "num_input_tokens_seen": 25885840, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 11.477064220183486, | |
| "grad_norm": 0.34968090057373047, | |
| "learning_rate": 1.8459307238426416e-05, | |
| "loss": 0.0466, | |
| "num_input_tokens_seen": 25938800, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 11.495412844036696, | |
| "grad_norm": 0.4210070073604584, | |
| "learning_rate": 1.838914594906995e-05, | |
| "loss": 0.0543, | |
| "num_input_tokens_seen": 25973752, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 11.513761467889909, | |
| "grad_norm": 0.3628455698490143, | |
| "learning_rate": 1.831904059804358e-05, | |
| "loss": 0.0408, | |
| "num_input_tokens_seen": 26011480, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 11.53211009174312, | |
| "grad_norm": 0.3586367964744568, | |
| "learning_rate": 1.8248991778549984e-05, | |
| "loss": 0.0462, | |
| "num_input_tokens_seen": 26054744, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 11.55045871559633, | |
| "grad_norm": 0.44129693508148193, | |
| "learning_rate": 1.8179000083313483e-05, | |
| "loss": 0.0478, | |
| "num_input_tokens_seen": 26083512, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 11.568807339449542, | |
| "grad_norm": 0.3320532739162445, | |
| "learning_rate": 1.8109066104575023e-05, | |
| "loss": 0.0376, | |
| "num_input_tokens_seen": 26127504, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 11.587155963302752, | |
| "grad_norm": 0.34192633628845215, | |
| "learning_rate": 1.8039190434087212e-05, | |
| "loss": 0.0399, | |
| "num_input_tokens_seen": 26170568, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 11.605504587155963, | |
| "grad_norm": 0.5145437121391296, | |
| "learning_rate": 1.7969373663109234e-05, | |
| "loss": 0.0658, | |
| "num_input_tokens_seen": 26221816, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 11.623853211009175, | |
| "grad_norm": 0.3665778636932373, | |
| "learning_rate": 1.7899616382401936e-05, | |
| "loss": 0.0384, | |
| "num_input_tokens_seen": 26262800, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 11.642201834862385, | |
| "grad_norm": 0.31278637051582336, | |
| "learning_rate": 1.7829919182222752e-05, | |
| "loss": 0.0357, | |
| "num_input_tokens_seen": 26311200, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 11.660550458715596, | |
| "grad_norm": 0.3833770751953125, | |
| "learning_rate": 1.776028265232073e-05, | |
| "loss": 0.0445, | |
| "num_input_tokens_seen": 26359400, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 11.678899082568808, | |
| "grad_norm": 0.3182731568813324, | |
| "learning_rate": 1.7690707381931583e-05, | |
| "loss": 0.0352, | |
| "num_input_tokens_seen": 26401144, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 11.697247706422019, | |
| "grad_norm": 0.3680736720561981, | |
| "learning_rate": 1.7621193959772657e-05, | |
| "loss": 0.0381, | |
| "num_input_tokens_seen": 26429056, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 11.715596330275229, | |
| "grad_norm": 0.30601194500923157, | |
| "learning_rate": 1.755174297403795e-05, | |
| "loss": 0.0448, | |
| "num_input_tokens_seen": 26478272, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 11.73394495412844, | |
| "grad_norm": 0.35907667875289917, | |
| "learning_rate": 1.7482355012393177e-05, | |
| "loss": 0.044, | |
| "num_input_tokens_seen": 26525592, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 11.752293577981652, | |
| "grad_norm": 0.33723318576812744, | |
| "learning_rate": 1.7413030661970742e-05, | |
| "loss": 0.0392, | |
| "num_input_tokens_seen": 26565280, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 11.770642201834862, | |
| "grad_norm": 0.3269751965999603, | |
| "learning_rate": 1.73437705093648e-05, | |
| "loss": 0.0356, | |
| "num_input_tokens_seen": 26601768, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 11.788990825688073, | |
| "grad_norm": 0.339697927236557, | |
| "learning_rate": 1.7274575140626318e-05, | |
| "loss": 0.0425, | |
| "num_input_tokens_seen": 26634496, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 11.807339449541285, | |
| "grad_norm": 0.37456247210502625, | |
| "learning_rate": 1.720544514125805e-05, | |
| "loss": 0.0504, | |
| "num_input_tokens_seen": 26679232, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 11.825688073394495, | |
| "grad_norm": 0.42360228300094604, | |
| "learning_rate": 1.7136381096209664e-05, | |
| "loss": 0.0566, | |
| "num_input_tokens_seen": 26723144, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 11.844036697247706, | |
| "grad_norm": 0.3331366181373596, | |
| "learning_rate": 1.7067383589872703e-05, | |
| "loss": 0.0397, | |
| "num_input_tokens_seen": 26760448, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 11.862385321100918, | |
| "grad_norm": 0.30122336745262146, | |
| "learning_rate": 1.699845320607571e-05, | |
| "loss": 0.0372, | |
| "num_input_tokens_seen": 26820856, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 11.880733944954128, | |
| "grad_norm": 0.8716153502464294, | |
| "learning_rate": 1.692959052807928e-05, | |
| "loss": 0.0932, | |
| "num_input_tokens_seen": 26851736, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 11.899082568807339, | |
| "grad_norm": 0.3729649484157562, | |
| "learning_rate": 1.686079613857109e-05, | |
| "loss": 0.0632, | |
| "num_input_tokens_seen": 26892976, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 11.917431192660551, | |
| "grad_norm": 0.4268723130226135, | |
| "learning_rate": 1.6792070619660975e-05, | |
| "loss": 0.0806, | |
| "num_input_tokens_seen": 26933888, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 11.935779816513762, | |
| "grad_norm": 0.3705332279205322, | |
| "learning_rate": 1.672341455287605e-05, | |
| "loss": 0.0414, | |
| "num_input_tokens_seen": 26970576, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 11.954128440366972, | |
| "grad_norm": 0.3180471360683441, | |
| "learning_rate": 1.665482851915573e-05, | |
| "loss": 0.0356, | |
| "num_input_tokens_seen": 27008400, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 11.972477064220184, | |
| "grad_norm": 0.6832396984100342, | |
| "learning_rate": 1.658631309884684e-05, | |
| "loss": 0.074, | |
| "num_input_tokens_seen": 27043704, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 11.990825688073395, | |
| "grad_norm": 0.4499323070049286, | |
| "learning_rate": 1.6517868871698725e-05, | |
| "loss": 0.0473, | |
| "num_input_tokens_seen": 27084456, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 12.0, | |
| "grad_norm": 0.4537651240825653, | |
| "learning_rate": 1.6449496416858284e-05, | |
| "loss": 0.0369, | |
| "num_input_tokens_seen": 27109056, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 12.01834862385321, | |
| "grad_norm": 0.3181232213973999, | |
| "learning_rate": 1.6381196312865145e-05, | |
| "loss": 0.0767, | |
| "num_input_tokens_seen": 27147704, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 12.036697247706423, | |
| "grad_norm": 0.30549949407577515, | |
| "learning_rate": 1.6312969137646716e-05, | |
| "loss": 0.0398, | |
| "num_input_tokens_seen": 27197376, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 12.055045871559633, | |
| "grad_norm": 0.29217857122421265, | |
| "learning_rate": 1.6244815468513315e-05, | |
| "loss": 0.0373, | |
| "num_input_tokens_seen": 27244056, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 12.073394495412844, | |
| "grad_norm": 0.3700779378414154, | |
| "learning_rate": 1.617673588215328e-05, | |
| "loss": 0.042, | |
| "num_input_tokens_seen": 27269576, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 12.091743119266056, | |
| "grad_norm": 0.3830711245536804, | |
| "learning_rate": 1.6108730954628093e-05, | |
| "loss": 0.0317, | |
| "num_input_tokens_seen": 27314640, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 12.110091743119266, | |
| "grad_norm": 0.332509845495224, | |
| "learning_rate": 1.6040801261367493e-05, | |
| "loss": 0.0382, | |
| "num_input_tokens_seen": 27358840, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 12.128440366972477, | |
| "grad_norm": 0.35123559832572937, | |
| "learning_rate": 1.5972947377164645e-05, | |
| "loss": 0.0378, | |
| "num_input_tokens_seen": 27399112, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 12.146788990825687, | |
| "grad_norm": 0.3346501588821411, | |
| "learning_rate": 1.5905169876171223e-05, | |
| "loss": 0.0322, | |
| "num_input_tokens_seen": 27434064, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 12.1651376146789, | |
| "grad_norm": 0.38763749599456787, | |
| "learning_rate": 1.583746933189257e-05, | |
| "loss": 0.0421, | |
| "num_input_tokens_seen": 27468528, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 12.18348623853211, | |
| "grad_norm": 0.3860892653465271, | |
| "learning_rate": 1.5769846317182893e-05, | |
| "loss": 0.0682, | |
| "num_input_tokens_seen": 27508512, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 12.20183486238532, | |
| "grad_norm": 0.31347358226776123, | |
| "learning_rate": 1.570230140424033e-05, | |
| "loss": 0.0328, | |
| "num_input_tokens_seen": 27555912, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 12.220183486238533, | |
| "grad_norm": 0.3007822632789612, | |
| "learning_rate": 1.56348351646022e-05, | |
| "loss": 0.0346, | |
| "num_input_tokens_seen": 27597608, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 12.238532110091743, | |
| "grad_norm": 0.38240882754325867, | |
| "learning_rate": 1.556744816914008e-05, | |
| "loss": 0.037, | |
| "num_input_tokens_seen": 27633496, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 12.256880733944953, | |
| "grad_norm": 0.2740360498428345, | |
| "learning_rate": 1.550014098805503e-05, | |
| "loss": 0.0286, | |
| "num_input_tokens_seen": 27680040, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 12.275229357798166, | |
| "grad_norm": 0.4563816487789154, | |
| "learning_rate": 1.5432914190872757e-05, | |
| "loss": 0.0308, | |
| "num_input_tokens_seen": 27721872, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 12.293577981651376, | |
| "grad_norm": 0.27564939856529236, | |
| "learning_rate": 1.5365768346438797e-05, | |
| "loss": 0.0294, | |
| "num_input_tokens_seen": 27762968, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 12.311926605504587, | |
| "grad_norm": 0.28814566135406494, | |
| "learning_rate": 1.529870402291368e-05, | |
| "loss": 0.0283, | |
| "num_input_tokens_seen": 27809496, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 12.330275229357799, | |
| "grad_norm": 0.29550161957740784, | |
| "learning_rate": 1.523172178776816e-05, | |
| "loss": 0.027, | |
| "num_input_tokens_seen": 27857160, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 12.34862385321101, | |
| "grad_norm": 0.31906190514564514, | |
| "learning_rate": 1.5164822207778379e-05, | |
| "loss": 0.0351, | |
| "num_input_tokens_seen": 27920616, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 12.36697247706422, | |
| "grad_norm": 0.4574970602989197, | |
| "learning_rate": 1.509800584902108e-05, | |
| "loss": 0.0749, | |
| "num_input_tokens_seen": 27971920, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 12.385321100917432, | |
| "grad_norm": 0.4354063868522644, | |
| "learning_rate": 1.5031273276868845e-05, | |
| "loss": 0.048, | |
| "num_input_tokens_seen": 28008960, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 12.403669724770642, | |
| "grad_norm": 0.32136741280555725, | |
| "learning_rate": 1.4964625055985265e-05, | |
| "loss": 0.0319, | |
| "num_input_tokens_seen": 28055416, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 12.422018348623853, | |
| "grad_norm": 0.3991897702217102, | |
| "learning_rate": 1.4898061750320212e-05, | |
| "loss": 0.0403, | |
| "num_input_tokens_seen": 28093136, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 12.440366972477065, | |
| "grad_norm": 0.29797106981277466, | |
| "learning_rate": 1.4831583923104999e-05, | |
| "loss": 0.0377, | |
| "num_input_tokens_seen": 28152744, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 12.458715596330276, | |
| "grad_norm": 0.8847760558128357, | |
| "learning_rate": 1.4765192136847685e-05, | |
| "loss": 0.0631, | |
| "num_input_tokens_seen": 28205712, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 12.477064220183486, | |
| "grad_norm": 0.28264713287353516, | |
| "learning_rate": 1.4698886953328292e-05, | |
| "loss": 0.0304, | |
| "num_input_tokens_seen": 28248432, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 12.495412844036696, | |
| "grad_norm": 0.5237520337104797, | |
| "learning_rate": 1.463266893359403e-05, | |
| "loss": 0.0612, | |
| "num_input_tokens_seen": 28282728, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 12.513761467889909, | |
| "grad_norm": 0.3867344260215759, | |
| "learning_rate": 1.4566538637954554e-05, | |
| "loss": 0.0436, | |
| "num_input_tokens_seen": 28330200, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 12.53211009174312, | |
| "grad_norm": 0.34402939677238464, | |
| "learning_rate": 1.4500496625977264e-05, | |
| "loss": 0.0339, | |
| "num_input_tokens_seen": 28368752, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 12.55045871559633, | |
| "grad_norm": 0.3360813558101654, | |
| "learning_rate": 1.443454345648252e-05, | |
| "loss": 0.0332, | |
| "num_input_tokens_seen": 28405528, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 12.568807339449542, | |
| "grad_norm": 0.3713105618953705, | |
| "learning_rate": 1.436867968753893e-05, | |
| "loss": 0.0378, | |
| "num_input_tokens_seen": 28451400, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 12.587155963302752, | |
| "grad_norm": 0.328673392534256, | |
| "learning_rate": 1.430290587645865e-05, | |
| "loss": 0.0407, | |
| "num_input_tokens_seen": 28501600, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 12.605504587155963, | |
| "grad_norm": 0.37341922521591187, | |
| "learning_rate": 1.4237222579792618e-05, | |
| "loss": 0.0362, | |
| "num_input_tokens_seen": 28532400, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 12.623853211009175, | |
| "grad_norm": 0.416866660118103, | |
| "learning_rate": 1.4171630353325932e-05, | |
| "loss": 0.0423, | |
| "num_input_tokens_seen": 28568872, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 12.642201834862385, | |
| "grad_norm": 0.3353414535522461, | |
| "learning_rate": 1.4106129752073022e-05, | |
| "loss": 0.0365, | |
| "num_input_tokens_seen": 28610976, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 12.660550458715596, | |
| "grad_norm": 0.4057430028915405, | |
| "learning_rate": 1.4040721330273062e-05, | |
| "loss": 0.083, | |
| "num_input_tokens_seen": 28657064, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 12.678899082568808, | |
| "grad_norm": 0.26201528310775757, | |
| "learning_rate": 1.3975405641385252e-05, | |
| "loss": 0.0285, | |
| "num_input_tokens_seen": 28702912, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 12.697247706422019, | |
| "grad_norm": 0.3357066214084625, | |
| "learning_rate": 1.3910183238084112e-05, | |
| "loss": 0.0346, | |
| "num_input_tokens_seen": 28754368, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 12.715596330275229, | |
| "grad_norm": 0.30762168765068054, | |
| "learning_rate": 1.3845054672254781e-05, | |
| "loss": 0.0289, | |
| "num_input_tokens_seen": 28795120, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 12.73394495412844, | |
| "grad_norm": 0.30095961689949036, | |
| "learning_rate": 1.3780020494988446e-05, | |
| "loss": 0.0288, | |
| "num_input_tokens_seen": 28837920, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 12.752293577981652, | |
| "grad_norm": 0.4343203008174896, | |
| "learning_rate": 1.3715081256577582e-05, | |
| "loss": 0.041, | |
| "num_input_tokens_seen": 28874200, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 12.770642201834862, | |
| "grad_norm": 0.37679317593574524, | |
| "learning_rate": 1.3650237506511331e-05, | |
| "loss": 0.0304, | |
| "num_input_tokens_seen": 28911744, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 12.788990825688073, | |
| "grad_norm": 0.3850303292274475, | |
| "learning_rate": 1.3585489793470862e-05, | |
| "loss": 0.0292, | |
| "num_input_tokens_seen": 28938696, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 12.807339449541285, | |
| "grad_norm": 0.35162273049354553, | |
| "learning_rate": 1.3520838665324703e-05, | |
| "loss": 0.0321, | |
| "num_input_tokens_seen": 28978360, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 12.825688073394495, | |
| "grad_norm": 0.35139572620391846, | |
| "learning_rate": 1.3456284669124158e-05, | |
| "loss": 0.0311, | |
| "num_input_tokens_seen": 29008048, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 12.844036697247706, | |
| "grad_norm": 0.3229581117630005, | |
| "learning_rate": 1.3391828351098578e-05, | |
| "loss": 0.0307, | |
| "num_input_tokens_seen": 29047928, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 12.862385321100918, | |
| "grad_norm": 0.3116603493690491, | |
| "learning_rate": 1.3327470256650848e-05, | |
| "loss": 0.033, | |
| "num_input_tokens_seen": 29080376, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 12.880733944954128, | |
| "grad_norm": 0.331969290971756, | |
| "learning_rate": 1.3263210930352737e-05, | |
| "loss": 0.0299, | |
| "num_input_tokens_seen": 29113856, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 12.899082568807339, | |
| "grad_norm": 0.32986903190612793, | |
| "learning_rate": 1.3199050915940225e-05, | |
| "loss": 0.0373, | |
| "num_input_tokens_seen": 29162904, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 12.917431192660551, | |
| "grad_norm": 0.3875369727611542, | |
| "learning_rate": 1.313499075630899e-05, | |
| "loss": 0.0344, | |
| "num_input_tokens_seen": 29195920, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 12.935779816513762, | |
| "grad_norm": 0.3268345892429352, | |
| "learning_rate": 1.3071030993509788e-05, | |
| "loss": 0.0357, | |
| "num_input_tokens_seen": 29230376, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 12.954128440366972, | |
| "grad_norm": 1.2907904386520386, | |
| "learning_rate": 1.3007172168743854e-05, | |
| "loss": 0.058, | |
| "num_input_tokens_seen": 29266464, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 12.972477064220184, | |
| "grad_norm": 0.29891443252563477, | |
| "learning_rate": 1.2943414822358285e-05, | |
| "loss": 0.0332, | |
| "num_input_tokens_seen": 29308840, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 12.990825688073395, | |
| "grad_norm": 0.3546947240829468, | |
| "learning_rate": 1.2879759493841575e-05, | |
| "loss": 0.0387, | |
| "num_input_tokens_seen": 29346656, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 13.0, | |
| "grad_norm": 0.4480898678302765, | |
| "learning_rate": 1.2816206721818944e-05, | |
| "loss": 0.0294, | |
| "num_input_tokens_seen": 29368144, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 13.01834862385321, | |
| "grad_norm": 0.27912405133247375, | |
| "learning_rate": 1.2752757044047827e-05, | |
| "loss": 0.029, | |
| "num_input_tokens_seen": 29406696, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 13.036697247706423, | |
| "grad_norm": 0.2766615152359009, | |
| "learning_rate": 1.2689410997413325e-05, | |
| "loss": 0.0292, | |
| "num_input_tokens_seen": 29447448, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 13.055045871559633, | |
| "grad_norm": 0.29729965329170227, | |
| "learning_rate": 1.262616911792365e-05, | |
| "loss": 0.031, | |
| "num_input_tokens_seen": 29493904, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 13.073394495412844, | |
| "grad_norm": 0.6886661052703857, | |
| "learning_rate": 1.2563031940705594e-05, | |
| "loss": 0.033, | |
| "num_input_tokens_seen": 29533272, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 13.091743119266056, | |
| "grad_norm": 0.30814382433891296, | |
| "learning_rate": 1.2500000000000006e-05, | |
| "loss": 0.0336, | |
| "num_input_tokens_seen": 29565728, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 13.110091743119266, | |
| "grad_norm": 0.2744746804237366, | |
| "learning_rate": 1.243707382915725e-05, | |
| "loss": 0.0237, | |
| "num_input_tokens_seen": 29604480, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 13.128440366972477, | |
| "grad_norm": 0.29700982570648193, | |
| "learning_rate": 1.2374253960632757e-05, | |
| "loss": 0.0282, | |
| "num_input_tokens_seen": 29639856, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 13.146788990825687, | |
| "grad_norm": 0.33870425820350647, | |
| "learning_rate": 1.2311540925982403e-05, | |
| "loss": 0.0324, | |
| "num_input_tokens_seen": 29677448, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 13.1651376146789, | |
| "grad_norm": 0.2735939025878906, | |
| "learning_rate": 1.2248935255858117e-05, | |
| "loss": 0.0269, | |
| "num_input_tokens_seen": 29723888, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 13.18348623853211, | |
| "grad_norm": 0.2972582280635834, | |
| "learning_rate": 1.2186437480003372e-05, | |
| "loss": 0.0252, | |
| "num_input_tokens_seen": 29767992, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 13.20183486238532, | |
| "grad_norm": 0.33630624413490295, | |
| "learning_rate": 1.2124048127248644e-05, | |
| "loss": 0.0386, | |
| "num_input_tokens_seen": 29814304, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 13.220183486238533, | |
| "grad_norm": 0.351375013589859, | |
| "learning_rate": 1.2061767725507006e-05, | |
| "loss": 0.0336, | |
| "num_input_tokens_seen": 29852040, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 13.238532110091743, | |
| "grad_norm": 0.3030643165111542, | |
| "learning_rate": 1.1999596801769616e-05, | |
| "loss": 0.0262, | |
| "num_input_tokens_seen": 29889096, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 13.256880733944953, | |
| "grad_norm": 0.2586018443107605, | |
| "learning_rate": 1.1937535882101281e-05, | |
| "loss": 0.0251, | |
| "num_input_tokens_seen": 29927888, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 13.275229357798166, | |
| "grad_norm": 0.2849186956882477, | |
| "learning_rate": 1.1875585491636e-05, | |
| "loss": 0.0315, | |
| "num_input_tokens_seen": 29967064, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 13.293577981651376, | |
| "grad_norm": 0.26484936475753784, | |
| "learning_rate": 1.1813746154572514e-05, | |
| "loss": 0.0259, | |
| "num_input_tokens_seen": 30016736, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 13.311926605504587, | |
| "grad_norm": 0.3491249084472656, | |
| "learning_rate": 1.175201839416988e-05, | |
| "loss": 0.0274, | |
| "num_input_tokens_seen": 30056888, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 13.330275229357799, | |
| "grad_norm": 0.3179028630256653, | |
| "learning_rate": 1.1690402732743042e-05, | |
| "loss": 0.0272, | |
| "num_input_tokens_seen": 30088720, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 13.34862385321101, | |
| "grad_norm": 0.9645937085151672, | |
| "learning_rate": 1.1628899691658399e-05, | |
| "loss": 0.0443, | |
| "num_input_tokens_seen": 30131848, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 13.36697247706422, | |
| "grad_norm": 0.2913883626461029, | |
| "learning_rate": 1.1567509791329401e-05, | |
| "loss": 0.032, | |
| "num_input_tokens_seen": 30186336, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 13.385321100917432, | |
| "grad_norm": 0.269525408744812, | |
| "learning_rate": 1.1506233551212186e-05, | |
| "loss": 0.0234, | |
| "num_input_tokens_seen": 30242096, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 13.403669724770642, | |
| "grad_norm": 0.30820366740226746, | |
| "learning_rate": 1.1445071489801073e-05, | |
| "loss": 0.0242, | |
| "num_input_tokens_seen": 30273336, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 13.422018348623853, | |
| "grad_norm": 0.31679078936576843, | |
| "learning_rate": 1.1384024124624324e-05, | |
| "loss": 0.0328, | |
| "num_input_tokens_seen": 30300544, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 13.440366972477065, | |
| "grad_norm": 0.2609107196331024, | |
| "learning_rate": 1.1323091972239635e-05, | |
| "loss": 0.0223, | |
| "num_input_tokens_seen": 30343952, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 13.458715596330276, | |
| "grad_norm": 0.31270915269851685, | |
| "learning_rate": 1.126227554822985e-05, | |
| "loss": 0.0237, | |
| "num_input_tokens_seen": 30393960, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 13.477064220183486, | |
| "grad_norm": 0.31794360280036926, | |
| "learning_rate": 1.1201575367198547e-05, | |
| "loss": 0.0513, | |
| "num_input_tokens_seen": 30441256, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 13.495412844036696, | |
| "grad_norm": 0.2859482169151306, | |
| "learning_rate": 1.1140991942765713e-05, | |
| "loss": 0.0276, | |
| "num_input_tokens_seen": 30480016, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 13.513761467889909, | |
| "grad_norm": 0.666972815990448, | |
| "learning_rate": 1.1080525787563393e-05, | |
| "loss": 0.0544, | |
| "num_input_tokens_seen": 30531440, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 13.53211009174312, | |
| "grad_norm": 0.3061029613018036, | |
| "learning_rate": 1.1020177413231334e-05, | |
| "loss": 0.0252, | |
| "num_input_tokens_seen": 30572720, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 13.55045871559633, | |
| "grad_norm": 0.3531540036201477, | |
| "learning_rate": 1.0959947330412682e-05, | |
| "loss": 0.0328, | |
| "num_input_tokens_seen": 30610416, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 13.568807339449542, | |
| "grad_norm": 0.2804684638977051, | |
| "learning_rate": 1.0899836048749645e-05, | |
| "loss": 0.0232, | |
| "num_input_tokens_seen": 30656448, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 13.587155963302752, | |
| "grad_norm": 0.32002997398376465, | |
| "learning_rate": 1.0839844076879185e-05, | |
| "loss": 0.0277, | |
| "num_input_tokens_seen": 30685584, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 13.605504587155963, | |
| "grad_norm": 0.2907179296016693, | |
| "learning_rate": 1.0779971922428711e-05, | |
| "loss": 0.0289, | |
| "num_input_tokens_seen": 30729472, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 13.623853211009175, | |
| "grad_norm": 0.45727285742759705, | |
| "learning_rate": 1.0720220092011782e-05, | |
| "loss": 0.0371, | |
| "num_input_tokens_seen": 30765400, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 13.642201834862385, | |
| "grad_norm": 0.31665271520614624, | |
| "learning_rate": 1.0660589091223855e-05, | |
| "loss": 0.032, | |
| "num_input_tokens_seen": 30812288, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 13.660550458715596, | |
| "grad_norm": 0.31468474864959717, | |
| "learning_rate": 1.0601079424637917e-05, | |
| "loss": 0.027, | |
| "num_input_tokens_seen": 30855592, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 13.678899082568808, | |
| "grad_norm": 0.32163867354393005, | |
| "learning_rate": 1.0541691595800337e-05, | |
| "loss": 0.0337, | |
| "num_input_tokens_seen": 30890072, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 13.697247706422019, | |
| "grad_norm": 0.26351386308670044, | |
| "learning_rate": 1.0482426107226507e-05, | |
| "loss": 0.0238, | |
| "num_input_tokens_seen": 30927336, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 13.715596330275229, | |
| "grad_norm": 0.3523198664188385, | |
| "learning_rate": 1.0423283460396633e-05, | |
| "loss": 0.0294, | |
| "num_input_tokens_seen": 30966920, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 13.73394495412844, | |
| "grad_norm": 0.262052059173584, | |
| "learning_rate": 1.0364264155751488e-05, | |
| "loss": 0.0281, | |
| "num_input_tokens_seen": 31015544, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 13.752293577981652, | |
| "grad_norm": 0.3480219542980194, | |
| "learning_rate": 1.0305368692688174e-05, | |
| "loss": 0.0311, | |
| "num_input_tokens_seen": 31052688, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 13.770642201834862, | |
| "grad_norm": 0.27989810705184937, | |
| "learning_rate": 1.0246597569555894e-05, | |
| "loss": 0.0205, | |
| "num_input_tokens_seen": 31092736, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 13.788990825688073, | |
| "grad_norm": 0.40765848755836487, | |
| "learning_rate": 1.0187951283651736e-05, | |
| "loss": 0.038, | |
| "num_input_tokens_seen": 31123536, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 13.807339449541285, | |
| "grad_norm": 0.3290911316871643, | |
| "learning_rate": 1.0129430331216471e-05, | |
| "loss": 0.0321, | |
| "num_input_tokens_seen": 31157144, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 13.825688073394495, | |
| "grad_norm": 0.2592953145503998, | |
| "learning_rate": 1.0071035207430352e-05, | |
| "loss": 0.0267, | |
| "num_input_tokens_seen": 31222320, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 13.844036697247706, | |
| "grad_norm": 0.2886260449886322, | |
| "learning_rate": 1.001276640640891e-05, | |
| "loss": 0.0261, | |
| "num_input_tokens_seen": 31277880, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 13.862385321100918, | |
| "grad_norm": 0.6212909817695618, | |
| "learning_rate": 9.954624421198792e-06, | |
| "loss": 0.0639, | |
| "num_input_tokens_seen": 31338856, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 13.880733944954128, | |
| "grad_norm": 0.33612295985221863, | |
| "learning_rate": 9.89660974377359e-06, | |
| "loss": 0.0338, | |
| "num_input_tokens_seen": 31376304, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 13.899082568807339, | |
| "grad_norm": 0.3871086835861206, | |
| "learning_rate": 9.838722865029673e-06, | |
| "loss": 0.0371, | |
| "num_input_tokens_seen": 31410672, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 13.917431192660551, | |
| "grad_norm": 0.3187819719314575, | |
| "learning_rate": 9.780964274781984e-06, | |
| "loss": 0.0241, | |
| "num_input_tokens_seen": 31444928, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 13.935779816513762, | |
| "grad_norm": 0.32425767183303833, | |
| "learning_rate": 9.723334461760006e-06, | |
| "loss": 0.0357, | |
| "num_input_tokens_seen": 31482232, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 13.954128440366972, | |
| "grad_norm": 0.3090585470199585, | |
| "learning_rate": 9.665833913603523e-06, | |
| "loss": 0.0312, | |
| "num_input_tokens_seen": 31513856, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 13.972477064220184, | |
| "grad_norm": 0.36073872447013855, | |
| "learning_rate": 9.608463116858542e-06, | |
| "loss": 0.0584, | |
| "num_input_tokens_seen": 31563800, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 13.990825688073395, | |
| "grad_norm": 0.3506834805011749, | |
| "learning_rate": 9.551222556973172e-06, | |
| "loss": 0.0356, | |
| "num_input_tokens_seen": 31606368, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 14.0, | |
| "grad_norm": 0.5738533735275269, | |
| "learning_rate": 9.494112718293501e-06, | |
| "loss": 0.0511, | |
| "num_input_tokens_seen": 31627232, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 14.01834862385321, | |
| "grad_norm": 0.24552291631698608, | |
| "learning_rate": 9.437134084059515e-06, | |
| "loss": 0.0237, | |
| "num_input_tokens_seen": 31669296, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 14.036697247706423, | |
| "grad_norm": 0.25540077686309814, | |
| "learning_rate": 9.380287136401e-06, | |
| "loss": 0.026, | |
| "num_input_tokens_seen": 31704400, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 14.055045871559633, | |
| "grad_norm": 0.293837308883667, | |
| "learning_rate": 9.323572356333454e-06, | |
| "loss": 0.0252, | |
| "num_input_tokens_seen": 31739048, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 14.073394495412844, | |
| "grad_norm": 0.2588525116443634, | |
| "learning_rate": 9.266990223754069e-06, | |
| "loss": 0.0175, | |
| "num_input_tokens_seen": 31776056, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 14.091743119266056, | |
| "grad_norm": 0.28983962535858154, | |
| "learning_rate": 9.210541217437565e-06, | |
| "loss": 0.0269, | |
| "num_input_tokens_seen": 31814504, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 14.110091743119266, | |
| "grad_norm": 0.32418689131736755, | |
| "learning_rate": 9.154225815032242e-06, | |
| "loss": 0.0283, | |
| "num_input_tokens_seen": 31846928, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 14.128440366972477, | |
| "grad_norm": 0.26463258266448975, | |
| "learning_rate": 9.098044493055899e-06, | |
| "loss": 0.0256, | |
| "num_input_tokens_seen": 31893152, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 14.146788990825687, | |
| "grad_norm": 0.24525673687458038, | |
| "learning_rate": 9.0419977268918e-06, | |
| "loss": 0.0202, | |
| "num_input_tokens_seen": 31937456, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 14.1651376146789, | |
| "grad_norm": 0.300974577665329, | |
| "learning_rate": 8.98608599078462e-06, | |
| "loss": 0.0301, | |
| "num_input_tokens_seen": 31969928, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 14.18348623853211, | |
| "grad_norm": 0.31297603249549866, | |
| "learning_rate": 8.930309757836517e-06, | |
| "loss": 0.0256, | |
| "num_input_tokens_seen": 32032920, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 14.20183486238532, | |
| "grad_norm": 0.2529805898666382, | |
| "learning_rate": 8.874669500003049e-06, | |
| "loss": 0.0247, | |
| "num_input_tokens_seen": 32087928, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 14.220183486238533, | |
| "grad_norm": 0.2599085867404938, | |
| "learning_rate": 8.819165688089193e-06, | |
| "loss": 0.026, | |
| "num_input_tokens_seen": 32133632, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 14.238532110091743, | |
| "grad_norm": 0.3724299967288971, | |
| "learning_rate": 8.763798791745411e-06, | |
| "loss": 0.0307, | |
| "num_input_tokens_seen": 32178024, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 14.256880733944953, | |
| "grad_norm": 0.29776036739349365, | |
| "learning_rate": 8.708569279463622e-06, | |
| "loss": 0.028, | |
| "num_input_tokens_seen": 32229216, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 14.275229357798166, | |
| "grad_norm": 0.326056569814682, | |
| "learning_rate": 8.65347761857326e-06, | |
| "loss": 0.047, | |
| "num_input_tokens_seen": 32279544, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 14.293577981651376, | |
| "grad_norm": 0.24950319528579712, | |
| "learning_rate": 8.598524275237322e-06, | |
| "loss": 0.0221, | |
| "num_input_tokens_seen": 32314736, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 14.311926605504587, | |
| "grad_norm": 0.2621147632598877, | |
| "learning_rate": 8.543709714448403e-06, | |
| "loss": 0.0211, | |
| "num_input_tokens_seen": 32357232, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 14.330275229357799, | |
| "grad_norm": 0.3251303732395172, | |
| "learning_rate": 8.489034400024812e-06, | |
| "loss": 0.0304, | |
| "num_input_tokens_seen": 32392992, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 14.34862385321101, | |
| "grad_norm": 0.26626235246658325, | |
| "learning_rate": 8.434498794606568e-06, | |
| "loss": 0.0225, | |
| "num_input_tokens_seen": 32434840, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 14.36697247706422, | |
| "grad_norm": 0.24833904206752777, | |
| "learning_rate": 8.380103359651553e-06, | |
| "loss": 0.0195, | |
| "num_input_tokens_seen": 32480592, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 14.385321100917432, | |
| "grad_norm": 0.6969995498657227, | |
| "learning_rate": 8.325848555431595e-06, | |
| "loss": 0.0258, | |
| "num_input_tokens_seen": 32514440, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 14.403669724770642, | |
| "grad_norm": 0.30866381525993347, | |
| "learning_rate": 8.271734841028553e-06, | |
| "loss": 0.045, | |
| "num_input_tokens_seen": 32557176, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 14.422018348623853, | |
| "grad_norm": 0.33566537499427795, | |
| "learning_rate": 8.217762674330413e-06, | |
| "loss": 0.0304, | |
| "num_input_tokens_seen": 32594416, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 14.440366972477065, | |
| "grad_norm": 0.23681727051734924, | |
| "learning_rate": 8.163932512027492e-06, | |
| "loss": 0.0178, | |
| "num_input_tokens_seen": 32638344, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 14.458715596330276, | |
| "grad_norm": 0.3150688111782074, | |
| "learning_rate": 8.110244809608495e-06, | |
| "loss": 0.0336, | |
| "num_input_tokens_seen": 32674416, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 14.477064220183486, | |
| "grad_norm": 0.2548944354057312, | |
| "learning_rate": 8.056700021356694e-06, | |
| "loss": 0.0309, | |
| "num_input_tokens_seen": 32725808, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 14.495412844036696, | |
| "grad_norm": 0.3117126524448395, | |
| "learning_rate": 8.003298600346085e-06, | |
| "loss": 0.0247, | |
| "num_input_tokens_seen": 32761224, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 14.513761467889909, | |
| "grad_norm": 0.2600093483924866, | |
| "learning_rate": 7.950040998437542e-06, | |
| "loss": 0.0235, | |
| "num_input_tokens_seen": 32812208, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 14.53211009174312, | |
| "grad_norm": 0.24391524493694305, | |
| "learning_rate": 7.896927666275006e-06, | |
| "loss": 0.0195, | |
| "num_input_tokens_seen": 32858672, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 14.55045871559633, | |
| "grad_norm": 0.3656960129737854, | |
| "learning_rate": 7.843959053281663e-06, | |
| "loss": 0.0397, | |
| "num_input_tokens_seen": 32905792, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 14.568807339449542, | |
| "grad_norm": 0.249402716755867, | |
| "learning_rate": 7.791135607656147e-06, | |
| "loss": 0.0216, | |
| "num_input_tokens_seen": 32946456, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 14.587155963302752, | |
| "grad_norm": 0.2628585696220398, | |
| "learning_rate": 7.738457776368766e-06, | |
| "loss": 0.0201, | |
| "num_input_tokens_seen": 32985008, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 14.605504587155963, | |
| "grad_norm": 0.26252928376197815, | |
| "learning_rate": 7.685926005157651e-06, | |
| "loss": 0.0229, | |
| "num_input_tokens_seen": 33029600, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 14.623853211009175, | |
| "grad_norm": 0.33849260210990906, | |
| "learning_rate": 7.633540738525066e-06, | |
| "loss": 0.0268, | |
| "num_input_tokens_seen": 33064056, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 14.642201834862385, | |
| "grad_norm": 0.2663592994213104, | |
| "learning_rate": 7.581302419733632e-06, | |
| "loss": 0.0241, | |
| "num_input_tokens_seen": 33099880, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 14.660550458715596, | |
| "grad_norm": 0.2511848211288452, | |
| "learning_rate": 7.529211490802498e-06, | |
| "loss": 0.0244, | |
| "num_input_tokens_seen": 33134376, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 14.678899082568808, | |
| "grad_norm": 0.2512349784374237, | |
| "learning_rate": 7.477268392503728e-06, | |
| "loss": 0.0225, | |
| "num_input_tokens_seen": 33172056, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 14.697247706422019, | |
| "grad_norm": 0.2771822512149811, | |
| "learning_rate": 7.4254735643584564e-06, | |
| "loss": 0.0241, | |
| "num_input_tokens_seen": 33224448, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 14.715596330275229, | |
| "grad_norm": 0.24601776897907257, | |
| "learning_rate": 7.3738274446332415e-06, | |
| "loss": 0.0228, | |
| "num_input_tokens_seen": 33264800, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 14.73394495412844, | |
| "grad_norm": 0.33897870779037476, | |
| "learning_rate": 7.3223304703363135e-06, | |
| "loss": 0.025, | |
| "num_input_tokens_seen": 33300896, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 14.752293577981652, | |
| "grad_norm": 0.2866343855857849, | |
| "learning_rate": 7.270983077213911e-06, | |
| "loss": 0.0243, | |
| "num_input_tokens_seen": 33335304, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 14.770642201834862, | |
| "grad_norm": 0.22896356880664825, | |
| "learning_rate": 7.219785699746573e-06, | |
| "loss": 0.0186, | |
| "num_input_tokens_seen": 33382912, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 14.788990825688073, | |
| "grad_norm": 0.2809698283672333, | |
| "learning_rate": 7.168738771145464e-06, | |
| "loss": 0.0299, | |
| "num_input_tokens_seen": 33422688, | |
| "step": 813 | |
| }, | |
| { | |
| "epoch": 14.807339449541285, | |
| "grad_norm": 0.32832175493240356, | |
| "learning_rate": 7.117842723348717e-06, | |
| "loss": 0.0301, | |
| "num_input_tokens_seen": 33456520, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 14.825688073394495, | |
| "grad_norm": 0.3265829384326935, | |
| "learning_rate": 7.067097987017762e-06, | |
| "loss": 0.0285, | |
| "num_input_tokens_seen": 33501112, | |
| "step": 815 | |
| }, | |
| { | |
| "epoch": 14.844036697247706, | |
| "grad_norm": 0.3356791138648987, | |
| "learning_rate": 7.016504991533726e-06, | |
| "loss": 0.0314, | |
| "num_input_tokens_seen": 33535160, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 14.862385321100918, | |
| "grad_norm": 0.28199252486228943, | |
| "learning_rate": 6.9660641649937155e-06, | |
| "loss": 0.0289, | |
| "num_input_tokens_seen": 33583896, | |
| "step": 817 | |
| }, | |
| { | |
| "epoch": 14.880733944954128, | |
| "grad_norm": 0.28438156843185425, | |
| "learning_rate": 6.9157759342072995e-06, | |
| "loss": 0.0225, | |
| "num_input_tokens_seen": 33616680, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 14.899082568807339, | |
| "grad_norm": 0.35118597745895386, | |
| "learning_rate": 6.865640724692815e-06, | |
| "loss": 0.029, | |
| "num_input_tokens_seen": 33652496, | |
| "step": 819 | |
| }, | |
| { | |
| "epoch": 14.917431192660551, | |
| "grad_norm": 0.27889585494995117, | |
| "learning_rate": 6.815658960673782e-06, | |
| "loss": 0.0379, | |
| "num_input_tokens_seen": 33695760, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 14.935779816513762, | |
| "grad_norm": 0.26556557416915894, | |
| "learning_rate": 6.765831065075367e-06, | |
| "loss": 0.0224, | |
| "num_input_tokens_seen": 33734472, | |
| "step": 821 | |
| }, | |
| { | |
| "epoch": 14.954128440366972, | |
| "grad_norm": 0.22440935671329498, | |
| "learning_rate": 6.716157459520739e-06, | |
| "loss": 0.0243, | |
| "num_input_tokens_seen": 33792456, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 14.972477064220184, | |
| "grad_norm": 0.24052810668945312, | |
| "learning_rate": 6.666638564327532e-06, | |
| "loss": 0.0203, | |
| "num_input_tokens_seen": 33837616, | |
| "step": 823 | |
| }, | |
| { | |
| "epoch": 14.990825688073395, | |
| "grad_norm": 0.3531548082828522, | |
| "learning_rate": 6.617274798504286e-06, | |
| "loss": 0.0316, | |
| "num_input_tokens_seen": 33871912, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 15.0, | |
| "grad_norm": 0.4734267592430115, | |
| "learning_rate": 6.568066579746901e-06, | |
| "loss": 0.038, | |
| "num_input_tokens_seen": 33886320, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 15.01834862385321, | |
| "grad_norm": 0.22462400794029236, | |
| "learning_rate": 6.519014324435102e-06, | |
| "loss": 0.0212, | |
| "num_input_tokens_seen": 33929160, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 15.036697247706423, | |
| "grad_norm": 0.29892125725746155, | |
| "learning_rate": 6.470118447628912e-06, | |
| "loss": 0.0303, | |
| "num_input_tokens_seen": 33976056, | |
| "step": 827 | |
| }, | |
| { | |
| "epoch": 15.055045871559633, | |
| "grad_norm": 0.2654046416282654, | |
| "learning_rate": 6.421379363065142e-06, | |
| "loss": 0.0394, | |
| "num_input_tokens_seen": 34016992, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 15.073394495412844, | |
| "grad_norm": 0.24058304727077484, | |
| "learning_rate": 6.3727974831539115e-06, | |
| "loss": 0.0238, | |
| "num_input_tokens_seen": 34054944, | |
| "step": 829 | |
| }, | |
| { | |
| "epoch": 15.091743119266056, | |
| "grad_norm": 0.2786811888217926, | |
| "learning_rate": 6.324373218975105e-06, | |
| "loss": 0.0382, | |
| "num_input_tokens_seen": 34100400, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 15.110091743119266, | |
| "grad_norm": 0.24469152092933655, | |
| "learning_rate": 6.2761069802749455e-06, | |
| "loss": 0.0195, | |
| "num_input_tokens_seen": 34154080, | |
| "step": 831 | |
| }, | |
| { | |
| "epoch": 15.128440366972477, | |
| "grad_norm": 0.21530218422412872, | |
| "learning_rate": 6.22799917546252e-06, | |
| "loss": 0.0192, | |
| "num_input_tokens_seen": 34211416, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 15.146788990825687, | |
| "grad_norm": 0.25735238194465637, | |
| "learning_rate": 6.180050211606303e-06, | |
| "loss": 0.0225, | |
| "num_input_tokens_seen": 34250208, | |
| "step": 833 | |
| }, | |
| { | |
| "epoch": 15.1651376146789, | |
| "grad_norm": 0.2362961620092392, | |
| "learning_rate": 6.1322604944307e-06, | |
| "loss": 0.0188, | |
| "num_input_tokens_seen": 34305792, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 15.18348623853211, | |
| "grad_norm": 0.23253802955150604, | |
| "learning_rate": 6.08463042831268e-06, | |
| "loss": 0.0205, | |
| "num_input_tokens_seen": 34345272, | |
| "step": 835 | |
| }, | |
| { | |
| "epoch": 15.20183486238532, | |
| "grad_norm": 0.2991214394569397, | |
| "learning_rate": 6.037160416278278e-06, | |
| "loss": 0.0241, | |
| "num_input_tokens_seen": 34381752, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 15.220183486238533, | |
| "grad_norm": 0.257041335105896, | |
| "learning_rate": 5.989850859999227e-06, | |
| "loss": 0.0203, | |
| "num_input_tokens_seen": 34423552, | |
| "step": 837 | |
| }, | |
| { | |
| "epoch": 15.238532110091743, | |
| "grad_norm": 0.22987478971481323, | |
| "learning_rate": 5.942702159789554e-06, | |
| "loss": 0.0204, | |
| "num_input_tokens_seen": 34463296, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 15.256880733944953, | |
| "grad_norm": 0.3145504295825958, | |
| "learning_rate": 5.895714714602171e-06, | |
| "loss": 0.0262, | |
| "num_input_tokens_seen": 34494592, | |
| "step": 839 | |
| }, | |
| { | |
| "epoch": 15.275229357798166, | |
| "grad_norm": 0.2484779804944992, | |
| "learning_rate": 5.848888922025553e-06, | |
| "loss": 0.0232, | |
| "num_input_tokens_seen": 34536560, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 15.293577981651376, | |
| "grad_norm": 0.28855112195014954, | |
| "learning_rate": 5.802225178280288e-06, | |
| "loss": 0.0244, | |
| "num_input_tokens_seen": 34565424, | |
| "step": 841 | |
| }, | |
| { | |
| "epoch": 15.311926605504587, | |
| "grad_norm": 0.21636433899402618, | |
| "learning_rate": 5.755723878215802e-06, | |
| "loss": 0.0178, | |
| "num_input_tokens_seen": 34615248, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 15.330275229357799, | |
| "grad_norm": 0.30022677779197693, | |
| "learning_rate": 5.709385415307006e-06, | |
| "loss": 0.0237, | |
| "num_input_tokens_seen": 34643648, | |
| "step": 843 | |
| }, | |
| { | |
| "epoch": 15.34862385321101, | |
| "grad_norm": 0.23669451475143433, | |
| "learning_rate": 5.663210181650905e-06, | |
| "loss": 0.0189, | |
| "num_input_tokens_seen": 34678888, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 15.36697247706422, | |
| "grad_norm": 0.3009994924068451, | |
| "learning_rate": 5.617198567963352e-06, | |
| "loss": 0.0228, | |
| "num_input_tokens_seen": 34717536, | |
| "step": 845 | |
| }, | |
| { | |
| "epoch": 15.385321100917432, | |
| "grad_norm": 0.2476666420698166, | |
| "learning_rate": 5.571350963575728e-06, | |
| "loss": 0.0187, | |
| "num_input_tokens_seen": 34755840, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 15.403669724770642, | |
| "grad_norm": 0.2747964859008789, | |
| "learning_rate": 5.525667756431616e-06, | |
| "loss": 0.0277, | |
| "num_input_tokens_seen": 34788648, | |
| "step": 847 | |
| }, | |
| { | |
| "epoch": 15.422018348623853, | |
| "grad_norm": 0.3019561469554901, | |
| "learning_rate": 5.48014933308352e-06, | |
| "loss": 0.0261, | |
| "num_input_tokens_seen": 34818368, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 15.440366972477065, | |
| "grad_norm": 0.22335736453533173, | |
| "learning_rate": 5.434796078689652e-06, | |
| "loss": 0.0243, | |
| "num_input_tokens_seen": 34877832, | |
| "step": 849 | |
| }, | |
| { | |
| "epoch": 15.458715596330276, | |
| "grad_norm": 0.34833160042762756, | |
| "learning_rate": 5.389608377010608e-06, | |
| "loss": 0.0195, | |
| "num_input_tokens_seen": 34929560, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 15.477064220183486, | |
| "grad_norm": 0.2435968518257141, | |
| "learning_rate": 5.344586610406146e-06, | |
| "loss": 0.0198, | |
| "num_input_tokens_seen": 34969976, | |
| "step": 851 | |
| }, | |
| { | |
| "epoch": 15.495412844036696, | |
| "grad_norm": 0.22303880751132965, | |
| "learning_rate": 5.299731159831953e-06, | |
| "loss": 0.0169, | |
| "num_input_tokens_seen": 35008288, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 15.513761467889909, | |
| "grad_norm": 0.30280816555023193, | |
| "learning_rate": 5.2550424048364185e-06, | |
| "loss": 0.0314, | |
| "num_input_tokens_seen": 35045016, | |
| "step": 853 | |
| }, | |
| { | |
| "epoch": 15.53211009174312, | |
| "grad_norm": 0.2565707266330719, | |
| "learning_rate": 5.210520723557419e-06, | |
| "loss": 0.0192, | |
| "num_input_tokens_seen": 35085312, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 15.55045871559633, | |
| "grad_norm": 0.24315547943115234, | |
| "learning_rate": 5.166166492719124e-06, | |
| "loss": 0.0243, | |
| "num_input_tokens_seen": 35130040, | |
| "step": 855 | |
| }, | |
| { | |
| "epoch": 15.568807339449542, | |
| "grad_norm": 0.30669093132019043, | |
| "learning_rate": 5.121980087628803e-06, | |
| "loss": 0.026, | |
| "num_input_tokens_seen": 35169456, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 15.587155963302752, | |
| "grad_norm": 0.207157701253891, | |
| "learning_rate": 5.077961882173676e-06, | |
| "loss": 0.0177, | |
| "num_input_tokens_seen": 35216632, | |
| "step": 857 | |
| }, | |
| { | |
| "epoch": 15.605504587155963, | |
| "grad_norm": 0.2158558964729309, | |
| "learning_rate": 5.034112248817685e-06, | |
| "loss": 0.0175, | |
| "num_input_tokens_seen": 35263048, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 15.623853211009175, | |
| "grad_norm": 0.2432718127965927, | |
| "learning_rate": 4.990431558598408e-06, | |
| "loss": 0.0182, | |
| "num_input_tokens_seen": 35306376, | |
| "step": 859 | |
| }, | |
| { | |
| "epoch": 15.642201834862385, | |
| "grad_norm": 0.2845425009727478, | |
| "learning_rate": 4.946920181123904e-06, | |
| "loss": 0.0236, | |
| "num_input_tokens_seen": 35341168, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 15.660550458715596, | |
| "grad_norm": 0.23385128378868103, | |
| "learning_rate": 4.903578484569568e-06, | |
| "loss": 0.0178, | |
| "num_input_tokens_seen": 35376920, | |
| "step": 861 | |
| }, | |
| { | |
| "epoch": 15.678899082568808, | |
| "grad_norm": 0.2769944965839386, | |
| "learning_rate": 4.860406835675016e-06, | |
| "loss": 0.0257, | |
| "num_input_tokens_seen": 35430304, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 15.697247706422019, | |
| "grad_norm": 0.24330423772335052, | |
| "learning_rate": 4.817405599741004e-06, | |
| "loss": 0.0218, | |
| "num_input_tokens_seen": 35466728, | |
| "step": 863 | |
| }, | |
| { | |
| "epoch": 15.715596330275229, | |
| "grad_norm": 0.25154146552085876, | |
| "learning_rate": 4.7745751406263165e-06, | |
| "loss": 0.0231, | |
| "num_input_tokens_seen": 35503792, | |
| "step": 864 | |
| } | |
| ], | |
| "logging_steps": 1.0, | |
| "max_steps": 1080, | |
| "num_input_tokens_seen": 35503792, | |
| "num_train_epochs": 20, | |
| "save_steps": 54, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.9951592839910523e+18, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |