Upload checkpoint from train/ckpts/qwen_the-real-gabagool/s1-dynamic-cheatsheet-shuffled-v2_bs16_lr1e-5_epoch5_wd1e-4_20250530_123513/checkpoint-142
commit 61b13c6 (verified)
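Below is the trainer_state.json stored in this checkpoint directory, the bookkeeping file that the Hugging Face transformers Trainer writes alongside the weights. It records the run's progress (global_step 142, the end of epoch 2 of a planned 5, i.e. 142 of max_steps 355) and a per-step log_history of training loss, gradient norm, and learning rate. Note that train_batch_size is the per-device micro-batch size (1 here); the bs16 in the run name is presumably the effective batch size after gradient accumulation and/or data parallelism.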
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 142,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.014084507042253521, "grad_norm": 6.787696838378906, "learning_rate": 0.0, "loss": 0.5349, "step": 1 },
    { "epoch": 0.028169014084507043, "grad_norm": 4.965597629547119, "learning_rate": 5.555555555555555e-07, "loss": 0.4341, "step": 2 },
    { "epoch": 0.04225352112676056, "grad_norm": 6.162815570831299, "learning_rate": 1.111111111111111e-06, "loss": 0.4737, "step": 3 },
    { "epoch": 0.056338028169014086, "grad_norm": 5.477490425109863, "learning_rate": 1.6666666666666667e-06, "loss": 0.4147, "step": 4 },
    { "epoch": 0.07042253521126761, "grad_norm": 3.8193578720092773, "learning_rate": 2.222222222222222e-06, "loss": 0.3063, "step": 5 },
    { "epoch": 0.08450704225352113, "grad_norm": 2.9905662536621094, "learning_rate": 2.7777777777777783e-06, "loss": 0.2417, "step": 6 },
    { "epoch": 0.09859154929577464, "grad_norm": 3.6921825408935547, "learning_rate": 3.3333333333333333e-06, "loss": 0.3977, "step": 7 },
    { "epoch": 0.11267605633802817, "grad_norm": 3.400188684463501, "learning_rate": 3.88888888888889e-06, "loss": 0.3384, "step": 8 },
    { "epoch": 0.1267605633802817, "grad_norm": 2.7354023456573486, "learning_rate": 4.444444444444444e-06, "loss": 0.2933, "step": 9 },
    { "epoch": 0.14084507042253522, "grad_norm": 2.590258836746216, "learning_rate": 5e-06, "loss": 0.3457, "step": 10 },
    { "epoch": 0.15492957746478872, "grad_norm": 2.350748062133789, "learning_rate": 5.555555555555557e-06, "loss": 0.2827, "step": 11 },
    { "epoch": 0.16901408450704225, "grad_norm": 2.5647623538970947, "learning_rate": 6.111111111111112e-06, "loss": 0.3674, "step": 12 },
    { "epoch": 0.18309859154929578, "grad_norm": 2.025217294692993, "learning_rate": 6.666666666666667e-06, "loss": 0.2451, "step": 13 },
    { "epoch": 0.19718309859154928, "grad_norm": 2.718487024307251, "learning_rate": 7.222222222222223e-06, "loss": 0.3155, "step": 14 },
    { "epoch": 0.2112676056338028, "grad_norm": 2.435072660446167, "learning_rate": 7.77777777777778e-06, "loss": 0.3048, "step": 15 },
    { "epoch": 0.22535211267605634, "grad_norm": 2.4881649017333984, "learning_rate": 8.333333333333334e-06, "loss": 0.3065, "step": 16 },
    { "epoch": 0.23943661971830985, "grad_norm": 2.4629642963409424, "learning_rate": 8.888888888888888e-06, "loss": 0.3362, "step": 17 },
    { "epoch": 0.2535211267605634, "grad_norm": 2.108853816986084, "learning_rate": 9.444444444444445e-06, "loss": 0.3038, "step": 18 },
    { "epoch": 0.2676056338028169, "grad_norm": 1.947784185409546, "learning_rate": 1e-05, "loss": 0.2637, "step": 19 },
    { "epoch": 0.28169014084507044, "grad_norm": 1.2261523008346558, "learning_rate": 9.99978274148479e-06, "loss": 0.1147, "step": 20 },
    { "epoch": 0.29577464788732394, "grad_norm": 1.9560741186141968, "learning_rate": 9.999130984819662e-06, "loss": 0.2295, "step": 21 },
    { "epoch": 0.30985915492957744, "grad_norm": 2.593855142593384, "learning_rate": 9.998044786644492e-06, "loss": 0.3603, "step": 22 },
    { "epoch": 0.323943661971831, "grad_norm": 1.855962872505188, "learning_rate": 9.9965242413536e-06, "loss": 0.2106, "step": 23 },
    { "epoch": 0.3380281690140845, "grad_norm": 2.5020267963409424, "learning_rate": 9.994569481087552e-06, "loss": 0.2625, "step": 24 },
    { "epoch": 0.352112676056338, "grad_norm": 2.110041618347168, "learning_rate": 9.992180675721671e-06, "loss": 0.2667, "step": 25 },
    { "epoch": 0.36619718309859156, "grad_norm": 1.7297585010528564, "learning_rate": 9.989358032851283e-06, "loss": 0.2252, "step": 26 },
    { "epoch": 0.38028169014084506, "grad_norm": 2.069056749343872, "learning_rate": 9.986101797773667e-06, "loss": 0.314, "step": 27 },
    { "epoch": 0.39436619718309857, "grad_norm": 2.0800154209136963, "learning_rate": 9.98241225346674e-06, "loss": 0.2847, "step": 28 },
    { "epoch": 0.4084507042253521, "grad_norm": 2.1581532955169678, "learning_rate": 9.978289720564471e-06, "loss": 0.3219, "step": 29 },
    { "epoch": 0.4225352112676056, "grad_norm": 1.8568209409713745, "learning_rate": 9.97373455732901e-06, "loss": 0.2438, "step": 30 },
    { "epoch": 0.43661971830985913, "grad_norm": 1.5374771356582642, "learning_rate": 9.968747159619556e-06, "loss": 0.1542, "step": 31 },
    { "epoch": 0.4507042253521127, "grad_norm": 2.23075795173645, "learning_rate": 9.963327960857962e-06, "loss": 0.2957, "step": 32 },
    { "epoch": 0.4647887323943662, "grad_norm": 1.8911073207855225, "learning_rate": 9.957477431991053e-06, "loss": 0.2476, "step": 33 },
    { "epoch": 0.4788732394366197, "grad_norm": 1.6688034534454346, "learning_rate": 9.95119608144972e-06, "loss": 0.2525, "step": 34 },
    { "epoch": 0.49295774647887325, "grad_norm": 2.2326338291168213, "learning_rate": 9.944484455104716e-06, "loss": 0.3041, "step": 35 },
    { "epoch": 0.5070422535211268, "grad_norm": 2.0912158489227295, "learning_rate": 9.937343136219234e-06, "loss": 0.3142, "step": 36 },
    { "epoch": 0.5211267605633803, "grad_norm": 1.928329348564148, "learning_rate": 9.929772745398207e-06, "loss": 0.2444, "step": 37 },
    { "epoch": 0.5352112676056338, "grad_norm": 1.6784964799880981, "learning_rate": 9.921773940534382e-06, "loss": 0.2081, "step": 38 },
    { "epoch": 0.5492957746478874, "grad_norm": 1.3511768579483032, "learning_rate": 9.913347416751148e-06, "loss": 0.1685, "step": 39 },
    { "epoch": 0.5633802816901409, "grad_norm": 1.875441074371338, "learning_rate": 9.904493906342124e-06, "loss": 0.3032, "step": 40 },
    { "epoch": 0.5774647887323944, "grad_norm": 1.5829033851623535, "learning_rate": 9.895214178707516e-06, "loss": 0.1834, "step": 41 },
    { "epoch": 0.5915492957746479, "grad_norm": 1.9584465026855469, "learning_rate": 9.885509040287267e-06, "loss": 0.2435, "step": 42 },
    { "epoch": 0.6056338028169014, "grad_norm": 1.9996111392974854, "learning_rate": 9.875379334490962e-06, "loss": 0.299, "step": 43 },
    { "epoch": 0.6197183098591549, "grad_norm": 2.02445650100708, "learning_rate": 9.864825941624538e-06, "loss": 0.2722, "step": 44 },
    { "epoch": 0.6338028169014085, "grad_norm": 1.4250928163528442, "learning_rate": 9.853849778813777e-06, "loss": 0.179, "step": 45 },
    { "epoch": 0.647887323943662, "grad_norm": 1.2550164461135864, "learning_rate": 9.842451799924616e-06, "loss": 0.1616, "step": 46 },
    { "epoch": 0.6619718309859155, "grad_norm": 1.7907028198242188, "learning_rate": 9.830632995480243e-06, "loss": 0.2634, "step": 47 },
    { "epoch": 0.676056338028169, "grad_norm": 1.7356576919555664, "learning_rate": 9.818394392575018e-06, "loss": 0.2308, "step": 48 },
    { "epoch": 0.6901408450704225, "grad_norm": 2.0789928436279297, "learning_rate": 9.805737054785223e-06, "loss": 0.3531, "step": 49 },
    { "epoch": 0.704225352112676, "grad_norm": 1.5568856000900269, "learning_rate": 9.792662082076618e-06, "loss": 0.1905, "step": 50 },
    { "epoch": 0.7183098591549296, "grad_norm": 1.8074111938476562, "learning_rate": 9.779170610708872e-06, "loss": 0.2218, "step": 51 },
    { "epoch": 0.7323943661971831, "grad_norm": 1.9247379302978516, "learning_rate": 9.765263813136796e-06, "loss": 0.3052, "step": 52 },
    { "epoch": 0.7464788732394366, "grad_norm": 2.1542413234710693, "learning_rate": 9.750942897908468e-06, "loss": 0.3466, "step": 53 },
    { "epoch": 0.7605633802816901, "grad_norm": 1.5956591367721558, "learning_rate": 9.736209109560201e-06, "loss": 0.2695, "step": 54 },
    { "epoch": 0.7746478873239436, "grad_norm": 1.5499773025512695, "learning_rate": 9.721063728508384e-06, "loss": 0.2895, "step": 55 },
    { "epoch": 0.7887323943661971, "grad_norm": 1.5727671384811401, "learning_rate": 9.705508070938219e-06, "loss": 0.2148, "step": 56 },
    { "epoch": 0.8028169014084507, "grad_norm": 1.3888278007507324, "learning_rate": 9.689543488689332e-06, "loss": 0.1939, "step": 57 },
    { "epoch": 0.8169014084507042, "grad_norm": 1.3385534286499023, "learning_rate": 9.673171369138297e-06, "loss": 0.1833, "step": 58 },
    { "epoch": 0.8309859154929577, "grad_norm": 1.526450514793396, "learning_rate": 9.656393135078067e-06, "loss": 0.2019, "step": 59 },
    { "epoch": 0.8450704225352113, "grad_norm": 1.2741210460662842, "learning_rate": 9.639210244594335e-06, "loss": 0.182, "step": 60 },
    { "epoch": 0.8591549295774648, "grad_norm": 1.6508445739746094, "learning_rate": 9.621624190938802e-06, "loss": 0.1788, "step": 61 },
    { "epoch": 0.8732394366197183, "grad_norm": 1.8370975255966187, "learning_rate": 9.603636502399436e-06, "loss": 0.2833, "step": 62 },
    { "epoch": 0.8873239436619719, "grad_norm": 1.660441517829895, "learning_rate": 9.585248742167638e-06, "loss": 0.2616, "step": 63 },
    { "epoch": 0.9014084507042254, "grad_norm": 1.6623897552490234, "learning_rate": 9.566462508202403e-06, "loss": 0.2243, "step": 64 },
    { "epoch": 0.9154929577464789, "grad_norm": 1.598024845123291, "learning_rate": 9.547279433091446e-06, "loss": 0.2011, "step": 65 },
    { "epoch": 0.9295774647887324, "grad_norm": 1.6546164751052856, "learning_rate": 9.527701183909336e-06, "loss": 0.2153, "step": 66 },
    { "epoch": 0.9436619718309859, "grad_norm": 1.0513715744018555, "learning_rate": 9.507729462072615e-06, "loss": 0.1112, "step": 67 },
    { "epoch": 0.9577464788732394, "grad_norm": 1.4106223583221436, "learning_rate": 9.48736600319193e-06, "loss": 0.176, "step": 68 },
    { "epoch": 0.971830985915493, "grad_norm": 1.285122275352478, "learning_rate": 9.466612576921223e-06, "loss": 0.1877, "step": 69 },
    { "epoch": 0.9859154929577465, "grad_norm": 1.2862564325332642, "learning_rate": 9.445470986803922e-06, "loss": 0.1956, "step": 70 },
    { "epoch": 1.0, "grad_norm": 1.750409483909607, "learning_rate": 9.423943070116219e-06, "loss": 0.3075, "step": 71 },
    { "epoch": 1.0140845070422535, "grad_norm": 0.9604122638702393, "learning_rate": 9.402030697707398e-06, "loss": 0.1081, "step": 72 },
    { "epoch": 1.028169014084507, "grad_norm": 0.9925539493560791, "learning_rate": 9.37973577383726e-06, "loss": 0.0882, "step": 73 },
    { "epoch": 1.0422535211267605, "grad_norm": 1.1930879354476929, "learning_rate": 9.357060236010626e-06, "loss": 0.1311, "step": 74 },
    { "epoch": 1.056338028169014, "grad_norm": 1.162947177886963, "learning_rate": 9.334006054808966e-06, "loss": 0.0969, "step": 75 },
    { "epoch": 1.0704225352112675, "grad_norm": 1.1028999090194702, "learning_rate": 9.310575233719155e-06, "loss": 0.0973, "step": 76 },
    { "epoch": 1.084507042253521, "grad_norm": 0.9311047196388245, "learning_rate": 9.28676980895935e-06, "loss": 0.0578, "step": 77 },
    { "epoch": 1.0985915492957747, "grad_norm": 1.0016084909439087, "learning_rate": 9.262591849302049e-06, "loss": 0.0739, "step": 78 },
    { "epoch": 1.1126760563380282, "grad_norm": 1.4408442974090576, "learning_rate": 9.238043455894294e-06, "loss": 0.1362, "step": 79 },
    { "epoch": 1.1267605633802817, "grad_norm": 1.0656688213348389, "learning_rate": 9.213126762075088e-06, "loss": 0.0985, "step": 80 },
    { "epoch": 1.1408450704225352, "grad_norm": 1.530941128730774, "learning_rate": 9.187843933189994e-06, "loss": 0.1337, "step": 81 },
    { "epoch": 1.1549295774647887, "grad_norm": 1.6130133867263794, "learning_rate": 9.162197166402957e-06, "loss": 0.1163, "step": 82 },
    { "epoch": 1.1690140845070423, "grad_norm": 1.248587727546692, "learning_rate": 9.136188690505363e-06, "loss": 0.0919, "step": 83 },
    { "epoch": 1.1830985915492958, "grad_norm": 1.1857032775878906, "learning_rate": 9.109820765722357e-06, "loss": 0.0855, "step": 84 },
    { "epoch": 1.1971830985915493, "grad_norm": 1.2156505584716797, "learning_rate": 9.083095683516414e-06, "loss": 0.0621, "step": 85 },
    { "epoch": 1.2112676056338028, "grad_norm": 1.6800888776779175, "learning_rate": 9.056015766388205e-06, "loss": 0.1451, "step": 86 },
    { "epoch": 1.2253521126760563, "grad_norm": 1.522156000137329, "learning_rate": 9.028583367674767e-06, "loss": 0.1409, "step": 87 },
    { "epoch": 1.2394366197183098, "grad_norm": 1.3571350574493408, "learning_rate": 9.00080087134498e-06, "loss": 0.1127, "step": 88 },
    { "epoch": 1.2535211267605635, "grad_norm": 1.3612865209579468, "learning_rate": 8.972670691792409e-06, "loss": 0.1029, "step": 89 },
    { "epoch": 1.267605633802817, "grad_norm": 1.5368050336837769, "learning_rate": 8.944195273625472e-06, "loss": 0.1387, "step": 90 },
    { "epoch": 1.2816901408450705, "grad_norm": 1.335974931716919, "learning_rate": 8.915377091454992e-06, "loss": 0.1082, "step": 91 },
    { "epoch": 1.295774647887324, "grad_norm": 1.3290001153945923, "learning_rate": 8.886218649679162e-06, "loss": 0.1146, "step": 92 },
    { "epoch": 1.3098591549295775, "grad_norm": 1.3291598558425903, "learning_rate": 8.856722482265886e-06, "loss": 0.1178, "step": 93 },
    { "epoch": 1.323943661971831, "grad_norm": 1.3716644048690796, "learning_rate": 8.826891152532579e-06, "loss": 0.1047, "step": 94 },
    { "epoch": 1.3380281690140845, "grad_norm": 1.2163374423980713, "learning_rate": 8.796727252923403e-06, "loss": 0.1344, "step": 95 },
    { "epoch": 1.352112676056338, "grad_norm": 1.2695890665054321, "learning_rate": 8.766233404783975e-06, "loss": 0.1234, "step": 96 },
    { "epoch": 1.3661971830985915, "grad_norm": 1.4254324436187744, "learning_rate": 8.735412258133562e-06, "loss": 0.1125, "step": 97 },
    { "epoch": 1.380281690140845, "grad_norm": 0.8492692112922668, "learning_rate": 8.704266491434787e-06, "loss": 0.0657, "step": 98 },
    { "epoch": 1.3943661971830985, "grad_norm": 1.3621972799301147, "learning_rate": 8.672798811360863e-06, "loss": 0.1065, "step": 99 },
    { "epoch": 1.408450704225352, "grad_norm": 1.2731101512908936, "learning_rate": 8.641011952560372e-06, "loss": 0.105, "step": 100 },
    { "epoch": 1.4225352112676055, "grad_norm": 1.2345783710479736, "learning_rate": 8.608908677419606e-06, "loss": 0.1002, "step": 101 },
    { "epoch": 1.436619718309859, "grad_norm": 1.2637572288513184, "learning_rate": 8.576491775822527e-06, "loss": 0.1139, "step": 102 },
    { "epoch": 1.4507042253521127, "grad_norm": 1.0004093647003174, "learning_rate": 8.543764064908295e-06, "loss": 0.0615, "step": 103 },
    { "epoch": 1.4647887323943662, "grad_norm": 1.4274156093597412, "learning_rate": 8.510728388826464e-06, "loss": 0.1197, "step": 104 },
    { "epoch": 1.4788732394366197, "grad_norm": 1.1272127628326416, "learning_rate": 8.477387618489808e-06, "loss": 0.0831, "step": 105 },
    { "epoch": 1.4929577464788732, "grad_norm": 1.4650448560714722, "learning_rate": 8.443744651324828e-06, "loss": 0.1203, "step": 106 },
    { "epoch": 1.5070422535211268, "grad_norm": 1.2147066593170166, "learning_rate": 8.409802411019962e-06, "loss": 0.0941, "step": 107 },
    { "epoch": 1.5211267605633803, "grad_norm": 1.2476495504379272, "learning_rate": 8.375563847271506e-06, "loss": 0.0993, "step": 108 },
    { "epoch": 1.5352112676056338, "grad_norm": 1.6603260040283203, "learning_rate": 8.341031935527267e-06, "loss": 0.1399, "step": 109 },
    { "epoch": 1.5492957746478875, "grad_norm": 1.4281073808670044, "learning_rate": 8.306209676727994e-06, "loss": 0.103, "step": 110 },
    { "epoch": 1.563380281690141, "grad_norm": 1.447100043296814, "learning_rate": 8.271100097046585e-06, "loss": 0.1, "step": 111 },
    { "epoch": 1.5774647887323945, "grad_norm": 0.8041463494300842, "learning_rate": 8.235706247625098e-06, "loss": 0.0352, "step": 112 },
    { "epoch": 1.591549295774648, "grad_norm": 1.3247321844100952, "learning_rate": 8.200031204309604e-06, "loss": 0.121, "step": 113 },
    { "epoch": 1.6056338028169015, "grad_norm": 1.3480138778686523, "learning_rate": 8.16407806738288e-06, "loss": 0.0966, "step": 114 },
    { "epoch": 1.619718309859155, "grad_norm": 1.2064282894134521, "learning_rate": 8.127849961294984e-06, "loss": 0.1109, "step": 115 },
    { "epoch": 1.6338028169014085, "grad_norm": 0.6428666114807129, "learning_rate": 8.091350034391732e-06, "loss": 0.0397, "step": 116 },
    { "epoch": 1.647887323943662, "grad_norm": 1.3523837327957153, "learning_rate": 8.05458145864109e-06, "loss": 0.1158, "step": 117 },
    { "epoch": 1.6619718309859155, "grad_norm": 1.3387340307235718, "learning_rate": 8.017547429357532e-06, "loss": 0.1084, "step": 118 },
    { "epoch": 1.676056338028169, "grad_norm": 1.4241299629211426, "learning_rate": 7.980251164924342e-06, "loss": 0.1236, "step": 119 },
    { "epoch": 1.6901408450704225, "grad_norm": 1.107030987739563, "learning_rate": 7.94269590651393e-06, "loss": 0.0888, "step": 120 },
    { "epoch": 1.704225352112676, "grad_norm": 1.2900511026382446, "learning_rate": 7.904884917806174e-06, "loss": 0.1354, "step": 121 },
    { "epoch": 1.7183098591549295, "grad_norm": 1.2041940689086914, "learning_rate": 7.866821484704777e-06, "loss": 0.1315, "step": 122 },
    { "epoch": 1.732394366197183, "grad_norm": 1.159213662147522, "learning_rate": 7.828508915051724e-06, "loss": 0.0946, "step": 123 },
    { "epoch": 1.7464788732394365, "grad_norm": 1.2799243927001953, "learning_rate": 7.789950538339813e-06, "loss": 0.0961, "step": 124 },
    { "epoch": 1.76056338028169, "grad_norm": 1.131466269493103, "learning_rate": 7.751149705423313e-06, "loss": 0.0836, "step": 125 },
    { "epoch": 1.7746478873239435, "grad_norm": 1.5034639835357666, "learning_rate": 7.712109788226763e-06, "loss": 0.1151, "step": 126 },
    { "epoch": 1.788732394366197, "grad_norm": 0.8533386588096619, "learning_rate": 7.672834179451943e-06, "loss": 0.0717, "step": 127 },
    { "epoch": 1.8028169014084507, "grad_norm": 1.32114839553833, "learning_rate": 7.633326292283028e-06, "loss": 0.1369, "step": 128 },
    { "epoch": 1.8169014084507042, "grad_norm": 1.005871295928955, "learning_rate": 7.593589560089984e-06, "loss": 0.0997, "step": 129 },
    { "epoch": 1.8309859154929577, "grad_norm": 1.405820369720459, "learning_rate": 7.553627436130183e-06, "loss": 0.151, "step": 130 },
    { "epoch": 1.8450704225352113, "grad_norm": 1.3368356227874756, "learning_rate": 7.513443393248312e-06, "loss": 0.1328, "step": 131 },
    { "epoch": 1.8591549295774648, "grad_norm": 1.2265535593032837, "learning_rate": 7.473040923574567e-06, "loss": 0.0974, "step": 132 },
    { "epoch": 1.8732394366197183, "grad_norm": 0.844828188419342, "learning_rate": 7.432423538221179e-06, "loss": 0.0488, "step": 133 },
    { "epoch": 1.887323943661972, "grad_norm": 1.2954161167144775, "learning_rate": 7.391594766977277e-06, "loss": 0.1256, "step": 134 },
    { "epoch": 1.9014084507042255, "grad_norm": 1.123158574104309, "learning_rate": 7.350558158002154e-06, "loss": 0.105, "step": 135 },
    { "epoch": 1.915492957746479, "grad_norm": 0.968773603439331, "learning_rate": 7.3093172775169e-06, "loss": 0.0445, "step": 136 },
    { "epoch": 1.9295774647887325, "grad_norm": 1.4426188468933105, "learning_rate": 7.2678757094945e-06, "loss": 0.1021, "step": 137 },
    { "epoch": 1.943661971830986, "grad_norm": 1.4261107444763184, "learning_rate": 7.226237055348369e-06, "loss": 0.1145, "step": 138 },
    { "epoch": 1.9577464788732395, "grad_norm": 1.3227378129959106, "learning_rate": 7.184404933619377e-06, "loss": 0.1179, "step": 139 },
    { "epoch": 1.971830985915493, "grad_norm": 1.053186297416687, "learning_rate": 7.142382979661386e-06, "loss": 0.064, "step": 140 },
    { "epoch": 1.9859154929577465, "grad_norm": 1.126188039779663, "learning_rate": 7.100174845325327e-06, "loss": 0.1138, "step": 141 },
    { "epoch": 2.0, "grad_norm": 1.4676939249038696, "learning_rate": 7.057784198641835e-06, "loss": 0.0991, "step": 142 }
  ],
  "logging_steps": 1.0,
  "max_steps": 355,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4084084712013824e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
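A few run parameters can be read directly off this state. Loss is logged at every optimizer step (logging_steps 1.0), an epoch is 71 optimizer steps (the epoch field reaches 1.0 at step 71 and 2.0 at step 142), and training is scheduled for 5 epochs, i.e. 355 steps. The learning_rate column is consistent with linear warmup over the first 18 steps up to the 1e-5 peak named in the run directory, followed by cosine decay toward zero at step 355; that is the schedule transformers' get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=18, num_training_steps=355) would produce. The Python sketch below verifies that reading and summarizes the loss per epoch. It is a minimal sketch: the local filename and the warmup/peak constants are inferred from the log above rather than read from any config, so treat them as assumptions.

import json
import math

# Minimal sketch for inspecting this trainer_state.json offline. Assumes the
# file has been downloaded locally as "trainer_state.json" (hypothetical path);
# the schedule constants further down are inferred from the log, not a config.
with open("trainer_state.json") as f:
    state = json.load(f)

log = state["log_history"]

# Mean training loss per completed epoch (71 steps each: 142 steps / 2 epochs).
steps_per_epoch = round(state["global_step"] / state["epoch"])
for e in range(int(state["epoch"])):
    chunk = log[e * steps_per_epoch : (e + 1) * steps_per_epoch]
    mean_loss = sum(r["loss"] for r in chunk) / len(chunk)
    print(f"epoch {e + 1}: mean loss {mean_loss:.4f}")

# Check the learning-rate log against linear warmup (18 steps, inferred from
# the log) plus cosine decay to zero over max_steps = 355, at a 1e-5 peak
# (assumed from the lr1e-5 tag in the run name).
PEAK_LR, WARMUP, TOTAL = 1e-5, 18, state["max_steps"]

def expected_lr(i: int) -> float:
    # Scheduler output at 0-based optimizer-step index i.
    if i < WARMUP:
        return PEAK_LR * i / WARMUP
    progress = (i - WARMUP) / (TOTAL - WARMUP)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

for r in log:
    # The value logged at global step s is the LR used for that step, i.e. the
    # scheduler output at index s - 1: step 1 logs 0.0 (first warmup value)
    # and step 19 logs the 1e-5 peak.
    assert math.isclose(expected_lr(r["step"] - 1), r["learning_rate"], rel_tol=1e-6)

print("learning_rate column matches cosine schedule with 18 warmup steps")

Since logging_steps is 1, log_history covers every optimizer step, so the check needs no interpolation; a sparser log would only let you test the schedule at the logged steps.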