{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4,
  "eval_steps": 2000,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 39.588829040527344,
      "learning_rate": 9.499999999999999e-07,
      "loss": 1.2125,
      "step": 100
    },
    {
      "epoch": 0.01,
      "grad_norm": 24.040372848510742,
      "learning_rate": 9.904040404040403e-07,
      "loss": 1.0367,
      "step": 200
    },
    {
      "epoch": 0.01,
      "grad_norm": 23.623809814453125,
      "learning_rate": 9.803030303030302e-07,
      "loss": 0.9513,
      "step": 300
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.24086859822273254,
      "learning_rate": 9.7020202020202e-07,
      "loss": 0.8004,
      "step": 400
    },
    {
      "epoch": 0.03,
      "grad_norm": 22.336132049560547,
      "learning_rate": 9.601010101010101e-07,
      "loss": 0.8263,
      "step": 500
    },
    {
      "epoch": 0.03,
      "grad_norm": 42.77682876586914,
      "learning_rate": 9.499999999999999e-07,
      "loss": 0.769,
      "step": 600
    },
    {
      "epoch": 0.04,
      "grad_norm": 16.354448318481445,
      "learning_rate": 9.398989898989899e-07,
      "loss": 0.7974,
      "step": 700
    },
    {
      "epoch": 0.04,
      "grad_norm": 40.2994270324707,
      "learning_rate": 9.297979797979798e-07,
      "loss": 0.8112,
      "step": 800
    },
    {
      "epoch": 0.04,
      "grad_norm": 15.487586975097656,
      "learning_rate": 9.196969696969696e-07,
      "loss": 0.7657,
      "step": 900
    },
    {
      "epoch": 0.05,
      "grad_norm": 20.020811080932617,
      "learning_rate": 9.095959595959596e-07,
      "loss": 0.7554,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.6317849159240723,
      "learning_rate": 8.995959595959596e-07,
      "loss": 0.7687,
      "step": 1100
    },
    {
      "epoch": 0.06,
      "grad_norm": 8.649179458618164,
      "learning_rate": 8.894949494949494e-07,
      "loss": 0.7567,
      "step": 1200
    },
    {
      "epoch": 0.07,
      "grad_norm": 27.93072509765625,
      "learning_rate": 8.793939393939394e-07,
      "loss": 0.7014,
      "step": 1300
    },
    {
      "epoch": 0.07,
      "grad_norm": 34.16617202758789,
      "learning_rate": 8.692929292929293e-07,
      "loss": 0.6642,
      "step": 1400
    },
    {
      "epoch": 0.07,
      "grad_norm": 61.1443977355957,
      "learning_rate": 8.591919191919191e-07,
      "loss": 0.7243,
      "step": 1500
    },
    {
      "epoch": 0.08,
      "grad_norm": 69.2451400756836,
      "learning_rate": 8.490909090909091e-07,
      "loss": 0.7365,
      "step": 1600
    },
    {
      "epoch": 0.09,
      "grad_norm": 30.576330184936523,
      "learning_rate": 8.38989898989899e-07,
      "loss": 0.657,
      "step": 1700
    },
    {
      "epoch": 0.09,
      "grad_norm": 15.208847045898438,
      "learning_rate": 8.288888888888888e-07,
      "loss": 0.7884,
      "step": 1800
    },
    {
      "epoch": 0.1,
      "grad_norm": 39.78324508666992,
      "learning_rate": 8.187878787878788e-07,
      "loss": 0.7657,
      "step": 1900
    },
    {
      "epoch": 0.1,
      "grad_norm": 43.55222702026367,
      "learning_rate": 8.086868686868686e-07,
      "loss": 0.6678,
      "step": 2000
    },
    {
      "epoch": 0.1,
      "eval_loss": 0.751860499382019,
      "eval_runtime": 241.5179,
      "eval_samples_per_second": 4.14,
      "eval_steps_per_second": 1.035,
      "step": 2000
    },
    {
      "epoch": 0.1,
      "grad_norm": 26.86771011352539,
      "learning_rate": 7.985858585858586e-07,
      "loss": 0.6361,
      "step": 2100
    },
    {
      "epoch": 0.11,
      "grad_norm": 25.721782684326172,
      "learning_rate": 7.884848484848484e-07,
      "loss": 0.8334,
      "step": 2200
    },
    {
      "epoch": 0.12,
      "grad_norm": 10.995915412902832,
      "learning_rate": 7.783838383838383e-07,
      "loss": 0.7052,
      "step": 2300
    },
    {
      "epoch": 0.12,
      "grad_norm": 33.5030403137207,
      "learning_rate": 7.682828282828282e-07,
      "loss": 0.6972,
      "step": 2400
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.3341565132141113,
      "learning_rate": 7.581818181818182e-07,
      "loss": 0.7283,
      "step": 2500
    },
    {
      "epoch": 0.13,
      "grad_norm": 10.039573669433594,
      "learning_rate": 7.480808080808081e-07,
      "loss": 0.725,
      "step": 2600
    },
    {
      "epoch": 0.14,
      "grad_norm": 41.933143615722656,
      "learning_rate": 7.379797979797979e-07,
      "loss": 0.6081,
      "step": 2700
    },
    {
      "epoch": 0.14,
      "grad_norm": 32.512107849121094,
      "learning_rate": 7.278787878787879e-07,
      "loss": 0.7128,
      "step": 2800
    },
    {
      "epoch": 0.14,
      "grad_norm": 28.775693893432617,
      "learning_rate": 7.177777777777777e-07,
      "loss": 0.6757,
      "step": 2900
    },
    {
      "epoch": 0.15,
      "grad_norm": 47.666160583496094,
      "learning_rate": 7.076767676767677e-07,
      "loss": 0.587,
      "step": 3000
    },
    {
      "epoch": 0.15,
      "grad_norm": 23.707677841186523,
      "learning_rate": 6.975757575757576e-07,
      "loss": 0.7217,
      "step": 3100
    },
    {
      "epoch": 0.16,
      "grad_norm": 38.1664924621582,
      "learning_rate": 6.874747474747474e-07,
      "loss": 0.7153,
      "step": 3200
    },
    {
      "epoch": 0.17,
      "grad_norm": 43.21916198730469,
      "learning_rate": 6.773737373737374e-07,
      "loss": 0.7504,
      "step": 3300
    },
    {
      "epoch": 0.17,
      "grad_norm": 28.080886840820312,
      "learning_rate": 6.674747474747474e-07,
      "loss": 0.6828,
      "step": 3400
    },
    {
      "epoch": 0.17,
      "grad_norm": 24.497976303100586,
      "learning_rate": 6.573737373737374e-07,
      "loss": 0.6419,
      "step": 3500
    },
    {
      "epoch": 0.18,
      "grad_norm": 71.72598266601562,
      "learning_rate": 6.472727272727272e-07,
      "loss": 0.6036,
      "step": 3600
    },
    {
      "epoch": 0.18,
      "grad_norm": 30.938745498657227,
      "learning_rate": 6.371717171717171e-07,
      "loss": 0.705,
      "step": 3700
    },
    {
      "epoch": 0.19,
      "grad_norm": 27.01025390625,
      "learning_rate": 6.270707070707071e-07,
      "loss": 0.5679,
      "step": 3800
    },
    {
      "epoch": 0.2,
      "grad_norm": 24.72421646118164,
      "learning_rate": 6.169696969696969e-07,
      "loss": 0.6707,
      "step": 3900
    },
    {
      "epoch": 0.2,
      "grad_norm": 17.81386375427246,
      "learning_rate": 6.068686868686869e-07,
      "loss": 0.653,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "eval_loss": 0.6371002197265625,
      "eval_runtime": 241.2671,
      "eval_samples_per_second": 4.145,
      "eval_steps_per_second": 1.036,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "grad_norm": 22.987863540649414,
      "learning_rate": 5.967676767676767e-07,
      "loss": 0.6111,
      "step": 4100
    },
    {
      "epoch": 0.21,
      "grad_norm": 28.384014129638672,
      "learning_rate": 5.866666666666666e-07,
      "loss": 0.7237,
      "step": 4200
    },
    {
      "epoch": 0.21,
      "grad_norm": 9.547513961791992,
      "learning_rate": 5.765656565656566e-07,
      "loss": 0.6313,
      "step": 4300
    },
    {
      "epoch": 0.22,
      "grad_norm": 35.17210388183594,
      "learning_rate": 5.664646464646465e-07,
      "loss": 0.6212,
      "step": 4400
    },
    {
      "epoch": 0.23,
      "grad_norm": 27.639326095581055,
      "learning_rate": 5.563636363636363e-07,
      "loss": 0.6434,
      "step": 4500
    },
    {
      "epoch": 0.23,
      "grad_norm": 47.38414764404297,
      "learning_rate": 5.462626262626262e-07,
      "loss": 0.6834,
      "step": 4600
    },
    {
      "epoch": 0.23,
      "grad_norm": 24.167831420898438,
      "learning_rate": 5.361616161616162e-07,
      "loss": 0.6669,
      "step": 4700
    },
    {
      "epoch": 0.24,
      "grad_norm": 32.78892517089844,
      "learning_rate": 5.26060606060606e-07,
      "loss": 0.6857,
      "step": 4800
    },
    {
      "epoch": 0.24,
      "grad_norm": 14.563433647155762,
      "learning_rate": 5.159595959595959e-07,
      "loss": 0.6279,
      "step": 4900
    },
    {
      "epoch": 0.25,
      "grad_norm": 24.523794174194336,
      "learning_rate": 5.058585858585858e-07,
      "loss": 0.5623,
      "step": 5000
    },
    {
      "epoch": 0.26,
      "grad_norm": 54.16361618041992,
      "learning_rate": 4.957575757575757e-07,
      "loss": 0.6236,
      "step": 5100
    },
    {
      "epoch": 0.26,
      "grad_norm": 39.292449951171875,
      "learning_rate": 4.856565656565657e-07,
      "loss": 0.695,
      "step": 5200
    },
    {
      "epoch": 0.27,
      "grad_norm": 27.683238983154297,
      "learning_rate": 4.7555555555555554e-07,
      "loss": 0.6286,
      "step": 5300
    },
    {
      "epoch": 0.27,
      "grad_norm": 42.612430572509766,
      "learning_rate": 4.6545454545454546e-07,
      "loss": 0.5533,
      "step": 5400
    },
    {
      "epoch": 0.28,
      "grad_norm": 12.680336952209473,
      "learning_rate": 4.553535353535353e-07,
      "loss": 0.6878,
      "step": 5500
    },
    {
      "epoch": 0.28,
      "grad_norm": 29.51133918762207,
      "learning_rate": 4.4525252525252524e-07,
      "loss": 0.6772,
      "step": 5600
    },
    {
      "epoch": 0.28,
      "grad_norm": 60.08015823364258,
      "learning_rate": 4.3515151515151515e-07,
      "loss": 0.5839,
      "step": 5700
    },
    {
      "epoch": 0.29,
      "grad_norm": 32.535728454589844,
      "learning_rate": 4.2505050505050507e-07,
      "loss": 0.7245,
      "step": 5800
    },
    {
      "epoch": 0.29,
      "grad_norm": 27.34471893310547,
      "learning_rate": 4.1494949494949493e-07,
      "loss": 0.6402,
      "step": 5900
    },
    {
      "epoch": 0.3,
      "grad_norm": 5.2399139404296875,
      "learning_rate": 4.0484848484848485e-07,
      "loss": 0.5824,
      "step": 6000
    },
    {
      "epoch": 0.3,
      "eval_loss": 0.6514798402786255,
      "eval_runtime": 241.275,
      "eval_samples_per_second": 4.145,
      "eval_steps_per_second": 1.036,
      "step": 6000
    },
    {
      "epoch": 0.3,
      "grad_norm": 54.833457946777344,
      "learning_rate": 3.947474747474747e-07,
      "loss": 0.6579,
      "step": 6100
    },
    {
      "epoch": 0.31,
      "grad_norm": 29.26833152770996,
      "learning_rate": 3.8464646464646463e-07,
      "loss": 0.5482,
      "step": 6200
    },
    {
      "epoch": 0.32,
      "grad_norm": 20.299970626831055,
      "learning_rate": 3.7454545454545454e-07,
      "loss": 0.5854,
      "step": 6300
    },
    {
      "epoch": 0.32,
      "grad_norm": 6.119832515716553,
      "learning_rate": 3.6444444444444446e-07,
      "loss": 0.613,
      "step": 6400
    },
    {
      "epoch": 0.33,
      "grad_norm": 14.71923828125,
      "learning_rate": 3.543434343434343e-07,
      "loss": 0.7214,
      "step": 6500
    },
    {
      "epoch": 0.33,
      "grad_norm": 3.2650656700134277,
      "learning_rate": 3.4424242424242424e-07,
      "loss": 0.5775,
      "step": 6600
    },
    {
      "epoch": 0.34,
      "grad_norm": 16.577056884765625,
      "learning_rate": 3.341414141414141e-07,
      "loss": 0.5611,
      "step": 6700
    },
    {
      "epoch": 0.34,
      "grad_norm": 28.803749084472656,
      "learning_rate": 3.24040404040404e-07,
      "loss": 0.6372,
      "step": 6800
    },
    {
      "epoch": 0.34,
      "grad_norm": 16.91265869140625,
      "learning_rate": 3.1393939393939394e-07,
      "loss": 0.6302,
      "step": 6900
    },
    {
      "epoch": 0.35,
      "grad_norm": 21.173931121826172,
      "learning_rate": 3.0383838383838385e-07,
      "loss": 0.6071,
      "step": 7000
    },
    {
      "epoch": 0.35,
      "grad_norm": 44.6800537109375,
      "learning_rate": 2.937373737373737e-07,
      "loss": 0.588,
      "step": 7100
    },
    {
      "epoch": 0.36,
      "grad_norm": 25.73281478881836,
      "learning_rate": 2.8363636363636363e-07,
      "loss": 0.6173,
      "step": 7200
    },
    {
      "epoch": 0.36,
      "grad_norm": 92.61457061767578,
      "learning_rate": 2.735353535353535e-07,
      "loss": 0.6438,
      "step": 7300
    },
    {
      "epoch": 0.37,
      "grad_norm": 47.508995056152344,
      "learning_rate": 2.634343434343434e-07,
      "loss": 0.5693,
      "step": 7400
    },
    {
      "epoch": 0.38,
      "grad_norm": 49.36333084106445,
      "learning_rate": 2.533333333333333e-07,
      "loss": 0.6199,
      "step": 7500
    },
    {
      "epoch": 0.38,
      "grad_norm": 4.993002891540527,
      "learning_rate": 2.4323232323232324e-07,
      "loss": 0.4921,
      "step": 7600
    },
    {
      "epoch": 0.39,
      "grad_norm": 49.10454177856445,
      "learning_rate": 2.3313131313131313e-07,
      "loss": 0.6299,
      "step": 7700
    },
    {
      "epoch": 0.39,
      "grad_norm": 23.325441360473633,
      "learning_rate": 2.2303030303030302e-07,
      "loss": 0.6931,
      "step": 7800
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.007521418854594231,
      "learning_rate": 2.1303030303030304e-07,
      "loss": 0.5363,
      "step": 7900
    },
    {
      "epoch": 0.4,
      "grad_norm": 27.83951187133789,
      "learning_rate": 2.0292929292929293e-07,
      "loss": 0.5613,
      "step": 8000
    },
    {
      "epoch": 0.4,
      "eval_loss": 0.600296139717102,
      "eval_runtime": 241.351,
      "eval_samples_per_second": 4.143,
      "eval_steps_per_second": 1.036,
      "step": 8000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 2000,
  "total_flos": 7.54256789372928e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}