{
  "best_global_step": 3000,
  "best_metric": 3.614897001279198,
  "best_model_checkpoint": "checkpoints/gpt-2_seq1024_mla0-128-0/checkpoint-3000",
  "epoch": 0.9042272624519629,
  "eval_steps": 300,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006028181749679753,
      "grad_norm": 2.0992038249969482,
      "learning_rate": 3.166666666666667e-05,
      "loss": 10.4611,
      "step": 20
    },
    {
      "epoch": 0.012056363499359506,
      "grad_norm": 1.4955384731292725,
      "learning_rate": 6.500000000000001e-05,
      "loss": 9.0904,
      "step": 40
    },
    {
      "epoch": 0.01808454524903926,
      "grad_norm": 1.5293831825256348,
      "learning_rate": 9.833333333333333e-05,
      "loss": 8.0384,
      "step": 60
    },
    {
      "epoch": 0.02411272699871901,
      "grad_norm": 0.5085040926933289,
      "learning_rate": 0.00013166666666666665,
      "loss": 7.2875,
      "step": 80
    },
    {
      "epoch": 0.030140908748398764,
      "grad_norm": 0.6033186912536621,
      "learning_rate": 0.000165,
      "loss": 6.8708,
      "step": 100
    },
    {
      "epoch": 0.03616909049807852,
      "grad_norm": 0.6809529066085815,
      "learning_rate": 0.00019833333333333335,
      "loss": 6.5803,
      "step": 120
    },
    {
      "epoch": 0.04219727224775827,
      "grad_norm": 0.7910824418067932,
      "learning_rate": 0.00023166666666666667,
      "loss": 6.3863,
      "step": 140
    },
    {
      "epoch": 0.04822545399743802,
      "grad_norm": 1.0070773363113403,
      "learning_rate": 0.00026500000000000004,
      "loss": 6.2393,
      "step": 160
    },
    {
      "epoch": 0.05425363574711778,
      "grad_norm": 0.7891514897346497,
      "learning_rate": 0.00029833333333333334,
      "loss": 6.0613,
      "step": 180
    },
    {
      "epoch": 0.06028181749679753,
      "grad_norm": 0.8121163249015808,
      "learning_rate": 0.0003316666666666667,
      "loss": 5.912,
      "step": 200
    },
    {
      "epoch": 0.06630999924647728,
      "grad_norm": 1.0883384943008423,
      "learning_rate": 0.000365,
      "loss": 5.7569,
      "step": 220
    },
    {
      "epoch": 0.07233818099615703,
      "grad_norm": 0.8521329164505005,
      "learning_rate": 0.00039833333333333333,
      "loss": 5.6241,
      "step": 240
    },
    {
      "epoch": 0.07836636274583679,
      "grad_norm": 0.9475963115692139,
      "learning_rate": 0.0004316666666666667,
      "loss": 5.5001,
      "step": 260
    },
    {
      "epoch": 0.08439454449551655,
      "grad_norm": 0.5242730379104614,
      "learning_rate": 0.000465,
      "loss": 5.372,
      "step": 280
    },
    {
      "epoch": 0.09042272624519629,
      "grad_norm": 0.599181592464447,
      "learning_rate": 0.0004983333333333334,
      "loss": 5.2824,
      "step": 300
    },
    {
      "epoch": 0.09042272624519629,
      "eval_loss": 5.1891092537402255,
      "eval_perplexity": 179.30876316619444,
      "eval_runtime": 112.5481,
      "eval_samples_per_second": 14.74,
      "eval_steps_per_second": 0.231,
      "step": 300
    },
    {
      "epoch": 0.09645090799487605,
      "grad_norm": 0.34896278381347656,
      "learning_rate": 0.0004964814814814814,
      "loss": 5.1596,
      "step": 320
    },
    {
      "epoch": 0.1024790897445558,
      "grad_norm": 0.8048242330551147,
      "learning_rate": 0.0004927777777777777,
      "loss": 5.0746,
      "step": 340
    },
    {
      "epoch": 0.10850727149423556,
      "grad_norm": 0.3908897042274475,
      "learning_rate": 0.0004890740740740741,
      "loss": 5.0077,
      "step": 360
    },
    {
      "epoch": 0.1145354532439153,
      "grad_norm": 0.7325732111930847,
      "learning_rate": 0.0004853703703703704,
      "loss": 4.9149,
      "step": 380
    },
    {
      "epoch": 0.12056363499359506,
      "grad_norm": 0.3958197832107544,
      "learning_rate": 0.0004816666666666667,
      "loss": 4.8885,
      "step": 400
    },
    {
      "epoch": 0.1265918167432748,
      "grad_norm": 0.3405342996120453,
      "learning_rate": 0.00047796296296296297,
      "loss": 4.8009,
      "step": 420
    },
    {
      "epoch": 0.13261999849295456,
      "grad_norm": 0.6988040804862976,
      "learning_rate": 0.0004742592592592593,
      "loss": 4.7684,
      "step": 440
    },
    {
      "epoch": 0.1386481802426343,
      "grad_norm": 0.298909455537796,
      "learning_rate": 0.00047055555555555555,
      "loss": 4.7265,
      "step": 460
    },
    {
      "epoch": 0.14467636199231407,
      "grad_norm": 0.5806158185005188,
      "learning_rate": 0.00046685185185185187,
      "loss": 4.6676,
      "step": 480
    },
    {
      "epoch": 0.15070454374199382,
      "grad_norm": 0.5041508078575134,
      "learning_rate": 0.00046314814814814813,
      "loss": 4.6359,
      "step": 500
    },
    {
      "epoch": 0.15673272549167358,
      "grad_norm": 0.3164711594581604,
      "learning_rate": 0.00045944444444444445,
      "loss": 4.6046,
      "step": 520
    },
    {
      "epoch": 0.16276090724135334,
      "grad_norm": 0.7678843140602112,
      "learning_rate": 0.0004557407407407407,
      "loss": 4.5528,
      "step": 540
    },
    {
      "epoch": 0.1687890889910331,
      "grad_norm": 0.351477712392807,
      "learning_rate": 0.00045203703703703703,
      "loss": 4.5292,
      "step": 560
    },
    {
      "epoch": 0.17481727074071282,
      "grad_norm": 0.6850087642669678,
      "learning_rate": 0.0004483333333333333,
      "loss": 4.4822,
      "step": 580
    },
    {
      "epoch": 0.18084545249039258,
      "grad_norm": 0.7454901337623596,
      "learning_rate": 0.00044462962962962967,
      "loss": 4.4581,
      "step": 600
    },
    {
      "epoch": 0.18084545249039258,
      "eval_loss": 4.409127496012449,
      "eval_perplexity": 82.1977143744931,
      "eval_runtime": 112.2901,
      "eval_samples_per_second": 14.774,
      "eval_steps_per_second": 0.232,
      "step": 600
    },
    {
      "epoch": 0.18687363424007233,
      "grad_norm": 0.3134106397628784,
      "learning_rate": 0.00044092592592592594,
      "loss": 4.4146,
      "step": 620
    },
    {
      "epoch": 0.1929018159897521,
      "grad_norm": 0.5055473446846008,
      "learning_rate": 0.00043722222222222225,
      "loss": 4.3757,
      "step": 640
    },
    {
      "epoch": 0.19892999773943185,
      "grad_norm": 0.5237579941749573,
      "learning_rate": 0.0004335185185185185,
      "loss": 4.3453,
      "step": 660
    },
    {
      "epoch": 0.2049581794891116,
      "grad_norm": 0.398079514503479,
      "learning_rate": 0.00042981481481481484,
      "loss": 4.3276,
      "step": 680
    },
    {
      "epoch": 0.21098636123879136,
      "grad_norm": 0.4433188736438751,
      "learning_rate": 0.0004261111111111111,
      "loss": 4.2892,
      "step": 700
    },
    {
      "epoch": 0.21701454298847112,
      "grad_norm": 0.42719537019729614,
      "learning_rate": 0.0004224074074074074,
      "loss": 4.2615,
      "step": 720
    },
    {
      "epoch": 0.22304272473815084,
      "grad_norm": 0.4303497076034546,
      "learning_rate": 0.0004187037037037037,
      "loss": 4.233,
      "step": 740
    },
    {
      "epoch": 0.2290709064878306,
      "grad_norm": 0.5092538595199585,
      "learning_rate": 0.000415,
      "loss": 4.2113,
      "step": 760
    },
    {
      "epoch": 0.23509908823751036,
      "grad_norm": 0.6650151014328003,
      "learning_rate": 0.00041129629629629627,
      "loss": 4.204,
      "step": 780
    },
    {
      "epoch": 0.2411272699871901,
      "grad_norm": 0.43139395117759705,
      "learning_rate": 0.00040759259259259264,
      "loss": 4.1842,
      "step": 800
    },
    {
      "epoch": 0.24715545173686987,
      "grad_norm": 0.39379051327705383,
      "learning_rate": 0.0004038888888888889,
      "loss": 4.157,
      "step": 820
    },
    {
      "epoch": 0.2531836334865496,
      "grad_norm": 0.5419613718986511,
      "learning_rate": 0.0004001851851851852,
      "loss": 4.1432,
      "step": 840
    },
    {
      "epoch": 0.2592118152362294,
      "grad_norm": 0.3231978416442871,
      "learning_rate": 0.0003964814814814815,
      "loss": 4.1259,
      "step": 860
    },
    {
      "epoch": 0.2652399969859091,
      "grad_norm": 0.3947347104549408,
      "learning_rate": 0.0003927777777777778,
      "loss": 4.1225,
      "step": 880
    },
    {
      "epoch": 0.2712681787355889,
      "grad_norm": 0.4849476218223572,
      "learning_rate": 0.00038907407407407407,
      "loss": 4.0945,
      "step": 900
    },
    {
      "epoch": 0.2712681787355889,
      "eval_loss": 4.043287290524684,
      "eval_perplexity": 57.01345488110377,
      "eval_runtime": 112.1778,
      "eval_samples_per_second": 14.789,
      "eval_steps_per_second": 0.232,
      "step": 900
    },
    {
      "epoch": 0.2772963604852686,
      "grad_norm": 0.46728333830833435,
      "learning_rate": 0.0003853703703703704,
      "loss": 4.0828,
      "step": 920
    },
    {
      "epoch": 0.2833245422349484,
      "grad_norm": 0.3571609854698181,
      "learning_rate": 0.00038166666666666666,
      "loss": 4.0707,
      "step": 940
    },
    {
      "epoch": 0.28935272398462814,
      "grad_norm": 0.36861199140548706,
      "learning_rate": 0.000377962962962963,
      "loss": 4.0545,
      "step": 960
    },
    {
      "epoch": 0.29538090573430786,
      "grad_norm": 0.4048626124858856,
      "learning_rate": 0.00037425925925925924,
      "loss": 4.0493,
      "step": 980
    },
    {
      "epoch": 0.30140908748398765,
      "grad_norm": 0.41902783513069153,
      "learning_rate": 0.0003705555555555556,
      "loss": 4.0363,
      "step": 1000
    },
    {
      "epoch": 0.3074372692336674,
      "grad_norm": 0.4772008955478668,
      "learning_rate": 0.0003668518518518519,
      "loss": 4.0277,
      "step": 1020
    },
    {
      "epoch": 0.31346545098334716,
      "grad_norm": 0.4841766953468323,
      "learning_rate": 0.0003631481481481482,
      "loss": 4.0073,
      "step": 1040
    },
    {
      "epoch": 0.3194936327330269,
      "grad_norm": 0.4445819854736328,
      "learning_rate": 0.00035944444444444446,
      "loss": 4.0099,
      "step": 1060
    },
    {
      "epoch": 0.3255218144827067,
      "grad_norm": 0.31589964032173157,
      "learning_rate": 0.0003557407407407408,
      "loss": 3.9954,
      "step": 1080
    },
    {
      "epoch": 0.3315499962323864,
      "grad_norm": 0.32809144258499146,
      "learning_rate": 0.00035203703703703704,
      "loss": 3.9842,
      "step": 1100
    },
    {
      "epoch": 0.3375781779820662,
      "grad_norm": 0.5585801005363464,
      "learning_rate": 0.00034833333333333336,
      "loss": 3.9731,
      "step": 1120
    },
    {
      "epoch": 0.3436063597317459,
      "grad_norm": 0.33329740166664124,
      "learning_rate": 0.0003446296296296296,
      "loss": 3.9664,
      "step": 1140
    },
    {
      "epoch": 0.34963454148142564,
      "grad_norm": 0.39245834946632385,
      "learning_rate": 0.0003409259259259259,
      "loss": 3.9531,
      "step": 1160
    },
    {
      "epoch": 0.3556627232311054,
      "grad_norm": 0.34083107113838196,
      "learning_rate": 0.0003372222222222222,
      "loss": 3.9481,
      "step": 1180
    },
    {
      "epoch": 0.36169090498078516,
      "grad_norm": 0.363365113735199,
      "learning_rate": 0.0003335185185185185,
      "loss": 3.9454,
      "step": 1200
    },
    {
      "epoch": 0.36169090498078516,
      "eval_loss": 3.891763558483393,
      "eval_perplexity": 48.997219837139,
      "eval_runtime": 112.3031,
      "eval_samples_per_second": 14.773,
      "eval_steps_per_second": 0.232,
      "step": 1200
    },
    {
      "epoch": 0.36771908673046494,
      "grad_norm": 0.34547457098960876,
      "learning_rate": 0.00032981481481481485,
      "loss": 3.9326,
      "step": 1220
    },
    {
      "epoch": 0.37374726848014467,
      "grad_norm": 0.3942719101905823,
      "learning_rate": 0.0003261111111111111,
      "loss": 3.931,
      "step": 1240
    },
    {
      "epoch": 0.37977545022982445,
      "grad_norm": 0.41379085183143616,
      "learning_rate": 0.00032240740740740743,
      "loss": 3.9149,
      "step": 1260
    },
    {
      "epoch": 0.3858036319795042,
      "grad_norm": 0.3897305428981781,
      "learning_rate": 0.0003187037037037037,
      "loss": 3.915,
      "step": 1280
    },
    {
      "epoch": 0.3918318137291839,
      "grad_norm": 0.35454675555229187,
      "learning_rate": 0.000315,
      "loss": 3.9041,
      "step": 1300
    },
    {
      "epoch": 0.3978599954788637,
      "grad_norm": 0.40520045161247253,
      "learning_rate": 0.0003112962962962963,
      "loss": 3.8982,
      "step": 1320
    },
    {
      "epoch": 0.4038881772285434,
      "grad_norm": 0.37290430068969727,
      "learning_rate": 0.0003075925925925926,
      "loss": 3.8943,
      "step": 1340
    },
    {
      "epoch": 0.4099163589782232,
      "grad_norm": 0.36008208990097046,
      "learning_rate": 0.00030388888888888886,
      "loss": 3.8885,
      "step": 1360
    },
    {
      "epoch": 0.41594454072790293,
      "grad_norm": 0.4695078730583191,
      "learning_rate": 0.0003001851851851852,
      "loss": 3.8825,
      "step": 1380
    },
    {
      "epoch": 0.4219727224775827,
      "grad_norm": 0.3244253993034363,
      "learning_rate": 0.00029648148148148144,
      "loss": 3.8749,
      "step": 1400
    },
    {
      "epoch": 0.42800090422726245,
      "grad_norm": 0.3059616684913635,
      "learning_rate": 0.0002927777777777778,
      "loss": 3.8748,
      "step": 1420
    },
    {
      "epoch": 0.43402908597694223,
      "grad_norm": 0.421289324760437,
      "learning_rate": 0.0002890740740740741,
      "loss": 3.8617,
      "step": 1440
    },
    {
      "epoch": 0.44005726772662196,
      "grad_norm": 0.31943073868751526,
      "learning_rate": 0.0002853703703703704,
      "loss": 3.8665,
      "step": 1460
    },
    {
      "epoch": 0.4460854494763017,
      "grad_norm": 0.38908112049102783,
      "learning_rate": 0.00028166666666666666,
      "loss": 3.857,
      "step": 1480
    },
    {
      "epoch": 0.45211363122598147,
      "grad_norm": 0.3740065395832062,
      "learning_rate": 0.000277962962962963,
      "loss": 3.8522,
      "step": 1500
    },
    {
      "epoch": 0.45211363122598147,
      "eval_loss": 3.7993524888239567,
      "eval_perplexity": 44.67224934568574,
      "eval_runtime": 112.3077,
      "eval_samples_per_second": 14.772,
      "eval_steps_per_second": 0.232,
      "step": 1500
    },
    {
      "epoch": 0.4581418129756612,
      "grad_norm": 0.3724636435508728,
      "learning_rate": 0.00027425925925925925,
      "loss": 3.8458,
      "step": 1520
    },
    {
      "epoch": 0.464169994725341,
      "grad_norm": 0.34661316871643066,
      "learning_rate": 0.00027055555555555557,
      "loss": 3.844,
      "step": 1540
    },
    {
      "epoch": 0.4701981764750207,
      "grad_norm": 0.2814076244831085,
      "learning_rate": 0.00026685185185185183,
      "loss": 3.8319,
      "step": 1560
    },
    {
      "epoch": 0.4762263582247005,
      "grad_norm": 0.3638046383857727,
      "learning_rate": 0.00026314814814814815,
      "loss": 3.8301,
      "step": 1580
    },
    {
      "epoch": 0.4822545399743802,
      "grad_norm": 0.37232160568237305,
      "learning_rate": 0.0002594444444444444,
      "loss": 3.8169,
      "step": 1600
    },
    {
      "epoch": 0.48828272172405995,
      "grad_norm": 0.4013717472553253,
      "learning_rate": 0.0002557407407407408,
      "loss": 3.8263,
      "step": 1620
    },
    {
      "epoch": 0.49431090347373974,
      "grad_norm": 0.4506720304489136,
      "learning_rate": 0.00025203703703703705,
      "loss": 3.8174,
      "step": 1640
    },
    {
      "epoch": 0.5003390852234195,
      "grad_norm": 0.2753377854824066,
      "learning_rate": 0.0002483333333333333,
      "loss": 3.8134,
      "step": 1660
    },
    {
      "epoch": 0.5063672669730992,
      "grad_norm": 0.3540724813938141,
      "learning_rate": 0.00024462962962962963,
      "loss": 3.8118,
      "step": 1680
    },
    {
      "epoch": 0.512395448722779,
      "grad_norm": 0.35745769739151,
      "learning_rate": 0.00024092592592592593,
      "loss": 3.7999,
      "step": 1700
    },
    {
      "epoch": 0.5184236304724588,
      "grad_norm": 0.3902524411678314,
      "learning_rate": 0.00023722222222222222,
      "loss": 3.8004,
      "step": 1720
    },
    {
      "epoch": 0.5244518122221385,
      "grad_norm": 0.3244744539260864,
      "learning_rate": 0.0002335185185185185,
      "loss": 3.7993,
      "step": 1740
    },
    {
      "epoch": 0.5304799939718182,
      "grad_norm": 0.34227174520492554,
      "learning_rate": 0.0002298148148148148,
      "loss": 3.7969,
      "step": 1760
    },
    {
      "epoch": 0.536508175721498,
      "grad_norm": 0.349168598651886,
      "learning_rate": 0.00022611111111111112,
      "loss": 3.7867,
      "step": 1780
    },
    {
      "epoch": 0.5425363574711778,
      "grad_norm": 0.46753671765327454,
      "learning_rate": 0.0002224074074074074,
      "loss": 3.7903,
      "step": 1800
    },
    {
      "epoch": 0.5425363574711778,
      "eval_loss": 3.7372125118654314,
      "eval_perplexity": 41.980805917649974,
      "eval_runtime": 112.2567,
      "eval_samples_per_second": 14.779,
      "eval_steps_per_second": 0.232,
      "step": 1800
    },
    {
      "epoch": 0.5485645392208575,
      "grad_norm": 0.34198659658432007,
      "learning_rate": 0.0002187037037037037,
      "loss": 3.7876,
      "step": 1820
    },
    {
      "epoch": 0.5545927209705372,
      "grad_norm": 0.37303704023361206,
      "learning_rate": 0.000215,
      "loss": 3.7787,
      "step": 1840
    },
    {
      "epoch": 0.560620902720217,
      "grad_norm": 0.3005342185497284,
      "learning_rate": 0.00021129629629629629,
      "loss": 3.7718,
      "step": 1860
    },
    {
      "epoch": 0.5666490844698968,
      "grad_norm": 0.29448869824409485,
      "learning_rate": 0.0002075925925925926,
      "loss": 3.7724,
      "step": 1880
    },
    {
      "epoch": 0.5726772662195765,
      "grad_norm": 0.29300233721733093,
      "learning_rate": 0.0002038888888888889,
      "loss": 3.7652,
      "step": 1900
    },
    {
      "epoch": 0.5787054479692563,
      "grad_norm": 0.3517768681049347,
      "learning_rate": 0.0002001851851851852,
      "loss": 3.7675,
      "step": 1920
    },
    {
      "epoch": 0.584733629718936,
      "grad_norm": 0.31965792179107666,
      "learning_rate": 0.00019648148148148148,
      "loss": 3.7615,
      "step": 1940
    },
    {
      "epoch": 0.5907618114686157,
      "grad_norm": 0.29577893018722534,
      "learning_rate": 0.00019277777777777777,
      "loss": 3.7634,
      "step": 1960
    },
    {
      "epoch": 0.5967899932182955,
      "grad_norm": 0.33181506395339966,
      "learning_rate": 0.00018907407407407406,
      "loss": 3.7513,
      "step": 1980
    },
    {
      "epoch": 0.6028181749679753,
      "grad_norm": 0.3156045377254486,
      "learning_rate": 0.00018537037037037038,
      "loss": 3.7533,
      "step": 2000
    },
    {
      "epoch": 0.6088463567176551,
      "grad_norm": 0.3246684670448303,
      "learning_rate": 0.00018166666666666667,
      "loss": 3.7491,
      "step": 2020
    },
    {
      "epoch": 0.6148745384673348,
      "grad_norm": 0.2846499979496002,
      "learning_rate": 0.00017796296296296296,
      "loss": 3.7443,
      "step": 2040
    },
    {
      "epoch": 0.6209027202170145,
      "grad_norm": 0.34447523951530457,
      "learning_rate": 0.00017425925925925926,
      "loss": 3.7454,
      "step": 2060
    },
    {
      "epoch": 0.6269309019666943,
      "grad_norm": 0.3255724310874939,
      "learning_rate": 0.00017055555555555555,
      "loss": 3.7437,
      "step": 2080
    },
    {
      "epoch": 0.632959083716374,
      "grad_norm": 0.2995988130569458,
      "learning_rate": 0.00016685185185185187,
      "loss": 3.7384,
      "step": 2100
    },
    {
      "epoch": 0.632959083716374,
      "eval_loss": 3.6879229315555366,
      "eval_perplexity": 39.96175739053913,
      "eval_runtime": 112.2917,
      "eval_samples_per_second": 14.774,
      "eval_steps_per_second": 0.232,
      "step": 2100
    },
    {
      "epoch": 0.6389872654660538,
      "grad_norm": 0.2952309846878052,
      "learning_rate": 0.00016314814814814816,
      "loss": 3.7376,
      "step": 2120
    },
    {
      "epoch": 0.6450154472157336,
      "grad_norm": 0.28378739953041077,
      "learning_rate": 0.00015944444444444445,
      "loss": 3.7361,
      "step": 2140
    },
    {
      "epoch": 0.6510436289654133,
      "grad_norm": 0.3023880124092102,
      "learning_rate": 0.00015574074074074074,
      "loss": 3.7336,
      "step": 2160
    },
    {
      "epoch": 0.657071810715093,
      "grad_norm": 0.2970915138721466,
      "learning_rate": 0.00015203703703703703,
      "loss": 3.7325,
      "step": 2180
    },
    {
      "epoch": 0.6630999924647728,
      "grad_norm": 0.3934483230113983,
      "learning_rate": 0.00014833333333333335,
      "loss": 3.7294,
      "step": 2200
    },
    {
      "epoch": 0.6691281742144526,
      "grad_norm": 0.3007951080799103,
      "learning_rate": 0.00014462962962962964,
      "loss": 3.7264,
      "step": 2220
    },
    {
      "epoch": 0.6751563559641324,
      "grad_norm": 0.2756880819797516,
      "learning_rate": 0.00014092592592592594,
      "loss": 3.7251,
      "step": 2240
    },
    {
      "epoch": 0.681184537713812,
      "grad_norm": 0.264085978269577,
      "learning_rate": 0.00013722222222222223,
      "loss": 3.7122,
      "step": 2260
    },
    {
      "epoch": 0.6872127194634918,
      "grad_norm": 0.2801743745803833,
      "learning_rate": 0.00013351851851851852,
      "loss": 3.7193,
      "step": 2280
    },
    {
      "epoch": 0.6932409012131716,
      "grad_norm": 0.32809194922447205,
      "learning_rate": 0.00012981481481481484,
      "loss": 3.7126,
      "step": 2300
    },
    {
      "epoch": 0.6992690829628513,
      "grad_norm": 0.27438148856163025,
      "learning_rate": 0.00012611111111111113,
      "loss": 3.7063,
      "step": 2320
    },
    {
      "epoch": 0.7052972647125311,
      "grad_norm": 0.25681811571121216,
      "learning_rate": 0.00012240740740740742,
      "loss": 3.7085,
      "step": 2340
    },
    {
      "epoch": 0.7113254464622109,
      "grad_norm": 0.2568652331829071,
      "learning_rate": 0.0001187037037037037,
      "loss": 3.7076,
      "step": 2360
    },
    {
      "epoch": 0.7173536282118906,
      "grad_norm": 0.2873578369617462,
      "learning_rate": 0.000115,
      "loss": 3.6987,
      "step": 2380
    },
    {
      "epoch": 0.7233818099615703,
      "grad_norm": 0.26023069024086,
      "learning_rate": 0.0001112962962962963,
      "loss": 3.707,
      "step": 2400
    },
    {
      "epoch": 0.7233818099615703,
      "eval_loss": 3.653235740343999,
      "eval_perplexity": 38.599361710783484,
      "eval_runtime": 112.4066,
      "eval_samples_per_second": 14.759,
      "eval_steps_per_second": 0.231,
      "step": 2400
    },
    {
      "epoch": 0.7294099917112501,
      "grad_norm": 0.2499583661556244,
      "learning_rate": 0.00010759259259259259,
      "loss": 3.7033,
      "step": 2420
    },
    {
      "epoch": 0.7354381734609299,
      "grad_norm": 0.2881416380405426,
      "learning_rate": 0.00010388888888888889,
      "loss": 3.7016,
      "step": 2440
    },
    {
      "epoch": 0.7414663552106096,
      "grad_norm": 0.2542083263397217,
      "learning_rate": 0.00010018518518518518,
      "loss": 3.7015,
      "step": 2460
    },
    {
      "epoch": 0.7474945369602893,
      "grad_norm": 0.2287365347146988,
      "learning_rate": 9.648148148148149e-05,
      "loss": 3.6942,
      "step": 2480
    },
    {
      "epoch": 0.7535227187099691,
      "grad_norm": 0.24961161613464355,
      "learning_rate": 9.277777777777778e-05,
      "loss": 3.6952,
      "step": 2500
    },
    {
      "epoch": 0.7595509004596489,
      "grad_norm": 0.26621654629707336,
      "learning_rate": 8.907407407407407e-05,
      "loss": 3.6942,
      "step": 2520
    },
    {
      "epoch": 0.7655790822093286,
      "grad_norm": 0.27608931064605713,
      "learning_rate": 8.537037037037038e-05,
      "loss": 3.6937,
      "step": 2540
    },
    {
      "epoch": 0.7716072639590084,
      "grad_norm": 0.23405340313911438,
      "learning_rate": 8.166666666666667e-05,
      "loss": 3.6889,
      "step": 2560
    },
    {
      "epoch": 0.7776354457086881,
      "grad_norm": 0.22344228625297546,
      "learning_rate": 7.796296296296296e-05,
      "loss": 3.6875,
      "step": 2580
    },
    {
      "epoch": 0.7836636274583678,
      "grad_norm": 0.25090643763542175,
      "learning_rate": 7.425925925925927e-05,
      "loss": 3.6819,
      "step": 2600
    },
    {
      "epoch": 0.7896918092080476,
      "grad_norm": 0.24225394427776337,
      "learning_rate": 7.055555555555556e-05,
      "loss": 3.6878,
      "step": 2620
    },
    {
      "epoch": 0.7957199909577274,
      "grad_norm": 0.25418150424957275,
      "learning_rate": 6.685185185185186e-05,
      "loss": 3.6849,
      "step": 2640
    },
    {
      "epoch": 0.8017481727074072,
      "grad_norm": 0.2390448898077011,
      "learning_rate": 6.314814814814815e-05,
      "loss": 3.6844,
      "step": 2660
    },
    {
      "epoch": 0.8077763544570868,
      "grad_norm": 0.2440630942583084,
      "learning_rate": 5.9444444444444445e-05,
      "loss": 3.6768,
      "step": 2680
    },
    {
      "epoch": 0.8138045362067666,
      "grad_norm": 0.22975711524486542,
      "learning_rate": 5.5740740740740744e-05,
      "loss": 3.6796,
      "step": 2700
    },
    {
      "epoch": 0.8138045362067666,
      "eval_loss": 3.628363446560925,
      "eval_perplexity": 37.6511480531639,
      "eval_runtime": 112.169,
      "eval_samples_per_second": 14.79,
      "eval_steps_per_second": 0.232,
      "step": 2700
    },
    {
      "epoch": 0.8198327179564464,
      "grad_norm": 0.21335391700267792,
      "learning_rate": 5.2037037037037035e-05,
      "loss": 3.6776,
      "step": 2720
    },
    {
      "epoch": 0.8258608997061262,
      "grad_norm": 0.2257867455482483,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 3.6801,
      "step": 2740
    },
    {
      "epoch": 0.8318890814558059,
      "grad_norm": 0.2070840299129486,
      "learning_rate": 4.462962962962963e-05,
      "loss": 3.6754,
      "step": 2760
    },
    {
      "epoch": 0.8379172632054857,
      "grad_norm": 0.20770543813705444,
      "learning_rate": 4.092592592592593e-05,
      "loss": 3.6732,
      "step": 2780
    },
    {
      "epoch": 0.8439454449551654,
      "grad_norm": 0.23169459402561188,
      "learning_rate": 3.722222222222222e-05,
      "loss": 3.6754,
      "step": 2800
    },
    {
      "epoch": 0.8499736267048451,
      "grad_norm": 0.1974688619375229,
      "learning_rate": 3.351851851851852e-05,
      "loss": 3.6777,
      "step": 2820
    },
    {
      "epoch": 0.8560018084545249,
      "grad_norm": 0.2034008651971817,
      "learning_rate": 2.9814814814814815e-05,
      "loss": 3.67,
      "step": 2840
    },
    {
      "epoch": 0.8620299902042047,
      "grad_norm": 0.2013922780752182,
      "learning_rate": 2.6111111111111114e-05,
      "loss": 3.6671,
      "step": 2860
    },
    {
      "epoch": 0.8680581719538845,
      "grad_norm": 0.2004835456609726,
      "learning_rate": 2.240740740740741e-05,
      "loss": 3.6717,
      "step": 2880
    },
    {
      "epoch": 0.8740863537035641,
      "grad_norm": 0.19306205213069916,
      "learning_rate": 1.8703703703703707e-05,
      "loss": 3.6664,
      "step": 2900
    },
    {
      "epoch": 0.8801145354532439,
      "grad_norm": 0.19328537583351135,
      "learning_rate": 1.5e-05,
      "loss": 3.6711,
      "step": 2920
    },
    {
      "epoch": 0.8861427172029237,
      "grad_norm": 0.18284295499324799,
      "learning_rate": 1.1296296296296295e-05,
      "loss": 3.6616,
      "step": 2940
    },
    {
      "epoch": 0.8921708989526034,
      "grad_norm": 0.18400640785694122,
      "learning_rate": 7.592592592592593e-06,
      "loss": 3.658,
      "step": 2960
    },
    {
      "epoch": 0.8981990807022832,
      "grad_norm": 0.1778123676776886,
      "learning_rate": 3.888888888888889e-06,
      "loss": 3.6655,
      "step": 2980
    },
    {
      "epoch": 0.9042272624519629,
      "grad_norm": 0.17845961451530457,
      "learning_rate": 1.8518518518518518e-07,
      "loss": 3.6693,
      "step": 3000
    },
    {
      "epoch": 0.9042272624519629,
      "eval_loss": 3.614897001279198,
      "eval_perplexity": 37.14751958159777,
      "eval_runtime": 112.2672,
      "eval_samples_per_second": 14.777,
      "eval_steps_per_second": 0.232,
      "step": 3000
    }
  ],
  "logging_steps": 20,
  "max_steps": 3000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.404194997141504e+18,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}