{
  "best_metric": 0.8991448283195496,
  "best_model_checkpoint": "../../output/chatglm3-6b/LangGPT/checkpoint-700",
  "epoch": 9.0,
  "eval_steps": 100,
  "global_step": 720,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.125,
      "grad_norm": 0.40453147888183594,
      "learning_rate": 2.5e-05,
      "loss": 1.6827,
      "step": 10
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.5551838874816895,
      "learning_rate": 5e-05,
      "loss": 1.6309,
      "step": 20
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.7859359383583069,
      "learning_rate": 4.997482666353287e-05,
      "loss": 1.5415,
      "step": 30
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.597720742225647,
      "learning_rate": 4.989935734988098e-05,
      "loss": 1.393,
      "step": 40
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.4020984172821045,
      "learning_rate": 4.977374404419837e-05,
      "loss": 1.2563,
      "step": 50
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.35916563868522644,
      "learning_rate": 4.959823971496574e-05,
      "loss": 1.1963,
      "step": 60
    },
    {
      "epoch": 0.875,
      "grad_norm": 0.3013848066329956,
      "learning_rate": 4.937319780454559e-05,
      "loss": 1.1385,
      "step": 70
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.23849129676818848,
      "learning_rate": 4.909907151739633e-05,
      "loss": 1.1085,
      "step": 80
    },
    {
      "epoch": 1.125,
      "grad_norm": 0.22885890305042267,
      "learning_rate": 4.877641290737884e-05,
      "loss": 1.1025,
      "step": 90
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.20683708786964417,
      "learning_rate": 4.8405871765993433e-05,
      "loss": 1.0558,
      "step": 100
    },
    {
      "epoch": 1.25,
      "eval_loss": 1.0500283241271973,
      "eval_runtime": 353.0769,
      "eval_samples_per_second": 2.419,
      "eval_steps_per_second": 0.606,
      "step": 100
    },
    {
      "epoch": 1.375,
      "grad_norm": 0.20663395524024963,
      "learning_rate": 4.7988194313786275e-05,
      "loss": 1.0258,
      "step": 110
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.18335361778736115,
      "learning_rate": 4.752422169756048e-05,
      "loss": 1.0261,
      "step": 120
    },
    {
      "epoch": 1.625,
      "grad_norm": 0.18184833228588104,
      "learning_rate": 4.701488829641845e-05,
      "loss": 0.9923,
      "step": 130
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.19089923799037933,
      "learning_rate": 4.6461219840046654e-05,
      "loss": 0.9835,
      "step": 140
    },
    {
      "epoch": 1.875,
      "grad_norm": 0.17791251838207245,
      "learning_rate": 4.586433134303257e-05,
      "loss": 1.0039,
      "step": 150
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.18376672267913818,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.9947,
      "step": 160
    },
    {
      "epoch": 2.125,
      "grad_norm": 0.20052292943000793,
      "learning_rate": 4.454578706170075e-05,
      "loss": 0.9821,
      "step": 170
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.19209513068199158,
      "learning_rate": 4.382678665009028e-05,
      "loss": 0.9535,
      "step": 180
    },
    {
      "epoch": 2.375,
      "grad_norm": 0.19733993709087372,
      "learning_rate": 4.306987159568479e-05,
      "loss": 0.9514,
      "step": 190
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.18989509344100952,
      "learning_rate": 4.227656622467162e-05,
      "loss": 0.9566,
      "step": 200
    },
    {
      "epoch": 2.5,
      "eval_loss": 0.9630343914031982,
      "eval_runtime": 353.288,
      "eval_samples_per_second": 2.417,
      "eval_steps_per_second": 0.606,
      "step": 200
    },
    {
      "epoch": 2.625,
      "grad_norm": 0.19188831746578217,
      "learning_rate": 4.144846814849282e-05,
      "loss": 0.9655,
      "step": 210
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.2034657597541809,
      "learning_rate": 4.058724504646834e-05,
      "loss": 0.9537,
      "step": 220
    },
    {
      "epoch": 2.875,
      "grad_norm": 0.20900140702724457,
      "learning_rate": 3.969463130731183e-05,
      "loss": 0.951,
      "step": 230
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.231728196144104,
      "learning_rate": 3.8772424536302564e-05,
      "loss": 0.938,
      "step": 240
    },
    {
      "epoch": 3.125,
      "grad_norm": 0.21837086975574493,
      "learning_rate": 3.782248193514766e-05,
      "loss": 0.955,
      "step": 250
    },
    {
      "epoch": 3.25,
      "grad_norm": 0.2057914286851883,
      "learning_rate": 3.6846716561824965e-05,
      "loss": 0.9319,
      "step": 260
    },
    {
      "epoch": 3.375,
      "grad_norm": 0.22230790555477142,
      "learning_rate": 3.5847093477938956e-05,
      "loss": 0.9385,
      "step": 270
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.24387766420841217,
      "learning_rate": 3.4825625791348096e-05,
      "loss": 0.911,
      "step": 280
    },
    {
      "epoch": 3.625,
      "grad_norm": 0.2634485065937042,
      "learning_rate": 3.378437060203357e-05,
      "loss": 0.9366,
      "step": 290
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.22965680062770844,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.9082,
      "step": 300
    },
    {
      "epoch": 3.75,
      "eval_loss": 0.928753137588501,
      "eval_runtime": 353.3512,
      "eval_samples_per_second": 2.417,
      "eval_steps_per_second": 0.606,
      "step": 300
    },
    {
      "epoch": 3.875,
      "grad_norm": 0.21778391301631927,
      "learning_rate": 3.165092113916688e-05,
      "loss": 0.9158,
      "step": 310
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.24541890621185303,
      "learning_rate": 3.056302334890786e-05,
      "loss": 0.9027,
      "step": 320
    },
    {
      "epoch": 4.125,
      "grad_norm": 0.25015348196029663,
      "learning_rate": 2.9463922369965917e-05,
      "loss": 0.9336,
      "step": 330
    },
    {
      "epoch": 4.25,
      "grad_norm": 0.22015893459320068,
      "learning_rate": 2.8355831645441388e-05,
      "loss": 0.9161,
      "step": 340
    },
    {
      "epoch": 4.375,
      "grad_norm": 0.2516670823097229,
      "learning_rate": 2.724098272258584e-05,
      "loss": 0.8966,
      "step": 350
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.2541712820529938,
      "learning_rate": 2.6121620758762877e-05,
      "loss": 0.8954,
      "step": 360
    },
    {
      "epoch": 4.625,
      "grad_norm": 0.25608915090560913,
      "learning_rate": 2.5e-05,
      "loss": 0.8815,
      "step": 370
    },
    {
      "epoch": 4.75,
      "grad_norm": 0.24169643223285675,
      "learning_rate": 2.3878379241237136e-05,
      "loss": 0.89,
      "step": 380
    },
    {
      "epoch": 4.875,
      "grad_norm": 0.2623349130153656,
      "learning_rate": 2.2759017277414166e-05,
      "loss": 0.9196,
      "step": 390
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.29517388343811035,
      "learning_rate": 2.164416835455862e-05,
      "loss": 0.8992,
      "step": 400
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.9107962846755981,
      "eval_runtime": 353.3297,
      "eval_samples_per_second": 2.417,
      "eval_steps_per_second": 0.606,
      "step": 400
    },
    {
      "epoch": 5.125,
      "grad_norm": 0.2589443027973175,
      "learning_rate": 2.0536077630034086e-05,
      "loss": 0.888,
      "step": 410
    },
    {
      "epoch": 5.25,
      "grad_norm": 0.24191297590732574,
      "learning_rate": 1.9436976651092144e-05,
      "loss": 0.8901,
      "step": 420
    },
    {
      "epoch": 5.375,
      "grad_norm": 0.27726104855537415,
      "learning_rate": 1.8349078860833123e-05,
      "loss": 0.9147,
      "step": 430
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.23908096551895142,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 0.8925,
      "step": 440
    },
    {
      "epoch": 5.625,
      "grad_norm": 0.30176234245300293,
      "learning_rate": 1.621562939796643e-05,
      "loss": 0.9012,
      "step": 450
    },
    {
      "epoch": 5.75,
      "grad_norm": 0.23645330965518951,
      "learning_rate": 1.5174374208651912e-05,
      "loss": 0.8808,
      "step": 460
    },
    {
      "epoch": 5.875,
      "grad_norm": 0.2720588147640228,
      "learning_rate": 1.4152906522061048e-05,
      "loss": 0.8816,
      "step": 470
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.2631034553050995,
      "learning_rate": 1.3153283438175034e-05,
      "loss": 0.8941,
      "step": 480
    },
    {
      "epoch": 6.125,
      "grad_norm": 0.2486189901828766,
      "learning_rate": 1.217751806485235e-05,
      "loss": 0.9048,
      "step": 490
    },
    {
      "epoch": 6.25,
      "grad_norm": 0.2926970422267914,
      "learning_rate": 1.122757546369744e-05,
      "loss": 0.8874,
      "step": 500
    },
    {
      "epoch": 6.25,
      "eval_loss": 0.9028034806251526,
      "eval_runtime": 353.2345,
      "eval_samples_per_second": 2.418,
      "eval_steps_per_second": 0.606,
      "step": 500
    },
    {
      "epoch": 6.375,
      "grad_norm": 0.25221139192581177,
      "learning_rate": 1.0305368692688174e-05,
      "loss": 0.8738,
      "step": 510
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.2523793578147888,
      "learning_rate": 9.412754953531663e-06,
      "loss": 0.8951,
      "step": 520
    },
    {
      "epoch": 6.625,
      "grad_norm": 0.2493809163570404,
      "learning_rate": 8.551531851507186e-06,
      "loss": 0.8914,
      "step": 530
    },
    {
      "epoch": 6.75,
      "grad_norm": 0.2688143253326416,
      "learning_rate": 7.723433775328384e-06,
      "loss": 0.8818,
      "step": 540
    },
    {
      "epoch": 6.875,
      "grad_norm": 0.2695543169975281,
      "learning_rate": 6.930128404315214e-06,
      "loss": 0.8794,
      "step": 550
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.27596864104270935,
      "learning_rate": 6.173213349909729e-06,
      "loss": 0.8814,
      "step": 560
    },
    {
      "epoch": 7.125,
      "grad_norm": 0.27881208062171936,
      "learning_rate": 5.454212938299255e-06,
      "loss": 0.8909,
      "step": 570
    },
    {
      "epoch": 7.25,
      "grad_norm": 0.2895490825176239,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 0.8737,
      "step": 580
    },
    {
      "epoch": 7.375,
      "grad_norm": 0.25476014614105225,
      "learning_rate": 4.135668656967434e-06,
      "loss": 0.8937,
      "step": 590
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.2785739600658417,
      "learning_rate": 3.5387801599533475e-06,
      "loss": 0.8835,
      "step": 600
    },
    {
      "epoch": 7.5,
      "eval_loss": 0.899681031703949,
      "eval_runtime": 353.2175,
      "eval_samples_per_second": 2.418,
      "eval_steps_per_second": 0.606,
      "step": 600
    },
    {
      "epoch": 7.625,
      "grad_norm": 0.27525651454925537,
      "learning_rate": 2.98511170358155e-06,
      "loss": 0.8841,
      "step": 610
    },
    {
      "epoch": 7.75,
      "grad_norm": 0.25052082538604736,
      "learning_rate": 2.475778302439524e-06,
      "loss": 0.8979,
      "step": 620
    },
    {
      "epoch": 7.875,
      "grad_norm": 0.2501230537891388,
      "learning_rate": 2.0118056862137357e-06,
      "loss": 0.8696,
      "step": 630
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.2521611452102661,
      "learning_rate": 1.59412823400657e-06,
      "loss": 0.8782,
      "step": 640
    },
    {
      "epoch": 8.125,
      "grad_norm": 0.249056875705719,
      "learning_rate": 1.2235870926211619e-06,
      "loss": 0.8873,
      "step": 650
    },
    {
      "epoch": 8.25,
      "grad_norm": 0.27458131313323975,
      "learning_rate": 9.009284826036691e-07,
      "loss": 0.8737,
      "step": 660
    },
    {
      "epoch": 8.375,
      "grad_norm": 0.24417945742607117,
      "learning_rate": 6.268021954544096e-07,
      "loss": 0.8883,
      "step": 670
    },
    {
      "epoch": 8.5,
      "grad_norm": 0.25331562757492065,
      "learning_rate": 4.0176028503425835e-07,
      "loss": 0.87,
      "step": 680
    },
    {
      "epoch": 8.625,
      "grad_norm": 0.25556355714797974,
      "learning_rate": 2.262559558016325e-07,
      "loss": 0.8746,
      "step": 690
    },
    {
      "epoch": 8.75,
      "grad_norm": 0.27511876821517944,
      "learning_rate": 1.006426501190233e-07,
      "loss": 0.8912,
      "step": 700
    },
    {
      "epoch": 8.75,
      "eval_loss": 0.8991448283195496,
      "eval_runtime": 353.267,
      "eval_samples_per_second": 2.417,
      "eval_steps_per_second": 0.606,
      "step": 700
    },
    {
      "epoch": 8.875,
      "grad_norm": 0.2639774680137634,
      "learning_rate": 2.5173336467135267e-08,
      "loss": 0.9007,
      "step": 710
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.27557557821273804,
      "learning_rate": 0.0,
      "loss": 0.8796,
      "step": 720
    },
    {
      "epoch": 9.0,
      "step": 720,
      "total_flos": 2.5580424283828716e+18,
      "train_loss": 0.9693152533637153,
      "train_runtime": 60193.9387,
      "train_samples_per_second": 1.148,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 10,
  "max_steps": 720,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 100,
  "total_flos": 2.5580424283828716e+18,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}