{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.999476713762428,
  "eval_steps": 1000,
  "global_step": 8360,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 4.958123953098828e-05,
      "loss": 0.1445,
      "step": 100
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.916247906197655e-05,
      "loss": 0.1585,
      "step": 200
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.874371859296483e-05,
      "loss": 0.1654,
      "step": 300
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.83249581239531e-05,
      "loss": 0.1701,
      "step": 400
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.7906197654941376e-05,
      "loss": 0.1884,
      "step": 500
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.748743718592965e-05,
      "loss": 0.1868,
      "step": 600
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.7068676716917926e-05,
      "loss": 0.1872,
      "step": 700
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.66499162479062e-05,
      "loss": 0.2018,
      "step": 800
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.6231155778894475e-05,
      "loss": 0.1865,
      "step": 900
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.581239530988275e-05,
      "loss": 0.2002,
      "step": 1000
    },
    {
      "epoch": 0.84,
      "eval_loss": 2.548002243041992,
      "eval_runtime": 20.7773,
      "eval_samples_per_second": 57.466,
      "eval_steps_per_second": 14.391,
      "step": 1000
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.5393634840871025e-05,
      "loss": 0.1776,
      "step": 1100
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.49748743718593e-05,
      "loss": 0.2035,
      "step": 1200
    },
    {
      "epoch": 1.09,
      "learning_rate": 4.4556113902847574e-05,
      "loss": 0.1199,
      "step": 1300
    },
    {
      "epoch": 1.17,
      "learning_rate": 4.413735343383585e-05,
      "loss": 0.1311,
      "step": 1400
    },
    {
      "epoch": 1.26,
      "learning_rate": 4.3718592964824124e-05,
      "loss": 0.1279,
      "step": 1500
    },
    {
      "epoch": 1.34,
      "learning_rate": 4.32998324958124e-05,
      "loss": 0.1279,
      "step": 1600
    },
    {
      "epoch": 1.42,
      "learning_rate": 4.288107202680067e-05,
      "loss": 0.1249,
      "step": 1700
    },
    {
      "epoch": 1.51,
      "learning_rate": 4.246231155778895e-05,
      "loss": 0.1354,
      "step": 1800
    },
    {
      "epoch": 1.59,
      "learning_rate": 4.204355108877722e-05,
      "loss": 0.1407,
      "step": 1900
    },
    {
      "epoch": 1.67,
      "learning_rate": 4.16247906197655e-05,
      "loss": 0.1207,
      "step": 2000
    },
    {
      "epoch": 1.67,
      "eval_loss": 2.662675619125366,
      "eval_runtime": 20.7988,
      "eval_samples_per_second": 57.407,
      "eval_steps_per_second": 14.376,
      "step": 2000
    },
    {
      "epoch": 1.76,
      "learning_rate": 4.120603015075377e-05,
      "loss": 0.1439,
      "step": 2100
    },
    {
      "epoch": 1.84,
      "learning_rate": 4.078726968174205e-05,
      "loss": 0.1342,
      "step": 2200
    },
    {
      "epoch": 1.93,
      "learning_rate": 4.036850921273032e-05,
      "loss": 0.1251,
      "step": 2300
    },
    {
      "epoch": 2.01,
      "learning_rate": 3.9949748743718597e-05,
      "loss": 0.1242,
      "step": 2400
    },
    {
      "epoch": 2.09,
      "learning_rate": 3.953098827470687e-05,
      "loss": 0.0826,
      "step": 2500
    },
    {
      "epoch": 2.18,
      "learning_rate": 3.9112227805695146e-05,
      "loss": 0.0793,
      "step": 2600
    },
    {
      "epoch": 2.26,
      "learning_rate": 3.869346733668342e-05,
      "loss": 0.0874,
      "step": 2700
    },
    {
      "epoch": 2.34,
      "learning_rate": 3.8274706867671696e-05,
      "loss": 0.0992,
      "step": 2800
    },
    {
      "epoch": 2.43,
      "learning_rate": 3.785594639865997e-05,
      "loss": 0.088,
      "step": 2900
    },
    {
      "epoch": 2.51,
      "learning_rate": 3.7437185929648245e-05,
      "loss": 0.0768,
      "step": 3000
    },
    {
      "epoch": 2.51,
      "eval_loss": 2.742750406265259,
      "eval_runtime": 20.7709,
      "eval_samples_per_second": 57.484,
      "eval_steps_per_second": 14.395,
      "step": 3000
    },
    {
      "epoch": 2.6,
      "learning_rate": 3.701842546063652e-05,
      "loss": 0.0805,
      "step": 3100
    },
    {
      "epoch": 2.68,
      "learning_rate": 3.6599664991624795e-05,
      "loss": 0.0796,
      "step": 3200
    },
    {
      "epoch": 2.76,
      "learning_rate": 3.618090452261307e-05,
      "loss": 0.0819,
      "step": 3300
    },
    {
      "epoch": 2.85,
      "learning_rate": 3.5762144053601344e-05,
      "loss": 0.0844,
      "step": 3400
    },
    {
      "epoch": 2.93,
      "learning_rate": 3.534338358458962e-05,
      "loss": 0.081,
      "step": 3500
    },
    {
      "epoch": 3.01,
      "learning_rate": 3.4924623115577894e-05,
      "loss": 0.0796,
      "step": 3600
    },
    {
      "epoch": 3.1,
      "learning_rate": 3.450586264656617e-05,
      "loss": 0.0595,
      "step": 3700
    },
    {
      "epoch": 3.18,
      "learning_rate": 3.408710217755444e-05,
      "loss": 0.0481,
      "step": 3800
    },
    {
      "epoch": 3.27,
      "learning_rate": 3.366834170854272e-05,
      "loss": 0.0554,
      "step": 3900
    },
    {
      "epoch": 3.35,
      "learning_rate": 3.324958123953099e-05,
      "loss": 0.0566,
      "step": 4000
    },
    {
      "epoch": 3.35,
      "eval_loss": 2.823474407196045,
      "eval_runtime": 20.7588,
      "eval_samples_per_second": 57.518,
      "eval_steps_per_second": 14.404,
      "step": 4000
    },
    {
      "epoch": 3.43,
      "learning_rate": 3.283082077051927e-05,
      "loss": 0.0562,
      "step": 4100
    },
    {
      "epoch": 3.52,
      "learning_rate": 3.241206030150754e-05,
      "loss": 0.0584,
      "step": 4200
    },
    {
      "epoch": 3.6,
      "learning_rate": 3.199329983249582e-05,
      "loss": 0.0555,
      "step": 4300
    },
    {
      "epoch": 3.68,
      "learning_rate": 3.157453936348409e-05,
      "loss": 0.0446,
      "step": 4400
    },
    {
      "epoch": 3.77,
      "learning_rate": 3.1155778894472366e-05,
      "loss": 0.0559,
      "step": 4500
    },
    {
      "epoch": 3.85,
      "learning_rate": 3.073701842546064e-05,
      "loss": 0.0582,
      "step": 4600
    },
    {
      "epoch": 3.94,
      "learning_rate": 3.0318257956448916e-05,
      "loss": 0.0457,
      "step": 4700
    },
    {
      "epoch": 4.02,
      "learning_rate": 2.989949748743719e-05,
      "loss": 0.0486,
      "step": 4800
    },
    {
      "epoch": 4.1,
      "learning_rate": 2.9480737018425465e-05,
      "loss": 0.0292,
      "step": 4900
    },
    {
      "epoch": 4.19,
      "learning_rate": 2.906197654941374e-05,
      "loss": 0.0335,
      "step": 5000
    },
    {
      "epoch": 4.19,
      "eval_loss": 2.9011123180389404,
      "eval_runtime": 20.7527,
      "eval_samples_per_second": 57.535,
      "eval_steps_per_second": 14.408,
      "step": 5000
    },
    {
      "epoch": 4.27,
      "learning_rate": 2.8643216080402015e-05,
      "loss": 0.028,
      "step": 5100
    },
    {
      "epoch": 4.35,
      "learning_rate": 2.822445561139029e-05,
      "loss": 0.0317,
      "step": 5200
    },
    {
      "epoch": 4.44,
      "learning_rate": 2.7805695142378564e-05,
      "loss": 0.0392,
      "step": 5300
    },
    {
      "epoch": 4.52,
      "learning_rate": 2.738693467336684e-05,
      "loss": 0.035,
      "step": 5400
    },
    {
      "epoch": 4.6,
      "learning_rate": 2.696817420435511e-05,
      "loss": 0.0333,
      "step": 5500
    },
    {
      "epoch": 4.69,
      "learning_rate": 2.6549413735343385e-05,
      "loss": 0.0306,
      "step": 5600
    },
    {
      "epoch": 4.77,
      "learning_rate": 2.613065326633166e-05,
      "loss": 0.0427,
      "step": 5700
    },
    {
      "epoch": 4.86,
      "learning_rate": 2.5711892797319935e-05,
      "loss": 0.0249,
      "step": 5800
    },
    {
      "epoch": 4.94,
      "learning_rate": 2.529313232830821e-05,
      "loss": 0.0391,
      "step": 5900
    },
    {
      "epoch": 5.02,
      "learning_rate": 2.4874371859296484e-05,
      "loss": 0.0281,
      "step": 6000
    },
    {
      "epoch": 5.02,
      "eval_loss": 2.8658268451690674,
      "eval_runtime": 20.7258,
      "eval_samples_per_second": 57.609,
      "eval_steps_per_second": 14.426,
      "step": 6000
    },
    {
      "epoch": 5.11,
      "learning_rate": 2.445561139028476e-05,
      "loss": 0.0213,
      "step": 6100
    },
    {
      "epoch": 5.19,
      "learning_rate": 2.4036850921273034e-05,
      "loss": 0.0163,
      "step": 6200
    },
    {
      "epoch": 5.27,
      "learning_rate": 2.361809045226131e-05,
      "loss": 0.0166,
      "step": 6300
    },
    {
      "epoch": 5.36,
      "learning_rate": 2.3199329983249583e-05,
      "loss": 0.0161,
      "step": 6400
    },
    {
      "epoch": 5.44,
      "learning_rate": 2.2780569514237858e-05,
      "loss": 0.0241,
      "step": 6500
    },
    {
      "epoch": 5.53,
      "learning_rate": 2.2361809045226133e-05,
      "loss": 0.0237,
      "step": 6600
    },
    {
      "epoch": 5.61,
      "learning_rate": 2.1943048576214408e-05,
      "loss": 0.0143,
      "step": 6700
    },
    {
      "epoch": 5.69,
      "learning_rate": 2.1524288107202682e-05,
      "loss": 0.017,
      "step": 6800
    },
    {
      "epoch": 5.78,
      "learning_rate": 2.1105527638190957e-05,
      "loss": 0.0198,
      "step": 6900
    },
    {
      "epoch": 5.86,
      "learning_rate": 2.0686767169179232e-05,
      "loss": 0.0215,
      "step": 7000
    },
    {
      "epoch": 5.86,
      "eval_loss": 2.8833489418029785,
      "eval_runtime": 20.7599,
      "eval_samples_per_second": 57.515,
      "eval_steps_per_second": 14.403,
      "step": 7000
    },
    {
      "epoch": 5.94,
      "learning_rate": 2.0268006700167507e-05,
      "loss": 0.0197,
      "step": 7100
    },
    {
      "epoch": 6.03,
      "learning_rate": 1.984924623115578e-05,
      "loss": 0.0173,
      "step": 7200
    },
    {
      "epoch": 6.11,
      "learning_rate": 1.9430485762144056e-05,
      "loss": 0.015,
      "step": 7300
    },
    {
      "epoch": 6.2,
      "learning_rate": 1.901172529313233e-05,
      "loss": 0.0139,
      "step": 7400
    },
    {
      "epoch": 6.28,
      "learning_rate": 1.8592964824120602e-05,
      "loss": 0.0116,
      "step": 7500
    },
    {
      "epoch": 6.36,
      "learning_rate": 1.8174204355108877e-05,
      "loss": 0.0048,
      "step": 7600
    },
    {
      "epoch": 6.45,
      "learning_rate": 1.7755443886097152e-05,
      "loss": 0.0117,
      "step": 7700
    },
    {
      "epoch": 6.53,
      "learning_rate": 1.7336683417085427e-05,
      "loss": 0.0079,
      "step": 7800
    },
    {
      "epoch": 6.61,
      "learning_rate": 1.69179229480737e-05,
      "loss": 0.0095,
      "step": 7900
    },
    {
      "epoch": 6.7,
      "learning_rate": 1.6499162479061976e-05,
      "loss": 0.0081,
      "step": 8000
    },
    {
      "epoch": 6.7,
      "eval_loss": 2.974073648452759,
      "eval_runtime": 20.7629,
      "eval_samples_per_second": 57.506,
      "eval_steps_per_second": 14.401,
      "step": 8000
    },
    {
      "epoch": 6.78,
      "learning_rate": 1.608040201005025e-05,
      "loss": 0.0086,
      "step": 8100
    },
    {
      "epoch": 6.87,
      "learning_rate": 1.5661641541038526e-05,
      "loss": 0.013,
      "step": 8200
    },
    {
      "epoch": 6.95,
      "learning_rate": 1.5242881072026802e-05,
      "loss": 0.0079,
      "step": 8300
    }
  ],
  "logging_steps": 100,
  "max_steps": 11940,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.919599695983693e+16,
  "trial_name": null,
  "trial_params": null
}