{
  "best_metric": 0.0708099901676178,
  "best_model_checkpoint": "/nasty/data/tpid/vizwiz/ViTGPT2I2A/checkpoint-17000",
  "epoch": 5.0,
  "global_step": 29290,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "learning_rate": 1.9320587231136908e-05,
      "loss": 0.1528,
      "step": 1000
    },
    {
      "epoch": 0.17,
      "eval_loss": 0.08688350766897202,
      "eval_runtime": 552.3255,
      "eval_samples_per_second": 14.032,
      "eval_steps_per_second": 3.509,
      "step": 1000
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.8637760327756916e-05,
      "loss": 0.0899,
      "step": 2000
    },
    {
      "epoch": 0.34,
      "eval_loss": 0.08172640949487686,
      "eval_runtime": 437.9983,
      "eval_samples_per_second": 17.694,
      "eval_steps_per_second": 4.425,
      "step": 2000
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.795493342437692e-05,
      "loss": 0.084,
      "step": 3000
    },
    {
      "epoch": 0.51,
      "eval_loss": 0.07901930063962936,
      "eval_runtime": 217.4661,
      "eval_samples_per_second": 35.638,
      "eval_steps_per_second": 8.912,
      "step": 3000
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.7272106520996928e-05,
      "loss": 0.0814,
      "step": 4000
    },
    {
      "epoch": 0.68,
      "eval_loss": 0.07728389650583267,
      "eval_runtime": 218.3505,
      "eval_samples_per_second": 35.493,
      "eval_steps_per_second": 8.876,
      "step": 4000
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.6589279617616936e-05,
      "loss": 0.0803,
      "step": 5000
    },
    {
      "epoch": 0.85,
      "eval_loss": 0.07569148391485214,
      "eval_runtime": 218.1796,
      "eval_samples_per_second": 35.521,
      "eval_steps_per_second": 8.883,
      "step": 5000
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.590645271423694e-05,
      "loss": 0.077,
      "step": 6000
    },
    {
      "epoch": 1.02,
      "eval_loss": 0.07452211529016495,
      "eval_runtime": 220.2685,
      "eval_samples_per_second": 35.184,
      "eval_steps_per_second": 8.798,
      "step": 6000
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.5223625810856947e-05,
      "loss": 0.0739,
      "step": 7000
    },
    {
      "epoch": 1.19,
      "eval_loss": 0.0739876851439476,
      "eval_runtime": 205.9033,
      "eval_samples_per_second": 37.639,
      "eval_steps_per_second": 9.412,
      "step": 7000
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.4540798907476957e-05,
      "loss": 0.0719,
      "step": 8000
    },
    {
      "epoch": 1.37,
      "eval_loss": 0.07368426769971848,
      "eval_runtime": 217.9815,
      "eval_samples_per_second": 35.553,
      "eval_steps_per_second": 8.891,
      "step": 8000
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.3857972004096963e-05,
      "loss": 0.0717,
      "step": 9000
    },
    {
      "epoch": 1.54,
      "eval_loss": 0.07300957292318344,
      "eval_runtime": 219.2086,
      "eval_samples_per_second": 35.354,
      "eval_steps_per_second": 8.841,
      "step": 9000
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.3175145100716968e-05,
      "loss": 0.0731,
      "step": 10000
    },
    {
      "epoch": 1.71,
      "eval_loss": 0.0726846307516098,
      "eval_runtime": 219.3614,
      "eval_samples_per_second": 35.33,
      "eval_steps_per_second": 8.835,
      "step": 10000
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.2492318197336977e-05,
      "loss": 0.0708,
      "step": 11000
    },
    {
      "epoch": 1.88,
      "eval_loss": 0.07204297184944153,
      "eval_runtime": 217.9036,
      "eval_samples_per_second": 35.566,
      "eval_steps_per_second": 8.894,
      "step": 11000
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.1809491293956984e-05,
      "loss": 0.0697,
      "step": 12000
    },
    {
      "epoch": 2.05,
      "eval_loss": 0.07171137630939484,
      "eval_runtime": 219.7173,
      "eval_samples_per_second": 35.273,
      "eval_steps_per_second": 8.82,
      "step": 12000
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.1126664390576988e-05,
      "loss": 0.0655,
      "step": 13000
    },
    {
      "epoch": 2.22,
      "eval_loss": 0.07193101197481155,
      "eval_runtime": 218.2496,
      "eval_samples_per_second": 35.51,
      "eval_steps_per_second": 8.88,
      "step": 13000
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.0443837487196998e-05,
      "loss": 0.0653,
      "step": 14000
    },
    {
      "epoch": 2.39,
      "eval_loss": 0.07193595170974731,
      "eval_runtime": 218.5083,
      "eval_samples_per_second": 35.468,
      "eval_steps_per_second": 8.869,
      "step": 14000
    },
    {
      "epoch": 2.56,
      "learning_rate": 9.761693410720383e-06,
      "loss": 0.0657,
      "step": 15000
    },
    {
      "epoch": 2.56,
      "eval_loss": 0.07120098173618317,
      "eval_runtime": 219.0492,
      "eval_samples_per_second": 35.38,
      "eval_steps_per_second": 8.847,
      "step": 15000
    },
    {
      "epoch": 2.73,
      "learning_rate": 9.07886650734039e-06,
      "loss": 0.0663,
      "step": 16000
    },
    {
      "epoch": 2.73,
      "eval_loss": 0.07102101296186447,
      "eval_runtime": 218.7347,
      "eval_samples_per_second": 35.431,
      "eval_steps_per_second": 8.86,
      "step": 16000
    },
    {
      "epoch": 2.9,
      "learning_rate": 8.396722430863777e-06,
      "loss": 0.0654,
      "step": 17000
    },
    {
      "epoch": 2.9,
      "eval_loss": 0.0708099901676178,
      "eval_runtime": 218.8513,
      "eval_samples_per_second": 35.412,
      "eval_steps_per_second": 8.855,
      "step": 17000
    },
    {
      "epoch": 3.07,
      "learning_rate": 7.713895527483783e-06,
      "loss": 0.0645,
      "step": 18000
    },
    {
      "epoch": 3.07,
      "eval_loss": 0.07163063436746597,
      "eval_runtime": 219.9493,
      "eval_samples_per_second": 35.235,
      "eval_steps_per_second": 8.811,
      "step": 18000
    },
    {
      "epoch": 3.24,
      "learning_rate": 7.0317514510071705e-06,
      "loss": 0.0616,
      "step": 19000
    },
    {
      "epoch": 3.24,
      "eval_loss": 0.07119180262088776,
      "eval_runtime": 219.2713,
      "eval_samples_per_second": 35.344,
      "eval_steps_per_second": 8.838,
      "step": 19000
    },
    {
      "epoch": 3.41,
      "learning_rate": 6.348924547627177e-06,
      "loss": 0.0607,
      "step": 20000
    },
    {
      "epoch": 3.41,
      "eval_loss": 0.07116228342056274,
      "eval_runtime": 219.1634,
      "eval_samples_per_second": 35.362,
      "eval_steps_per_second": 8.843,
      "step": 20000
    },
    {
      "epoch": 3.58,
      "learning_rate": 5.666097644247184e-06,
      "loss": 0.0611,
      "step": 21000
    },
    {
      "epoch": 3.58,
      "eval_loss": 0.07110943645238876,
      "eval_runtime": 219.7957,
      "eval_samples_per_second": 35.26,
      "eval_steps_per_second": 8.817,
      "step": 21000
    },
    {
      "epoch": 3.76,
      "learning_rate": 4.98327074086719e-06,
      "loss": 0.0615,
      "step": 22000
    },
    {
      "epoch": 3.76,
      "eval_loss": 0.07111015915870667,
      "eval_runtime": 220.0979,
      "eval_samples_per_second": 35.212,
      "eval_steps_per_second": 8.805,
      "step": 22000
    },
    {
      "epoch": 3.93,
      "learning_rate": 4.3018094912939576e-06,
      "loss": 0.0614,
      "step": 23000
    },
    {
      "epoch": 3.93,
      "eval_loss": 0.0710098147392273,
      "eval_runtime": 223.1322,
      "eval_samples_per_second": 34.733,
      "eval_steps_per_second": 8.685,
      "step": 23000
    },
    {
      "epoch": 4.1,
      "learning_rate": 3.618982587913964e-06,
      "loss": 0.0594,
      "step": 24000
    },
    {
      "epoch": 4.1,
      "eval_loss": 0.07158154249191284,
      "eval_runtime": 220.2761,
      "eval_samples_per_second": 35.183,
      "eval_steps_per_second": 8.798,
      "step": 24000
    },
    {
      "epoch": 4.27,
      "learning_rate": 2.936155684533971e-06,
      "loss": 0.0587,
      "step": 25000
    },
    {
      "epoch": 4.27,
      "eval_loss": 0.07150966674089432,
      "eval_runtime": 223.8011,
      "eval_samples_per_second": 34.629,
      "eval_steps_per_second": 8.659,
      "step": 25000
    },
    {
      "epoch": 4.44,
      "learning_rate": 2.2533287811539777e-06,
      "loss": 0.0574,
      "step": 26000
    },
    {
      "epoch": 4.44,
      "eval_loss": 0.07154253125190735,
      "eval_runtime": 216.2215,
      "eval_samples_per_second": 35.843,
      "eval_steps_per_second": 8.963,
      "step": 26000
    },
    {
      "epoch": 4.61,
      "learning_rate": 1.5711847046773643e-06,
      "loss": 0.0579,
      "step": 27000
    },
    {
      "epoch": 4.61,
      "eval_loss": 0.07154078036546707,
      "eval_runtime": 219.6549,
      "eval_samples_per_second": 35.283,
      "eval_steps_per_second": 8.823,
      "step": 27000
    },
    {
      "epoch": 4.78,
      "learning_rate": 8.883578012973712e-07,
      "loss": 0.0581,
      "step": 28000
    },
    {
      "epoch": 4.78,
      "eval_loss": 0.07147673517465591,
      "eval_runtime": 218.889,
      "eval_samples_per_second": 35.406,
      "eval_steps_per_second": 8.854,
      "step": 28000
    },
    {
      "epoch": 4.95,
      "learning_rate": 2.0553089791737796e-07,
      "loss": 0.0579,
      "step": 29000
    },
    {
      "epoch": 4.95,
      "eval_loss": 0.07150830328464508,
      "eval_runtime": 219.8431,
      "eval_samples_per_second": 35.252,
      "eval_steps_per_second": 8.815,
      "step": 29000
    },
    {
      "epoch": 5.0,
      "step": 29290,
      "total_flos": 2.1143149521668145e+19,
      "train_loss": 0.07038866728798608,
      "train_runtime": 25445.4127,
      "train_samples_per_second": 4.604,
      "train_steps_per_second": 1.151
    }
  ],
  "max_steps": 29290,
  "num_train_epochs": 5,
  "total_flos": 2.1143149521668145e+19,
  "trial_name": null,
  "trial_params": null
}