{ "best_global_step": 190, "best_metric": 3.1472160816192627, "best_model_checkpoint": "./bert_mini_squadv2_finetuned/checkpoint-190", "epoch": 1.0, "eval_steps": 500, "global_step": 190, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.05263157894736842, "grad_norm": 4.97021484375, "learning_rate": 1.9526315789473688e-05, "loss": 5.8799, "step": 10 }, { "epoch": 0.10526315789473684, "grad_norm": 5.308949947357178, "learning_rate": 1.9e-05, "loss": 5.6001, "step": 20 }, { "epoch": 0.15789473684210525, "grad_norm": 5.93297004699707, "learning_rate": 1.8473684210526317e-05, "loss": 5.356, "step": 30 }, { "epoch": 0.21052631578947367, "grad_norm": 6.4546613693237305, "learning_rate": 1.7947368421052634e-05, "loss": 5.0317, "step": 40 }, { "epoch": 0.2631578947368421, "grad_norm": 6.0348734855651855, "learning_rate": 1.742105263157895e-05, "loss": 4.8894, "step": 50 }, { "epoch": 0.3157894736842105, "grad_norm": 7.766897201538086, "learning_rate": 1.6894736842105263e-05, "loss": 4.5613, "step": 60 }, { "epoch": 0.3684210526315789, "grad_norm": 6.935646057128906, "learning_rate": 1.636842105263158e-05, "loss": 4.4883, "step": 70 }, { "epoch": 0.42105263157894735, "grad_norm": 6.82500696182251, "learning_rate": 1.5842105263157896e-05, "loss": 4.2883, "step": 80 }, { "epoch": 0.47368421052631576, "grad_norm": 7.159075736999512, "learning_rate": 1.5315789473684212e-05, "loss": 4.165, "step": 90 }, { "epoch": 0.5263157894736842, "grad_norm": 7.179778575897217, "learning_rate": 1.4789473684210527e-05, "loss": 4.1544, "step": 100 }, { "epoch": 0.5789473684210527, "grad_norm": 7.615407943725586, "learning_rate": 1.4263157894736843e-05, "loss": 3.9639, "step": 110 }, { "epoch": 0.631578947368421, "grad_norm": 8.331777572631836, "learning_rate": 1.373684210526316e-05, "loss": 3.9313, "step": 120 }, { "epoch": 0.6842105263157895, "grad_norm": 8.408103942871094, "learning_rate": 1.3210526315789476e-05, "loss": 3.8429, "step": 130 }, { "epoch": 0.7368421052631579, "grad_norm": 7.173701286315918, "learning_rate": 1.268421052631579e-05, "loss": 3.8753, "step": 140 }, { "epoch": 0.7894736842105263, "grad_norm": 8.077372550964355, "learning_rate": 1.2157894736842107e-05, "loss": 3.726, "step": 150 }, { "epoch": 0.8421052631578947, "grad_norm": 8.297250747680664, "learning_rate": 1.1631578947368423e-05, "loss": 3.7247, "step": 160 }, { "epoch": 0.8947368421052632, "grad_norm": 8.952900886535645, "learning_rate": 1.1105263157894736e-05, "loss": 3.5831, "step": 170 }, { "epoch": 0.9473684210526315, "grad_norm": 7.744213581085205, "learning_rate": 1.0578947368421053e-05, "loss": 3.6806, "step": 180 }, { "epoch": 1.0, "grad_norm": 8.072354316711426, "learning_rate": 1.005263157894737e-05, "loss": 3.6219, "step": 190 }, { "epoch": 1.0, "eval_loss": 3.1472160816192627, "eval_runtime": 33.3578, "eval_samples_per_second": 15.259, "eval_steps_per_second": 0.959, "step": 190 } ], "logging_steps": 10, "max_steps": 380, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 22104484706304.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }