{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.352941176470588,
  "eval_steps": 500,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 1.615626717799089,
      "learning_rate": 5e-05,
      "loss": 1.171,
      "mean_token_accuracy": 0.7515053629875184,
      "num_tokens": 2505918.0,
      "step": 5
    },
    {
      "epoch": 1.1176470588235294,
      "grad_norm": 0.9501076395039855,
      "learning_rate": 4.418604651162791e-05,
      "loss": 1.0081,
      "mean_token_accuracy": 0.7769864400227865,
      "num_tokens": 4791423.0,
      "step": 10
    },
    {
      "epoch": 1.7058823529411766,
      "grad_norm": 0.4304626663566037,
      "learning_rate": 3.837209302325582e-05,
      "loss": 0.8307,
      "mean_token_accuracy": 0.8090634852647781,
      "num_tokens": 7330272.0,
      "step": 15
    },
    {
      "epoch": 2.235294117647059,
      "grad_norm": 0.32218267013578467,
      "learning_rate": 3.2558139534883724e-05,
      "loss": 0.8111,
      "mean_token_accuracy": 0.8145165310965644,
      "num_tokens": 9618952.0,
      "step": 20
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 0.34521913147387545,
      "learning_rate": 2.674418604651163e-05,
      "loss": 0.6874,
      "mean_token_accuracy": 0.8360600471496582,
      "num_tokens": 12114377.0,
      "step": 25
    },
    {
      "epoch": 3.3529411764705883,
      "grad_norm": 0.2827302276368052,
      "learning_rate": 2.0930232558139536e-05,
      "loss": 0.6715,
      "mean_token_accuracy": 0.8426629536681705,
      "num_tokens": 14409009.0,
      "step": 30
    },
    {
      "epoch": 3.9411764705882355,
      "grad_norm": 0.9916401982672689,
      "learning_rate": 1.5116279069767441e-05,
      "loss": 0.6055,
      "mean_token_accuracy": 0.8535039156675339,
      "num_tokens": 16918082.0,
      "step": 35
    },
    {
      "epoch": 4.470588235294118,
      "grad_norm": 0.4660327419143292,
      "learning_rate": 9.302325581395349e-06,
      "loss": 0.6077,
      "mean_token_accuracy": 0.8566798832681444,
      "num_tokens": 19217454.0,
      "step": 40
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.35285140721947816,
      "learning_rate": 3.488372093023256e-06,
      "loss": 0.5145,
      "mean_token_accuracy": 0.8737427029344771,
      "num_tokens": 21475135.0,
      "step": 45
    },
    {
      "epoch": 5.352941176470588,
      "mean_token_accuracy": 0.8594042261441549,
      "num_tokens": 22979390.0,
      "step": 48,
      "total_flos": 34346052091904.0,
      "train_loss": 0.7553561106324196,
      "train_runtime": 288.5449,
      "train_samples_per_second": 11.208,
      "train_steps_per_second": 0.166
    }
  ],
  "logging_steps": 5,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 34346052091904.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}