{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1,
  "eval_steps": 2000,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 12.253900527954102,
      "learning_rate": 9.8e-07,
      "loss": 0.3335,
      "step": 100
    },
    {
      "epoch": 0.01,
      "grad_norm": 9.193947792053223,
      "learning_rate": 9.9010101010101e-07,
      "loss": 0.2105,
      "step": 200
    },
    {
      "epoch": 0.01,
      "grad_norm": 5.168125152587891,
      "learning_rate": 9.8e-07,
      "loss": 0.1618,
      "step": 300
    },
    {
      "epoch": 0.02,
      "grad_norm": 2.163803815841675,
      "learning_rate": 9.698989898989898e-07,
      "loss": 0.1418,
      "step": 400
    },
    {
      "epoch": 0.03,
      "grad_norm": 10.751598358154297,
      "learning_rate": 9.597979797979797e-07,
      "loss": 0.1215,
      "step": 500
    },
    {
      "epoch": 0.03,
      "grad_norm": 7.5342793464660645,
      "learning_rate": 9.496969696969696e-07,
      "loss": 0.1142,
      "step": 600
    },
    {
      "epoch": 0.04,
      "grad_norm": 8.430835723876953,
      "learning_rate": 9.395959595959596e-07,
      "loss": 0.121,
      "step": 700
    },
    {
      "epoch": 0.04,
      "grad_norm": 1.8440964221954346,
      "learning_rate": 9.295959595959596e-07,
      "loss": 0.0969,
      "step": 800
    },
    {
      "epoch": 0.04,
      "grad_norm": 2.699073553085327,
      "learning_rate": 9.194949494949495e-07,
      "loss": 0.1024,
      "step": 900
    },
    {
      "epoch": 0.05,
      "grad_norm": 11.251789093017578,
      "learning_rate": 9.093939393939394e-07,
      "loss": 0.0899,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "grad_norm": 7.728929042816162,
      "learning_rate": 8.992929292929292e-07,
      "loss": 0.0837,
      "step": 1100
    },
    {
      "epoch": 0.06,
      "grad_norm": 1.7401416301727295,
      "learning_rate": 8.891919191919191e-07,
      "loss": 0.0914,
      "step": 1200
    },
    {
      "epoch": 0.07,
      "grad_norm": 3.5504443645477295,
      "learning_rate": 8.790909090909091e-07,
      "loss": 0.0754,
      "step": 1300
    },
    {
      "epoch": 0.07,
      "grad_norm": 5.9004316329956055,
      "learning_rate": 8.68989898989899e-07,
      "loss": 0.0791,
      "step": 1400
    },
    {
      "epoch": 0.07,
      "grad_norm": 2.9171862602233887,
      "learning_rate": 8.588888888888888e-07,
      "loss": 0.0866,
      "step": 1500
    },
    {
      "epoch": 0.08,
      "grad_norm": 5.907050132751465,
      "learning_rate": 8.487878787878787e-07,
      "loss": 0.0768,
      "step": 1600
    },
    {
      "epoch": 0.09,
      "grad_norm": 4.856807708740234,
      "learning_rate": 8.386868686868687e-07,
      "loss": 0.0745,
      "step": 1700
    },
    {
      "epoch": 0.09,
      "grad_norm": 2.4457929134368896,
      "learning_rate": 8.285858585858585e-07,
      "loss": 0.0808,
      "step": 1800
    },
    {
      "epoch": 0.1,
      "grad_norm": 3.084287643432617,
      "learning_rate": 8.184848484848484e-07,
      "loss": 0.0743,
      "step": 1900
    },
    {
      "epoch": 0.1,
      "grad_norm": 2.936805486679077,
      "learning_rate": 8.083838383838384e-07,
      "loss": 0.0675,
      "step": 2000
    },
    {
      "epoch": 0.1,
      "eval_loss": 0.07925247400999069,
      "eval_runtime": 204.8299,
      "eval_samples_per_second": 4.882,
      "eval_steps_per_second": 1.221,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 2000,
  "total_flos": 1.88564197343232e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}