tinyllama-toneopbot / checkpoint-273 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 273,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14652014652014653,
"grad_norm": 63.75182342529297,
"learning_rate": 0.00019705882352941177,
"loss": 13.3784,
"step": 10
},
{
"epoch": 0.29304029304029305,
"grad_norm": 33.430416107177734,
"learning_rate": 0.0001911764705882353,
"loss": 4.1258,
"step": 20
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.4142046570777893,
"learning_rate": 0.00018529411764705883,
"loss": 0.6903,
"step": 30
},
{
"epoch": 0.5860805860805861,
"grad_norm": 0.27323105931282043,
"learning_rate": 0.00017941176470588236,
"loss": 0.5677,
"step": 40
},
{
"epoch": 0.7326007326007326,
"grad_norm": 0.2076171338558197,
"learning_rate": 0.0001735294117647059,
"loss": 0.4418,
"step": 50
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.325761079788208,
"learning_rate": 0.00016764705882352942,
"loss": 0.4442,
"step": 60
},
{
"epoch": 1.0256410256410255,
"grad_norm": 0.741830050945282,
"learning_rate": 0.00016176470588235295,
"loss": 0.3808,
"step": 70
},
{
"epoch": 1.1721611721611722,
"grad_norm": 0.7903043031692505,
"learning_rate": 0.00015588235294117648,
"loss": 0.3212,
"step": 80
},
{
"epoch": 1.3186813186813187,
"grad_norm": 0.726527214050293,
"learning_rate": 0.00015000000000000001,
"loss": 0.2756,
"step": 90
},
{
"epoch": 1.4652014652014653,
"grad_norm": 0.4178701937198639,
"learning_rate": 0.00014411764705882354,
"loss": 0.2395,
"step": 100
},
{
"epoch": 1.6117216117216118,
"grad_norm": 0.29514220356941223,
"learning_rate": 0.00013823529411764707,
"loss": 0.206,
"step": 110
},
{
"epoch": 1.7582417582417582,
"grad_norm": 0.334573894739151,
"learning_rate": 0.0001323529411764706,
"loss": 0.2336,
"step": 120
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.22454310953617096,
"learning_rate": 0.0001264705882352941,
"loss": 0.2333,
"step": 130
},
{
"epoch": 2.051282051282051,
"grad_norm": 0.3633638918399811,
"learning_rate": 0.00012058823529411765,
"loss": 0.21,
"step": 140
},
{
"epoch": 2.197802197802198,
"grad_norm": 0.26258039474487305,
"learning_rate": 0.00011470588235294118,
"loss": 0.1804,
"step": 150
},
{
"epoch": 2.3443223443223444,
"grad_norm": 0.2950953543186188,
"learning_rate": 0.0001088235294117647,
"loss": 0.2074,
"step": 160
},
{
"epoch": 2.490842490842491,
"grad_norm": 0.24581202864646912,
"learning_rate": 0.00010294117647058823,
"loss": 0.2233,
"step": 170
},
{
"epoch": 2.6373626373626373,
"grad_norm": 0.3232247829437256,
"learning_rate": 9.705882352941177e-05,
"loss": 0.1909,
"step": 180
},
{
"epoch": 2.7838827838827838,
"grad_norm": 0.31195566058158875,
"learning_rate": 9.11764705882353e-05,
"loss": 0.2135,
"step": 190
},
{
"epoch": 2.9304029304029307,
"grad_norm": 0.3059927225112915,
"learning_rate": 8.529411764705883e-05,
"loss": 0.1813,
"step": 200
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.2599155306816101,
"learning_rate": 7.941176470588235e-05,
"loss": 0.1721,
"step": 210
},
{
"epoch": 3.2234432234432235,
"grad_norm": 0.3562494218349457,
"learning_rate": 7.352941176470589e-05,
"loss": 0.18,
"step": 220
},
{
"epoch": 3.36996336996337,
"grad_norm": 0.3098445534706116,
"learning_rate": 6.764705882352942e-05,
"loss": 0.1621,
"step": 230
},
{
"epoch": 3.5164835164835164,
"grad_norm": 0.32583731412887573,
"learning_rate": 6.176470588235295e-05,
"loss": 0.1577,
"step": 240
},
{
"epoch": 3.663003663003663,
"grad_norm": 0.23592990636825562,
"learning_rate": 5.588235294117647e-05,
"loss": 0.2229,
"step": 250
},
{
"epoch": 3.8095238095238093,
"grad_norm": 0.33576756715774536,
"learning_rate": 5e-05,
"loss": 0.1959,
"step": 260
},
{
"epoch": 3.956043956043956,
"grad_norm": 0.26419597864151,
"learning_rate": 4.411764705882353e-05,
"loss": 0.1917,
"step": 270
}
],
"logging_steps": 10,
"max_steps": 340,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
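
A minimal sketch of how this state file could be inspected offline, assuming it has been downloaded alongside the checkpoint (the path below is illustrative, not confirmed by the repo layout):

import json

# Load this checkpoint's trainer state (path is an assumption; point it at your local copy).
with open("checkpoint-273/trainer_state.json") as f:
    state = json.load(f)

# log_history holds one entry per logging event (every 10 optimizer steps here, per logging_steps).
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

print(f"epoch {state['epoch']}, step {state['global_step']} of {state['max_steps']}")
print(f"last logged training loss: {losses[-1]} at step {steps[-1]}")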