TC-ABB-BERT / checkpoint-375 / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 2.411430835723877,
"learning_rate": 1.9520000000000003e-05,
"loss": 0.2283,
"step": 10
},
{
"epoch": 0.16,
"grad_norm": 1.634353518486023,
"learning_rate": 1.898666666666667e-05,
"loss": 0.2332,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 1.4850561618804932,
"learning_rate": 1.8453333333333335e-05,
"loss": 0.1957,
"step": 30
},
{
"epoch": 0.32,
"grad_norm": 1.818241000175476,
"learning_rate": 1.792e-05,
"loss": 0.2057,
"step": 40
},
{
"epoch": 0.4,
"grad_norm": 1.1182990074157715,
"learning_rate": 1.7386666666666667e-05,
"loss": 0.2033,
"step": 50
},
{
"epoch": 0.48,
"grad_norm": 1.7503159046173096,
"learning_rate": 1.6853333333333333e-05,
"loss": 0.2026,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 1.2493916749954224,
"learning_rate": 1.632e-05,
"loss": 0.1888,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 2.819777488708496,
"learning_rate": 1.578666666666667e-05,
"loss": 0.1877,
"step": 80
},
{
"epoch": 0.72,
"grad_norm": 2.9391438961029053,
"learning_rate": 1.5253333333333335e-05,
"loss": 0.2035,
"step": 90
},
{
"epoch": 0.8,
"grad_norm": 1.5021731853485107,
"learning_rate": 1.4720000000000001e-05,
"loss": 0.2014,
"step": 100
},
{
"epoch": 0.88,
"grad_norm": 2.2055695056915283,
"learning_rate": 1.418666666666667e-05,
"loss": 0.1883,
"step": 110
},
{
"epoch": 0.96,
"grad_norm": 1.4860285520553589,
"learning_rate": 1.3653333333333334e-05,
"loss": 0.1603,
"step": 120
},
{
"epoch": 1.0,
"eval_accuracy": 0.8924624363591208,
"eval_f1": 0.8189732812969078,
"eval_loss": 0.34129005670547485,
"eval_precision": 0.7706214689265537,
"eval_recall": 0.8737988468930173,
"eval_runtime": 1.2518,
"eval_samples_per_second": 119.829,
"eval_steps_per_second": 7.989,
"step": 125
},
{
"epoch": 1.04,
"grad_norm": 1.8653055429458618,
"learning_rate": 1.3120000000000001e-05,
"loss": 0.177,
"step": 130
},
{
"epoch": 1.12,
"grad_norm": 1.1053411960601807,
"learning_rate": 1.2586666666666668e-05,
"loss": 0.1112,
"step": 140
},
{
"epoch": 1.2,
"grad_norm": 3.528696060180664,
"learning_rate": 1.2053333333333335e-05,
"loss": 0.1741,
"step": 150
},
{
"epoch": 1.28,
"grad_norm": 0.7176101803779602,
"learning_rate": 1.152e-05,
"loss": 0.154,
"step": 160
},
{
"epoch": 1.3599999999999999,
"grad_norm": 2.781198024749756,
"learning_rate": 1.0986666666666668e-05,
"loss": 0.1583,
"step": 170
},
{
"epoch": 1.44,
"grad_norm": 1.81239914894104,
"learning_rate": 1.0453333333333334e-05,
"loss": 0.1469,
"step": 180
},
{
"epoch": 1.52,
"grad_norm": 2.6247360706329346,
"learning_rate": 9.920000000000002e-06,
"loss": 0.1611,
"step": 190
},
{
"epoch": 1.6,
"grad_norm": 3.0847527980804443,
"learning_rate": 9.386666666666668e-06,
"loss": 0.1847,
"step": 200
},
{
"epoch": 1.6800000000000002,
"grad_norm": 1.1336817741394043,
"learning_rate": 8.853333333333334e-06,
"loss": 0.1417,
"step": 210
},
{
"epoch": 1.76,
"grad_norm": 1.5952337980270386,
"learning_rate": 8.32e-06,
"loss": 0.1461,
"step": 220
},
{
"epoch": 1.8399999999999999,
"grad_norm": 2.0221853256225586,
"learning_rate": 7.786666666666666e-06,
"loss": 0.1805,
"step": 230
},
{
"epoch": 1.92,
"grad_norm": 2.383049964904785,
"learning_rate": 7.253333333333335e-06,
"loss": 0.1478,
"step": 240
},
{
"epoch": 2.0,
"grad_norm": 2.451894760131836,
"learning_rate": 6.720000000000001e-06,
"loss": 0.1565,
"step": 250
},
{
"epoch": 2.0,
"eval_accuracy": 0.8908481311312554,
"eval_f1": 0.810126582278481,
"eval_loss": 0.35696592926979065,
"eval_precision": 0.7649402390438247,
"eval_recall": 0.8609865470852018,
"eval_runtime": 1.2788,
"eval_samples_per_second": 117.295,
"eval_steps_per_second": 7.82,
"step": 250
},
{
"epoch": 2.08,
"grad_norm": 1.107708215713501,
"learning_rate": 6.186666666666668e-06,
"loss": 0.0882,
"step": 260
},
{
"epoch": 2.16,
"grad_norm": 2.009230375289917,
"learning_rate": 5.653333333333334e-06,
"loss": 0.1579,
"step": 270
},
{
"epoch": 2.24,
"grad_norm": 1.4875034093856812,
"learning_rate": 5.12e-06,
"loss": 0.1289,
"step": 280
},
{
"epoch": 2.32,
"grad_norm": 2.073133945465088,
"learning_rate": 4.586666666666667e-06,
"loss": 0.1078,
"step": 290
},
{
"epoch": 2.4,
"grad_norm": 1.5456774234771729,
"learning_rate": 4.053333333333333e-06,
"loss": 0.1182,
"step": 300
},
{
"epoch": 2.48,
"grad_norm": 1.452431559562683,
"learning_rate": 3.52e-06,
"loss": 0.1282,
"step": 310
},
{
"epoch": 2.56,
"grad_norm": 2.7509889602661133,
"learning_rate": 2.986666666666667e-06,
"loss": 0.1292,
"step": 320
},
{
"epoch": 2.64,
"grad_norm": 1.7021485567092896,
"learning_rate": 2.4533333333333333e-06,
"loss": 0.1447,
"step": 330
},
{
"epoch": 2.7199999999999998,
"grad_norm": 1.1116437911987305,
"learning_rate": 1.9200000000000003e-06,
"loss": 0.1225,
"step": 340
},
{
"epoch": 2.8,
"grad_norm": 1.4331567287445068,
"learning_rate": 1.3866666666666668e-06,
"loss": 0.2136,
"step": 350
},
{
"epoch": 2.88,
"grad_norm": 7.344844341278076,
"learning_rate": 8.533333333333334e-07,
"loss": 0.1619,
"step": 360
},
{
"epoch": 2.96,
"grad_norm": 2.206089496612549,
"learning_rate": 3.2e-07,
"loss": 0.1553,
"step": 370
}
],
"logging_steps": 10,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 483611882505600.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
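
The log_history above interleaves training entries (logged every 10 steps, per logging_steps) with the end-of-epoch evaluation entries at steps 125 and 250. Below is a minimal Python sketch, not part of the repository, for separating and summarizing them; the relative file path is an assumption about where the checkpoint folder was downloaded.

import json

# A minimal sketch for inspecting this checkpoint's state file.
# The relative path is illustrative; point it at the downloaded checkpoint folder.
with open("checkpoint-375/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss" (logged every `logging_steps` = 10 steps);
# end-of-epoch evaluation entries carry "eval_loss" and the other eval_* metrics.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"epochs: {state['epoch']}, global steps: {state['global_step']}")
print(f"last logged training loss: {train_logs[-1]['loss']}")
for entry in eval_logs:
    print(
        f"epoch {entry['epoch']:.0f}: "
        f"eval_loss={entry['eval_loss']:.4f} "
        f"f1={entry['eval_f1']:.4f} "
        f"precision={entry['eval_precision']:.4f} "
        f"recall={entry['eval_recall']:.4f}"
    )

Under these assumptions the script prints two evaluation lines (epochs 1 and 2), since no epoch-3 evaluation entry is recorded in this state file.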