{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.15278838808250572,
"eval_steps": 10,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015278838808250573,
"grad_norm": 0.32149291038513184,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.665,
"step": 1
},
{
"epoch": 0.0030557677616501145,
"grad_norm": 0.34995731711387634,
"learning_rate": 4.000000000000001e-06,
"loss": 2.6434,
"step": 2
},
{
"epoch": 0.004583651642475172,
"grad_norm": 0.3517548739910126,
"learning_rate": 6e-06,
"loss": 2.7358,
"step": 3
},
{
"epoch": 0.006111535523300229,
"grad_norm": 0.3041296601295471,
"learning_rate": 8.000000000000001e-06,
"loss": 2.5857,
"step": 4
},
{
"epoch": 0.007639419404125287,
"grad_norm": 0.3146103620529175,
"learning_rate": 1e-05,
"loss": 2.6509,
"step": 5
},
{
"epoch": 0.009167303284950344,
"grad_norm": 0.3501134216785431,
"learning_rate": 1.2e-05,
"loss": 2.6375,
"step": 6
},
{
"epoch": 0.0106951871657754,
"grad_norm": 0.3118212819099426,
"learning_rate": 1.4000000000000001e-05,
"loss": 2.685,
"step": 7
},
{
"epoch": 0.012223071046600458,
"grad_norm": 0.33596929907798767,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.7284,
"step": 8
},
{
"epoch": 0.013750954927425516,
"grad_norm": 0.32165855169296265,
"learning_rate": 1.8e-05,
"loss": 2.7919,
"step": 9
},
{
"epoch": 0.015278838808250574,
"grad_norm": 0.3280738592147827,
"learning_rate": 2e-05,
"loss": 2.6194,
"step": 10
},
{
"epoch": 0.015278838808250574,
"eval_loss": 2.6943862438201904,
"eval_runtime": 97.4489,
"eval_samples_per_second": 2.175,
"eval_steps_per_second": 0.544,
"step": 10
},
{
"epoch": 0.01680672268907563,
"grad_norm": 0.3205621838569641,
"learning_rate": 2.2000000000000003e-05,
"loss": 2.6964,
"step": 11
},
{
"epoch": 0.01833460656990069,
"grad_norm": 0.3181299567222595,
"learning_rate": 2.4e-05,
"loss": 2.6889,
"step": 12
},
{
"epoch": 0.019862490450725745,
"grad_norm": 0.3135446012020111,
"learning_rate": 2.6000000000000002e-05,
"loss": 2.6606,
"step": 13
},
{
"epoch": 0.0213903743315508,
"grad_norm": 0.34573134779930115,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.6154,
"step": 14
},
{
"epoch": 0.02291825821237586,
"grad_norm": 0.3248574137687683,
"learning_rate": 3e-05,
"loss": 2.6702,
"step": 15
},
{
"epoch": 0.024446142093200916,
"grad_norm": 0.304574579000473,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.6536,
"step": 16
},
{
"epoch": 0.025974025974025976,
"grad_norm": 0.31393641233444214,
"learning_rate": 3.4000000000000007e-05,
"loss": 2.5807,
"step": 17
},
{
"epoch": 0.02750190985485103,
"grad_norm": 0.34099656343460083,
"learning_rate": 3.6e-05,
"loss": 2.7321,
"step": 18
},
{
"epoch": 0.029029793735676088,
"grad_norm": 0.30074265599250793,
"learning_rate": 3.8e-05,
"loss": 2.5924,
"step": 19
},
{
"epoch": 0.030557677616501147,
"grad_norm": 0.3261857032775879,
"learning_rate": 4e-05,
"loss": 2.6013,
"step": 20
},
{
"epoch": 0.030557677616501147,
"eval_loss": 2.6393423080444336,
"eval_runtime": 97.6877,
"eval_samples_per_second": 2.17,
"eval_steps_per_second": 0.543,
"step": 20
},
{
"epoch": 0.03208556149732621,
"grad_norm": 0.32320329546928406,
"learning_rate": 4.2e-05,
"loss": 2.5175,
"step": 21
},
{
"epoch": 0.03361344537815126,
"grad_norm": 0.32611727714538574,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.7366,
"step": 22
},
{
"epoch": 0.03514132925897632,
"grad_norm": 0.3132462501525879,
"learning_rate": 4.600000000000001e-05,
"loss": 2.4918,
"step": 23
},
{
"epoch": 0.03666921313980138,
"grad_norm": 0.3301452398300171,
"learning_rate": 4.8e-05,
"loss": 2.6077,
"step": 24
},
{
"epoch": 0.03819709702062643,
"grad_norm": 0.3712747097015381,
"learning_rate": 5e-05,
"loss": 2.5816,
"step": 25
},
{
"epoch": 0.03972498090145149,
"grad_norm": 0.33991169929504395,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.6139,
"step": 26
},
{
"epoch": 0.04125286478227655,
"grad_norm": 0.3331744372844696,
"learning_rate": 5.4000000000000005e-05,
"loss": 2.6106,
"step": 27
},
{
"epoch": 0.0427807486631016,
"grad_norm": 0.34741106629371643,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.5478,
"step": 28
},
{
"epoch": 0.04430863254392666,
"grad_norm": 0.34006839990615845,
"learning_rate": 5.8e-05,
"loss": 2.4678,
"step": 29
},
{
"epoch": 0.04583651642475172,
"grad_norm": 0.30897256731987,
"learning_rate": 6e-05,
"loss": 2.5094,
"step": 30
},
{
"epoch": 0.04583651642475172,
"eval_loss": 2.56597638130188,
"eval_runtime": 97.2121,
"eval_samples_per_second": 2.181,
"eval_steps_per_second": 0.545,
"step": 30
},
{
"epoch": 0.04736440030557677,
"grad_norm": 0.3237461745738983,
"learning_rate": 6.2e-05,
"loss": 2.4556,
"step": 31
},
{
"epoch": 0.04889228418640183,
"grad_norm": 0.31788715720176697,
"learning_rate": 6.400000000000001e-05,
"loss": 2.308,
"step": 32
},
{
"epoch": 0.05042016806722689,
"grad_norm": 0.3567247986793518,
"learning_rate": 6.6e-05,
"loss": 2.4991,
"step": 33
},
{
"epoch": 0.05194805194805195,
"grad_norm": 0.38980555534362793,
"learning_rate": 6.800000000000001e-05,
"loss": 2.4502,
"step": 34
},
{
"epoch": 0.053475935828877004,
"grad_norm": 0.3469444215297699,
"learning_rate": 7e-05,
"loss": 2.5429,
"step": 35
},
{
"epoch": 0.05500381970970206,
"grad_norm": 0.385903000831604,
"learning_rate": 7.2e-05,
"loss": 2.6139,
"step": 36
},
{
"epoch": 0.05653170359052712,
"grad_norm": 0.37668120861053467,
"learning_rate": 7.4e-05,
"loss": 2.5119,
"step": 37
},
{
"epoch": 0.058059587471352175,
"grad_norm": 0.3561411201953888,
"learning_rate": 7.6e-05,
"loss": 2.5083,
"step": 38
},
{
"epoch": 0.059587471352177235,
"grad_norm": 0.3774603009223938,
"learning_rate": 7.800000000000001e-05,
"loss": 2.5935,
"step": 39
},
{
"epoch": 0.061115355233002294,
"grad_norm": 0.37734416127204895,
"learning_rate": 8e-05,
"loss": 2.5097,
"step": 40
},
{
"epoch": 0.061115355233002294,
"eval_loss": 2.525169610977173,
"eval_runtime": 97.2448,
"eval_samples_per_second": 2.18,
"eval_steps_per_second": 0.545,
"step": 40
},
{
"epoch": 0.06264323911382735,
"grad_norm": 0.37569305300712585,
"learning_rate": 8.2e-05,
"loss": 2.5988,
"step": 41
},
{
"epoch": 0.06417112299465241,
"grad_norm": 0.38874006271362305,
"learning_rate": 8.4e-05,
"loss": 2.3542,
"step": 42
},
{
"epoch": 0.06569900687547746,
"grad_norm": 0.40507742762565613,
"learning_rate": 8.6e-05,
"loss": 2.5079,
"step": 43
},
{
"epoch": 0.06722689075630252,
"grad_norm": 0.38088974356651306,
"learning_rate": 8.800000000000001e-05,
"loss": 2.4938,
"step": 44
},
{
"epoch": 0.06875477463712758,
"grad_norm": 0.393225759267807,
"learning_rate": 9e-05,
"loss": 2.5691,
"step": 45
},
{
"epoch": 0.07028265851795264,
"grad_norm": 0.39259836077690125,
"learning_rate": 9.200000000000001e-05,
"loss": 2.491,
"step": 46
},
{
"epoch": 0.0718105423987777,
"grad_norm": 0.38004833459854126,
"learning_rate": 9.4e-05,
"loss": 2.3911,
"step": 47
},
{
"epoch": 0.07333842627960276,
"grad_norm": 0.41154587268829346,
"learning_rate": 9.6e-05,
"loss": 2.3079,
"step": 48
},
{
"epoch": 0.0748663101604278,
"grad_norm": 0.4159567058086395,
"learning_rate": 9.8e-05,
"loss": 2.4623,
"step": 49
},
{
"epoch": 0.07639419404125286,
"grad_norm": 0.39570435881614685,
"learning_rate": 0.0001,
"loss": 2.5313,
"step": 50
},
{
"epoch": 0.07639419404125286,
"eval_loss": 2.4937703609466553,
"eval_runtime": 97.1689,
"eval_samples_per_second": 2.182,
"eval_steps_per_second": 0.545,
"step": 50
},
{
"epoch": 0.07792207792207792,
"grad_norm": 0.3842063248157501,
"learning_rate": 0.0001,
"loss": 2.3827,
"step": 51
},
{
"epoch": 0.07944996180290298,
"grad_norm": 0.3905870020389557,
"learning_rate": 0.0001,
"loss": 2.3705,
"step": 52
},
{
"epoch": 0.08097784568372804,
"grad_norm": 0.408173531293869,
"learning_rate": 0.0001,
"loss": 2.4771,
"step": 53
},
{
"epoch": 0.0825057295645531,
"grad_norm": 0.3941194415092468,
"learning_rate": 0.0001,
"loss": 2.4952,
"step": 54
},
{
"epoch": 0.08403361344537816,
"grad_norm": 0.3991928696632385,
"learning_rate": 0.0001,
"loss": 2.4954,
"step": 55
},
{
"epoch": 0.0855614973262032,
"grad_norm": 0.42234569787979126,
"learning_rate": 0.0001,
"loss": 2.3436,
"step": 56
},
{
"epoch": 0.08708938120702826,
"grad_norm": 0.4667830169200897,
"learning_rate": 0.0001,
"loss": 2.4344,
"step": 57
},
{
"epoch": 0.08861726508785332,
"grad_norm": 0.44776275753974915,
"learning_rate": 0.0001,
"loss": 2.4326,
"step": 58
},
{
"epoch": 0.09014514896867838,
"grad_norm": 0.42618316411972046,
"learning_rate": 0.0001,
"loss": 2.3055,
"step": 59
},
{
"epoch": 0.09167303284950344,
"grad_norm": 0.4447256624698639,
"learning_rate": 0.0001,
"loss": 2.5108,
"step": 60
},
{
"epoch": 0.09167303284950344,
"eval_loss": 2.4696223735809326,
"eval_runtime": 98.5528,
"eval_samples_per_second": 2.151,
"eval_steps_per_second": 0.538,
"step": 60
},
{
"epoch": 0.0932009167303285,
"grad_norm": 0.44445377588272095,
"learning_rate": 0.0001,
"loss": 2.4297,
"step": 61
},
{
"epoch": 0.09472880061115355,
"grad_norm": 0.40467268228530884,
"learning_rate": 0.0001,
"loss": 2.2947,
"step": 62
},
{
"epoch": 0.0962566844919786,
"grad_norm": 0.4658174216747284,
"learning_rate": 0.0001,
"loss": 2.5242,
"step": 63
},
{
"epoch": 0.09778456837280367,
"grad_norm": 0.44855207204818726,
"learning_rate": 0.0001,
"loss": 2.3913,
"step": 64
},
{
"epoch": 0.09931245225362872,
"grad_norm": 0.4550195634365082,
"learning_rate": 0.0001,
"loss": 2.3403,
"step": 65
},
{
"epoch": 0.10084033613445378,
"grad_norm": 0.4837151765823364,
"learning_rate": 0.0001,
"loss": 2.4951,
"step": 66
},
{
"epoch": 0.10236822001527884,
"grad_norm": 0.4518112242221832,
"learning_rate": 0.0001,
"loss": 2.432,
"step": 67
},
{
"epoch": 0.1038961038961039,
"grad_norm": 0.43797725439071655,
"learning_rate": 0.0001,
"loss": 2.3761,
"step": 68
},
{
"epoch": 0.10542398777692895,
"grad_norm": 0.4753503203392029,
"learning_rate": 0.0001,
"loss": 2.3425,
"step": 69
},
{
"epoch": 0.10695187165775401,
"grad_norm": 0.4559343159198761,
"learning_rate": 0.0001,
"loss": 2.4907,
"step": 70
},
{
"epoch": 0.10695187165775401,
"eval_loss": 2.4506397247314453,
"eval_runtime": 96.9403,
"eval_samples_per_second": 2.187,
"eval_steps_per_second": 0.547,
"step": 70
},
{
"epoch": 0.10847975553857907,
"grad_norm": 0.43265870213508606,
"learning_rate": 0.0001,
"loss": 2.3957,
"step": 71
},
{
"epoch": 0.11000763941940413,
"grad_norm": 0.49019816517829895,
"learning_rate": 0.0001,
"loss": 2.3443,
"step": 72
},
{
"epoch": 0.11153552330022919,
"grad_norm": 0.46558165550231934,
"learning_rate": 0.0001,
"loss": 2.4487,
"step": 73
},
{
"epoch": 0.11306340718105425,
"grad_norm": 0.4491911232471466,
"learning_rate": 0.0001,
"loss": 2.4371,
"step": 74
},
{
"epoch": 0.11459129106187929,
"grad_norm": 0.4462290108203888,
"learning_rate": 0.0001,
"loss": 2.3225,
"step": 75
},
{
"epoch": 0.11611917494270435,
"grad_norm": 0.4684162437915802,
"learning_rate": 0.0001,
"loss": 2.3301,
"step": 76
},
{
"epoch": 0.11764705882352941,
"grad_norm": 0.4510643780231476,
"learning_rate": 0.0001,
"loss": 2.3803,
"step": 77
},
{
"epoch": 0.11917494270435447,
"grad_norm": 0.45720160007476807,
"learning_rate": 0.0001,
"loss": 2.1956,
"step": 78
},
{
"epoch": 0.12070282658517953,
"grad_norm": 0.5116860866546631,
"learning_rate": 0.0001,
"loss": 2.3297,
"step": 79
},
{
"epoch": 0.12223071046600459,
"grad_norm": 0.49560126662254333,
"learning_rate": 0.0001,
"loss": 2.3534,
"step": 80
},
{
"epoch": 0.12223071046600459,
"eval_loss": 2.431182861328125,
"eval_runtime": 97.315,
"eval_samples_per_second": 2.178,
"eval_steps_per_second": 0.545,
"step": 80
},
{
"epoch": 0.12375859434682965,
"grad_norm": 0.5099540948867798,
"learning_rate": 0.0001,
"loss": 2.3705,
"step": 81
},
{
"epoch": 0.1252864782276547,
"grad_norm": 0.5318695902824402,
"learning_rate": 0.0001,
"loss": 2.433,
"step": 82
},
{
"epoch": 0.12681436210847977,
"grad_norm": 0.5086469054222107,
"learning_rate": 0.0001,
"loss": 2.3501,
"step": 83
},
{
"epoch": 0.12834224598930483,
"grad_norm": 0.5173415541648865,
"learning_rate": 0.0001,
"loss": 2.5133,
"step": 84
},
{
"epoch": 0.12987012987012986,
"grad_norm": 0.5070729851722717,
"learning_rate": 0.0001,
"loss": 2.3097,
"step": 85
},
{
"epoch": 0.13139801375095492,
"grad_norm": 0.5236591100692749,
"learning_rate": 0.0001,
"loss": 2.4274,
"step": 86
},
{
"epoch": 0.13292589763177998,
"grad_norm": 0.4986536502838135,
"learning_rate": 0.0001,
"loss": 2.319,
"step": 87
},
{
"epoch": 0.13445378151260504,
"grad_norm": 0.5196006894111633,
"learning_rate": 0.0001,
"loss": 2.3206,
"step": 88
},
{
"epoch": 0.1359816653934301,
"grad_norm": 0.5313082337379456,
"learning_rate": 0.0001,
"loss": 2.3236,
"step": 89
},
{
"epoch": 0.13750954927425516,
"grad_norm": 0.5488006472587585,
"learning_rate": 0.0001,
"loss": 2.4561,
"step": 90
},
{
"epoch": 0.13750954927425516,
"eval_loss": 2.418342113494873,
"eval_runtime": 97.2483,
"eval_samples_per_second": 2.18,
"eval_steps_per_second": 0.545,
"step": 90
},
{
"epoch": 0.13903743315508021,
"grad_norm": 0.4645894169807434,
"learning_rate": 0.0001,
"loss": 2.2276,
"step": 91
},
{
"epoch": 0.14056531703590527,
"grad_norm": 0.5012871623039246,
"learning_rate": 0.0001,
"loss": 2.2207,
"step": 92
},
{
"epoch": 0.14209320091673033,
"grad_norm": 0.530411422252655,
"learning_rate": 0.0001,
"loss": 2.562,
"step": 93
},
{
"epoch": 0.1436210847975554,
"grad_norm": 0.5485709309577942,
"learning_rate": 0.0001,
"loss": 2.3263,
"step": 94
},
{
"epoch": 0.14514896867838045,
"grad_norm": 0.6266875863075256,
"learning_rate": 0.0001,
"loss": 2.6683,
"step": 95
},
{
"epoch": 0.1466768525592055,
"grad_norm": 0.5087680220603943,
"learning_rate": 0.0001,
"loss": 2.4218,
"step": 96
},
{
"epoch": 0.14820473644003057,
"grad_norm": 0.5369350910186768,
"learning_rate": 0.0001,
"loss": 2.3236,
"step": 97
},
{
"epoch": 0.1497326203208556,
"grad_norm": 0.5454537272453308,
"learning_rate": 0.0001,
"loss": 2.3474,
"step": 98
},
{
"epoch": 0.15126050420168066,
"grad_norm": 0.5537680387496948,
"learning_rate": 0.0001,
"loss": 2.3792,
"step": 99
},
{
"epoch": 0.15278838808250572,
"grad_norm": 0.5667999982833862,
"learning_rate": 0.0001,
"loss": 2.3371,
"step": 100
},
{
"epoch": 0.15278838808250572,
"eval_loss": 2.405158519744873,
"eval_runtime": 96.774,
"eval_samples_per_second": 2.191,
"eval_steps_per_second": 0.548,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.188069974338765e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}