{
"best_metric": 0.21331782639026642,
"best_model_checkpoint": "./xlam_lora_new_ete_over_size_3epoch_multi_mix/checkpoint-1640",
"epoch": 2.9986264435061303,
"eval_steps": 205,
"global_step": 1842,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016279188075494735,
"grad_norm": 0.7572630643844604,
"learning_rate": 2.688172043010753e-06,
"loss": 0.5223,
"step": 10
},
{
"epoch": 0.03255837615098947,
"grad_norm": 0.417061984539032,
"learning_rate": 5.376344086021506e-06,
"loss": 0.4858,
"step": 20
},
{
"epoch": 0.048837564226484206,
"grad_norm": 0.3718095123767853,
"learning_rate": 8.064516129032258e-06,
"loss": 0.4246,
"step": 30
},
{
"epoch": 0.06511675230197894,
"grad_norm": 0.2949349582195282,
"learning_rate": 1.0752688172043012e-05,
"loss": 0.4405,
"step": 40
},
{
"epoch": 0.08139594037747368,
"grad_norm": 0.3159159719944,
"learning_rate": 1.3440860215053763e-05,
"loss": 0.4148,
"step": 50
},
{
"epoch": 0.09767512845296841,
"grad_norm": 0.4167034327983856,
"learning_rate": 1.6129032258064517e-05,
"loss": 0.3393,
"step": 60
},
{
"epoch": 0.11395431652846315,
"grad_norm": 0.39410400390625,
"learning_rate": 1.881720430107527e-05,
"loss": 0.2464,
"step": 70
},
{
"epoch": 0.13023350460395788,
"grad_norm": 0.3644021153450012,
"learning_rate": 2.1505376344086024e-05,
"loss": 0.2294,
"step": 80
},
{
"epoch": 0.1465126926794526,
"grad_norm": 0.30372634530067444,
"learning_rate": 2.4193548387096777e-05,
"loss": 0.2315,
"step": 90
},
{
"epoch": 0.16279188075494735,
"grad_norm": 0.2586315870285034,
"learning_rate": 2.4999011923655086e-05,
"loss": 0.1932,
"step": 100
},
{
"epoch": 0.17907106883044208,
"grad_norm": 0.37825971841812134,
"learning_rate": 2.4994172742085852e-05,
"loss": 0.2204,
"step": 110
},
{
"epoch": 0.19535025690593683,
"grad_norm": 0.21422357857227325,
"learning_rate": 2.4985302531208654e-05,
"loss": 0.1795,
"step": 120
},
{
"epoch": 0.21162944498143155,
"grad_norm": 0.2566869854927063,
"learning_rate": 2.4972404152844008e-05,
"loss": 0.1668,
"step": 130
},
{
"epoch": 0.2279086330569263,
"grad_norm": 0.28194501996040344,
"learning_rate": 2.49554817684312e-05,
"loss": 0.1476,
"step": 140
},
{
"epoch": 0.24418782113242102,
"grad_norm": 0.24139340221881866,
"learning_rate": 2.4934540837685647e-05,
"loss": 0.1609,
"step": 150
},
{
"epoch": 0.26046700920791577,
"grad_norm": 0.3306334614753723,
"learning_rate": 2.490958811683741e-05,
"loss": 0.1638,
"step": 160
},
{
"epoch": 0.2767461972834105,
"grad_norm": 0.27301114797592163,
"learning_rate": 2.4880631656451447e-05,
"loss": 0.1494,
"step": 170
},
{
"epoch": 0.2930253853589052,
"grad_norm": 0.34037259221076965,
"learning_rate": 2.484768079883018e-05,
"loss": 0.1534,
"step": 180
},
{
"epoch": 0.30930457343439993,
"grad_norm": 0.2306762933731079,
"learning_rate": 2.4810746174999418e-05,
"loss": 0.1749,
"step": 190
},
{
"epoch": 0.3255837615098947,
"grad_norm": 0.3183388113975525,
"learning_rate": 2.476983970127841e-05,
"loss": 0.1482,
"step": 200
},
{
"epoch": 0.33372335554764204,
"eval_loss": 0.3089325428009033,
"eval_runtime": 34.8769,
"eval_samples_per_second": 5.419,
"eval_steps_per_second": 5.419,
"step": 205
},
{
"epoch": 0.34186294958538943,
"grad_norm": 0.28704971075057983,
"learning_rate": 2.472497457543525e-05,
"loss": 0.1471,
"step": 210
},
{
"epoch": 0.35814213766088415,
"grad_norm": 0.2939195930957794,
"learning_rate": 2.4676165272428866e-05,
"loss": 0.1631,
"step": 220
},
{
"epoch": 0.3744213257363789,
"grad_norm": 0.31506845355033875,
"learning_rate": 2.4623427539738897e-05,
"loss": 0.1353,
"step": 230
},
{
"epoch": 0.39070051381187365,
"grad_norm": 0.3761660158634186,
"learning_rate": 2.456677839228506e-05,
"loss": 0.1716,
"step": 240
},
{
"epoch": 0.4069797018873684,
"grad_norm": 0.29187777638435364,
"learning_rate": 2.450623610693757e-05,
"loss": 0.1195,
"step": 250
},
{
"epoch": 0.4232588899628631,
"grad_norm": 0.46237581968307495,
"learning_rate": 2.4441820216620425e-05,
"loss": 0.1484,
"step": 260
},
{
"epoch": 0.4395380780383578,
"grad_norm": 0.4580917954444885,
"learning_rate": 2.437355150400945e-05,
"loss": 0.1009,
"step": 270
},
{
"epoch": 0.4558172661138526,
"grad_norm": 0.4181467890739441,
"learning_rate": 2.4301451994827112e-05,
"loss": 0.1376,
"step": 280
},
{
"epoch": 0.4720964541893473,
"grad_norm": 0.3629908561706543,
"learning_rate": 2.422554495073633e-05,
"loss": 0.1083,
"step": 290
},
{
"epoch": 0.48837564226484204,
"grad_norm": 0.4282682240009308,
"learning_rate": 2.4145854861835447e-05,
"loss": 0.1373,
"step": 300
},
{
"epoch": 0.5046548303403368,
"grad_norm": 0.4914080500602722,
"learning_rate": 2.406240743875699e-05,
"loss": 0.156,
"step": 310
},
{
"epoch": 0.5209340184158315,
"grad_norm": 0.3880573511123657,
"learning_rate": 2.3975229604372526e-05,
"loss": 0.1415,
"step": 320
},
{
"epoch": 0.5372132064913262,
"grad_norm": 0.42599862813949585,
"learning_rate": 2.3884349485106477e-05,
"loss": 0.1338,
"step": 330
},
{
"epoch": 0.553492394566821,
"grad_norm": 0.4339046776294708,
"learning_rate": 2.378979640186163e-05,
"loss": 0.1368,
"step": 340
},
{
"epoch": 0.5697715826423158,
"grad_norm": 0.30713170766830444,
"learning_rate": 2.3691600860559222e-05,
"loss": 0.1154,
"step": 350
},
{
"epoch": 0.5860507707178104,
"grad_norm": 0.4618566036224365,
"learning_rate": 2.3589794542296764e-05,
"loss": 0.1203,
"step": 360
},
{
"epoch": 0.6023299587933052,
"grad_norm": 0.40802672505378723,
"learning_rate": 2.3484410293126664e-05,
"loss": 0.1144,
"step": 370
},
{
"epoch": 0.6186091468687999,
"grad_norm": 0.5242702960968018,
"learning_rate": 2.3375482113459014e-05,
"loss": 0.1281,
"step": 380
},
{
"epoch": 0.6348883349442946,
"grad_norm": 0.4045926630496979,
"learning_rate": 2.3263045147091944e-05,
"loss": 0.1145,
"step": 390
},
{
"epoch": 0.6511675230197894,
"grad_norm": 0.5347346067428589,
"learning_rate": 2.3147135669873096e-05,
"loss": 0.1256,
"step": 400
},
{
"epoch": 0.6674467110952841,
"grad_norm": 0.4755608141422272,
"learning_rate": 2.302779107799583e-05,
"loss": 0.1251,
"step": 410
},
{
"epoch": 0.6674467110952841,
"eval_loss": 0.26128318905830383,
"eval_runtime": 34.9177,
"eval_samples_per_second": 5.413,
"eval_steps_per_second": 5.413,
"step": 410
},
{
"epoch": 0.6837258991707789,
"grad_norm": 0.4720211923122406,
"learning_rate": 2.290504987593399e-05,
"loss": 0.1399,
"step": 420
},
{
"epoch": 0.7000050872462736,
"grad_norm": 0.709035336971283,
"learning_rate": 2.2778951664019105e-05,
"loss": 0.1375,
"step": 430
},
{
"epoch": 0.7162842753217683,
"grad_norm": 0.534866213798523,
"learning_rate": 2.2649537125664034e-05,
"loss": 0.1125,
"step": 440
},
{
"epoch": 0.7325634633972631,
"grad_norm": 0.522056519985199,
"learning_rate": 2.2516848014237146e-05,
"loss": 0.0943,
"step": 450
},
{
"epoch": 0.7488426514727577,
"grad_norm": 0.2830965518951416,
"learning_rate": 2.238092713959133e-05,
"loss": 0.1248,
"step": 460
},
{
"epoch": 0.7651218395482525,
"grad_norm": 0.39431601762771606,
"learning_rate": 2.2241818354252113e-05,
"loss": 0.1248,
"step": 470
},
{
"epoch": 0.7814010276237473,
"grad_norm": 0.4821482002735138,
"learning_rate": 2.209956653926944e-05,
"loss": 0.1359,
"step": 480
},
{
"epoch": 0.797680215699242,
"grad_norm": 0.4956236481666565,
"learning_rate": 2.1954217589737535e-05,
"loss": 0.1232,
"step": 490
},
{
"epoch": 0.8139594037747367,
"grad_norm": 0.49444642663002014,
"learning_rate": 2.180581839998766e-05,
"loss": 0.1031,
"step": 500
},
{
"epoch": 0.8302385918502315,
"grad_norm": 0.3857091963291168,
"learning_rate": 2.165441684845847e-05,
"loss": 0.1023,
"step": 510
},
{
"epoch": 0.8465177799257262,
"grad_norm": 0.4830643832683563,
"learning_rate": 2.150006178224886e-05,
"loss": 0.1067,
"step": 520
},
{
"epoch": 0.862796968001221,
"grad_norm": 0.5119408965110779,
"learning_rate": 2.1342803001358278e-05,
"loss": 0.1209,
"step": 530
},
{
"epoch": 0.8790761560767156,
"grad_norm": 0.46363013982772827,
"learning_rate": 2.118269124261963e-05,
"loss": 0.1134,
"step": 540
},
{
"epoch": 0.8953553441522104,
"grad_norm": 0.42933255434036255,
"learning_rate": 2.1019778163329912e-05,
"loss": 0.1101,
"step": 550
},
{
"epoch": 0.9116345322277052,
"grad_norm": 0.5474070906639099,
"learning_rate": 2.0854116324583867e-05,
"loss": 0.1291,
"step": 560
},
{
"epoch": 0.9279137203031999,
"grad_norm": 0.43502509593963623,
"learning_rate": 2.0685759174316066e-05,
"loss": 0.0936,
"step": 570
},
{
"epoch": 0.9441929083786946,
"grad_norm": 0.632621169090271,
"learning_rate": 2.051476103005684e-05,
"loss": 0.1196,
"step": 580
},
{
"epoch": 0.9604720964541893,
"grad_norm": 0.553187906742096,
"learning_rate": 2.034117706140768e-05,
"loss": 0.1186,
"step": 590
},
{
"epoch": 0.9767512845296841,
"grad_norm": 0.48446330428123474,
"learning_rate": 2.0165063272241712e-05,
"loss": 0.1249,
"step": 600
},
{
"epoch": 0.9930304726051788,
"grad_norm": 0.47837090492248535,
"learning_rate": 1.9986476482635003e-05,
"loss": 0.1097,
"step": 610
},
{
"epoch": 1.0011700666429262,
"eval_loss": 0.2388339340686798,
"eval_runtime": 34.8769,
"eval_samples_per_second": 5.419,
"eval_steps_per_second": 5.419,
"step": 615
},
{
"epoch": 1.0093096606806735,
"grad_norm": 0.5520356893539429,
"learning_rate": 1.980547431053456e-05,
"loss": 0.131,
"step": 620
},
{
"epoch": 1.0255888487561682,
"grad_norm": 0.6150078177452087,
"learning_rate": 1.9622115153168884e-05,
"loss": 0.1187,
"step": 630
},
{
"epoch": 1.041868036831663,
"grad_norm": 0.5100656151771545,
"learning_rate": 1.9436458168207117e-05,
"loss": 0.114,
"step": 640
},
{
"epoch": 1.0581472249071577,
"grad_norm": 0.5156052112579346,
"learning_rate": 1.9248563254672825e-05,
"loss": 0.1099,
"step": 650
},
{
"epoch": 1.0744264129826524,
"grad_norm": 0.4662775993347168,
"learning_rate": 1.9058491033618632e-05,
"loss": 0.1135,
"step": 660
},
{
"epoch": 1.0907056010581473,
"grad_norm": 0.4357255697250366,
"learning_rate": 1.886630282856787e-05,
"loss": 0.1036,
"step": 670
},
{
"epoch": 1.106984789133642,
"grad_norm": 0.3861764967441559,
"learning_rate": 1.867206064572962e-05,
"loss": 0.1145,
"step": 680
},
{
"epoch": 1.1232639772091366,
"grad_norm": 0.4562045633792877,
"learning_rate": 1.8475827153993447e-05,
"loss": 0.1107,
"step": 690
},
{
"epoch": 1.1395431652846315,
"grad_norm": 0.332917720079422,
"learning_rate": 1.8277665664710387e-05,
"loss": 0.1266,
"step": 700
},
{
"epoch": 1.1558223533601262,
"grad_norm": 0.5971720814704895,
"learning_rate": 1.807764011126663e-05,
"loss": 0.1122,
"step": 710
},
{
"epoch": 1.1721015414356208,
"grad_norm": 0.6102172136306763,
"learning_rate": 1.787581502845651e-05,
"loss": 0.1046,
"step": 720
},
{
"epoch": 1.1883807295111157,
"grad_norm": 0.5294010043144226,
"learning_rate": 1.767225553166146e-05,
"loss": 0.1044,
"step": 730
},
{
"epoch": 1.2046599175866104,
"grad_norm": 0.5074148178100586,
"learning_rate": 1.7467027295841688e-05,
"loss": 0.1251,
"step": 740
},
{
"epoch": 1.220939105662105,
"grad_norm": 0.6349917650222778,
"learning_rate": 1.7260196534347235e-05,
"loss": 0.1037,
"step": 750
},
{
"epoch": 1.2372182937376,
"grad_norm": 0.34580153226852417,
"learning_rate": 1.7051829977555426e-05,
"loss": 0.0831,
"step": 760
},
{
"epoch": 1.2534974818130946,
"grad_norm": 0.4629954993724823,
"learning_rate": 1.684199485134144e-05,
"loss": 0.1068,
"step": 770
},
{
"epoch": 1.2697766698885893,
"grad_norm": 0.6406750082969666,
"learning_rate": 1.6630758855389055e-05,
"loss": 0.1192,
"step": 780
},
{
"epoch": 1.286055857964084,
"grad_norm": 0.4982251226902008,
"learning_rate": 1.6418190141348485e-05,
"loss": 0.123,
"step": 790
},
{
"epoch": 1.3023350460395788,
"grad_norm": 0.5146717429161072,
"learning_rate": 1.6204357290848464e-05,
"loss": 0.0831,
"step": 800
},
{
"epoch": 1.3186142341150735,
"grad_norm": 0.4735712707042694,
"learning_rate": 1.5989329293369538e-05,
"loss": 0.0971,
"step": 810
},
{
"epoch": 1.3348934221905682,
"grad_norm": 0.7393200397491455,
"learning_rate": 1.5773175523985818e-05,
"loss": 0.0923,
"step": 820
},
{
"epoch": 1.3348934221905682,
"eval_loss": 0.22815725207328796,
"eval_runtime": 34.8794,
"eval_samples_per_second": 5.419,
"eval_steps_per_second": 5.419,
"step": 820
},
{
"epoch": 1.351172610266063,
"grad_norm": 0.8956180214881897,
"learning_rate": 1.5555965720982284e-05,
"loss": 0.0817,
"step": 830
},
{
"epoch": 1.3674517983415577,
"grad_norm": 0.7423743009567261,
"learning_rate": 1.533776996335497e-05,
"loss": 0.1178,
"step": 840
},
{
"epoch": 1.3837309864170524,
"grad_norm": 0.7034802436828613,
"learning_rate": 1.5118658648201145e-05,
"loss": 0.1289,
"step": 850
},
{
"epoch": 1.400010174492547,
"grad_norm": 0.48646238446235657,
"learning_rate": 1.4898702468006922e-05,
"loss": 0.0839,
"step": 860
},
{
"epoch": 1.416289362568042,
"grad_norm": 0.28704097867012024,
"learning_rate": 1.4677972387839548e-05,
"loss": 0.0974,
"step": 870
},
{
"epoch": 1.4325685506435366,
"grad_norm": 0.674045205116272,
"learning_rate": 1.4456539622451748e-05,
"loss": 0.1006,
"step": 880
},
{
"epoch": 1.4488477387190315,
"grad_norm": 0.3513787090778351,
"learning_rate": 1.4234475613305509e-05,
"loss": 0.1104,
"step": 890
},
{
"epoch": 1.4651269267945262,
"grad_norm": 0.8029477596282959,
"learning_rate": 1.4011852005522727e-05,
"loss": 0.1131,
"step": 900
},
{
"epoch": 1.4814061148700208,
"grad_norm": 0.5420731902122498,
"learning_rate": 1.378874062477015e-05,
"loss": 0.0943,
"step": 910
},
{
"epoch": 1.4976853029455155,
"grad_norm": 0.7574429512023926,
"learning_rate": 1.3565213454086048e-05,
"loss": 0.1234,
"step": 920
},
{
"epoch": 1.5139644910210102,
"grad_norm": 0.5867305994033813,
"learning_rate": 1.3341342610656157e-05,
"loss": 0.1036,
"step": 930
},
{
"epoch": 1.530243679096505,
"grad_norm": 0.47744086384773254,
"learning_rate": 1.311720032254629e-05,
"loss": 0.1082,
"step": 940
},
{
"epoch": 1.546522867172,
"grad_norm": 0.6975990533828735,
"learning_rate": 1.289285890539919e-05,
"loss": 0.0967,
"step": 950
},
{
"epoch": 1.5628020552474946,
"grad_norm": 0.7781053781509399,
"learning_rate": 1.2668390739103172e-05,
"loss": 0.1219,
"step": 960
},
{
"epoch": 1.5790812433229893,
"grad_norm": 0.5423984527587891,
"learning_rate": 1.2443868244439958e-05,
"loss": 0.1085,
"step": 970
},
{
"epoch": 1.595360431398484,
"grad_norm": 0.5535146594047546,
"learning_rate": 1.2219363859719392e-05,
"loss": 0.0942,
"step": 980
},
{
"epoch": 1.6116396194739786,
"grad_norm": 0.30531561374664307,
"learning_rate": 1.1994950017408451e-05,
"loss": 0.0944,
"step": 990
},
{
"epoch": 1.6279188075494735,
"grad_norm": 0.7325620055198669,
"learning_rate": 1.1770699120762161e-05,
"loss": 0.1126,
"step": 1000
},
{
"epoch": 1.6441979956249682,
"grad_norm": 1.1568708419799805,
"learning_rate": 1.1546683520463961e-05,
"loss": 0.1073,
"step": 1010
},
{
"epoch": 1.660477183700463,
"grad_norm": 0.6926931142807007,
"learning_rate": 1.1322975491282961e-05,
"loss": 0.0825,
"step": 1020
},
{
"epoch": 1.6686167777382104,
"eval_loss": 0.22156645357608795,
"eval_runtime": 34.8778,
"eval_samples_per_second": 5.419,
"eval_steps_per_second": 5.419,
"step": 1025
},
{
"epoch": 1.6767563717759577,
"grad_norm": 0.41277509927749634,
"learning_rate": 1.1099647208755764e-05,
"loss": 0.0991,
"step": 1030
},
{
"epoch": 1.6930355598514524,
"grad_norm": 0.4389091730117798,
"learning_rate": 1.0876770725900265e-05,
"loss": 0.088,
"step": 1040
},
{
"epoch": 1.709314747926947,
"grad_norm": 0.48445749282836914,
"learning_rate": 1.0654417949968986e-05,
"loss": 0.1158,
"step": 1050
},
{
"epoch": 1.725593936002442,
"grad_norm": 0.6507833003997803,
"learning_rate": 1.0432660619249448e-05,
"loss": 0.1099,
"step": 1060
},
{
"epoch": 1.7418731240779366,
"grad_norm": 0.6933814883232117,
"learning_rate": 1.0211570279919044e-05,
"loss": 0.0757,
"step": 1070
},
{
"epoch": 1.7581523121534315,
"grad_norm": 0.7795721292495728,
"learning_rate": 9.991218262961901e-06,
"loss": 0.1017,
"step": 1080
},
{
"epoch": 1.7744315002289262,
"grad_norm": 0.594406008720398,
"learning_rate": 9.771675661155165e-06,
"loss": 0.1144,
"step": 1090
},
{
"epoch": 1.7907106883044208,
"grad_norm": 0.34790194034576416,
"learning_rate": 9.553013306132158e-06,
"loss": 0.0904,
"step": 1100
},
{
"epoch": 1.8069898763799155,
"grad_norm": 0.4349744915962219,
"learning_rate": 9.335301745529751e-06,
"loss": 0.1085,
"step": 1110
},
{
"epoch": 1.8232690644554101,
"grad_norm": 0.5773786306381226,
"learning_rate": 9.118611220227399e-06,
"loss": 0.1038,
"step": 1120
},
{
"epoch": 1.839548252530905,
"grad_norm": 0.4364662766456604,
"learning_rate": 8.903011641685128e-06,
"loss": 0.097,
"step": 1130
},
{
"epoch": 1.8558274406063997,
"grad_norm": 0.7753048539161682,
"learning_rate": 8.688572569387817e-06,
"loss": 0.1045,
"step": 1140
},
{
"epoch": 1.8721066286818946,
"grad_norm": 0.48441290855407715,
"learning_rate": 8.475363188403022e-06,
"loss": 0.095,
"step": 1150
},
{
"epoch": 1.8883858167573893,
"grad_norm": 0.6351140141487122,
"learning_rate": 8.263452287059607e-06,
"loss": 0.0977,
"step": 1160
},
{
"epoch": 1.904665004832884,
"grad_norm": 0.8837946057319641,
"learning_rate": 8.052908234754376e-06,
"loss": 0.0987,
"step": 1170
},
{
"epoch": 1.9209441929083786,
"grad_norm": 0.48196184635162354,
"learning_rate": 7.84379895989388e-06,
"loss": 0.088,
"step": 1180
},
{
"epoch": 1.9372233809838735,
"grad_norm": 0.5001464486122131,
"learning_rate": 7.636191927978465e-06,
"loss": 0.1161,
"step": 1190
},
{
"epoch": 1.9535025690593681,
"grad_norm": 0.6405985951423645,
"learning_rate": 7.430154119835716e-06,
"loss": 0.1023,
"step": 1200
},
{
"epoch": 1.969781757134863,
"grad_norm": 0.7047804594039917,
"learning_rate": 7.225752010010231e-06,
"loss": 0.1131,
"step": 1210
},
{
"epoch": 1.9860609452103577,
"grad_norm": 0.5221819281578064,
"learning_rate": 7.023051545316763e-06,
"loss": 0.0948,
"step": 1220
},
{
"epoch": 2.0023401332858524,
"grad_norm": 0.4171787202358246,
"learning_rate": 6.822118123563614e-06,
"loss": 0.0995,
"step": 1230
},
{
"epoch": 2.0023401332858524,
"eval_loss": 0.21631866693496704,
"eval_runtime": 34.8988,
"eval_samples_per_second": 5.416,
"eval_steps_per_second": 5.416,
"step": 1230
},
{
"epoch": 2.018619321361347,
"grad_norm": 0.7596387267112732,
"learning_rate": 6.623016572453172e-06,
"loss": 0.104,
"step": 1240
},
{
"epoch": 2.0348985094368417,
"grad_norm": 0.3702397346496582,
"learning_rate": 6.425811128666353e-06,
"loss": 0.0693,
"step": 1250
},
{
"epoch": 2.0511776975123364,
"grad_norm": 0.605099081993103,
"learning_rate": 6.230565417137758e-06,
"loss": 0.097,
"step": 1260
},
{
"epoch": 2.0674568855878315,
"grad_norm": 0.4555053412914276,
"learning_rate": 6.03734243052818e-06,
"loss": 0.0976,
"step": 1270
},
{
"epoch": 2.083736073663326,
"grad_norm": 0.7848448157310486,
"learning_rate": 5.8462045089011066e-06,
"loss": 0.1013,
"step": 1280
},
{
"epoch": 2.100015261738821,
"grad_norm": 0.6905212998390198,
"learning_rate": 5.657213319609776e-06,
"loss": 0.1094,
"step": 1290
},
{
"epoch": 2.1162944498143155,
"grad_norm": 0.5153264999389648,
"learning_rate": 5.4704298374012834e-06,
"loss": 0.0789,
"step": 1300
},
{
"epoch": 2.13257363788981,
"grad_norm": 0.8393344879150391,
"learning_rate": 5.2859143247441e-06,
"loss": 0.0904,
"step": 1310
},
{
"epoch": 2.148852825965305,
"grad_norm": 0.7440715432167053,
"learning_rate": 5.103726312385452e-06,
"loss": 0.0938,
"step": 1320
},
{
"epoch": 2.1651320140408,
"grad_norm": 0.8069117069244385,
"learning_rate": 4.923924580144743e-06,
"loss": 0.0908,
"step": 1330
},
{
"epoch": 2.1814112021162946,
"grad_norm": 0.5500065088272095,
"learning_rate": 4.746567137949261e-06,
"loss": 0.0976,
"step": 1340
},
{
"epoch": 2.1976903901917892,
"grad_norm": 0.51816725730896,
"learning_rate": 4.5717112071182715e-06,
"loss": 0.0889,
"step": 1350
},
{
"epoch": 2.213969578267284,
"grad_norm": 0.4226435124874115,
"learning_rate": 4.399413201901559e-06,
"loss": 0.0814,
"step": 1360
},
{
"epoch": 2.2302487663427786,
"grad_norm": 0.4923081398010254,
"learning_rate": 4.229728711278325e-06,
"loss": 0.086,
"step": 1370
},
{
"epoch": 2.2465279544182732,
"grad_norm": 0.5883035659790039,
"learning_rate": 4.062712481022371e-06,
"loss": 0.095,
"step": 1380
},
{
"epoch": 2.2628071424937684,
"grad_norm": 0.5114026069641113,
"learning_rate": 3.898418396039323e-06,
"loss": 0.1038,
"step": 1390
},
{
"epoch": 2.279086330569263,
"grad_norm": 0.5486142039299011,
"learning_rate": 3.7368994629815953e-06,
"loss": 0.0902,
"step": 1400
},
{
"epoch": 2.2953655186447577,
"grad_norm": 0.756912350654602,
"learning_rate": 3.5782077931467e-06,
"loss": 0.0706,
"step": 1410
},
{
"epoch": 2.3116447067202524,
"grad_norm": 0.6888672709465027,
"learning_rate": 3.42239458566444e-06,
"loss": 0.1065,
"step": 1420
},
{
"epoch": 2.327923894795747,
"grad_norm": 0.5472647547721863,
"learning_rate": 3.269510110978398e-06,
"loss": 0.0815,
"step": 1430
},
{
"epoch": 2.3360634888334944,
"eval_loss": 0.21516536176204681,
"eval_runtime": 34.891,
"eval_samples_per_second": 5.417,
"eval_steps_per_second": 5.417,
"step": 1435
},
{
"epoch": 2.3442030828712417,
"grad_norm": 0.5613276958465576,
"learning_rate": 3.119603694627042e-06,
"loss": 0.0923,
"step": 1440
},
{
"epoch": 2.3604822709467363,
"grad_norm": 0.8540468811988831,
"learning_rate": 2.9727237013296854e-06,
"loss": 0.1192,
"step": 1450
},
{
"epoch": 2.3767614590222315,
"grad_norm": 0.7269755005836487,
"learning_rate": 2.828917519382457e-06,
"loss": 0.0889,
"step": 1460
},
{
"epoch": 2.393040647097726,
"grad_norm": 0.6140917539596558,
"learning_rate": 2.6882315453692686e-06,
"loss": 0.0936,
"step": 1470
},
{
"epoch": 2.409319835173221,
"grad_norm": 0.4730454981327057,
"learning_rate": 2.550711169192775e-06,
"loss": 0.0976,
"step": 1480
},
{
"epoch": 2.4255990232487155,
"grad_norm": 0.5974939465522766,
"learning_rate": 2.4164007594300875e-06,
"loss": 0.0913,
"step": 1490
},
{
"epoch": 2.44187821132421,
"grad_norm": 0.6668256521224976,
"learning_rate": 2.2853436490180374e-06,
"loss": 0.0982,
"step": 1500
},
{
"epoch": 2.458157399399705,
"grad_norm": 0.6182997226715088,
"learning_rate": 2.1575821212725334e-06,
"loss": 0.0861,
"step": 1510
},
{
"epoch": 2.4744365874752,
"grad_norm": 0.5460255146026611,
"learning_rate": 2.0331573962465864e-06,
"loss": 0.086,
"step": 1520
},
{
"epoch": 2.4907157755506946,
"grad_norm": 0.6361858248710632,
"learning_rate": 1.912109617431372e-06,
"loss": 0.0911,
"step": 1530
},
{
"epoch": 2.5069949636261892,
"grad_norm": 0.8699812889099121,
"learning_rate": 1.7944778388046243e-06,
"loss": 0.0884,
"step": 1540
},
{
"epoch": 2.523274151701684,
"grad_norm": 0.5886068344116211,
"learning_rate": 1.680300012230543e-06,
"loss": 0.1027,
"step": 1550
},
{
"epoch": 2.5395533397771786,
"grad_norm": 0.6138848066329956,
"learning_rate": 1.5696129752152774e-06,
"loss": 0.0939,
"step": 1560
},
{
"epoch": 2.5558325278526732,
"grad_norm": 0.7268607020378113,
"learning_rate": 1.4624524390219455e-06,
"loss": 0.083,
"step": 1570
},
{
"epoch": 2.572111715928168,
"grad_norm": 0.619888961315155,
"learning_rate": 1.3588529771490054e-06,
"loss": 0.1087,
"step": 1580
},
{
"epoch": 2.5883909040036626,
"grad_norm": 0.5299406051635742,
"learning_rate": 1.2588480141757204e-06,
"loss": 0.0997,
"step": 1590
},
{
"epoch": 2.6046700920791577,
"grad_norm": 0.6051465272903442,
"learning_rate": 1.1624698149782842e-06,
"loss": 0.0953,
"step": 1600
},
{
"epoch": 2.6209492801546523,
"grad_norm": 0.6585546135902405,
"learning_rate": 1.0697494743201226e-06,
"loss": 0.1057,
"step": 1610
},
{
"epoch": 2.637228468230147,
"grad_norm": 0.5243381261825562,
"learning_rate": 9.807169068197008e-07,
"loss": 0.09,
"step": 1620
},
{
"epoch": 2.6535076563056417,
"grad_norm": 0.6636092066764832,
"learning_rate": 8.95400837299093e-07,
"loss": 0.061,
"step": 1630
},
{
"epoch": 2.6697868443811363,
"grad_norm": 0.6529124975204468,
"learning_rate": 8.138287915164078e-07,
"loss": 0.0897,
"step": 1640
},
{
"epoch": 2.6697868443811363,
"eval_loss": 0.21331782639026642,
"eval_runtime": 34.9348,
"eval_samples_per_second": 5.41,
"eval_steps_per_second": 5.41,
"step": 1640
},
{
"epoch": 2.6860660324566314,
"grad_norm": 0.7361763715744019,
"learning_rate": 7.360270872850808e-07,
"loss": 0.0983,
"step": 1650
},
{
"epoch": 2.702345220532126,
"grad_norm": 0.7820421457290649,
"learning_rate": 6.620208259828855e-07,
"loss": 0.0724,
"step": 1660
},
{
"epoch": 2.718624408607621,
"grad_norm": 0.47821661829948425,
"learning_rate": 5.918338844534077e-07,
"loss": 0.0906,
"step": 1670
},
{
"epoch": 2.7349035966831154,
"grad_norm": 0.5179721713066101,
"learning_rate": 5.25488907302589e-07,
"loss": 0.0851,
"step": 1680
},
{
"epoch": 2.75118278475861,
"grad_norm": 0.7704452872276306,
"learning_rate": 4.63007299592845e-07,
"loss": 0.0765,
"step": 1690
},
{
"epoch": 2.7674619728341048,
"grad_norm": 0.6302313208580017,
"learning_rate": 4.044092199370797e-07,
"loss": 0.093,
"step": 1700
},
{
"epoch": 2.7837411609095994,
"grad_norm": 0.43464457988739014,
"learning_rate": 3.497135739948657e-07,
"loss": 0.0949,
"step": 1710
},
{
"epoch": 2.800020348985094,
"grad_norm": 0.6571847796440125,
"learning_rate": 2.98938008372851e-07,
"loss": 0.0897,
"step": 1720
},
{
"epoch": 2.816299537060589,
"grad_norm": 0.542305052280426,
"learning_rate": 2.520989049313957e-07,
"loss": 0.0968,
"step": 1730
},
{
"epoch": 2.832578725136084,
"grad_norm": 0.5765232443809509,
"learning_rate": 2.0921137549923946e-07,
"loss": 0.0782,
"step": 1740
},
{
"epoch": 2.8488579132115786,
"grad_norm": 0.6098420023918152,
"learning_rate": 1.702892569979353e-07,
"loss": 0.0808,
"step": 1750
},
{
"epoch": 2.865137101287073,
"grad_norm": 0.5190752148628235,
"learning_rate": 1.353451069776024e-07,
"loss": 0.106,
"step": 1760
},
{
"epoch": 2.881416289362568,
"grad_norm": 0.5709157586097717,
"learning_rate": 1.0439019956544893e-07,
"loss": 0.104,
"step": 1770
},
{
"epoch": 2.897695477438063,
"grad_norm": 0.6572442054748535,
"learning_rate": 7.743452182837202e-08,
"loss": 0.1155,
"step": 1780
},
{
"epoch": 2.9139746655135577,
"grad_norm": 0.8765654563903809,
"learning_rate": 5.448677055080453e-08,
"loss": 0.1118,
"step": 1790
},
{
"epoch": 2.9302538535890523,
"grad_norm": 0.3849591910839081,
"learning_rate": 3.555434942884156e-08,
"loss": 0.097,
"step": 1800
},
{
"epoch": 2.946533041664547,
"grad_norm": 0.6078172922134399,
"learning_rate": 2.06433666815678e-08,
"loss": 0.0987,
"step": 1810
},
{
"epoch": 2.9628122297400417,
"grad_norm": 0.7132030129432678,
"learning_rate": 9.758633080352019e-09,
"loss": 0.0866,
"step": 1820
},
{
"epoch": 2.9790914178155363,
"grad_norm": 0.879240870475769,
"learning_rate": 2.903660396723351e-09,
"loss": 0.0863,
"step": 1830
},
{
"epoch": 2.995370605891031,
"grad_norm": 0.6857780814170837,
"learning_rate": 8.066026937064709e-11,
"loss": 0.1078,
"step": 1840
},
{
"epoch": 2.9986264435061303,
"step": 1842,
"total_flos": 1.5737761155189965e+18,
"train_loss": 0.12212386991865083,
"train_runtime": 37466.128,
"train_samples_per_second": 1.574,
"train_steps_per_second": 0.049
}
],
"logging_steps": 10,
"max_steps": 1842,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 205,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5737761155189965e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}