{ "best_metric": 0.22815725207328796, "best_model_checkpoint": "./xlam_lora_new_ete_over_size_3epoch_multi_mix/checkpoint-820", "epoch": 1.3348934221905682, "eval_steps": 205, "global_step": 820, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.016279188075494735, "grad_norm": 0.7572630643844604, "learning_rate": 2.688172043010753e-06, "loss": 0.5223, "step": 10 }, { "epoch": 0.03255837615098947, "grad_norm": 0.417061984539032, "learning_rate": 5.376344086021506e-06, "loss": 0.4858, "step": 20 }, { "epoch": 0.048837564226484206, "grad_norm": 0.3718095123767853, "learning_rate": 8.064516129032258e-06, "loss": 0.4246, "step": 30 }, { "epoch": 0.06511675230197894, "grad_norm": 0.2949349582195282, "learning_rate": 1.0752688172043012e-05, "loss": 0.4405, "step": 40 }, { "epoch": 0.08139594037747368, "grad_norm": 0.3159159719944, "learning_rate": 1.3440860215053763e-05, "loss": 0.4148, "step": 50 }, { "epoch": 0.09767512845296841, "grad_norm": 0.4167034327983856, "learning_rate": 1.6129032258064517e-05, "loss": 0.3393, "step": 60 }, { "epoch": 0.11395431652846315, "grad_norm": 0.39410400390625, "learning_rate": 1.881720430107527e-05, "loss": 0.2464, "step": 70 }, { "epoch": 0.13023350460395788, "grad_norm": 0.3644021153450012, "learning_rate": 2.1505376344086024e-05, "loss": 0.2294, "step": 80 }, { "epoch": 0.1465126926794526, "grad_norm": 0.30372634530067444, "learning_rate": 2.4193548387096777e-05, "loss": 0.2315, "step": 90 }, { "epoch": 0.16279188075494735, "grad_norm": 0.2586315870285034, "learning_rate": 2.4999011923655086e-05, "loss": 0.1932, "step": 100 }, { "epoch": 0.17907106883044208, "grad_norm": 0.37825971841812134, "learning_rate": 2.4994172742085852e-05, "loss": 0.2204, "step": 110 }, { "epoch": 0.19535025690593683, "grad_norm": 0.21422357857227325, "learning_rate": 2.4985302531208654e-05, "loss": 0.1795, "step": 120 }, { "epoch": 0.21162944498143155, "grad_norm": 0.2566869854927063, "learning_rate": 2.4972404152844008e-05, "loss": 0.1668, "step": 130 }, { "epoch": 0.2279086330569263, "grad_norm": 0.28194501996040344, "learning_rate": 2.49554817684312e-05, "loss": 0.1476, "step": 140 }, { "epoch": 0.24418782113242102, "grad_norm": 0.24139340221881866, "learning_rate": 2.4934540837685647e-05, "loss": 0.1609, "step": 150 }, { "epoch": 0.26046700920791577, "grad_norm": 0.3306334614753723, "learning_rate": 2.490958811683741e-05, "loss": 0.1638, "step": 160 }, { "epoch": 0.2767461972834105, "grad_norm": 0.27301114797592163, "learning_rate": 2.4880631656451447e-05, "loss": 0.1494, "step": 170 }, { "epoch": 0.2930253853589052, "grad_norm": 0.34037259221076965, "learning_rate": 2.484768079883018e-05, "loss": 0.1534, "step": 180 }, { "epoch": 0.30930457343439993, "grad_norm": 0.2306762933731079, "learning_rate": 2.4810746174999418e-05, "loss": 0.1749, "step": 190 }, { "epoch": 0.3255837615098947, "grad_norm": 0.3183388113975525, "learning_rate": 2.476983970127841e-05, "loss": 0.1482, "step": 200 }, { "epoch": 0.33372335554764204, "eval_loss": 0.3089325428009033, "eval_runtime": 34.8769, "eval_samples_per_second": 5.419, "eval_steps_per_second": 5.419, "step": 205 }, { "epoch": 0.34186294958538943, "grad_norm": 0.28704971075057983, "learning_rate": 2.472497457543525e-05, "loss": 0.1471, "step": 210 }, { "epoch": 0.35814213766088415, "grad_norm": 0.2939195930957794, "learning_rate": 2.4676165272428866e-05, "loss": 0.1631, "step": 220 }, { "epoch": 0.3744213257363789, "grad_norm": 0.31506845355033875, "learning_rate": 
2.4623427539738897e-05, "loss": 0.1353, "step": 230 }, { "epoch": 0.39070051381187365, "grad_norm": 0.3761660158634186, "learning_rate": 2.456677839228506e-05, "loss": 0.1716, "step": 240 }, { "epoch": 0.4069797018873684, "grad_norm": 0.29187777638435364, "learning_rate": 2.450623610693757e-05, "loss": 0.1195, "step": 250 }, { "epoch": 0.4232588899628631, "grad_norm": 0.46237581968307495, "learning_rate": 2.4441820216620425e-05, "loss": 0.1484, "step": 260 }, { "epoch": 0.4395380780383578, "grad_norm": 0.4580917954444885, "learning_rate": 2.437355150400945e-05, "loss": 0.1009, "step": 270 }, { "epoch": 0.4558172661138526, "grad_norm": 0.4181467890739441, "learning_rate": 2.4301451994827112e-05, "loss": 0.1376, "step": 280 }, { "epoch": 0.4720964541893473, "grad_norm": 0.3629908561706543, "learning_rate": 2.422554495073633e-05, "loss": 0.1083, "step": 290 }, { "epoch": 0.48837564226484204, "grad_norm": 0.4282682240009308, "learning_rate": 2.4145854861835447e-05, "loss": 0.1373, "step": 300 }, { "epoch": 0.5046548303403368, "grad_norm": 0.4914080500602722, "learning_rate": 2.406240743875699e-05, "loss": 0.156, "step": 310 }, { "epoch": 0.5209340184158315, "grad_norm": 0.3880573511123657, "learning_rate": 2.3975229604372526e-05, "loss": 0.1415, "step": 320 }, { "epoch": 0.5372132064913262, "grad_norm": 0.42599862813949585, "learning_rate": 2.3884349485106477e-05, "loss": 0.1338, "step": 330 }, { "epoch": 0.553492394566821, "grad_norm": 0.4339046776294708, "learning_rate": 2.378979640186163e-05, "loss": 0.1368, "step": 340 }, { "epoch": 0.5697715826423158, "grad_norm": 0.30713170766830444, "learning_rate": 2.3691600860559222e-05, "loss": 0.1154, "step": 350 }, { "epoch": 0.5860507707178104, "grad_norm": 0.4618566036224365, "learning_rate": 2.3589794542296764e-05, "loss": 0.1203, "step": 360 }, { "epoch": 0.6023299587933052, "grad_norm": 0.40802672505378723, "learning_rate": 2.3484410293126664e-05, "loss": 0.1144, "step": 370 }, { "epoch": 0.6186091468687999, "grad_norm": 0.5242702960968018, "learning_rate": 2.3375482113459014e-05, "loss": 0.1281, "step": 380 }, { "epoch": 0.6348883349442946, "grad_norm": 0.4045926630496979, "learning_rate": 2.3263045147091944e-05, "loss": 0.1145, "step": 390 }, { "epoch": 0.6511675230197894, "grad_norm": 0.5347346067428589, "learning_rate": 2.3147135669873096e-05, "loss": 0.1256, "step": 400 }, { "epoch": 0.6674467110952841, "grad_norm": 0.4755608141422272, "learning_rate": 2.302779107799583e-05, "loss": 0.1251, "step": 410 }, { "epoch": 0.6674467110952841, "eval_loss": 0.26128318905830383, "eval_runtime": 34.9177, "eval_samples_per_second": 5.413, "eval_steps_per_second": 5.413, "step": 410 }, { "epoch": 0.6837258991707789, "grad_norm": 0.4720211923122406, "learning_rate": 2.290504987593399e-05, "loss": 0.1399, "step": 420 }, { "epoch": 0.7000050872462736, "grad_norm": 0.709035336971283, "learning_rate": 2.2778951664019105e-05, "loss": 0.1375, "step": 430 }, { "epoch": 0.7162842753217683, "grad_norm": 0.534866213798523, "learning_rate": 2.2649537125664034e-05, "loss": 0.1125, "step": 440 }, { "epoch": 0.7325634633972631, "grad_norm": 0.522056519985199, "learning_rate": 2.2516848014237146e-05, "loss": 0.0943, "step": 450 }, { "epoch": 0.7488426514727577, "grad_norm": 0.2830965518951416, "learning_rate": 2.238092713959133e-05, "loss": 0.1248, "step": 460 }, { "epoch": 0.7651218395482525, "grad_norm": 0.39431601762771606, "learning_rate": 2.2241818354252113e-05, "loss": 0.1248, "step": 470 }, { "epoch": 0.7814010276237473, "grad_norm": 0.4821482002735138, 
"learning_rate": 2.209956653926944e-05, "loss": 0.1359, "step": 480 }, { "epoch": 0.797680215699242, "grad_norm": 0.4956236481666565, "learning_rate": 2.1954217589737535e-05, "loss": 0.1232, "step": 490 }, { "epoch": 0.8139594037747367, "grad_norm": 0.49444642663002014, "learning_rate": 2.180581839998766e-05, "loss": 0.1031, "step": 500 }, { "epoch": 0.8302385918502315, "grad_norm": 0.3857091963291168, "learning_rate": 2.165441684845847e-05, "loss": 0.1023, "step": 510 }, { "epoch": 0.8465177799257262, "grad_norm": 0.4830643832683563, "learning_rate": 2.150006178224886e-05, "loss": 0.1067, "step": 520 }, { "epoch": 0.862796968001221, "grad_norm": 0.5119408965110779, "learning_rate": 2.1342803001358278e-05, "loss": 0.1209, "step": 530 }, { "epoch": 0.8790761560767156, "grad_norm": 0.46363013982772827, "learning_rate": 2.118269124261963e-05, "loss": 0.1134, "step": 540 }, { "epoch": 0.8953553441522104, "grad_norm": 0.42933255434036255, "learning_rate": 2.1019778163329912e-05, "loss": 0.1101, "step": 550 }, { "epoch": 0.9116345322277052, "grad_norm": 0.5474070906639099, "learning_rate": 2.0854116324583867e-05, "loss": 0.1291, "step": 560 }, { "epoch": 0.9279137203031999, "grad_norm": 0.43502509593963623, "learning_rate": 2.0685759174316066e-05, "loss": 0.0936, "step": 570 }, { "epoch": 0.9441929083786946, "grad_norm": 0.632621169090271, "learning_rate": 2.051476103005684e-05, "loss": 0.1196, "step": 580 }, { "epoch": 0.9604720964541893, "grad_norm": 0.553187906742096, "learning_rate": 2.034117706140768e-05, "loss": 0.1186, "step": 590 }, { "epoch": 0.9767512845296841, "grad_norm": 0.48446330428123474, "learning_rate": 2.0165063272241712e-05, "loss": 0.1249, "step": 600 }, { "epoch": 0.9930304726051788, "grad_norm": 0.47837090492248535, "learning_rate": 1.9986476482635003e-05, "loss": 0.1097, "step": 610 }, { "epoch": 1.0011700666429262, "eval_loss": 0.2388339340686798, "eval_runtime": 34.8769, "eval_samples_per_second": 5.419, "eval_steps_per_second": 5.419, "step": 615 }, { "epoch": 1.0093096606806735, "grad_norm": 0.5520356893539429, "learning_rate": 1.980547431053456e-05, "loss": 0.131, "step": 620 }, { "epoch": 1.0255888487561682, "grad_norm": 0.6150078177452087, "learning_rate": 1.9622115153168884e-05, "loss": 0.1187, "step": 630 }, { "epoch": 1.041868036831663, "grad_norm": 0.5100656151771545, "learning_rate": 1.9436458168207117e-05, "loss": 0.114, "step": 640 }, { "epoch": 1.0581472249071577, "grad_norm": 0.5156052112579346, "learning_rate": 1.9248563254672825e-05, "loss": 0.1099, "step": 650 }, { "epoch": 1.0744264129826524, "grad_norm": 0.4662775993347168, "learning_rate": 1.9058491033618632e-05, "loss": 0.1135, "step": 660 }, { "epoch": 1.0907056010581473, "grad_norm": 0.4357255697250366, "learning_rate": 1.886630282856787e-05, "loss": 0.1036, "step": 670 }, { "epoch": 1.106984789133642, "grad_norm": 0.3861764967441559, "learning_rate": 1.867206064572962e-05, "loss": 0.1145, "step": 680 }, { "epoch": 1.1232639772091366, "grad_norm": 0.4562045633792877, "learning_rate": 1.8475827153993447e-05, "loss": 0.1107, "step": 690 }, { "epoch": 1.1395431652846315, "grad_norm": 0.332917720079422, "learning_rate": 1.8277665664710387e-05, "loss": 0.1266, "step": 700 }, { "epoch": 1.1558223533601262, "grad_norm": 0.5971720814704895, "learning_rate": 1.807764011126663e-05, "loss": 0.1122, "step": 710 }, { "epoch": 1.1721015414356208, "grad_norm": 0.6102172136306763, "learning_rate": 1.787581502845651e-05, "loss": 0.1046, "step": 720 }, { "epoch": 1.1883807295111157, "grad_norm": 0.5294010043144226, 
"learning_rate": 1.767225553166146e-05, "loss": 0.1044, "step": 730 }, { "epoch": 1.2046599175866104, "grad_norm": 0.5074148178100586, "learning_rate": 1.7467027295841688e-05, "loss": 0.1251, "step": 740 }, { "epoch": 1.220939105662105, "grad_norm": 0.6349917650222778, "learning_rate": 1.7260196534347235e-05, "loss": 0.1037, "step": 750 }, { "epoch": 1.2372182937376, "grad_norm": 0.34580153226852417, "learning_rate": 1.7051829977555426e-05, "loss": 0.0831, "step": 760 }, { "epoch": 1.2534974818130946, "grad_norm": 0.4629954993724823, "learning_rate": 1.684199485134144e-05, "loss": 0.1068, "step": 770 }, { "epoch": 1.2697766698885893, "grad_norm": 0.6406750082969666, "learning_rate": 1.6630758855389055e-05, "loss": 0.1192, "step": 780 }, { "epoch": 1.286055857964084, "grad_norm": 0.4982251226902008, "learning_rate": 1.6418190141348485e-05, "loss": 0.123, "step": 790 }, { "epoch": 1.3023350460395788, "grad_norm": 0.5146717429161072, "learning_rate": 1.6204357290848464e-05, "loss": 0.0831, "step": 800 }, { "epoch": 1.3186142341150735, "grad_norm": 0.4735712707042694, "learning_rate": 1.5989329293369538e-05, "loss": 0.0971, "step": 810 }, { "epoch": 1.3348934221905682, "grad_norm": 0.7393200397491455, "learning_rate": 1.5773175523985818e-05, "loss": 0.0923, "step": 820 }, { "epoch": 1.3348934221905682, "eval_loss": 0.22815725207328796, "eval_runtime": 34.8794, "eval_samples_per_second": 5.419, "eval_steps_per_second": 5.419, "step": 820 } ], "logging_steps": 10, "max_steps": 1842, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 205, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 6.989312360723251e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }