{
  "best_metric": 0.042311783879995346,
  "best_model_checkpoint": "./xlam_lora_new_ete_over_size_3epoch_multi_full_eng/checkpoint-1539",
  "epoch": 2.996902520498026,
  "eval_steps": 171,
  "global_step": 1542,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.019435165502581234,
      "grad_norm": 0.2028260976076126,
      "learning_rate": 3.205128205128205e-06,
      "loss": 0.0883,
      "step": 10
    },
    {
      "epoch": 0.03887033100516247,
      "grad_norm": 0.16199472546577454,
      "learning_rate": 6.41025641025641e-06,
      "loss": 0.0862,
      "step": 20
    },
    {
      "epoch": 0.0583054965077437,
      "grad_norm": 0.10014229267835617,
      "learning_rate": 9.615384615384616e-06,
      "loss": 0.0785,
      "step": 30
    },
    {
      "epoch": 0.07774066201032494,
      "grad_norm": 0.07943801581859589,
      "learning_rate": 1.282051282051282e-05,
      "loss": 0.0772,
      "step": 40
    },
    {
      "epoch": 0.09717582751290617,
      "grad_norm": 0.11369317024946213,
      "learning_rate": 1.602564102564103e-05,
      "loss": 0.0649,
      "step": 50
    },
    {
      "epoch": 0.1166109930154874,
      "grad_norm": 0.10264527052640915,
      "learning_rate": 1.923076923076923e-05,
      "loss": 0.0686,
      "step": 60
    },
    {
      "epoch": 0.13604615851806864,
      "grad_norm": 0.08583803474903107,
      "learning_rate": 2.2435897435897437e-05,
      "loss": 0.0657,
      "step": 70
    },
    {
      "epoch": 0.15548132402064987,
      "grad_norm": 0.09326862543821335,
      "learning_rate": 2.4999884878368972e-05,
      "loss": 0.0605,
      "step": 80
    },
    {
      "epoch": 0.1749164895232311,
      "grad_norm": 0.09622487425804138,
      "learning_rate": 2.4995855843928913e-05,
      "loss": 0.0617,
      "step": 90
    },
    {
      "epoch": 0.19435165502581234,
      "grad_norm": 0.10855123400688171,
      "learning_rate": 2.4986072848240374e-05,
      "loss": 0.0548,
      "step": 100
    },
    {
      "epoch": 0.21378682052839357,
      "grad_norm": 0.12510105967521667,
      "learning_rate": 2.4970540396075083e-05,
      "loss": 0.0545,
      "step": 110
    },
    {
      "epoch": 0.2332219860309748,
      "grad_norm": 0.11628145724534988,
      "learning_rate": 2.494926563965445e-05,
      "loss": 0.0601,
      "step": 120
    },
    {
      "epoch": 0.252657151533556,
      "grad_norm": 0.12050613015890121,
      "learning_rate": 2.4922258375356232e-05,
      "loss": 0.0585,
      "step": 130
    },
    {
      "epoch": 0.2720923170361373,
      "grad_norm": 0.08949285745620728,
      "learning_rate": 2.488953103920354e-05,
      "loss": 0.0477,
      "step": 140
    },
    {
      "epoch": 0.2915274825387185,
      "grad_norm": 0.14706853032112122,
      "learning_rate": 2.4851098701138466e-05,
      "loss": 0.0555,
      "step": 150
    },
    {
      "epoch": 0.31096264804129975,
      "grad_norm": 0.13029147684574127,
      "learning_rate": 2.4806979058082832e-05,
      "loss": 0.0493,
      "step": 160
    },
    {
      "epoch": 0.330397813543881,
      "grad_norm": 0.1321103274822235,
      "learning_rate": 2.475719242578929e-05,
      "loss": 0.0547,
      "step": 170
    },
    {
      "epoch": 0.3323413300941391,
      "eval_loss": 0.054034121334552765,
      "eval_runtime": 172.2024,
      "eval_samples_per_second": 5.029,
      "eval_steps_per_second": 5.029,
      "step": 171
    },
    {
      "epoch": 0.3498329790464622,
      "grad_norm": 0.14103983342647552,
      "learning_rate": 2.470176172948652e-05,
      "loss": 0.0522,
      "step": 180
    },
    {
      "epoch": 0.36926814454904344,
      "grad_norm": 0.14120739698410034,
      "learning_rate": 2.4640712493322894e-05,
      "loss": 0.0501,
      "step": 190
    },
    {
      "epoch": 0.3887033100516247,
      "grad_norm": 0.1301490068435669,
      "learning_rate": 2.4574072828613354e-05,
      "loss": 0.0492,
      "step": 200
    },
    {
      "epoch": 0.4081384755542059,
      "grad_norm": 0.1606156975030899,
      "learning_rate": 2.450187342089502e-05,
      "loss": 0.0488,
      "step": 210
    },
    {
      "epoch": 0.42757364105678713,
      "grad_norm": 0.1623048037290573,
      "learning_rate": 2.442414751579744e-05,
      "loss": 0.0459,
      "step": 220
    },
    {
      "epoch": 0.44700880655936837,
      "grad_norm": 0.1424696147441864,
      "learning_rate": 2.434093090373396e-05,
      "loss": 0.0441,
      "step": 230
    },
    {
      "epoch": 0.4664439720619496,
      "grad_norm": 0.13980570435523987,
      "learning_rate": 2.4252261903421375e-05,
      "loss": 0.0483,
      "step": 240
    },
    {
      "epoch": 0.4858791375645308,
      "grad_norm": 0.17693087458610535,
      "learning_rate": 2.415818134423528e-05,
      "loss": 0.0451,
      "step": 250
    },
    {
      "epoch": 0.505314303067112,
      "grad_norm": 0.18210634589195251,
      "learning_rate": 2.405873254740942e-05,
      "loss": 0.0466,
      "step": 260
    },
    {
      "epoch": 0.5247494685696933,
      "grad_norm": 0.1527709811925888,
      "learning_rate": 2.395396130608756e-05,
      "loss": 0.0488,
      "step": 270
    },
    {
      "epoch": 0.5441846340722746,
      "grad_norm": 0.15795762836933136,
      "learning_rate": 2.3843915864237143e-05,
      "loss": 0.0448,
      "step": 280
    },
    {
      "epoch": 0.5636197995748558,
      "grad_norm": 0.15495383739471436,
      "learning_rate": 2.3728646894434413e-05,
      "loss": 0.047,
      "step": 290
    },
    {
      "epoch": 0.583054965077437,
      "grad_norm": 0.17630279064178467,
      "learning_rate": 2.3608207474531236e-05,
      "loss": 0.0429,
      "step": 300
    },
    {
      "epoch": 0.6024901305800182,
      "grad_norm": 0.17523537576198578,
      "learning_rate": 2.3482653063214356e-05,
      "loss": 0.0473,
      "step": 310
    },
    {
      "epoch": 0.6219252960825995,
      "grad_norm": 0.1995943933725357,
      "learning_rate": 2.3352041474468373e-05,
      "loss": 0.0392,
      "step": 320
    },
    {
      "epoch": 0.6413604615851807,
      "grad_norm": 0.1683543622493744,
      "learning_rate": 2.3216432850954155e-05,
      "loss": 0.0438,
      "step": 330
    },
    {
      "epoch": 0.660795627087762,
      "grad_norm": 0.1374560445547104,
      "learning_rate": 2.307588963631497e-05,
      "loss": 0.0388,
      "step": 340
    },
    {
      "epoch": 0.6646826601882782,
      "eval_loss": 0.048522137105464935,
      "eval_runtime": 174.8794,
      "eval_samples_per_second": 4.952,
      "eval_steps_per_second": 4.952,
      "step": 342
    },
    {
      "epoch": 0.6802307925903431,
      "grad_norm": 0.166055366396904,
      "learning_rate": 2.293047654642309e-05,
      "loss": 0.0469,
      "step": 350
    },
    {
      "epoch": 0.6996659580929244,
      "grad_norm": 0.14119000732898712,
      "learning_rate": 2.2780260539580116e-05,
      "loss": 0.0424,
      "step": 360
    },
    {
      "epoch": 0.7191011235955056,
      "grad_norm": 0.23057888448238373,
      "learning_rate": 2.2625310785684688e-05,
      "loss": 0.0451,
      "step": 370
    },
    {
      "epoch": 0.7385362890980869,
      "grad_norm": 0.1304253786802292,
      "learning_rate": 2.2465698634381892e-05,
      "loss": 0.0434,
      "step": 380
    },
    {
      "epoch": 0.7579714546006681,
      "grad_norm": 0.17952455580234528,
      "learning_rate": 2.2301497582208884e-05,
      "loss": 0.0409,
      "step": 390
    },
    {
      "epoch": 0.7774066201032493,
      "grad_norm": 0.15560920536518097,
      "learning_rate": 2.2132783238751987e-05,
      "loss": 0.0465,
      "step": 400
    },
    {
      "epoch": 0.7968417856058305,
      "grad_norm": 0.12066332250833511,
      "learning_rate": 2.1959633291830785e-05,
      "loss": 0.0433,
      "step": 410
    },
    {
      "epoch": 0.8162769511084118,
      "grad_norm": 0.15932129323482513,
      "learning_rate": 2.1782127471725232e-05,
      "loss": 0.0458,
      "step": 420
    },
    {
      "epoch": 0.835712116610993,
      "grad_norm": 0.19025273621082306,
      "learning_rate": 2.160034751446231e-05,
      "loss": 0.0522,
      "step": 430
    },
    {
      "epoch": 0.8551472821135743,
      "grad_norm": 0.15156866610050201,
      "learning_rate": 2.1414377124179038e-05,
      "loss": 0.0433,
      "step": 440
    },
    {
      "epoch": 0.8745824476161554,
      "grad_norm": 0.1594618409872055,
      "learning_rate": 2.1224301934579293e-05,
      "loss": 0.0434,
      "step": 450
    },
    {
      "epoch": 0.8940176131187367,
      "grad_norm": 0.16077858209609985,
      "learning_rate": 2.1030209469502098e-05,
      "loss": 0.0426,
      "step": 460
    },
    {
      "epoch": 0.9134527786213179,
      "grad_norm": 0.15807421505451202,
      "learning_rate": 2.0832189102619543e-05,
      "loss": 0.0461,
      "step": 470
    },
    {
      "epoch": 0.9328879441238992,
      "grad_norm": 0.20585452020168304,
      "learning_rate": 2.0630332016282927e-05,
      "loss": 0.0429,
      "step": 480
    },
    {
      "epoch": 0.9523231096264804,
      "grad_norm": 0.1859339326620102,
      "learning_rate": 2.0424731159536083e-05,
      "loss": 0.0432,
      "step": 490
    },
    {
      "epoch": 0.9717582751290617,
      "grad_norm": 0.20674780011177063,
      "learning_rate": 2.021548120531516e-05,
      "loss": 0.0392,
      "step": 500
    },
    {
      "epoch": 0.9911934406316428,
      "grad_norm": 0.22314420342445374,
      "learning_rate": 2.0002678506854606e-05,
      "loss": 0.041,
      "step": 510
    },
    {
      "epoch": 0.9970239902824173,
      "eval_loss": 0.04610577970743179,
      "eval_runtime": 175.1913,
      "eval_samples_per_second": 4.943,
      "eval_steps_per_second": 4.943,
      "step": 513
    },
    {
      "epoch": 1.010628606134224,
      "grad_norm": 0.1707136183977127,
      "learning_rate": 1.9786421053319455e-05,
      "loss": 0.0468,
      "step": 520
    },
    {
      "epoch": 1.0300637716368053,
      "grad_norm": 0.18121393024921417,
      "learning_rate": 1.9566808424684284e-05,
      "loss": 0.0361,
      "step": 530
    },
    {
      "epoch": 1.0494989371393866,
      "grad_norm": 0.17815589904785156,
      "learning_rate": 1.9343941745879695e-05,
      "loss": 0.0418,
      "step": 540
    },
    {
      "epoch": 1.0689341026419679,
      "grad_norm": 0.17634443938732147,
      "learning_rate": 1.9117923640227314e-05,
      "loss": 0.0437,
      "step": 550
    },
    {
      "epoch": 1.088369268144549,
      "grad_norm": 0.18102432787418365,
      "learning_rate": 1.8888858182184925e-05,
      "loss": 0.0426,
      "step": 560
    },
    {
      "epoch": 1.1078044336471302,
      "grad_norm": 0.20596696436405182,
      "learning_rate": 1.865685084942331e-05,
      "loss": 0.0414,
      "step": 570
    },
    {
      "epoch": 1.1272395991497115,
      "grad_norm": 0.2145373523235321,
      "learning_rate": 1.8422008474257013e-05,
      "loss": 0.0354,
      "step": 580
    },
    {
      "epoch": 1.1466747646522928,
      "grad_norm": 0.2385258674621582,
      "learning_rate": 1.8184439194451326e-05,
      "loss": 0.0357,
      "step": 590
    },
    {
      "epoch": 1.166109930154874,
      "grad_norm": 0.23679479956626892,
      "learning_rate": 1.7944252403428097e-05,
      "loss": 0.041,
      "step": 600
    },
    {
      "epoch": 1.1855450956574551,
      "grad_norm": 0.1947854906320572,
      "learning_rate": 1.770155869989343e-05,
      "loss": 0.0407,
      "step": 610
    },
    {
      "epoch": 1.2049802611600364,
      "grad_norm": 0.25392264127731323,
      "learning_rate": 1.7456469836910334e-05,
      "loss": 0.0405,
      "step": 620
    },
    {
      "epoch": 1.2244154266626177,
      "grad_norm": 0.2636931240558624,
      "learning_rate": 1.7209098670439817e-05,
      "loss": 0.0442,
      "step": 630
    },
    {
      "epoch": 1.243850592165199,
      "grad_norm": 0.19380666315555573,
      "learning_rate": 1.695955910737419e-05,
      "loss": 0.0344,
      "step": 640
    },
    {
      "epoch": 1.26328575766778,
      "grad_norm": 0.2423335164785385,
      "learning_rate": 1.670796605308638e-05,
      "loss": 0.0415,
      "step": 650
    },
    {
      "epoch": 1.2827209231703613,
      "grad_norm": 0.20322546362876892,
      "learning_rate": 1.6454435358519524e-05,
      "loss": 0.0381,
      "step": 660
    },
    {
      "epoch": 1.3021560886729426,
      "grad_norm": 0.20570382475852966,
      "learning_rate": 1.6199083766841115e-05,
      "loss": 0.035,
      "step": 670
    },
    {
      "epoch": 1.321591254175524,
      "grad_norm": 0.23237545788288116,
      "learning_rate": 1.594202885968636e-05,
      "loss": 0.0447,
      "step": 680
    },
    {
      "epoch": 1.3293653203765563,
      "eval_loss": 0.0446239709854126,
      "eval_runtime": 174.4977,
      "eval_samples_per_second": 4.963,
      "eval_steps_per_second": 4.963,
      "step": 684
    },
    {
      "epoch": 1.341026419678105,
      "grad_norm": 0.2273474782705307,
      "learning_rate": 1.568338900301536e-05,
      "loss": 0.0406,
      "step": 690
    },
    {
      "epoch": 1.3604615851806863,
      "grad_norm": 0.2105632871389389,
      "learning_rate": 1.5423283292609255e-05,
      "loss": 0.0384,
      "step": 700
    },
    {
      "epoch": 1.3798967506832676,
      "grad_norm": 0.728905200958252,
      "learning_rate": 1.5161831499230197e-05,
      "loss": 0.0385,
      "step": 710
    },
    {
      "epoch": 1.3993319161858488,
      "grad_norm": 0.2697829604148865,
      "learning_rate": 1.4899154013470574e-05,
      "loss": 0.0407,
      "step": 720
    },
    {
      "epoch": 1.4187670816884301,
      "grad_norm": 0.23418137431144714,
      "learning_rate": 1.4635371790316805e-05,
      "loss": 0.0425,
      "step": 730
    },
    {
      "epoch": 1.4382022471910112,
      "grad_norm": 0.21163399517536163,
      "learning_rate": 1.437060629345325e-05,
      "loss": 0.0369,
      "step": 740
    },
    {
      "epoch": 1.4576374126935925,
      "grad_norm": 0.24824394285678864,
      "learning_rate": 1.4104979439331889e-05,
      "loss": 0.0413,
      "step": 750
    },
    {
      "epoch": 1.4770725781961738,
      "grad_norm": 0.24107202887535095,
      "learning_rate": 1.383861354103349e-05,
      "loss": 0.0368,
      "step": 760
    },
    {
      "epoch": 1.4965077436987548,
      "grad_norm": 0.25261572003364563,
      "learning_rate": 1.357163125194618e-05,
      "loss": 0.0437,
      "step": 770
    },
    {
      "epoch": 1.5159429092013361,
      "grad_norm": 0.1909024715423584,
      "learning_rate": 1.3304155509287273e-05,
      "loss": 0.0376,
      "step": 780
    },
    {
      "epoch": 1.5353780747039174,
      "grad_norm": 0.24820125102996826,
      "learning_rate": 1.3036309477494433e-05,
      "loss": 0.038,
      "step": 790
    },
    {
      "epoch": 1.5548132402064985,
      "grad_norm": 0.2151060849428177,
      "learning_rate": 1.27682164915122e-05,
      "loss": 0.0427,
      "step": 800
    },
    {
      "epoch": 1.57424840570908,
      "grad_norm": 0.2735498249530792,
      "learning_rate": 1.25e-05,
      "loss": 0.0405,
      "step": 810
    },
    {
      "epoch": 1.593683571211661,
      "grad_norm": 0.20167022943496704,
      "learning_rate": 1.2231783508487806e-05,
      "loss": 0.0361,
      "step": 820
    },
    {
      "epoch": 1.6131187367142423,
      "grad_norm": 0.29892289638519287,
      "learning_rate": 1.1963690522505568e-05,
      "loss": 0.0343,
      "step": 830
    },
    {
      "epoch": 1.6325539022168236,
      "grad_norm": 0.2181161344051361,
      "learning_rate": 1.169584449071273e-05,
      "loss": 0.044,
      "step": 840
    },
    {
      "epoch": 1.6519890677194047,
      "grad_norm": 0.24029949307441711,
      "learning_rate": 1.142836874805382e-05,
      "loss": 0.0347,
      "step": 850
    },
    {
      "epoch": 1.6617066504706954,
      "eval_loss": 0.04329855740070343,
      "eval_runtime": 170.3718,
      "eval_samples_per_second": 5.083,
      "eval_steps_per_second": 5.083,
      "step": 855
    },
    {
      "epoch": 1.6714242332219862,
      "grad_norm": 0.23985505104064941,
      "learning_rate": 1.1161386458966511e-05,
      "loss": 0.0401,
      "step": 860
    },
    {
      "epoch": 1.6908593987245673,
      "grad_norm": 0.22673290967941284,
      "learning_rate": 1.0895020560668112e-05,
      "loss": 0.0364,
      "step": 870
    },
    {
      "epoch": 1.7102945642271485,
      "grad_norm": 0.22750601172447205,
      "learning_rate": 1.0629393706546752e-05,
      "loss": 0.039,
      "step": 880
    },
    {
      "epoch": 1.7297297297297298,
      "grad_norm": 0.24846433103084564,
      "learning_rate": 1.03646282096832e-05,
      "loss": 0.0341,
      "step": 890
    },
    {
      "epoch": 1.749164895232311,
      "grad_norm": 0.19867610931396484,
      "learning_rate": 1.0100845986529429e-05,
      "loss": 0.036,
      "step": 900
    },
    {
      "epoch": 1.7686000607348922,
      "grad_norm": 0.2108948975801468,
      "learning_rate": 9.838168500769806e-06,
      "loss": 0.0359,
      "step": 910
    },
    {
      "epoch": 1.7880352262374735,
      "grad_norm": 0.22352129220962524,
      "learning_rate": 9.576716707390745e-06,
      "loss": 0.0432,
      "step": 920
    },
    {
      "epoch": 1.8074703917400545,
      "grad_norm": 0.2634281516075134,
      "learning_rate": 9.316610996984642e-06,
      "loss": 0.0371,
      "step": 930
    },
    {
      "epoch": 1.826905557242636,
      "grad_norm": 0.261662095785141,
      "learning_rate": 9.057971140313645e-06,
      "loss": 0.0414,
      "step": 940
    },
    {
      "epoch": 1.846340722745217,
      "grad_norm": 0.269505113363266,
      "learning_rate": 8.800916233158885e-06,
      "loss": 0.033,
      "step": 950
    },
    {
      "epoch": 1.8657758882477984,
      "grad_norm": 0.2583780288696289,
      "learning_rate": 8.545564641480484e-06,
      "loss": 0.04,
      "step": 960
    },
    {
      "epoch": 1.8852110537503797,
      "grad_norm": 0.2607627213001251,
      "learning_rate": 8.292033946913624e-06,
      "loss": 0.0354,
      "step": 970
    },
    {
      "epoch": 1.9046462192529607,
      "grad_norm": 0.23419621586799622,
      "learning_rate": 8.040440892625817e-06,
      "loss": 0.0362,
      "step": 980
    },
    {
      "epoch": 1.924081384755542,
      "grad_norm": 0.2785259783267975,
      "learning_rate": 7.790901329560186e-06,
      "loss": 0.0367,
      "step": 990
    },
    {
      "epoch": 1.9435165502581233,
      "grad_norm": 0.2678421437740326,
      "learning_rate": 7.543530163089671e-06,
      "loss": 0.0363,
      "step": 1000
    },
    {
      "epoch": 1.9629517157607044,
      "grad_norm": 0.2274320125579834,
      "learning_rate": 7.29844130010657e-06,
      "loss": 0.0395,
      "step": 1010
    },
    {
      "epoch": 1.9823868812632859,
      "grad_norm": 0.2792135179042816,
      "learning_rate": 7.055747596571904e-06,
      "loss": 0.0374,
      "step": 1020
    },
    {
      "epoch": 1.9940479805648343,
      "eval_loss": 0.04268278926610947,
      "eval_runtime": 169.855,
      "eval_samples_per_second": 5.098,
      "eval_steps_per_second": 5.098,
      "step": 1026
    },
    {
      "epoch": 2.001822046765867,
      "grad_norm": 0.2434389591217041,
      "learning_rate": 6.815560805548682e-06,
      "loss": 0.0359,
      "step": 1030
    },
    {
      "epoch": 2.021257212268448,
      "grad_norm": 0.2474217265844345,
      "learning_rate": 6.577991525742991e-06,
      "loss": 0.0301,
      "step": 1040
    },
    {
      "epoch": 2.0406923777710295,
      "grad_norm": 0.21972987055778503,
      "learning_rate": 6.3431491505766965e-06,
      "loss": 0.031,
      "step": 1050
    },
    {
      "epoch": 2.0601275432736106,
      "grad_norm": 0.26206907629966736,
      "learning_rate": 6.111141817815079e-06,
      "loss": 0.0367,
      "step": 1060
    },
    {
      "epoch": 2.079562708776192,
      "grad_norm": 0.26828330755233765,
      "learning_rate": 5.882076359772686e-06,
      "loss": 0.0358,
      "step": 1070
    },
    {
      "epoch": 2.098997874278773,
      "grad_norm": 0.3341256380081177,
      "learning_rate": 5.656058254120305e-06,
      "loss": 0.0362,
      "step": 1080
    },
    {
      "epoch": 2.118433039781354,
      "grad_norm": 0.24040237069129944,
      "learning_rate": 5.4331915753157175e-06,
      "loss": 0.0336,
      "step": 1090
    },
    {
      "epoch": 2.1378682052839357,
      "grad_norm": 0.2951533794403076,
      "learning_rate": 5.213578946680547e-06,
      "loss": 0.0374,
      "step": 1100
    },
    {
      "epoch": 2.157303370786517,
      "grad_norm": 0.3339778184890747,
      "learning_rate": 4.997321493145399e-06,
      "loss": 0.0336,
      "step": 1110
    },
    {
      "epoch": 2.176738536289098,
      "grad_norm": 0.28566041588783264,
      "learning_rate": 4.784518794684843e-06,
      "loss": 0.0305,
      "step": 1120
    },
    {
      "epoch": 2.1961737017916794,
      "grad_norm": 0.23707829415798187,
      "learning_rate": 4.575268840463919e-06,
      "loss": 0.0347,
      "step": 1130
    },
    {
      "epoch": 2.2156088672942604,
      "grad_norm": 0.268509179353714,
      "learning_rate": 4.3696679837170755e-06,
      "loss": 0.0338,
      "step": 1140
    },
    {
      "epoch": 2.235044032796842,
      "grad_norm": 0.2489359974861145,
      "learning_rate": 4.16781089738046e-06,
      "loss": 0.0346,
      "step": 1150
    },
    {
      "epoch": 2.254479198299423,
      "grad_norm": 0.2973306179046631,
      "learning_rate": 3.969790530497904e-06,
      "loss": 0.0341,
      "step": 1160
    },
    {
      "epoch": 2.273914363802004,
      "grad_norm": 0.2385181337594986,
      "learning_rate": 3.7756980654207032e-06,
      "loss": 0.0307,
      "step": 1170
    },
    {
      "epoch": 2.2933495293045856,
      "grad_norm": 0.24831977486610413,
      "learning_rate": 3.585622875820965e-06,
      "loss": 0.0324,
      "step": 1180
    },
    {
      "epoch": 2.3127846948071666,
      "grad_norm": 0.2952122092247009,
      "learning_rate": 3.399652485537691e-06,
      "loss": 0.0337,
      "step": 1190
    },
    {
      "epoch": 2.3263893106589735,
      "eval_loss": 0.042657386511564255,
      "eval_runtime": 168.8317,
      "eval_samples_per_second": 5.129,
      "eval_steps_per_second": 5.129,
      "step": 1197
    },
    {
      "epoch": 2.332219860309748,
      "grad_norm": 0.29106399416923523,
      "learning_rate": 3.2178725282747665e-06,
      "loss": 0.0346,
      "step": 1200
    },
    {
      "epoch": 2.351655025812329,
      "grad_norm": 0.30523982644081116,
      "learning_rate": 3.040366708169222e-06,
      "loss": 0.036,
      "step": 1210
    },
    {
      "epoch": 2.3710901913149103,
      "grad_norm": 0.2523854672908783,
      "learning_rate": 2.8672167612480163e-06,
      "loss": 0.0298,
      "step": 1220
    },
    {
      "epoch": 2.390525356817492,
      "grad_norm": 0.2531813383102417,
      "learning_rate": 2.698502417791121e-06,
      "loss": 0.0383,
      "step": 1230
    },
    {
      "epoch": 2.409960522320073,
      "grad_norm": 0.2635211944580078,
      "learning_rate": 2.534301365618107e-06,
      "loss": 0.0357,
      "step": 1240
    },
    {
      "epoch": 2.4293956878226544,
      "grad_norm": 0.26689672470092773,
      "learning_rate": 2.374689214315311e-06,
      "loss": 0.0333,
      "step": 1250
    },
    {
      "epoch": 2.4488308533252354,
      "grad_norm": 0.29953232407569885,
      "learning_rate": 2.2197394604198853e-06,
      "loss": 0.0317,
      "step": 1260
    },
    {
      "epoch": 2.4682660188278165,
      "grad_norm": 0.21628761291503906,
      "learning_rate": 2.069523453576909e-06,
      "loss": 0.0362,
      "step": 1270
    },
    {
      "epoch": 2.487701184330398,
      "grad_norm": 0.2457425892353058,
      "learning_rate": 1.924110363685033e-06,
      "loss": 0.0353,
      "step": 1280
    },
    {
      "epoch": 2.507136349832979,
      "grad_norm": 0.3192308247089386,
      "learning_rate": 1.783567149045845e-06,
      "loss": 0.036,
      "step": 1290
    },
    {
      "epoch": 2.52657151533556,
      "grad_norm": 0.2815244793891907,
      "learning_rate": 1.6479585255316287e-06,
      "loss": 0.0329,
      "step": 1300
    },
    {
      "epoch": 2.5460066808381416,
      "grad_norm": 0.2549531161785126,
      "learning_rate": 1.5173469367856477e-06,
      "loss": 0.0382,
      "step": 1310
    },
    {
      "epoch": 2.5654418463407227,
      "grad_norm": 0.25880110263824463,
      "learning_rate": 1.3917925254687686e-06,
      "loss": 0.0344,
      "step": 1320
    },
    {
      "epoch": 2.5848770118433038,
      "grad_norm": 0.22720050811767578,
      "learning_rate": 1.271353105565591e-06,
      "loss": 0.0353,
      "step": 1330
    },
    {
      "epoch": 2.6043121773458853,
      "grad_norm": 0.2991912364959717,
      "learning_rate": 1.1560841357628582e-06,
      "loss": 0.0369,
      "step": 1340
    },
    {
      "epoch": 2.6237473428484663,
      "grad_norm": 0.3291521668434143,
      "learning_rate": 1.046038693912442e-06,
      "loss": 0.0331,
      "step": 1350
    },
    {
      "epoch": 2.643182508351048,
      "grad_norm": 0.24359193444252014,
      "learning_rate": 9.412674525905815e-07,
      "loss": 0.0335,
      "step": 1360
    },
    {
      "epoch": 2.6587306407531126,
      "eval_loss": 0.042365700006484985,
      "eval_runtime": 168.4092,
      "eval_samples_per_second": 5.142,
      "eval_steps_per_second": 5.142,
      "step": 1368
    },
    {
      "epoch": 2.662617673853629,
      "grad_norm": 0.30196014046669006,
      "learning_rate": 8.4181865576472e-07,
      "loss": 0.0319,
      "step": 1370
    },
    {
      "epoch": 2.68205283935621,
      "grad_norm": 0.31683069467544556,
      "learning_rate": 7.477380965786263e-07,
      "loss": 0.0353,
      "step": 1380
    },
    {
      "epoch": 2.7014880048587915,
      "grad_norm": 0.24554206430912018,
      "learning_rate": 6.590690962660393e-07,
      "loss": 0.0349,
      "step": 1390
    },
    {
      "epoch": 2.7209231703613725,
      "grad_norm": 0.3739015460014343,
      "learning_rate": 5.758524842025631e-07,
      "loss": 0.0353,
      "step": 1400
    },
    {
      "epoch": 2.740358335863954,
      "grad_norm": 0.27808961272239685,
      "learning_rate": 4.98126579104978e-07,
      "loss": 0.0337,
      "step": 1410
    },
    {
      "epoch": 2.759793501366535,
      "grad_norm": 0.2409674972295761,
      "learning_rate": 4.259271713866475e-07,
      "loss": 0.0357,
      "step": 1420
    },
    {
      "epoch": 2.779228666869116,
      "grad_norm": 0.2730494737625122,
      "learning_rate": 3.5928750667710844e-07,
      "loss": 0.0339,
      "step": 1430
    },
    {
      "epoch": 2.7986638323716977,
      "grad_norm": 0.23926123976707458,
      "learning_rate": 2.982382705134817e-07,
      "loss": 0.0349,
      "step": 1440
    },
    {
      "epoch": 2.8180989978742788,
      "grad_norm": 0.21703185141086578,
      "learning_rate": 2.4280757421071414e-07,
      "loss": 0.0366,
      "step": 1450
    },
    {
      "epoch": 2.8375341633768603,
      "grad_norm": 0.2484457939863205,
      "learning_rate": 1.9302094191716856e-07,
      "loss": 0.0338,
      "step": 1460
    },
    {
      "epoch": 2.8569693288794413,
      "grad_norm": 0.30563613772392273,
      "learning_rate": 1.489012988615368e-07,
      "loss": 0.0346,
      "step": 1470
    },
    {
      "epoch": 2.8764044943820224,
      "grad_norm": 0.3028404414653778,
      "learning_rate": 1.104689607964643e-07,
      "loss": 0.0386,
      "step": 1480
    },
    {
      "epoch": 2.8958396598846035,
      "grad_norm": 0.3035370111465454,
      "learning_rate": 7.774162464377066e-08,
      "loss": 0.0375,
      "step": 1490
    },
    {
      "epoch": 2.915274825387185,
      "grad_norm": 0.25364699959754944,
      "learning_rate": 5.073436034554874e-08,
      "loss": 0.0379,
      "step": 1500
    },
    {
      "epoch": 2.934709990889766,
      "grad_norm": 0.2718573212623596,
      "learning_rate": 2.945960392492092e-08,
      "loss": 0.0324,
      "step": 1510
    },
    {
      "epoch": 2.9541451563923475,
      "grad_norm": 0.33609631657600403,
      "learning_rate": 1.3927151759626777e-08,
      "loss": 0.0329,
      "step": 1520
    },
    {
      "epoch": 2.9735803218949286,
      "grad_norm": 0.2513211965560913,
      "learning_rate": 4.144156071086979e-09,
      "loss": 0.0338,
      "step": 1530
    },
    {
      "epoch": 2.9910719708472517,
      "eval_loss": 0.042311783879995346,
      "eval_runtime": 168.3509,
      "eval_samples_per_second": 5.144,
      "eval_steps_per_second": 5.144,
      "step": 1539
    },
    {
      "epoch": 2.9930154873975097,
      "grad_norm": 0.2795769274234772,
      "learning_rate": 1.1512163102661566e-10,
      "loss": 0.0348,
      "step": 1540
    },
    {
      "epoch": 2.996902520498026,
      "step": 1542,
      "total_flos": 1.1822411081967206e+18,
      "train_loss": 0.04155438780378739,
      "train_runtime": 32863.2543,
      "train_samples_per_second": 1.503,
      "train_steps_per_second": 0.047
    }
  ],
  "logging_steps": 10,
  "max_steps": 1542,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 171,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1822411081967206e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}