{
"best_metric": 0.5259036421775818,
"best_model_checkpoint": "./output_v2/7b_cluster00_Nous-Hermes-llama-2-7b_codellama_blob_1/checkpoint-200",
"epoch": 0.5614035087719298,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2e-06,
"loss": 0.7795,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4e-06,
"loss": 0.6789,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 5.999999999999999e-06,
"loss": 0.6843,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 8e-06,
"loss": 0.6884,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 9.999999999999999e-06,
"loss": 0.5968,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 1.1999999999999999e-05,
"loss": 0.6587,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 1.4e-05,
"loss": 0.6264,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 1.6e-05,
"loss": 0.6894,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 1.7999999999999997e-05,
"loss": 0.537,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.6001,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 2.2e-05,
"loss": 0.7057,
"step": 11
},
{
"epoch": 0.03,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.6323,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 2.6e-05,
"loss": 0.5636,
"step": 13
},
{
"epoch": 0.04,
"learning_rate": 2.8e-05,
"loss": 0.5497,
"step": 14
},
{
"epoch": 0.04,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.5838,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 3.2e-05,
"loss": 0.7057,
"step": 16
},
{
"epoch": 0.05,
"learning_rate": 3.399999999999999e-05,
"loss": 0.9464,
"step": 17
},
{
"epoch": 0.05,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.7112,
"step": 18
},
{
"epoch": 0.05,
"learning_rate": 3.8e-05,
"loss": 0.5115,
"step": 19
},
{
"epoch": 0.06,
"learning_rate": 3.9999999999999996e-05,
"loss": 0.6474,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 4.2e-05,
"loss": 1.046,
"step": 21
},
{
"epoch": 0.06,
"learning_rate": 4.4e-05,
"loss": 0.9237,
"step": 22
},
{
"epoch": 0.06,
"learning_rate": 4.599999999999999e-05,
"loss": 0.7492,
"step": 23
},
{
"epoch": 0.07,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.7008,
"step": 24
},
{
"epoch": 0.07,
"learning_rate": 4.9999999999999996e-05,
"loss": 0.6973,
"step": 25
},
{
"epoch": 0.07,
"eval_loss": 0.6718204617500305,
"eval_runtime": 26.3354,
"eval_samples_per_second": 7.594,
"eval_steps_per_second": 1.899,
"step": 25
},
{
"dharma_eval_accuracy": 0.47296288292243954,
"dharma_eval_accuracy_ARC-Challenge": 0.6296296296296297,
"dharma_eval_accuracy_ARC-Easy": 0.7592592592592593,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4098360655737705,
"dharma_eval_accuracy_openbookqa": 0.2962962962962963,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.3888888888888889,
"dharma_loss": 3.742157681465149,
"epoch": 0.07,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 5.2e-05,
"loss": 0.6103,
"step": 26
},
{
"epoch": 0.08,
"learning_rate": 5.399999999999999e-05,
"loss": 0.6061,
"step": 27
},
{
"epoch": 0.08,
"learning_rate": 5.6e-05,
"loss": 0.4808,
"step": 28
},
{
"epoch": 0.08,
"learning_rate": 5.7999999999999994e-05,
"loss": 0.5991,
"step": 29
},
{
"epoch": 0.08,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.868,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 6.199999999999999e-05,
"loss": 0.9894,
"step": 31
},
{
"epoch": 0.09,
"learning_rate": 6.4e-05,
"loss": 0.7307,
"step": 32
},
{
"epoch": 0.09,
"learning_rate": 6.599999999999999e-05,
"loss": 0.5034,
"step": 33
},
{
"epoch": 0.1,
"learning_rate": 6.799999999999999e-05,
"loss": 0.6597,
"step": 34
},
{
"epoch": 0.1,
"learning_rate": 7e-05,
"loss": 0.6906,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 7.199999999999999e-05,
"loss": 0.7378,
"step": 36
},
{
"epoch": 0.1,
"learning_rate": 7.4e-05,
"loss": 0.8251,
"step": 37
},
{
"epoch": 0.11,
"learning_rate": 7.6e-05,
"loss": 0.5945,
"step": 38
},
{
"epoch": 0.11,
"learning_rate": 7.8e-05,
"loss": 0.5483,
"step": 39
},
{
"epoch": 0.11,
"learning_rate": 7.999999999999999e-05,
"loss": 0.7956,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 8.199999999999999e-05,
"loss": 0.8239,
"step": 41
},
{
"epoch": 0.12,
"learning_rate": 8.4e-05,
"loss": 0.7577,
"step": 42
},
{
"epoch": 0.12,
"learning_rate": 8.6e-05,
"loss": 0.6013,
"step": 43
},
{
"epoch": 0.12,
"learning_rate": 8.8e-05,
"loss": 0.5764,
"step": 44
},
{
"epoch": 0.13,
"learning_rate": 8.999999999999999e-05,
"loss": 0.5965,
"step": 45
},
{
"epoch": 0.13,
"learning_rate": 9.199999999999999e-05,
"loss": 0.5114,
"step": 46
},
{
"epoch": 0.13,
"learning_rate": 9.4e-05,
"loss": 0.5308,
"step": 47
},
{
"epoch": 0.13,
"learning_rate": 9.599999999999999e-05,
"loss": 0.6932,
"step": 48
},
{
"epoch": 0.14,
"learning_rate": 9.799999999999998e-05,
"loss": 0.6133,
"step": 49
},
{
"epoch": 0.14,
"learning_rate": 9.999999999999999e-05,
"loss": 0.6333,
"step": 50
},
{
"epoch": 0.14,
"eval_loss": 0.5996996164321899,
"eval_runtime": 26.2956,
"eval_samples_per_second": 7.606,
"eval_steps_per_second": 1.901,
"step": 50
},
{
"dharma_eval_accuracy": 0.4889515491843299,
"dharma_eval_accuracy_ARC-Challenge": 0.6666666666666666,
"dharma_eval_accuracy_ARC-Easy": 0.6666666666666666,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4426229508196721,
"dharma_eval_accuracy_openbookqa": 0.3148148148148148,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.0854941487312315,
"epoch": 0.14,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 0.000102,
"loss": 0.6521,
"step": 51
},
{
"epoch": 0.15,
"learning_rate": 0.000104,
"loss": 0.97,
"step": 52
},
{
"epoch": 0.15,
"learning_rate": 0.00010599999999999999,
"loss": 0.6123,
"step": 53
},
{
"epoch": 0.15,
"learning_rate": 0.00010799999999999998,
"loss": 0.6386,
"step": 54
},
{
"epoch": 0.15,
"learning_rate": 0.00010999999999999998,
"loss": 0.5887,
"step": 55
},
{
"epoch": 0.16,
"learning_rate": 0.000112,
"loss": 0.6423,
"step": 56
},
{
"epoch": 0.16,
"learning_rate": 0.00011399999999999999,
"loss": 0.6878,
"step": 57
},
{
"epoch": 0.16,
"learning_rate": 0.00011599999999999999,
"loss": 0.6244,
"step": 58
},
{
"epoch": 0.17,
"learning_rate": 0.00011799999999999998,
"loss": 0.59,
"step": 59
},
{
"epoch": 0.17,
"learning_rate": 0.00011999999999999999,
"loss": 0.6907,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.000122,
"loss": 0.547,
"step": 61
},
{
"epoch": 0.17,
"learning_rate": 0.00012399999999999998,
"loss": 0.5439,
"step": 62
},
{
"epoch": 0.18,
"learning_rate": 0.00012599999999999997,
"loss": 0.6434,
"step": 63
},
{
"epoch": 0.18,
"learning_rate": 0.000128,
"loss": 0.6842,
"step": 64
},
{
"epoch": 0.18,
"learning_rate": 0.00013,
"loss": 0.5338,
"step": 65
},
{
"epoch": 0.19,
"learning_rate": 0.00013199999999999998,
"loss": 0.6296,
"step": 66
},
{
"epoch": 0.19,
"learning_rate": 0.00013399999999999998,
"loss": 0.6383,
"step": 67
},
{
"epoch": 0.19,
"learning_rate": 0.00013599999999999997,
"loss": 0.5724,
"step": 68
},
{
"epoch": 0.19,
"learning_rate": 0.000138,
"loss": 0.4764,
"step": 69
},
{
"epoch": 0.2,
"learning_rate": 0.00014,
"loss": 0.5626,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00014199999999999998,
"loss": 0.6306,
"step": 71
},
{
"epoch": 0.2,
"learning_rate": 0.00014399999999999998,
"loss": 0.6035,
"step": 72
},
{
"epoch": 0.2,
"learning_rate": 0.000146,
"loss": 0.5429,
"step": 73
},
{
"epoch": 0.21,
"learning_rate": 0.000148,
"loss": 0.619,
"step": 74
},
{
"epoch": 0.21,
"learning_rate": 0.00015,
"loss": 0.5222,
"step": 75
},
{
"epoch": 0.21,
"eval_loss": 0.5627657175064087,
"eval_runtime": 26.2982,
"eval_samples_per_second": 7.605,
"eval_steps_per_second": 1.901,
"step": 75
},
{
"dharma_eval_accuracy": 0.43427872625797975,
"dharma_eval_accuracy_ARC-Challenge": 0.5925925925925926,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.39344262295081966,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.4074074074074074,
"dharma_eval_accuracy_winogrande": 0.37037037037037035,
"dharma_loss": 2.0870093054771424,
"epoch": 0.21,
"step": 75
},
{
"epoch": 0.21,
"learning_rate": 0.000152,
"loss": 0.5294,
"step": 76
},
{
"epoch": 0.22,
"learning_rate": 0.00015399999999999998,
"loss": 0.6536,
"step": 77
},
{
"epoch": 0.22,
"learning_rate": 0.000156,
"loss": 0.6388,
"step": 78
},
{
"epoch": 0.22,
"learning_rate": 0.00015799999999999996,
"loss": 0.5226,
"step": 79
},
{
"epoch": 0.22,
"learning_rate": 0.00015999999999999999,
"loss": 0.5604,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 0.000162,
"loss": 0.6587,
"step": 81
},
{
"epoch": 0.23,
"learning_rate": 0.00016399999999999997,
"loss": 0.636,
"step": 82
},
{
"epoch": 0.23,
"learning_rate": 0.000166,
"loss": 0.5049,
"step": 83
},
{
"epoch": 0.24,
"learning_rate": 0.000168,
"loss": 0.5608,
"step": 84
},
{
"epoch": 0.24,
"learning_rate": 0.00016999999999999999,
"loss": 0.5538,
"step": 85
},
{
"epoch": 0.24,
"learning_rate": 0.000172,
"loss": 0.4616,
"step": 86
},
{
"epoch": 0.24,
"learning_rate": 0.00017399999999999997,
"loss": 0.5288,
"step": 87
},
{
"epoch": 0.25,
"learning_rate": 0.000176,
"loss": 0.7095,
"step": 88
},
{
"epoch": 0.25,
"learning_rate": 0.000178,
"loss": 0.5551,
"step": 89
},
{
"epoch": 0.25,
"learning_rate": 0.00017999999999999998,
"loss": 0.7202,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 0.00018199999999999998,
"loss": 0.6458,
"step": 91
},
{
"epoch": 0.26,
"learning_rate": 0.00018399999999999997,
"loss": 0.8067,
"step": 92
},
{
"epoch": 0.26,
"learning_rate": 0.000186,
"loss": 0.6365,
"step": 93
},
{
"epoch": 0.26,
"learning_rate": 0.000188,
"loss": 0.6281,
"step": 94
},
{
"epoch": 0.27,
"learning_rate": 0.00018999999999999998,
"loss": 0.607,
"step": 95
},
{
"epoch": 0.27,
"learning_rate": 0.00019199999999999998,
"loss": 0.5874,
"step": 96
},
{
"epoch": 0.27,
"learning_rate": 0.00019399999999999997,
"loss": 0.6351,
"step": 97
},
{
"epoch": 0.28,
"learning_rate": 0.00019599999999999997,
"loss": 0.5638,
"step": 98
},
{
"epoch": 0.28,
"learning_rate": 0.000198,
"loss": 0.5591,
"step": 99
},
{
"epoch": 0.28,
"learning_rate": 0.00019999999999999998,
"loss": 0.7135,
"step": 100
},
{
"epoch": 0.28,
"eval_loss": 0.5447857975959778,
"eval_runtime": 26.319,
"eval_samples_per_second": 7.599,
"eval_steps_per_second": 1.9,
"step": 100
},
{
"dharma_eval_accuracy": 0.3907201017200709,
"dharma_eval_accuracy_ARC-Challenge": 0.5370370370370371,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.39344262295081966,
"dharma_eval_accuracy_openbookqa": 0.05555555555555555,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.2777777777777778,
"dharma_loss": 2.4872950978279116,
"epoch": 0.28,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 0.00020199999999999998,
"loss": 0.5848,
"step": 101
},
{
"epoch": 0.29,
"learning_rate": 0.000204,
"loss": 0.5673,
"step": 102
},
{
"epoch": 0.29,
"learning_rate": 0.00020599999999999997,
"loss": 0.7776,
"step": 103
},
{
"epoch": 0.29,
"learning_rate": 0.000208,
"loss": 0.4885,
"step": 104
},
{
"epoch": 0.29,
"learning_rate": 0.00020999999999999998,
"loss": 0.7011,
"step": 105
},
{
"epoch": 0.3,
"learning_rate": 0.00021199999999999998,
"loss": 0.7296,
"step": 106
},
{
"epoch": 0.3,
"learning_rate": 0.000214,
"loss": 0.5359,
"step": 107
},
{
"epoch": 0.3,
"learning_rate": 0.00021599999999999996,
"loss": 0.5348,
"step": 108
},
{
"epoch": 0.31,
"learning_rate": 0.00021799999999999999,
"loss": 0.7451,
"step": 109
},
{
"epoch": 0.31,
"learning_rate": 0.00021999999999999995,
"loss": 0.4569,
"step": 110
},
{
"epoch": 0.31,
"learning_rate": 0.00022199999999999998,
"loss": 0.5391,
"step": 111
},
{
"epoch": 0.31,
"learning_rate": 0.000224,
"loss": 0.639,
"step": 112
},
{
"epoch": 0.32,
"learning_rate": 0.00022599999999999996,
"loss": 0.5961,
"step": 113
},
{
"epoch": 0.32,
"learning_rate": 0.00022799999999999999,
"loss": 0.5731,
"step": 114
},
{
"epoch": 0.32,
"learning_rate": 0.00023,
"loss": 0.6014,
"step": 115
},
{
"epoch": 0.33,
"learning_rate": 0.00023199999999999997,
"loss": 0.5322,
"step": 116
},
{
"epoch": 0.33,
"learning_rate": 0.000234,
"loss": 0.8166,
"step": 117
},
{
"epoch": 0.33,
"learning_rate": 0.00023599999999999996,
"loss": 0.6645,
"step": 118
},
{
"epoch": 0.33,
"learning_rate": 0.00023799999999999998,
"loss": 0.6643,
"step": 119
},
{
"epoch": 0.34,
"learning_rate": 0.00023999999999999998,
"loss": 0.7731,
"step": 120
},
{
"epoch": 0.34,
"learning_rate": 0.00024199999999999997,
"loss": 0.5809,
"step": 121
},
{
"epoch": 0.34,
"learning_rate": 0.000244,
"loss": 0.3629,
"step": 122
},
{
"epoch": 0.35,
"learning_rate": 0.00024599999999999996,
"loss": 0.5334,
"step": 123
},
{
"epoch": 0.35,
"learning_rate": 0.00024799999999999996,
"loss": 0.5752,
"step": 124
},
{
"epoch": 0.35,
"learning_rate": 0.00025,
"loss": 0.7159,
"step": 125
},
{
"epoch": 0.35,
"eval_loss": 0.5494118332862854,
"eval_runtime": 26.2877,
"eval_samples_per_second": 7.608,
"eval_steps_per_second": 1.902,
"step": 125
},
{
"dharma_eval_accuracy": 0.3134701340221392,
"dharma_eval_accuracy_ARC-Challenge": 0.3888888888888889,
"dharma_eval_accuracy_ARC-Easy": 0.46296296296296297,
"dharma_eval_accuracy_BoolQ": 0.2962962962962963,
"dharma_eval_accuracy_MMLU": 0.3888888888888889,
"dharma_eval_accuracy_agieval": 0.1864406779661017,
"dharma_eval_accuracy_bigbench": 0.2459016393442623,
"dharma_eval_accuracy_openbookqa": 0.16666666666666666,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.3333333333333333,
"dharma_loss": 2.170360087394714,
"epoch": 0.35,
"step": 125
},
{
"epoch": 0.35,
"learning_rate": 0.00025199999999999995,
"loss": 0.5875,
"step": 126
},
{
"epoch": 0.36,
"learning_rate": 0.000254,
"loss": 0.822,
"step": 127
},
{
"epoch": 0.36,
"learning_rate": 0.000256,
"loss": 0.6072,
"step": 128
},
{
"epoch": 0.36,
"learning_rate": 0.000258,
"loss": 0.6567,
"step": 129
},
{
"epoch": 0.36,
"learning_rate": 0.00026,
"loss": 0.76,
"step": 130
},
{
"epoch": 0.37,
"learning_rate": 0.00026199999999999997,
"loss": 0.7146,
"step": 131
},
{
"epoch": 0.37,
"learning_rate": 0.00026399999999999997,
"loss": 0.4487,
"step": 132
},
{
"epoch": 0.37,
"learning_rate": 0.000266,
"loss": 0.5557,
"step": 133
},
{
"epoch": 0.38,
"learning_rate": 0.00026799999999999995,
"loss": 0.5519,
"step": 134
},
{
"epoch": 0.38,
"learning_rate": 0.00027,
"loss": 0.5053,
"step": 135
},
{
"epoch": 0.38,
"learning_rate": 0.00027199999999999994,
"loss": 0.4679,
"step": 136
},
{
"epoch": 0.38,
"learning_rate": 0.000274,
"loss": 0.6165,
"step": 137
},
{
"epoch": 0.39,
"learning_rate": 0.000276,
"loss": 0.7692,
"step": 138
},
{
"epoch": 0.39,
"learning_rate": 0.000278,
"loss": 0.5988,
"step": 139
},
{
"epoch": 0.39,
"learning_rate": 0.00028,
"loss": 0.7898,
"step": 140
},
{
"epoch": 0.4,
"learning_rate": 0.00028199999999999997,
"loss": 0.5874,
"step": 141
},
{
"epoch": 0.4,
"learning_rate": 0.00028399999999999996,
"loss": 0.6034,
"step": 142
},
{
"epoch": 0.4,
"learning_rate": 0.00028599999999999996,
"loss": 0.5387,
"step": 143
},
{
"epoch": 0.4,
"learning_rate": 0.00028799999999999995,
"loss": 0.7721,
"step": 144
},
{
"epoch": 0.41,
"learning_rate": 0.00029,
"loss": 0.6096,
"step": 145
},
{
"epoch": 0.41,
"learning_rate": 0.000292,
"loss": 0.5389,
"step": 146
},
{
"epoch": 0.41,
"learning_rate": 0.000294,
"loss": 0.5682,
"step": 147
},
{
"epoch": 0.42,
"learning_rate": 0.000296,
"loss": 1.1456,
"step": 148
},
{
"epoch": 0.42,
"learning_rate": 0.000298,
"loss": 0.5876,
"step": 149
},
{
"epoch": 0.42,
"learning_rate": 0.0003,
"loss": 0.5566,
"step": 150
},
{
"epoch": 0.42,
"eval_loss": 0.5394015312194824,
"eval_runtime": 26.287,
"eval_samples_per_second": 7.608,
"eval_steps_per_second": 1.902,
"step": 150
},
{
"dharma_eval_accuracy": 0.40817636814981756,
"dharma_eval_accuracy_ARC-Challenge": 0.3888888888888889,
"dharma_eval_accuracy_ARC-Easy": 0.5370370370370371,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.35185185185185186,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.36065573770491804,
"dharma_eval_accuracy_openbookqa": 0.25925925925925924,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.362686163902283,
"epoch": 0.42,
"step": 150
},
{
"epoch": 0.42,
"learning_rate": 0.0002999999685313931,
"loss": 0.6497,
"step": 151
},
{
"epoch": 0.43,
"learning_rate": 0.00029999987412558584,
"loss": 0.5076,
"step": 152
},
{
"epoch": 0.43,
"learning_rate": 0.0002999997167826177,
"loss": 0.4569,
"step": 153
},
{
"epoch": 0.43,
"learning_rate": 0.00029999949650255474,
"loss": 0.7303,
"step": 154
},
{
"epoch": 0.44,
"learning_rate": 0.0002999992132854894,
"loss": 0.5881,
"step": 155
},
{
"epoch": 0.44,
"learning_rate": 0.0002999988671315404,
"loss": 0.4117,
"step": 156
},
{
"epoch": 0.44,
"learning_rate": 0.0002999984580408531,
"loss": 0.5484,
"step": 157
},
{
"epoch": 0.44,
"learning_rate": 0.00029999798601359915,
"loss": 0.5758,
"step": 158
},
{
"epoch": 0.45,
"learning_rate": 0.00029999745104997654,
"loss": 0.5546,
"step": 159
},
{
"epoch": 0.45,
"learning_rate": 0.0002999968531502098,
"loss": 0.6216,
"step": 160
},
{
"epoch": 0.45,
"learning_rate": 0.0002999961923145497,
"loss": 0.4963,
"step": 161
},
{
"epoch": 0.45,
"learning_rate": 0.0002999954685432736,
"loss": 0.7437,
"step": 162
},
{
"epoch": 0.46,
"learning_rate": 0.0002999946818366852,
"loss": 0.5865,
"step": 163
},
{
"epoch": 0.46,
"learning_rate": 0.00029999383219511444,
"loss": 0.3994,
"step": 164
},
{
"epoch": 0.46,
"learning_rate": 0.0002999929196189179,
"loss": 0.634,
"step": 165
},
{
"epoch": 0.47,
"learning_rate": 0.0002999919441084786,
"loss": 0.6072,
"step": 166
},
{
"epoch": 0.47,
"learning_rate": 0.0002999909056642057,
"loss": 0.4711,
"step": 167
},
{
"epoch": 0.47,
"learning_rate": 0.00029998980428653496,
"loss": 0.5343,
"step": 168
},
{
"epoch": 0.47,
"learning_rate": 0.00029998863997592843,
"loss": 0.6088,
"step": 169
},
{
"epoch": 0.48,
"learning_rate": 0.00029998741273287477,
"loss": 0.5484,
"step": 170
},
{
"epoch": 0.48,
"learning_rate": 0.0002999861225578888,
"loss": 0.5482,
"step": 171
},
{
"epoch": 0.48,
"learning_rate": 0.00029998476945151183,
"loss": 0.4954,
"step": 172
},
{
"epoch": 0.49,
"learning_rate": 0.00029998335341431174,
"loss": 0.4553,
"step": 173
},
{
"epoch": 0.49,
"learning_rate": 0.0002999818744468825,
"loss": 0.4391,
"step": 174
},
{
"epoch": 0.49,
"learning_rate": 0.0002999803325498448,
"loss": 0.5352,
"step": 175
},
{
"epoch": 0.49,
"eval_loss": 0.5328384041786194,
"eval_runtime": 26.2738,
"eval_samples_per_second": 7.612,
"eval_steps_per_second": 1.903,
"step": 175
},
{
"dharma_eval_accuracy": 0.3837828752156806,
"dharma_eval_accuracy_ARC-Challenge": 0.4444444444444444,
"dharma_eval_accuracy_ARC-Easy": 0.5370370370370371,
"dharma_eval_accuracy_BoolQ": 0.6111111111111112,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.32786885245901637,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.35185185185185186,
"dharma_loss": 2.2411803226470948,
"epoch": 0.49,
"step": 175
},
{
"epoch": 0.49,
"learning_rate": 0.0002999787277238455,
"loss": 0.7305,
"step": 176
},
{
"epoch": 0.5,
"learning_rate": 0.000299977059969558,
"loss": 0.6827,
"step": 177
},
{
"epoch": 0.5,
"learning_rate": 0.00029997532928768204,
"loss": 0.5133,
"step": 178
},
{
"epoch": 0.5,
"learning_rate": 0.00029997353567894384,
"loss": 0.5676,
"step": 179
},
{
"epoch": 0.51,
"learning_rate": 0.0002999716791440959,
"loss": 0.6915,
"step": 180
},
{
"epoch": 0.51,
"learning_rate": 0.00029996975968391715,
"loss": 0.482,
"step": 181
},
{
"epoch": 0.51,
"learning_rate": 0.000299967777299213,
"loss": 0.7289,
"step": 182
},
{
"epoch": 0.51,
"learning_rate": 0.0002999657319908153,
"loss": 0.6318,
"step": 183
},
{
"epoch": 0.52,
"learning_rate": 0.0002999636237595821,
"loss": 0.6356,
"step": 184
},
{
"epoch": 0.52,
"learning_rate": 0.00029996145260639806,
"loss": 0.5514,
"step": 185
},
{
"epoch": 0.52,
"learning_rate": 0.0002999592185321741,
"loss": 0.6791,
"step": 186
},
{
"epoch": 0.52,
"learning_rate": 0.0002999569215378477,
"loss": 0.6175,
"step": 187
},
{
"epoch": 0.53,
"learning_rate": 0.0002999545616243825,
"loss": 0.5652,
"step": 188
},
{
"epoch": 0.53,
"learning_rate": 0.00029995213879276876,
"loss": 0.5743,
"step": 189
},
{
"epoch": 0.53,
"learning_rate": 0.000299949653044023,
"loss": 0.4428,
"step": 190
},
{
"epoch": 0.54,
"learning_rate": 0.00029994710437918824,
"loss": 0.7182,
"step": 191
},
{
"epoch": 0.54,
"learning_rate": 0.0002999444927993338,
"loss": 0.7305,
"step": 192
},
{
"epoch": 0.54,
"learning_rate": 0.00029994181830555555,
"loss": 0.6492,
"step": 193
},
{
"epoch": 0.54,
"learning_rate": 0.00029993908089897555,
"loss": 0.5823,
"step": 194
},
{
"epoch": 0.55,
"learning_rate": 0.00029993628058074245,
"loss": 0.4601,
"step": 195
},
{
"epoch": 0.55,
"learning_rate": 0.00029993341735203114,
"loss": 0.5288,
"step": 196
},
{
"epoch": 0.55,
"learning_rate": 0.00029993049121404303,
"loss": 0.5001,
"step": 197
},
{
"epoch": 0.56,
"learning_rate": 0.0002999275021680058,
"loss": 0.6355,
"step": 198
},
{
"epoch": 0.56,
"learning_rate": 0.0002999244502151737,
"loss": 0.5916,
"step": 199
},
{
"epoch": 0.56,
"learning_rate": 0.00029992133535682725,
"loss": 0.5561,
"step": 200
},
{
"epoch": 0.56,
"eval_loss": 0.5259036421775818,
"eval_runtime": 26.3348,
"eval_samples_per_second": 7.595,
"eval_steps_per_second": 1.899,
"step": 200
},
{
"dharma_eval_accuracy": 0.4344639628977871,
"dharma_eval_accuracy_ARC-Challenge": 0.5185185185185185,
"dharma_eval_accuracy_ARC-Easy": 0.5740740740740741,
"dharma_eval_accuracy_BoolQ": 0.7222222222222222,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.288135593220339,
"dharma_eval_accuracy_bigbench": 0.3442622950819672,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.42592592592592593,
"dharma_eval_accuracy_winogrande": 0.4444444444444444,
"dharma_loss": 2.388957099914551,
"epoch": 0.56,
"step": 200
}
],
"max_steps": 5000,
"num_train_epochs": 15,
"total_flos": 6.277915233887846e+16,
"trial_name": null,
"trial_params": null
}
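A minimal sketch of how the state above can be summarized, assuming it is saved as trainer_state.json next to the checkpoint (the path is an assumption). It uses only the Python standard library and the standard Hugging Face trainer_state.json layout: evaluation entries in log_history carry "eval_loss", while ordinary training-step entries carry "loss".

import json

# Load the trainer state dumped by the Hugging Face Trainer.
# The filename/path here is an assumption; adjust as needed.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Print the eval_loss recorded at each evaluation step.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]:>4}: eval_loss={entry["eval_loss"]:.4f}')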