{
"best_metric": 0.5110138058662415,
"best_model_checkpoint": "./output_v2/7b_cluster00_Nous-Hermes-llama-2-7b_codellama_blob_1/checkpoint-600",
"epoch": 1.8947368421052633,
"global_step": 675,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2e-06,
"loss": 0.7795,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4e-06,
"loss": 0.6789,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 5.999999999999999e-06,
"loss": 0.6843,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 8e-06,
"loss": 0.6883,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 9.999999999999999e-06,
"loss": 0.5969,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 1.1999999999999999e-05,
"loss": 0.6588,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 1.4e-05,
"loss": 0.6267,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 1.6e-05,
"loss": 0.6893,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 1.7999999999999997e-05,
"loss": 0.5368,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.6016,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 2.2e-05,
"loss": 0.7062,
"step": 11
},
{
"epoch": 0.03,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.6317,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 2.6e-05,
"loss": 0.5637,
"step": 13
},
{
"epoch": 0.04,
"learning_rate": 2.8e-05,
"loss": 0.5495,
"step": 14
},
{
"epoch": 0.04,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.5845,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 3.2e-05,
"loss": 0.706,
"step": 16
},
{
"epoch": 0.05,
"learning_rate": 3.399999999999999e-05,
"loss": 0.9474,
"step": 17
},
{
"epoch": 0.05,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.7117,
"step": 18
},
{
"epoch": 0.05,
"learning_rate": 3.8e-05,
"loss": 0.5116,
"step": 19
},
{
"epoch": 0.06,
"learning_rate": 3.9999999999999996e-05,
"loss": 0.6477,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 4.2e-05,
"loss": 1.0475,
"step": 21
},
{
"epoch": 0.06,
"learning_rate": 4.4e-05,
"loss": 0.926,
"step": 22
},
{
"epoch": 0.06,
"learning_rate": 4.599999999999999e-05,
"loss": 0.7514,
"step": 23
},
{
"epoch": 0.07,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.7025,
"step": 24
},
{
"epoch": 0.07,
"learning_rate": 4.9999999999999996e-05,
"loss": 0.6988,
"step": 25
},
{
"epoch": 0.07,
"eval_loss": 0.6739590167999268,
"eval_runtime": 26.3533,
"eval_samples_per_second": 7.589,
"eval_steps_per_second": 1.897,
"step": 25
},
{
"dharma_eval_accuracy": 0.46490680424489195,
"dharma_eval_accuracy_ARC-Challenge": 0.6111111111111112,
"dharma_eval_accuracy_ARC-Easy": 0.7592592592592593,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.42592592592592593,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.4098360655737705,
"dharma_eval_accuracy_openbookqa": 0.2962962962962963,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.37037037037037035,
"dharma_loss": 3.781016577720642,
"epoch": 0.07,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 5.2e-05,
"loss": 0.6124,
"step": 26
},
{
"epoch": 0.08,
"learning_rate": 5.399999999999999e-05,
"loss": 0.6074,
"step": 27
},
{
"epoch": 0.08,
"learning_rate": 5.6e-05,
"loss": 0.4834,
"step": 28
},
{
"epoch": 0.08,
"learning_rate": 5.7999999999999994e-05,
"loss": 0.6022,
"step": 29
},
{
"epoch": 0.08,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.8702,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 6.199999999999999e-05,
"loss": 0.991,
"step": 31
},
{
"epoch": 0.09,
"learning_rate": 6.4e-05,
"loss": 0.736,
"step": 32
},
{
"epoch": 0.09,
"learning_rate": 6.599999999999999e-05,
"loss": 0.5063,
"step": 33
},
{
"epoch": 0.1,
"learning_rate": 6.799999999999999e-05,
"loss": 0.6644,
"step": 34
},
{
"epoch": 0.1,
"learning_rate": 7e-05,
"loss": 0.6932,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 7.199999999999999e-05,
"loss": 0.7433,
"step": 36
},
{
"epoch": 0.1,
"learning_rate": 7.4e-05,
"loss": 0.8272,
"step": 37
},
{
"epoch": 0.11,
"learning_rate": 7.6e-05,
"loss": 0.5963,
"step": 38
},
{
"epoch": 0.11,
"learning_rate": 7.8e-05,
"loss": 0.558,
"step": 39
},
{
"epoch": 0.11,
"learning_rate": 7.999999999999999e-05,
"loss": 0.7975,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 8.199999999999999e-05,
"loss": 0.8271,
"step": 41
},
{
"epoch": 0.12,
"learning_rate": 8.4e-05,
"loss": 0.7612,
"step": 42
},
{
"epoch": 0.12,
"learning_rate": 8.6e-05,
"loss": 0.6064,
"step": 43
},
{
"epoch": 0.12,
"learning_rate": 8.8e-05,
"loss": 0.5793,
"step": 44
},
{
"epoch": 0.13,
"learning_rate": 8.999999999999999e-05,
"loss": 0.6009,
"step": 45
},
{
"epoch": 0.13,
"learning_rate": 9.199999999999999e-05,
"loss": 0.5151,
"step": 46
},
{
"epoch": 0.13,
"learning_rate": 9.4e-05,
"loss": 0.5317,
"step": 47
},
{
"epoch": 0.13,
"learning_rate": 9.599999999999999e-05,
"loss": 0.6964,
"step": 48
},
{
"epoch": 0.14,
"learning_rate": 9.799999999999998e-05,
"loss": 0.6153,
"step": 49
},
{
"epoch": 0.14,
"learning_rate": 9.999999999999999e-05,
"loss": 0.6366,
"step": 50
},
{
"epoch": 0.14,
"eval_loss": 0.6041869521141052,
"eval_runtime": 26.2576,
"eval_samples_per_second": 7.617,
"eval_steps_per_second": 1.904,
"step": 50
},
{
"dharma_eval_accuracy": 0.4823682161368556,
"dharma_eval_accuracy_ARC-Challenge": 0.6666666666666666,
"dharma_eval_accuracy_ARC-Easy": 0.6851851851851852,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.42592592592592593,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.45901639344262296,
"dharma_eval_accuracy_openbookqa": 0.3148148148148148,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.46296296296296297,
"dharma_loss": 2.14614901971817,
"epoch": 0.14,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 0.000102,
"loss": 0.6561,
"step": 51
},
{
"epoch": 0.15,
"learning_rate": 0.000104,
"loss": 0.9783,
"step": 52
},
{
"epoch": 0.15,
"learning_rate": 0.00010599999999999999,
"loss": 0.6149,
"step": 53
},
{
"epoch": 0.15,
"learning_rate": 0.00010799999999999998,
"loss": 0.6407,
"step": 54
},
{
"epoch": 0.15,
"learning_rate": 0.00010999999999999998,
"loss": 0.5906,
"step": 55
},
{
"epoch": 0.16,
"learning_rate": 0.000112,
"loss": 0.6447,
"step": 56
},
{
"epoch": 0.16,
"learning_rate": 0.00011399999999999999,
"loss": 0.6892,
"step": 57
},
{
"epoch": 0.16,
"learning_rate": 0.00011599999999999999,
"loss": 0.6289,
"step": 58
},
{
"epoch": 0.17,
"learning_rate": 0.00011799999999999998,
"loss": 0.5937,
"step": 59
},
{
"epoch": 0.17,
"learning_rate": 0.00011999999999999999,
"loss": 0.6936,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.000122,
"loss": 0.5546,
"step": 61
},
{
"epoch": 0.17,
"learning_rate": 0.00012399999999999998,
"loss": 0.5464,
"step": 62
},
{
"epoch": 0.18,
"learning_rate": 0.00012599999999999997,
"loss": 0.6424,
"step": 63
},
{
"epoch": 0.18,
"learning_rate": 0.000128,
"loss": 0.6849,
"step": 64
},
{
"epoch": 0.18,
"learning_rate": 0.00013,
"loss": 0.5383,
"step": 65
},
{
"epoch": 0.19,
"learning_rate": 0.00013199999999999998,
"loss": 0.6284,
"step": 66
},
{
"epoch": 0.19,
"learning_rate": 0.00013399999999999998,
"loss": 0.6387,
"step": 67
},
{
"epoch": 0.19,
"learning_rate": 0.00013599999999999997,
"loss": 0.5718,
"step": 68
},
{
"epoch": 0.19,
"learning_rate": 0.000138,
"loss": 0.479,
"step": 69
},
{
"epoch": 0.2,
"learning_rate": 0.00014,
"loss": 0.5653,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00014199999999999998,
"loss": 0.6335,
"step": 71
},
{
"epoch": 0.2,
"learning_rate": 0.00014399999999999998,
"loss": 0.6159,
"step": 72
},
{
"epoch": 0.2,
"learning_rate": 0.000146,
"loss": 0.5467,
"step": 73
},
{
"epoch": 0.21,
"learning_rate": 0.000148,
"loss": 0.6248,
"step": 74
},
{
"epoch": 0.21,
"learning_rate": 0.00015,
"loss": 0.5263,
"step": 75
},
{
"epoch": 0.21,
"eval_loss": 0.5646250247955322,
"eval_runtime": 26.2937,
"eval_samples_per_second": 7.606,
"eval_steps_per_second": 1.902,
"step": 75
},
{
"dharma_eval_accuracy": 0.43486359379663075,
"dharma_eval_accuracy_ARC-Challenge": 0.5925925925925926,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.2033898305084746,
"dharma_eval_accuracy_bigbench": 0.3770491803278688,
"dharma_eval_accuracy_openbookqa": 0.12962962962962962,
"dharma_eval_accuracy_truthful_qa": 0.4074074074074074,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.0373267664909362,
"epoch": 0.21,
"step": 75
},
{
"epoch": 0.21,
"learning_rate": 0.000152,
"loss": 0.5322,
"step": 76
},
{
"epoch": 0.22,
"learning_rate": 0.00015399999999999998,
"loss": 0.6562,
"step": 77
},
{
"epoch": 0.22,
"learning_rate": 0.000156,
"loss": 0.6459,
"step": 78
},
{
"epoch": 0.22,
"learning_rate": 0.00015799999999999996,
"loss": 0.5265,
"step": 79
},
{
"epoch": 0.22,
"learning_rate": 0.00015999999999999999,
"loss": 0.5635,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 0.000162,
"loss": 0.6606,
"step": 81
},
{
"epoch": 0.23,
"learning_rate": 0.00016399999999999997,
"loss": 0.6397,
"step": 82
},
{
"epoch": 0.23,
"learning_rate": 0.000166,
"loss": 0.5062,
"step": 83
},
{
"epoch": 0.24,
"learning_rate": 0.000168,
"loss": 0.5622,
"step": 84
},
{
"epoch": 0.24,
"learning_rate": 0.00016999999999999999,
"loss": 0.5564,
"step": 85
},
{
"epoch": 0.24,
"learning_rate": 0.000172,
"loss": 0.4649,
"step": 86
},
{
"epoch": 0.24,
"learning_rate": 0.00017399999999999997,
"loss": 0.5314,
"step": 87
},
{
"epoch": 0.25,
"learning_rate": 0.000176,
"loss": 0.7116,
"step": 88
},
{
"epoch": 0.25,
"learning_rate": 0.000178,
"loss": 0.5589,
"step": 89
},
{
"epoch": 0.25,
"learning_rate": 0.00017999999999999998,
"loss": 0.7248,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 0.00018199999999999998,
"loss": 0.6522,
"step": 91
},
{
"epoch": 0.26,
"learning_rate": 0.00018399999999999997,
"loss": 0.8075,
"step": 92
},
{
"epoch": 0.26,
"learning_rate": 0.000186,
"loss": 0.6412,
"step": 93
},
{
"epoch": 0.26,
"learning_rate": 0.000188,
"loss": 0.6319,
"step": 94
},
{
"epoch": 0.27,
"learning_rate": 0.00018999999999999998,
"loss": 0.6112,
"step": 95
},
{
"epoch": 0.27,
"learning_rate": 0.00019199999999999998,
"loss": 0.5936,
"step": 96
},
{
"epoch": 0.27,
"learning_rate": 0.00019399999999999997,
"loss": 0.6387,
"step": 97
},
{
"epoch": 0.28,
"learning_rate": 0.00019599999999999997,
"loss": 0.567,
"step": 98
},
{
"epoch": 0.28,
"learning_rate": 0.000198,
"loss": 0.5639,
"step": 99
},
{
"epoch": 0.28,
"learning_rate": 0.00019999999999999998,
"loss": 0.715,
"step": 100
},
{
"epoch": 0.28,
"eval_loss": 0.5477311611175537,
"eval_runtime": 26.3038,
"eval_samples_per_second": 7.603,
"eval_steps_per_second": 1.901,
"step": 100
},
{
"dharma_eval_accuracy": 0.39048398217611885,
"dharma_eval_accuracy_ARC-Challenge": 0.5740740740740741,
"dharma_eval_accuracy_ARC-Easy": 0.48148148148148145,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.4098360655737705,
"dharma_eval_accuracy_openbookqa": 0.07407407407407407,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.24074074074074073,
"dharma_loss": 2.541038901805878,
"epoch": 0.28,
"step": 100
},
{
"epoch": 0.28,
"learning_rate": 0.00020199999999999998,
"loss": 0.5853,
"step": 101
},
{
"epoch": 0.29,
"learning_rate": 0.000204,
"loss": 0.5712,
"step": 102
},
{
"epoch": 0.29,
"learning_rate": 0.00020599999999999997,
"loss": 0.7782,
"step": 103
},
{
"epoch": 0.29,
"learning_rate": 0.000208,
"loss": 0.4925,
"step": 104
},
{
"epoch": 0.29,
"learning_rate": 0.00020999999999999998,
"loss": 0.7024,
"step": 105
},
{
"epoch": 0.3,
"learning_rate": 0.00021199999999999998,
"loss": 0.7275,
"step": 106
},
{
"epoch": 0.3,
"learning_rate": 0.000214,
"loss": 0.5375,
"step": 107
},
{
"epoch": 0.3,
"learning_rate": 0.00021599999999999996,
"loss": 0.5393,
"step": 108
},
{
"epoch": 0.31,
"learning_rate": 0.00021799999999999999,
"loss": 0.7458,
"step": 109
},
{
"epoch": 0.31,
"learning_rate": 0.00021999999999999995,
"loss": 0.4592,
"step": 110
},
{
"epoch": 0.31,
"learning_rate": 0.00022199999999999998,
"loss": 0.5413,
"step": 111
},
{
"epoch": 0.31,
"learning_rate": 0.000224,
"loss": 0.6476,
"step": 112
},
{
"epoch": 0.32,
"learning_rate": 0.00022599999999999996,
"loss": 0.5974,
"step": 113
},
{
"epoch": 0.32,
"learning_rate": 0.00022799999999999999,
"loss": 0.5752,
"step": 114
},
{
"epoch": 0.32,
"learning_rate": 0.00023,
"loss": 0.6011,
"step": 115
},
{
"epoch": 0.33,
"learning_rate": 0.00023199999999999997,
"loss": 0.5327,
"step": 116
},
{
"epoch": 0.33,
"learning_rate": 0.000234,
"loss": 0.8215,
"step": 117
},
{
"epoch": 0.33,
"learning_rate": 0.00023599999999999996,
"loss": 0.6684,
"step": 118
},
{
"epoch": 0.33,
"learning_rate": 0.00023799999999999998,
"loss": 0.6667,
"step": 119
},
{
"epoch": 0.34,
"learning_rate": 0.00023999999999999998,
"loss": 0.7711,
"step": 120
},
{
"epoch": 0.34,
"learning_rate": 0.00024199999999999997,
"loss": 0.5782,
"step": 121
},
{
"epoch": 0.34,
"learning_rate": 0.000244,
"loss": 0.3653,
"step": 122
},
{
"epoch": 0.35,
"learning_rate": 0.00024599999999999996,
"loss": 0.5351,
"step": 123
},
{
"epoch": 0.35,
"learning_rate": 0.00024799999999999996,
"loss": 0.5766,
"step": 124
},
{
"epoch": 0.35,
"learning_rate": 0.00025,
"loss": 0.7174,
"step": 125
},
{
"epoch": 0.35,
"eval_loss": 0.5514150261878967,
"eval_runtime": 26.3198,
"eval_samples_per_second": 7.599,
"eval_steps_per_second": 1.9,
"step": 125
},
{
"dharma_eval_accuracy": 0.32516919994923144,
"dharma_eval_accuracy_ARC-Challenge": 0.3888888888888889,
"dharma_eval_accuracy_ARC-Easy": 0.48148148148148145,
"dharma_eval_accuracy_BoolQ": 0.3333333333333333,
"dharma_eval_accuracy_MMLU": 0.3148148148148148,
"dharma_eval_accuracy_agieval": 0.2033898305084746,
"dharma_eval_accuracy_bigbench": 0.2786885245901639,
"dharma_eval_accuracy_openbookqa": 0.16666666666666666,
"dharma_eval_accuracy_truthful_qa": 0.35185185185185186,
"dharma_eval_accuracy_winogrande": 0.4074074074074074,
"dharma_loss": 2.2513291540145874,
"epoch": 0.35,
"step": 125
},
{
"epoch": 0.35,
"learning_rate": 0.00025199999999999995,
"loss": 0.5899,
"step": 126
},
{
"epoch": 0.36,
"learning_rate": 0.000254,
"loss": 0.8261,
"step": 127
},
{
"epoch": 0.36,
"learning_rate": 0.000256,
"loss": 0.6096,
"step": 128
},
{
"epoch": 0.36,
"learning_rate": 0.000258,
"loss": 0.6574,
"step": 129
},
{
"epoch": 0.36,
"learning_rate": 0.00026,
"loss": 0.7565,
"step": 130
},
{
"epoch": 0.37,
"learning_rate": 0.00026199999999999997,
"loss": 0.7156,
"step": 131
},
{
"epoch": 0.37,
"learning_rate": 0.00026399999999999997,
"loss": 0.4399,
"step": 132
},
{
"epoch": 0.37,
"learning_rate": 0.000266,
"loss": 0.5608,
"step": 133
},
{
"epoch": 0.38,
"learning_rate": 0.00026799999999999995,
"loss": 0.553,
"step": 134
},
{
"epoch": 0.38,
"learning_rate": 0.00027,
"loss": 0.5061,
"step": 135
},
{
"epoch": 0.38,
"learning_rate": 0.00027199999999999994,
"loss": 0.4781,
"step": 136
},
{
"epoch": 0.38,
"learning_rate": 0.000274,
"loss": 0.6203,
"step": 137
},
{
"epoch": 0.39,
"learning_rate": 0.000276,
"loss": 0.7665,
"step": 138
},
{
"epoch": 0.39,
"learning_rate": 0.000278,
"loss": 0.6048,
"step": 139
},
{
"epoch": 0.39,
"learning_rate": 0.00028,
"loss": 0.789,
"step": 140
},
{
"epoch": 0.4,
"learning_rate": 0.00028199999999999997,
"loss": 0.5875,
"step": 141
},
{
"epoch": 0.4,
"learning_rate": 0.00028399999999999996,
"loss": 0.603,
"step": 142
},
{
"epoch": 0.4,
"learning_rate": 0.00028599999999999996,
"loss": 0.5381,
"step": 143
},
{
"epoch": 0.4,
"learning_rate": 0.00028799999999999995,
"loss": 0.7706,
"step": 144
},
{
"epoch": 0.41,
"learning_rate": 0.00029,
"loss": 0.6116,
"step": 145
},
{
"epoch": 0.41,
"learning_rate": 0.000292,
"loss": 0.5398,
"step": 146
},
{
"epoch": 0.41,
"learning_rate": 0.000294,
"loss": 0.5728,
"step": 147
},
{
"epoch": 0.42,
"learning_rate": 0.000296,
"loss": 1.1167,
"step": 148
},
{
"epoch": 0.42,
"learning_rate": 0.000298,
"loss": 0.5872,
"step": 149
},
{
"epoch": 0.42,
"learning_rate": 0.0003,
"loss": 0.559,
"step": 150
},
{
"epoch": 0.42,
"eval_loss": 0.5424469113349915,
"eval_runtime": 26.2188,
"eval_samples_per_second": 7.628,
"eval_steps_per_second": 1.907,
"step": 150
},
{
"dharma_eval_accuracy": 0.38760023646257474,
"dharma_eval_accuracy_ARC-Challenge": 0.46296296296296297,
"dharma_eval_accuracy_ARC-Easy": 0.42592592592592593,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2777777777777778,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.36065573770491804,
"dharma_eval_accuracy_openbookqa": 0.25925925925925924,
"dharma_eval_accuracy_truthful_qa": 0.37037037037037035,
"dharma_eval_accuracy_winogrande": 0.4074074074074074,
"dharma_loss": 2.487543685913086,
"epoch": 0.42,
"step": 150
},
{
"epoch": 0.42,
"learning_rate": 0.0002999999685313931,
"loss": 0.6534,
"step": 151
},
{
"epoch": 0.43,
"learning_rate": 0.00029999987412558584,
"loss": 0.5063,
"step": 152
},
{
"epoch": 0.43,
"learning_rate": 0.0002999997167826177,
"loss": 0.4621,
"step": 153
},
{
"epoch": 0.43,
"learning_rate": 0.00029999949650255474,
"loss": 0.7303,
"step": 154
},
{
"epoch": 0.44,
"learning_rate": 0.0002999992132854894,
"loss": 0.5919,
"step": 155
},
{
"epoch": 0.44,
"learning_rate": 0.0002999988671315404,
"loss": 0.4129,
"step": 156
},
{
"epoch": 0.44,
"learning_rate": 0.0002999984580408531,
"loss": 0.5495,
"step": 157
},
{
"epoch": 0.44,
"learning_rate": 0.00029999798601359915,
"loss": 0.5766,
"step": 158
},
{
"epoch": 0.45,
"learning_rate": 0.00029999745104997654,
"loss": 0.5602,
"step": 159
},
{
"epoch": 0.45,
"learning_rate": 0.0002999968531502098,
"loss": 0.6254,
"step": 160
},
{
"epoch": 0.45,
"learning_rate": 0.0002999961923145497,
"loss": 0.4962,
"step": 161
},
{
"epoch": 0.45,
"learning_rate": 0.0002999954685432736,
"loss": 0.7463,
"step": 162
},
{
"epoch": 0.46,
"learning_rate": 0.0002999946818366852,
"loss": 0.5892,
"step": 163
},
{
"epoch": 0.46,
"learning_rate": 0.00029999383219511444,
"loss": 0.3993,
"step": 164
},
{
"epoch": 0.46,
"learning_rate": 0.0002999929196189179,
"loss": 0.6364,
"step": 165
},
{
"epoch": 0.47,
"learning_rate": 0.0002999919441084786,
"loss": 0.6043,
"step": 166
},
{
"epoch": 0.47,
"learning_rate": 0.0002999909056642057,
"loss": 0.4706,
"step": 167
},
{
"epoch": 0.47,
"learning_rate": 0.00029998980428653496,
"loss": 0.5362,
"step": 168
},
{
"epoch": 0.47,
"learning_rate": 0.00029998863997592843,
"loss": 0.6128,
"step": 169
},
{
"epoch": 0.48,
"learning_rate": 0.00029998741273287477,
"loss": 0.5543,
"step": 170
},
{
"epoch": 0.48,
"learning_rate": 0.0002999861225578888,
"loss": 0.5491,
"step": 171
},
{
"epoch": 0.48,
"learning_rate": 0.00029998476945151183,
"loss": 0.4944,
"step": 172
},
{
"epoch": 0.49,
"learning_rate": 0.00029998335341431174,
"loss": 0.4592,
"step": 173
},
{
"epoch": 0.49,
"learning_rate": 0.0002999818744468825,
"loss": 0.4393,
"step": 174
},
{
"epoch": 0.49,
"learning_rate": 0.0002999803325498448,
"loss": 0.5342,
"step": 175
},
{
"epoch": 0.49,
"eval_loss": 0.5361905694007874,
"eval_runtime": 26.2417,
"eval_samples_per_second": 7.621,
"eval_steps_per_second": 1.905,
"step": 175
},
{
"dharma_eval_accuracy": 0.37737391616555577,
"dharma_eval_accuracy_ARC-Challenge": 0.46296296296296297,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.5740740740740741,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.3442622950819672,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.3148148148148148,
"dharma_eval_accuracy_winogrande": 0.4444444444444444,
"dharma_loss": 2.1437022438049316,
"epoch": 0.49,
"step": 175
},
{
"epoch": 0.49,
"learning_rate": 0.0002999787277238455,
"loss": 0.7472,
"step": 176
},
{
"epoch": 0.5,
"learning_rate": 0.000299977059969558,
"loss": 0.6834,
"step": 177
},
{
"epoch": 0.5,
"learning_rate": 0.00029997532928768204,
"loss": 0.5137,
"step": 178
},
{
"epoch": 0.5,
"learning_rate": 0.00029997353567894384,
"loss": 0.5725,
"step": 179
},
{
"epoch": 0.51,
"learning_rate": 0.0002999716791440959,
"loss": 0.6968,
"step": 180
},
{
"epoch": 0.51,
"learning_rate": 0.00029996975968391715,
"loss": 0.4837,
"step": 181
},
{
"epoch": 0.51,
"learning_rate": 0.000299967777299213,
"loss": 0.731,
"step": 182
},
{
"epoch": 0.51,
"learning_rate": 0.0002999657319908153,
"loss": 0.6332,
"step": 183
},
{
"epoch": 0.52,
"learning_rate": 0.0002999636237595821,
"loss": 0.642,
"step": 184
},
{
"epoch": 0.52,
"learning_rate": 0.00029996145260639806,
"loss": 0.5518,
"step": 185
},
{
"epoch": 0.52,
"learning_rate": 0.0002999592185321741,
"loss": 0.6771,
"step": 186
},
{
"epoch": 0.52,
"learning_rate": 0.0002999569215378477,
"loss": 0.6216,
"step": 187
},
{
"epoch": 0.53,
"learning_rate": 0.0002999545616243825,
"loss": 0.5675,
"step": 188
},
{
"epoch": 0.53,
"learning_rate": 0.00029995213879276876,
"loss": 0.5789,
"step": 189
},
{
"epoch": 0.53,
"learning_rate": 0.000299949653044023,
"loss": 0.4393,
"step": 190
},
{
"epoch": 0.54,
"learning_rate": 0.00029994710437918824,
"loss": 0.7194,
"step": 191
},
{
"epoch": 0.54,
"learning_rate": 0.0002999444927993338,
"loss": 0.7315,
"step": 192
},
{
"epoch": 0.54,
"learning_rate": 0.00029994181830555555,
"loss": 0.6467,
"step": 193
},
{
"epoch": 0.54,
"learning_rate": 0.00029993908089897555,
"loss": 0.5778,
"step": 194
},
{
"epoch": 0.55,
"learning_rate": 0.00029993628058074245,
"loss": 0.4764,
"step": 195
},
{
"epoch": 0.55,
"learning_rate": 0.00029993341735203114,
"loss": 0.5286,
"step": 196
},
{
"epoch": 0.55,
"learning_rate": 0.00029993049121404303,
"loss": 0.5216,
"step": 197
},
{
"epoch": 0.56,
"learning_rate": 0.0002999275021680058,
"loss": 0.6284,
"step": 198
},
{
"epoch": 0.56,
"learning_rate": 0.0002999244502151737,
"loss": 0.6015,
"step": 199
},
{
"epoch": 0.56,
"learning_rate": 0.00029992133535682725,
"loss": 0.5592,
"step": 200
},
{
"epoch": 0.56,
"eval_loss": 0.5275110006332397,
"eval_runtime": 26.2597,
"eval_samples_per_second": 7.616,
"eval_steps_per_second": 1.904,
"step": 200
},
{
"dharma_eval_accuracy": 0.4200606707167172,
"dharma_eval_accuracy_ARC-Challenge": 0.5185185185185185,
"dharma_eval_accuracy_ARC-Easy": 0.5185185185185185,
"dharma_eval_accuracy_BoolQ": 0.6851851851851852,
"dharma_eval_accuracy_MMLU": 0.35185185185185186,
"dharma_eval_accuracy_agieval": 0.288135593220339,
"dharma_eval_accuracy_bigbench": 0.3442622950819672,
"dharma_eval_accuracy_openbookqa": 0.12962962962962962,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.5555555555555556,
"dharma_loss": 2.505841832637787,
"epoch": 0.56,
"step": 200
},
{
"epoch": 0.56,
"learning_rate": 0.0002999181575942733,
"loss": 0.4536,
"step": 201
},
{
"epoch": 0.57,
"learning_rate": 0.0002999149169288452,
"loss": 0.5653,
"step": 202
},
{
"epoch": 0.57,
"learning_rate": 0.0002999116133619028,
"loss": 0.4653,
"step": 203
},
{
"epoch": 0.57,
"learning_rate": 0.0002999082468948321,
"loss": 0.4889,
"step": 204
},
{
"epoch": 0.58,
"learning_rate": 0.00029990481752904563,
"loss": 0.472,
"step": 205
},
{
"epoch": 0.58,
"learning_rate": 0.0002999013252659823,
"loss": 0.5357,
"step": 206
},
{
"epoch": 0.58,
"learning_rate": 0.0002998977701071074,
"loss": 0.6088,
"step": 207
},
{
"epoch": 0.58,
"learning_rate": 0.00029989415205391263,
"loss": 0.5453,
"step": 208
},
{
"epoch": 0.59,
"learning_rate": 0.00029989047110791595,
"loss": 0.6145,
"step": 209
},
{
"epoch": 0.59,
"learning_rate": 0.0002998867272706619,
"loss": 0.7111,
"step": 210
},
{
"epoch": 0.59,
"learning_rate": 0.0002998829205437214,
"loss": 0.5549,
"step": 211
},
{
"epoch": 0.6,
"learning_rate": 0.0002998790509286915,
"loss": 0.557,
"step": 212
},
{
"epoch": 0.6,
"learning_rate": 0.000299875118427196,
"loss": 0.6453,
"step": 213
},
{
"epoch": 0.6,
"learning_rate": 0.00029987112304088483,
"loss": 0.6133,
"step": 214
},
{
"epoch": 0.6,
"learning_rate": 0.0002998670647714343,
"loss": 0.699,
"step": 215
},
{
"epoch": 0.61,
"learning_rate": 0.0002998629436205473,
"loss": 0.5953,
"step": 216
},
{
"epoch": 0.61,
"learning_rate": 0.00029985875958995296,
"loss": 0.6149,
"step": 217
},
{
"epoch": 0.61,
"learning_rate": 0.00029985451268140683,
"loss": 0.5211,
"step": 218
},
{
"epoch": 0.61,
"learning_rate": 0.00029985020289669077,
"loss": 0.7371,
"step": 219
},
{
"epoch": 0.62,
"learning_rate": 0.00029984583023761317,
"loss": 0.6213,
"step": 220
},
{
"epoch": 0.62,
"learning_rate": 0.0002998413947060086,
"loss": 0.7568,
"step": 221
},
{
"epoch": 0.62,
"learning_rate": 0.00029983689630373825,
"loss": 0.5585,
"step": 222
},
{
"epoch": 0.63,
"learning_rate": 0.0002998323350326895,
"loss": 0.577,
"step": 223
},
{
"epoch": 0.63,
"learning_rate": 0.0002998277108947762,
"loss": 0.6245,
"step": 224
},
{
"epoch": 0.63,
"learning_rate": 0.00029982302389193856,
"loss": 0.6179,
"step": 225
},
{
"epoch": 0.63,
"eval_loss": 0.5254238247871399,
"eval_runtime": 26.3192,
"eval_samples_per_second": 7.599,
"eval_steps_per_second": 1.9,
"step": 225
},
{
"dharma_eval_accuracy": 0.3981244218501481,
"dharma_eval_accuracy_ARC-Challenge": 0.5555555555555556,
"dharma_eval_accuracy_ARC-Easy": 0.5925925925925926,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.22033898305084745,
"dharma_eval_accuracy_bigbench": 0.3442622950819672,
"dharma_eval_accuracy_openbookqa": 0.1111111111111111,
"dharma_eval_accuracy_truthful_qa": 0.24074074074074073,
"dharma_eval_accuracy_winogrande": 0.4444444444444444,
"dharma_loss": 2.3062019448280333,
"epoch": 0.63,
"step": 225
},
{
"epoch": 0.63,
"learning_rate": 0.0002998182740261432,
"loss": 0.6392,
"step": 226
},
{
"epoch": 0.64,
"learning_rate": 0.0002998134612993829,
"loss": 0.597,
"step": 227
},
{
"epoch": 0.64,
"learning_rate": 0.0002998085857136772,
"loss": 0.4129,
"step": 228
},
{
"epoch": 0.64,
"learning_rate": 0.00029980364727107166,
"loss": 0.5858,
"step": 229
},
{
"epoch": 0.65,
"learning_rate": 0.00029979864597363845,
"loss": 0.5442,
"step": 230
},
{
"epoch": 0.65,
"learning_rate": 0.0002997935818234759,
"loss": 0.5392,
"step": 231
},
{
"epoch": 0.65,
"learning_rate": 0.00029978845482270906,
"loss": 0.4328,
"step": 232
},
{
"epoch": 0.65,
"learning_rate": 0.0002997832649734889,
"loss": 0.5739,
"step": 233
},
{
"epoch": 0.66,
"learning_rate": 0.0002997780122779931,
"loss": 0.7016,
"step": 234
},
{
"epoch": 0.66,
"learning_rate": 0.0002997726967384255,
"loss": 0.577,
"step": 235
},
{
"epoch": 0.66,
"learning_rate": 0.0002997673183570165,
"loss": 0.5955,
"step": 236
},
{
"epoch": 0.67,
"learning_rate": 0.00029976187713602273,
"loss": 0.272,
"step": 237
},
{
"epoch": 0.67,
"learning_rate": 0.0002997563730777273,
"loss": 0.6432,
"step": 238
},
{
"epoch": 0.67,
"learning_rate": 0.00029975080618443946,
"loss": 0.4899,
"step": 239
},
{
"epoch": 0.67,
"learning_rate": 0.00029974517645849503,
"loss": 0.5061,
"step": 240
},
{
"epoch": 0.68,
"learning_rate": 0.0002997394839022562,
"loss": 0.5742,
"step": 241
},
{
"epoch": 0.68,
"learning_rate": 0.00029973372851811145,
"loss": 0.5601,
"step": 242
},
{
"epoch": 0.68,
"learning_rate": 0.00029972791030847553,
"loss": 0.766,
"step": 243
},
{
"epoch": 0.68,
"learning_rate": 0.0002997220292757898,
"loss": 0.6389,
"step": 244
},
{
"epoch": 0.69,
"learning_rate": 0.0002997160854225217,
"loss": 0.5442,
"step": 245
},
{
"epoch": 0.69,
"learning_rate": 0.00029971007875116527,
"loss": 0.6111,
"step": 246
},
{
"epoch": 0.69,
"learning_rate": 0.0002997040092642407,
"loss": 0.6487,
"step": 247
},
{
"epoch": 0.7,
"learning_rate": 0.0002996978769642947,
"loss": 0.5975,
"step": 248
},
{
"epoch": 0.7,
"learning_rate": 0.0002996916818539003,
"loss": 0.5919,
"step": 249
},
{
"epoch": 0.7,
"learning_rate": 0.0002996854239356567,
"loss": 0.5678,
"step": 250
},
{
"epoch": 0.7,
"eval_loss": 0.5266316533088684,
"eval_runtime": 26.2898,
"eval_samples_per_second": 7.608,
"eval_steps_per_second": 1.902,
"step": 250
},
{
"dharma_eval_accuracy": 0.36438734124819766,
"dharma_eval_accuracy_ARC-Challenge": 0.48148148148148145,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.6851851851851852,
"dharma_eval_accuracy_MMLU": 0.4074074074074074,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.2459016393442623,
"dharma_eval_accuracy_openbookqa": 0.07407407407407407,
"dharma_eval_accuracy_truthful_qa": 0.2777777777777778,
"dharma_eval_accuracy_winogrande": 0.37037037037037035,
"dharma_loss": 2.7018258204460146,
"epoch": 0.7,
"step": 250
},
{
"epoch": 0.7,
"learning_rate": 0.0002996791032121898,
"loss": 0.3632,
"step": 251
},
{
"epoch": 0.71,
"learning_rate": 0.0002996727196861515,
"loss": 0.5284,
"step": 252
},
{
"epoch": 0.71,
"learning_rate": 0.00029966627336022034,
"loss": 0.4349,
"step": 253
},
{
"epoch": 0.71,
"learning_rate": 0.000299659764237101,
"loss": 0.4471,
"step": 254
},
{
"epoch": 0.72,
"learning_rate": 0.00029965319231952455,
"loss": 0.5277,
"step": 255
},
{
"epoch": 0.72,
"learning_rate": 0.00029964655761024854,
"loss": 0.6247,
"step": 256
},
{
"epoch": 0.72,
"learning_rate": 0.00029963986011205675,
"loss": 0.6735,
"step": 257
},
{
"epoch": 0.72,
"learning_rate": 0.0002996330998277593,
"loss": 0.5861,
"step": 258
},
{
"epoch": 0.73,
"learning_rate": 0.0002996262767601926,
"loss": 0.8834,
"step": 259
},
{
"epoch": 0.73,
"learning_rate": 0.0002996193909122197,
"loss": 0.4856,
"step": 260
},
{
"epoch": 0.73,
"learning_rate": 0.00029961244228672953,
"loss": 0.5929,
"step": 261
},
{
"epoch": 0.74,
"learning_rate": 0.0002996054308866378,
"loss": 0.6344,
"step": 262
},
{
"epoch": 0.74,
"learning_rate": 0.0002995983567148862,
"loss": 0.4248,
"step": 263
},
{
"epoch": 0.74,
"learning_rate": 0.000299591219774443,
"loss": 0.6535,
"step": 264
},
{
"epoch": 0.74,
"learning_rate": 0.00029958402006830274,
"loss": 0.4722,
"step": 265
},
{
"epoch": 0.75,
"learning_rate": 0.0002995767575994863,
"loss": 0.4557,
"step": 266
},
{
"epoch": 0.75,
"learning_rate": 0.00029956943237104084,
"loss": 0.5803,
"step": 267
},
{
"epoch": 0.75,
"learning_rate": 0.0002995620443860399,
"loss": 0.6765,
"step": 268
},
{
"epoch": 0.76,
"learning_rate": 0.0002995545936475833,
"loss": 0.5386,
"step": 269
},
{
"epoch": 0.76,
"learning_rate": 0.0002995470801587973,
"loss": 0.6843,
"step": 270
},
{
"epoch": 0.76,
"learning_rate": 0.0002995395039228343,
"loss": 0.629,
"step": 271
},
{
"epoch": 0.76,
"learning_rate": 0.00029953186494287336,
"loss": 0.5211,
"step": 272
},
{
"epoch": 0.77,
"learning_rate": 0.0002995241632221195,
"loss": 0.5231,
"step": 273
},
{
"epoch": 0.77,
"learning_rate": 0.00029951639876380425,
"loss": 0.5548,
"step": 274
},
{
"epoch": 0.77,
"learning_rate": 0.0002995085715711854,
"loss": 0.5616,
"step": 275
},
{
"epoch": 0.77,
"eval_loss": 0.534767746925354,
"eval_runtime": 26.3271,
"eval_samples_per_second": 7.597,
"eval_steps_per_second": 1.899,
"step": 275
},
{
"dharma_eval_accuracy": 0.44850192726145927,
"dharma_eval_accuracy_ARC-Challenge": 0.48148148148148145,
"dharma_eval_accuracy_ARC-Easy": 0.6111111111111112,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.37037037037037035,
"dharma_eval_accuracy_agieval": 0.2711864406779661,
"dharma_eval_accuracy_bigbench": 0.5245901639344263,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.3888888888888889,
"dharma_eval_accuracy_winogrande": 0.5,
"dharma_loss": 2.7097329370975496,
"epoch": 0.77,
"step": 275
},
{
"epoch": 0.77,
"learning_rate": 0.0002995006816475471,
"loss": 0.5013,
"step": 276
},
{
"epoch": 0.78,
"learning_rate": 0.00029949272899619994,
"loss": 0.6726,
"step": 277
},
{
"epoch": 0.78,
"learning_rate": 0.0002994847136204805,
"loss": 0.5449,
"step": 278
},
{
"epoch": 0.78,
"learning_rate": 0.0002994766355237521,
"loss": 0.6026,
"step": 279
},
{
"epoch": 0.79,
"learning_rate": 0.0002994684947094039,
"loss": 0.4757,
"step": 280
},
{
"epoch": 0.79,
"learning_rate": 0.00029946029118085193,
"loss": 0.501,
"step": 281
},
{
"epoch": 0.79,
"learning_rate": 0.000299452024941538,
"loss": 0.4933,
"step": 282
},
{
"epoch": 0.79,
"learning_rate": 0.0002994436959949306,
"loss": 0.4705,
"step": 283
},
{
"epoch": 0.8,
"learning_rate": 0.0002994353043445244,
"loss": 0.8139,
"step": 284
},
{
"epoch": 0.8,
"learning_rate": 0.0002994268499938403,
"loss": 0.4696,
"step": 285
},
{
"epoch": 0.8,
"learning_rate": 0.0002994183329464256,
"loss": 0.5552,
"step": 286
},
{
"epoch": 0.81,
"learning_rate": 0.00029940975320585396,
"loss": 0.5407,
"step": 287
},
{
"epoch": 0.81,
"learning_rate": 0.00029940111077572526,
"loss": 0.592,
"step": 288
},
{
"epoch": 0.81,
"learning_rate": 0.00029939240565966574,
"loss": 0.5984,
"step": 289
},
{
"epoch": 0.81,
"learning_rate": 0.00029938363786132774,
"loss": 0.613,
"step": 290
},
{
"epoch": 0.82,
"learning_rate": 0.00029937480738439023,
"loss": 0.4807,
"step": 291
},
{
"epoch": 0.82,
"learning_rate": 0.00029936591423255826,
"loss": 0.692,
"step": 292
},
{
"epoch": 0.82,
"learning_rate": 0.00029935695840956327,
"loss": 0.5535,
"step": 293
},
{
"epoch": 0.83,
"learning_rate": 0.00029934793991916295,
"loss": 0.6597,
"step": 294
},
{
"epoch": 0.83,
"learning_rate": 0.00029933885876514115,
"loss": 0.5728,
"step": 295
},
{
"epoch": 0.83,
"learning_rate": 0.0002993297149513083,
"loss": 0.5043,
"step": 296
},
{
"epoch": 0.83,
"learning_rate": 0.00029932050848150105,
"loss": 0.5827,
"step": 297
},
{
"epoch": 0.84,
"learning_rate": 0.0002993112393595821,
"loss": 0.4782,
"step": 298
},
{
"epoch": 0.84,
"learning_rate": 0.0002993019075894406,
"loss": 0.4526,
"step": 299
},
{
"epoch": 0.84,
"learning_rate": 0.0002992925131749921,
"loss": 0.5883,
"step": 300
},
{
"epoch": 0.84,
"eval_loss": 0.5223216414451599,
"eval_runtime": 26.2814,
"eval_samples_per_second": 7.61,
"eval_steps_per_second": 1.902,
"step": 300
},
{
"dharma_eval_accuracy": 0.43339599362877435,
"dharma_eval_accuracy_ARC-Challenge": 0.46296296296296297,
"dharma_eval_accuracy_ARC-Easy": 0.5370370370370371,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.3888888888888889,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.4426229508196721,
"dharma_eval_accuracy_openbookqa": 0.14814814814814814,
"dharma_eval_accuracy_truthful_qa": 0.42592592592592593,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.758728890419006,
"epoch": 0.84,
"step": 300
},
{
"epoch": 0.84,
"learning_rate": 0.00029928305612017823,
"loss": 0.4232,
"step": 301
},
{
"epoch": 0.85,
"learning_rate": 0.0002992735364289671,
"loss": 0.4762,
"step": 302
},
{
"epoch": 0.85,
"learning_rate": 0.0002992639541053528,
"loss": 0.4538,
"step": 303
},
{
"epoch": 0.85,
"learning_rate": 0.0002992543091533561,
"loss": 0.6404,
"step": 304
},
{
"epoch": 0.86,
"learning_rate": 0.00029924460157702376,
"loss": 0.6473,
"step": 305
},
{
"epoch": 0.86,
"learning_rate": 0.0002992348313804289,
"loss": 0.57,
"step": 306
},
{
"epoch": 0.86,
"learning_rate": 0.00029922499856767094,
"loss": 0.4909,
"step": 307
},
{
"epoch": 0.86,
"learning_rate": 0.00029921510314287545,
"loss": 0.48,
"step": 308
},
{
"epoch": 0.87,
"learning_rate": 0.00029920514511019456,
"loss": 0.3962,
"step": 309
},
{
"epoch": 0.87,
"learning_rate": 0.00029919512447380625,
"loss": 0.7047,
"step": 310
},
{
"epoch": 0.87,
"learning_rate": 0.0002991850412379151,
"loss": 0.6004,
"step": 311
},
{
"epoch": 0.88,
"learning_rate": 0.0002991748954067519,
"loss": 0.5994,
"step": 312
},
{
"epoch": 0.88,
"learning_rate": 0.0002991646869845736,
"loss": 0.7561,
"step": 313
},
{
"epoch": 0.88,
"learning_rate": 0.0002991544159756634,
"loss": 0.4333,
"step": 314
},
{
"epoch": 0.88,
"learning_rate": 0.0002991440823843309,
"loss": 0.4611,
"step": 315
},
{
"epoch": 0.89,
"learning_rate": 0.0002991336862149119,
"loss": 0.5448,
"step": 316
},
{
"epoch": 0.89,
"learning_rate": 0.00029912322747176835,
"loss": 0.6594,
"step": 317
},
{
"epoch": 0.89,
"learning_rate": 0.0002991127061592887,
"loss": 0.7183,
"step": 318
},
{
"epoch": 0.9,
"learning_rate": 0.00029910212228188734,
"loss": 0.7627,
"step": 319
},
{
"epoch": 0.9,
"learning_rate": 0.0002990914758440051,
"loss": 0.9302,
"step": 320
},
{
"epoch": 0.9,
"learning_rate": 0.00029908076685010915,
"loss": 0.6771,
"step": 321
},
{
"epoch": 0.9,
"learning_rate": 0.0002990699953046926,
"loss": 0.6205,
"step": 322
},
{
"epoch": 0.91,
"learning_rate": 0.00029905916121227515,
"loss": 0.4057,
"step": 323
},
{
"epoch": 0.91,
"learning_rate": 0.00029904826457740247,
"loss": 0.3115,
"step": 324
},
{
"epoch": 0.91,
"learning_rate": 0.00029903730540464666,
"loss": 0.5224,
"step": 325
},
{
"epoch": 0.91,
"eval_loss": 0.520789623260498,
"eval_runtime": 26.3248,
"eval_samples_per_second": 7.597,
"eval_steps_per_second": 1.899,
"step": 325
},
{
"dharma_eval_accuracy": 0.4488672550788571,
"dharma_eval_accuracy_ARC-Challenge": 0.5555555555555556,
"dharma_eval_accuracy_ARC-Easy": 0.5370370370370371,
"dharma_eval_accuracy_BoolQ": 0.7222222222222222,
"dharma_eval_accuracy_MMLU": 0.4444444444444444,
"dharma_eval_accuracy_agieval": 0.288135593220339,
"dharma_eval_accuracy_bigbench": 0.3442622950819672,
"dharma_eval_accuracy_openbookqa": 0.12962962962962962,
"dharma_eval_accuracy_truthful_qa": 0.4444444444444444,
"dharma_eval_accuracy_winogrande": 0.5740740740740741,
"dharma_loss": 2.532070637226105,
"epoch": 0.91,
"step": 325
},
{
"epoch": 0.92,
"learning_rate": 0.0002990262836986059,
"loss": 0.345,
"step": 326
},
{
"epoch": 0.92,
"learning_rate": 0.0002990151994639048,
"loss": 0.5683,
"step": 327
},
{
"epoch": 0.92,
"learning_rate": 0.000299004052705194,
"loss": 0.4728,
"step": 328
},
{
"epoch": 0.92,
"learning_rate": 0.00029899284342715054,
"loss": 0.6099,
"step": 329
},
{
"epoch": 0.93,
"learning_rate": 0.00029898157163447763,
"loss": 0.5388,
"step": 330
},
{
"epoch": 0.93,
"learning_rate": 0.0002989702373319047,
"loss": 0.5225,
"step": 331
},
{
"epoch": 0.93,
"learning_rate": 0.00029895884052418735,
"loss": 0.4954,
"step": 332
},
{
"epoch": 0.93,
"learning_rate": 0.00029894738121610755,
"loss": 0.5619,
"step": 333
},
{
"epoch": 0.94,
"learning_rate": 0.0002989358594124733,
"loss": 0.5996,
"step": 334
},
{
"epoch": 0.94,
"learning_rate": 0.0002989242751181191,
"loss": 0.563,
"step": 335
},
{
"epoch": 0.94,
"learning_rate": 0.0002989126283379054,
"loss": 0.4444,
"step": 336
},
{
"epoch": 0.95,
"learning_rate": 0.000298900919076719,
"loss": 0.4801,
"step": 337
},
{
"epoch": 0.95,
"learning_rate": 0.00029888914733947275,
"loss": 0.538,
"step": 338
},
{
"epoch": 0.95,
"learning_rate": 0.00029887731313110613,
"loss": 0.5645,
"step": 339
},
{
"epoch": 0.95,
"learning_rate": 0.0002988654164565843,
"loss": 0.6028,
"step": 340
},
{
"epoch": 0.96,
"learning_rate": 0.00029885345732089905,
"loss": 0.5545,
"step": 341
},
{
"epoch": 0.96,
"learning_rate": 0.0002988414357290681,
"loss": 0.6561,
"step": 342
},
{
"epoch": 0.96,
"learning_rate": 0.0002988293516861356,
"loss": 0.5173,
"step": 343
},
{
"epoch": 0.97,
"learning_rate": 0.0002988172051971717,
"loss": 0.4821,
"step": 344
},
{
"epoch": 0.97,
"learning_rate": 0.0002988049962672728,
"loss": 0.6129,
"step": 345
},
{
"epoch": 0.97,
"learning_rate": 0.0002987927249015616,
"loss": 0.6202,
"step": 346
},
{
"epoch": 0.97,
"learning_rate": 0.00029878039110518704,
"loss": 0.6002,
"step": 347
},
{
"epoch": 0.98,
"learning_rate": 0.000298767994883324,
"loss": 0.5126,
"step": 348
},
{
"epoch": 0.98,
"learning_rate": 0.00029875553624117375,
"loss": 0.4087,
"step": 349
},
{
"epoch": 0.98,
"learning_rate": 0.00029874301518396376,
"loss": 0.6079,
"step": 350
},
{
"epoch": 0.98,
"eval_loss": 0.517877459526062,
"eval_runtime": 26.324,
"eval_samples_per_second": 7.598,
"eval_steps_per_second": 1.899,
"step": 350
},
{
"dharma_eval_accuracy": 0.3889552081796841,
"dharma_eval_accuracy_ARC-Challenge": 0.4074074074074074,
"dharma_eval_accuracy_ARC-Easy": 0.5,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.3333333333333333,
"dharma_eval_accuracy_agieval": 0.2033898305084746,
"dharma_eval_accuracy_bigbench": 0.2786885245901639,
"dharma_eval_accuracy_openbookqa": 0.16666666666666666,
"dharma_eval_accuracy_truthful_qa": 0.42592592592592593,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 3.0203271565437317,
"epoch": 0.98,
"step": 350
},
{
"epoch": 0.99,
"learning_rate": 0.00029873043171694755,
"loss": 0.5359,
"step": 351
},
{
"epoch": 0.99,
"learning_rate": 0.0002987177858454049,
"loss": 0.38,
"step": 352
},
{
"epoch": 0.99,
"learning_rate": 0.00029870507757464193,
"loss": 0.5231,
"step": 353
},
{
"epoch": 0.99,
"learning_rate": 0.0002986923069099906,
"loss": 0.4214,
"step": 354
},
{
"epoch": 1.0,
"learning_rate": 0.00029867947385680936,
"loss": 0.4848,
"step": 355
},
{
"epoch": 1.0,
"learning_rate": 0.00029866657842048274,
"loss": 0.4576,
"step": 356
},
{
"epoch": 1.0,
"learning_rate": 0.00029865362060642136,
"loss": 0.646,
"step": 357
},
{
"epoch": 1.0,
"learning_rate": 0.0002986406004200621,
"loss": 0.5057,
"step": 358
},
{
"epoch": 1.01,
"learning_rate": 0.00029862751786686797,
"loss": 0.4205,
"step": 359
},
{
"epoch": 1.01,
"learning_rate": 0.0002986143729523282,
"loss": 0.6047,
"step": 360
},
{
"epoch": 1.01,
"learning_rate": 0.0002986011656819582,
"loss": 0.6834,
"step": 361
},
{
"epoch": 1.02,
"learning_rate": 0.0002985878960612993,
"loss": 0.6237,
"step": 362
},
{
"epoch": 1.02,
"learning_rate": 0.0002985745640959194,
"loss": 0.6008,
"step": 363
},
{
"epoch": 1.02,
"learning_rate": 0.00029856116979141224,
"loss": 0.3818,
"step": 364
},
{
"epoch": 1.02,
"learning_rate": 0.00029854771315339785,
"loss": 0.4934,
"step": 365
},
{
"epoch": 1.03,
"learning_rate": 0.0002985341941875224,
"loss": 0.5155,
"step": 366
},
{
"epoch": 1.03,
"learning_rate": 0.0002985206128994581,
"loss": 0.459,
"step": 367
},
{
"epoch": 1.03,
"learning_rate": 0.0002985069692949036,
"loss": 0.5994,
"step": 368
},
{
"epoch": 1.04,
"learning_rate": 0.0002984932633795833,
"loss": 0.428,
"step": 369
},
{
"epoch": 1.04,
"learning_rate": 0.00029847949515924806,
"loss": 0.4198,
"step": 370
},
{
"epoch": 1.04,
"learning_rate": 0.00029846566463967477,
"loss": 0.4873,
"step": 371
},
{
"epoch": 1.04,
"learning_rate": 0.0002984517718266664,
"loss": 0.4244,
"step": 372
},
{
"epoch": 1.05,
"learning_rate": 0.00029843781672605216,
"loss": 0.3869,
"step": 373
},
{
"epoch": 1.05,
"learning_rate": 0.00029842379934368735,
"loss": 0.5182,
"step": 374
},
{
"epoch": 1.05,
"learning_rate": 0.0002984097196854534,
"loss": 0.5025,
"step": 375
},
{
"epoch": 1.05,
"eval_loss": 0.5205254554748535,
"eval_runtime": 26.3484,
"eval_samples_per_second": 7.591,
"eval_steps_per_second": 1.898,
"step": 375
},
{
"dharma_eval_accuracy": 0.4285106631128675,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.2777777777777778,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.35185185185185186,
"dharma_eval_accuracy_truthful_qa": 0.7962962962962963,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.4590850338935852,
"epoch": 1.05,
"step": 375
},
{
"epoch": 1.06,
"learning_rate": 0.0002983955777572578,
"loss": 0.6176,
"step": 376
},
{
"epoch": 1.06,
"learning_rate": 0.0002983813735650344,
"loss": 0.5538,
"step": 377
},
{
"epoch": 1.06,
"learning_rate": 0.00029836710711474287,
"loss": 0.3406,
"step": 378
},
{
"epoch": 1.06,
"learning_rate": 0.0002983527784123692,
"loss": 0.6863,
"step": 379
},
{
"epoch": 1.07,
"learning_rate": 0.0002983383874639254,
"loss": 0.5004,
"step": 380
},
{
"epoch": 1.07,
"learning_rate": 0.0002983239342754498,
"loss": 0.4399,
"step": 381
},
{
"epoch": 1.07,
"learning_rate": 0.0002983094188530065,
"loss": 0.6056,
"step": 382
},
{
"epoch": 1.08,
"learning_rate": 0.000298294841202686,
"loss": 0.3585,
"step": 383
},
{
"epoch": 1.08,
"learning_rate": 0.0002982802013306048,
"loss": 0.3589,
"step": 384
},
{
"epoch": 1.08,
"learning_rate": 0.00029826549924290557,
"loss": 0.4443,
"step": 385
},
{
"epoch": 1.08,
"learning_rate": 0.0002982507349457569,
"loss": 0.4661,
"step": 386
},
{
"epoch": 1.09,
"learning_rate": 0.00029823590844535366,
"loss": 0.5678,
"step": 387
},
{
"epoch": 1.09,
"learning_rate": 0.0002982210197479169,
"loss": 0.3639,
"step": 388
},
{
"epoch": 1.09,
"learning_rate": 0.00029820606885969347,
"loss": 0.3408,
"step": 389
},
{
"epoch": 1.09,
"learning_rate": 0.00029819105578695655,
"loss": 0.487,
"step": 390
},
{
"epoch": 1.1,
"learning_rate": 0.0002981759805360054,
"loss": 0.4763,
"step": 391
},
{
"epoch": 1.1,
"learning_rate": 0.0002981608431131653,
"loss": 0.4435,
"step": 392
},
{
"epoch": 1.1,
"learning_rate": 0.00029814564352478753,
"loss": 0.5277,
"step": 393
},
{
"epoch": 1.11,
"learning_rate": 0.00029813038177724965,
"loss": 0.5039,
"step": 394
},
{
"epoch": 1.11,
"learning_rate": 0.00029811505787695524,
"loss": 0.3175,
"step": 395
},
{
"epoch": 1.11,
"learning_rate": 0.0002980996718303338,
"loss": 0.5833,
"step": 396
},
{
"epoch": 1.11,
"learning_rate": 0.00029808422364384113,
"loss": 0.5385,
"step": 397
},
{
"epoch": 1.12,
"learning_rate": 0.00029806871332395895,
"loss": 0.452,
"step": 398
},
{
"epoch": 1.12,
"learning_rate": 0.0002980531408771951,
"loss": 0.5055,
"step": 399
},
{
"epoch": 1.12,
"learning_rate": 0.00029803750631008356,
"loss": 0.5219,
"step": 400
},
{
"epoch": 1.12,
"eval_loss": 0.5226805210113525,
"eval_runtime": 26.3767,
"eval_samples_per_second": 7.582,
"eval_steps_per_second": 1.896,
"step": 400
},
{
"dharma_eval_accuracy": 0.37912794706348474,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.2777777777777778,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.16666666666666666,
"dharma_eval_accuracy_truthful_qa": 0.5370370370370371,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.5074869017601014,
"epoch": 1.12,
"step": 400
},
{
"epoch": 1.13,
"learning_rate": 0.00029802180962918426,
"loss": 0.3184,
"step": 401
},
{
"epoch": 1.13,
"learning_rate": 0.00029800605084108315,
"loss": 0.583,
"step": 402
},
{
"epoch": 1.13,
"learning_rate": 0.0002979902299523925,
"loss": 0.6319,
"step": 403
},
{
"epoch": 1.13,
"learning_rate": 0.00029797434696975035,
"loss": 0.4879,
"step": 404
},
{
"epoch": 1.14,
"learning_rate": 0.0002979584018998209,
"loss": 0.4589,
"step": 405
},
{
"epoch": 1.14,
"learning_rate": 0.0002979423947492944,
"loss": 0.4799,
"step": 406
},
{
"epoch": 1.14,
"learning_rate": 0.0002979263255248872,
"loss": 0.4208,
"step": 407
},
{
"epoch": 1.15,
"learning_rate": 0.0002979101942333416,
"loss": 0.4944,
"step": 408
},
{
"epoch": 1.15,
"learning_rate": 0.00029789400088142605,
"loss": 0.4458,
"step": 409
},
{
"epoch": 1.15,
"learning_rate": 0.000297877745475935,
"loss": 0.7163,
"step": 410
},
{
"epoch": 1.15,
"learning_rate": 0.00029786142802368877,
"loss": 0.388,
"step": 411
},
{
"epoch": 1.16,
"learning_rate": 0.00029784504853153397,
"loss": 0.4361,
"step": 412
},
{
"epoch": 1.16,
"learning_rate": 0.0002978286070063431,
"loss": 0.4336,
"step": 413
},
{
"epoch": 1.16,
"learning_rate": 0.0002978121034550148,
"loss": 0.4103,
"step": 414
},
{
"epoch": 1.16,
"learning_rate": 0.00029779553788447357,
"loss": 0.4731,
"step": 415
},
{
"epoch": 1.17,
"learning_rate": 0.00029777891030167,
"loss": 0.5297,
"step": 416
},
{
"epoch": 1.17,
"learning_rate": 0.00029776222071358074,
"loss": 0.6597,
"step": 417
},
{
"epoch": 1.17,
"learning_rate": 0.0002977454691272084,
"loss": 0.4055,
"step": 418
},
{
"epoch": 1.18,
"learning_rate": 0.0002977286555495818,
"loss": 0.4292,
"step": 419
},
{
"epoch": 1.18,
"learning_rate": 0.00029771177998775536,
"loss": 0.4474,
"step": 420
},
{
"epoch": 1.18,
"learning_rate": 0.0002976948424488099,
"loss": 0.5432,
"step": 421
},
{
"epoch": 1.18,
"learning_rate": 0.0002976778429398521,
"loss": 0.5991,
"step": 422
},
{
"epoch": 1.19,
"learning_rate": 0.00029766078146801453,
"loss": 0.3645,
"step": 423
},
{
"epoch": 1.19,
"learning_rate": 0.000297643658040456,
"loss": 0.6296,
"step": 424
},
{
"epoch": 1.19,
"learning_rate": 0.0002976264726643611,
"loss": 0.3538,
"step": 425
},
{
"epoch": 1.19,
"eval_loss": 0.5256252288818359,
"eval_runtime": 26.3606,
"eval_samples_per_second": 7.587,
"eval_steps_per_second": 1.897,
"step": 425
},
{
"dharma_eval_accuracy": 0.3914736260758304,
"dharma_eval_accuracy_ARC-Challenge": 0.25925925925925924,
"dharma_eval_accuracy_ARC-Easy": 0.3333333333333333,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2222222222222222,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.2037037037037037,
"dharma_eval_accuracy_truthful_qa": 0.5370370370370371,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.387819214820862,
"epoch": 1.19,
"step": 425
},
{
"epoch": 1.2,
"learning_rate": 0.00029760922534694055,
"loss": 0.5401,
"step": 426
},
{
"epoch": 1.2,
"learning_rate": 0.00029759191609543095,
"loss": 0.511,
"step": 427
},
{
"epoch": 1.2,
"learning_rate": 0.0002975745449170949,
"loss": 0.3225,
"step": 428
},
{
"epoch": 1.2,
"learning_rate": 0.0002975571118192212,
"loss": 0.6389,
"step": 429
},
{
"epoch": 1.21,
"learning_rate": 0.00029753961680912427,
"loss": 0.8466,
"step": 430
},
{
"epoch": 1.21,
"learning_rate": 0.00029752205989414475,
"loss": 0.5068,
"step": 431
},
{
"epoch": 1.21,
"learning_rate": 0.0002975044410816492,
"loss": 0.6129,
"step": 432
},
{
"epoch": 1.22,
"learning_rate": 0.0002974867603790302,
"loss": 0.5928,
"step": 433
},
{
"epoch": 1.22,
"learning_rate": 0.0002974690177937062,
"loss": 0.5895,
"step": 434
},
{
"epoch": 1.22,
"learning_rate": 0.00029745121333312163,
"loss": 0.4582,
"step": 435
},
{
"epoch": 1.22,
"learning_rate": 0.00029743334700474693,
"loss": 0.4909,
"step": 436
},
{
"epoch": 1.23,
"learning_rate": 0.00029741541881607854,
"loss": 0.5243,
"step": 437
},
{
"epoch": 1.23,
"learning_rate": 0.00029739742877463865,
"loss": 0.4624,
"step": 438
},
{
"epoch": 1.23,
"learning_rate": 0.0002973793768879757,
"loss": 0.4072,
"step": 439
},
{
"epoch": 1.24,
"learning_rate": 0.0002973612631636638,
"loss": 0.5176,
"step": 440
},
{
"epoch": 1.24,
"learning_rate": 0.0002973430876093033,
"loss": 0.5494,
"step": 441
},
{
"epoch": 1.24,
"learning_rate": 0.00029732485023252015,
"loss": 0.7099,
"step": 442
},
{
"epoch": 1.24,
"learning_rate": 0.0002973065510409665,
"loss": 0.4408,
"step": 443
},
{
"epoch": 1.25,
"learning_rate": 0.00029728819004232036,
"loss": 0.5634,
"step": 444
},
{
"epoch": 1.25,
"learning_rate": 0.0002972697672442856,
"loss": 0.4925,
"step": 445
},
{
"epoch": 1.25,
"learning_rate": 0.0002972512826545922,
"loss": 0.5279,
"step": 446
},
{
"epoch": 1.25,
"learning_rate": 0.0002972327362809958,
"loss": 0.4325,
"step": 447
},
{
"epoch": 1.26,
"learning_rate": 0.0002972141281312782,
"loss": 0.3719,
"step": 448
},
{
"epoch": 1.26,
"learning_rate": 0.000297195458213247,
"loss": 0.4714,
"step": 449
},
{
"epoch": 1.26,
"learning_rate": 0.0002971767265347358,
"loss": 0.5246,
"step": 450
},
{
"epoch": 1.26,
"eval_loss": 0.5236319899559021,
"eval_runtime": 26.3909,
"eval_samples_per_second": 7.578,
"eval_steps_per_second": 1.895,
"step": 450
},
{
"dharma_eval_accuracy": 0.3974720915846538,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.35185185185185186,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2962962962962963,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.24074074074074073,
"dharma_eval_accuracy_truthful_qa": 0.5555555555555556,
"dharma_eval_accuracy_winogrande": 0.42592592592592593,
"dharma_loss": 2.5349762744903566,
"epoch": 1.26,
"step": 450
},
{
"epoch": 1.27,
"learning_rate": 0.0002971579331036041,
"loss": 0.5459,
"step": 451
},
{
"epoch": 1.27,
"learning_rate": 0.00029713907792773716,
"loss": 0.6227,
"step": 452
},
{
"epoch": 1.27,
"learning_rate": 0.0002971201610150463,
"loss": 0.2325,
"step": 453
},
{
"epoch": 1.27,
"learning_rate": 0.0002971011823734688,
"loss": 0.4858,
"step": 454
},
{
"epoch": 1.28,
"learning_rate": 0.00029708214201096755,
"loss": 0.4082,
"step": 455
},
{
"epoch": 1.28,
"learning_rate": 0.0002970630399355317,
"loss": 0.5423,
"step": 456
},
{
"epoch": 1.28,
"learning_rate": 0.00029704387615517606,
"loss": 0.3744,
"step": 457
},
{
"epoch": 1.29,
"learning_rate": 0.00029702465067794144,
"loss": 0.4369,
"step": 458
},
{
"epoch": 1.29,
"learning_rate": 0.00029700536351189445,
"loss": 0.5204,
"step": 459
},
{
"epoch": 1.29,
"learning_rate": 0.0002969860146651276,
"loss": 0.6042,
"step": 460
},
{
"epoch": 1.29,
"learning_rate": 0.0002969666041457594,
"loss": 0.6086,
"step": 461
},
{
"epoch": 1.3,
"learning_rate": 0.00029694713196193404,
"loss": 0.5142,
"step": 462
},
{
"epoch": 1.3,
"learning_rate": 0.0002969275981218218,
"loss": 0.3872,
"step": 463
},
{
"epoch": 1.3,
"learning_rate": 0.0002969080026336186,
"loss": 0.5711,
"step": 464
},
{
"epoch": 1.31,
"learning_rate": 0.00029688834550554646,
"loss": 0.439,
"step": 465
},
{
"epoch": 1.31,
"learning_rate": 0.00029686862674585307,
"loss": 0.3885,
"step": 466
},
{
"epoch": 1.31,
"learning_rate": 0.00029684884636281203,
"loss": 0.677,
"step": 467
},
{
"epoch": 1.31,
"learning_rate": 0.00029682900436472286,
"loss": 0.4995,
"step": 468
},
{
"epoch": 1.32,
"learning_rate": 0.00029680910075991087,
"loss": 0.4725,
"step": 469
},
{
"epoch": 1.32,
"learning_rate": 0.0002967891355567273,
"loss": 0.5783,
"step": 470
},
{
"epoch": 1.32,
"learning_rate": 0.0002967691087635491,
"loss": 0.4683,
"step": 471
},
{
"epoch": 1.32,
"learning_rate": 0.0002967490203887793,
"loss": 0.5214,
"step": 472
},
{
"epoch": 1.33,
"learning_rate": 0.00029672887044084636,
"loss": 0.4094,
"step": 473
},
{
"epoch": 1.33,
"learning_rate": 0.000296708658928205,
"loss": 0.3472,
"step": 474
},
{
"epoch": 1.33,
"learning_rate": 0.00029668838585933556,
"loss": 0.4722,
"step": 475
},
{
"epoch": 1.33,
"eval_loss": 0.5200064182281494,
"eval_runtime": 26.399,
"eval_samples_per_second": 7.576,
"eval_steps_per_second": 1.894,
"step": 475
},
{
"dharma_eval_accuracy": 0.4099921445943489,
"dharma_eval_accuracy_ARC-Challenge": 0.25925925925925924,
"dharma_eval_accuracy_ARC-Easy": 0.37037037037037035,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.24074074074074073,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.2777777777777778,
"dharma_eval_accuracy_truthful_qa": 0.6111111111111112,
"dharma_eval_accuracy_winogrande": 0.48148148148148145,
"dharma_loss": 2.660856533527374,
"epoch": 1.33,
"step": 475
},
{
"epoch": 1.34,
"learning_rate": 0.00029666805124274425,
"loss": 0.3981,
"step": 476
},
{
"epoch": 1.34,
"learning_rate": 0.00029664765508696306,
"loss": 0.4488,
"step": 477
},
{
"epoch": 1.34,
"learning_rate": 0.0002966271974005499,
"loss": 0.5023,
"step": 478
},
{
"epoch": 1.34,
"learning_rate": 0.00029660667819208836,
"loss": 0.5216,
"step": 479
},
{
"epoch": 1.35,
"learning_rate": 0.0002965860974701879,
"loss": 0.58,
"step": 480
},
{
"epoch": 1.35,
"learning_rate": 0.00029656545524348396,
"loss": 0.5255,
"step": 481
},
{
"epoch": 1.35,
"learning_rate": 0.0002965447515206375,
"loss": 0.2772,
"step": 482
},
{
"epoch": 1.36,
"learning_rate": 0.00029652398631033547,
"loss": 0.3947,
"step": 483
},
{
"epoch": 1.36,
"learning_rate": 0.0002965031596212905,
"loss": 0.5062,
"step": 484
},
{
"epoch": 1.36,
"learning_rate": 0.0002964822714622412,
"loss": 0.6169,
"step": 485
},
{
"epoch": 1.36,
"learning_rate": 0.0002964613218419517,
"loss": 0.3895,
"step": 486
},
{
"epoch": 1.37,
"learning_rate": 0.0002964403107692122,
"loss": 0.4819,
"step": 487
},
{
"epoch": 1.37,
"learning_rate": 0.00029641923825283854,
"loss": 0.5174,
"step": 488
},
{
"epoch": 1.37,
"learning_rate": 0.0002963981043016723,
"loss": 0.3454,
"step": 489
},
{
"epoch": 1.38,
"learning_rate": 0.000296376908924581,
"loss": 0.5092,
"step": 490
},
{
"epoch": 1.38,
"learning_rate": 0.0002963556521304577,
"loss": 0.4422,
"step": 491
},
{
"epoch": 1.38,
"learning_rate": 0.0002963343339282214,
"loss": 0.522,
"step": 492
},
{
"epoch": 1.38,
"learning_rate": 0.0002963129543268168,
"loss": 0.3454,
"step": 493
},
{
"epoch": 1.39,
"learning_rate": 0.00029629151333521446,
"loss": 0.4304,
"step": 494
},
{
"epoch": 1.39,
"learning_rate": 0.00029627001096241057,
"loss": 0.339,
"step": 495
},
{
"epoch": 1.39,
"learning_rate": 0.0002962484472174271,
"loss": 0.4912,
"step": 496
},
{
"epoch": 1.4,
"learning_rate": 0.0002962268221093118,
"loss": 0.4258,
"step": 497
},
{
"epoch": 1.4,
"learning_rate": 0.0002962051356471383,
"loss": 0.4818,
"step": 498
},
{
"epoch": 1.4,
"learning_rate": 0.0002961833878400056,
"loss": 0.6166,
"step": 499
},
{
"epoch": 1.4,
"learning_rate": 0.0002961615786970389,
"loss": 0.6639,
"step": 500
},
{
"epoch": 1.4,
"eval_loss": 0.5184725522994995,
"eval_runtime": 26.336,
"eval_samples_per_second": 7.594,
"eval_steps_per_second": 1.899,
"step": 500
},
{
"dharma_eval_accuracy": 0.40587691825690037,
"dharma_eval_accuracy_ARC-Challenge": 0.2777777777777778,
"dharma_eval_accuracy_ARC-Easy": 0.37037037037037035,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.25925925925925924,
"dharma_eval_accuracy_truthful_qa": 0.5925925925925926,
"dharma_eval_accuracy_winogrande": 0.5,
"dharma_loss": 2.3869275097846985,
"epoch": 1.4,
"step": 500
},
{
"epoch": 1.41,
"learning_rate": 0.00029613970822738874,
"loss": 0.3681,
"step": 501
},
{
"epoch": 1.41,
"learning_rate": 0.0002961177764402317,
"loss": 0.6636,
"step": 502
},
{
"epoch": 1.41,
"learning_rate": 0.00029609578334476987,
"loss": 0.4073,
"step": 503
},
{
"epoch": 1.41,
"learning_rate": 0.0002960737289502311,
"loss": 0.3434,
"step": 504
},
{
"epoch": 1.42,
"learning_rate": 0.00029605161326586916,
"loss": 0.7326,
"step": 505
},
{
"epoch": 1.42,
"learning_rate": 0.00029602943630096325,
"loss": 0.5293,
"step": 506
},
{
"epoch": 1.42,
"learning_rate": 0.00029600719806481844,
"loss": 0.5706,
"step": 507
},
{
"epoch": 1.43,
"learning_rate": 0.0002959848985667655,
"loss": 0.631,
"step": 508
},
{
"epoch": 1.43,
"learning_rate": 0.00029596253781616084,
"loss": 0.596,
"step": 509
},
{
"epoch": 1.43,
"learning_rate": 0.0002959401158223867,
"loss": 0.6091,
"step": 510
},
{
"epoch": 1.43,
"learning_rate": 0.00029591763259485083,
"loss": 0.7195,
"step": 511
},
{
"epoch": 1.44,
"learning_rate": 0.0002958950881429869,
"loss": 0.3447,
"step": 512
},
{
"epoch": 1.44,
"learning_rate": 0.000295872482476254,
"loss": 0.5116,
"step": 513
},
{
"epoch": 1.44,
"learning_rate": 0.00029584981560413717,
"loss": 0.4223,
"step": 514
},
{
"epoch": 1.45,
"learning_rate": 0.0002958270875361469,
"loss": 0.5816,
"step": 515
},
{
"epoch": 1.45,
"learning_rate": 0.0002958042982818196,
"loss": 0.4418,
"step": 516
},
{
"epoch": 1.45,
"learning_rate": 0.0002957814478507171,
"loss": 0.2934,
"step": 517
},
{
"epoch": 1.45,
"learning_rate": 0.00029575853625242704,
"loss": 0.4441,
"step": 518
},
{
"epoch": 1.46,
"learning_rate": 0.00029573556349656277,
"loss": 0.4365,
"step": 519
},
{
"epoch": 1.46,
"learning_rate": 0.0002957125295927631,
"loss": 0.4386,
"step": 520
},
{
"epoch": 1.46,
"learning_rate": 0.00029568943455069276,
"loss": 0.4075,
"step": 521
},
{
"epoch": 1.47,
"learning_rate": 0.00029566627838004193,
"loss": 0.514,
"step": 522
},
{
"epoch": 1.47,
"learning_rate": 0.0002956430610905265,
"loss": 0.4582,
"step": 523
},
{
"epoch": 1.47,
"learning_rate": 0.00029561978269188814,
"loss": 0.6063,
"step": 524
},
{
"epoch": 1.47,
"learning_rate": 0.0002955964431938939,
"loss": 0.4946,
"step": 525
},
{
"epoch": 1.47,
"eval_loss": 0.5167276263237,
"eval_runtime": 26.3841,
"eval_samples_per_second": 7.58,
"eval_steps_per_second": 1.895,
"step": 525
},
{
"dharma_eval_accuracy": 0.4205163299819223,
"dharma_eval_accuracy_ARC-Challenge": 0.3148148148148148,
"dharma_eval_accuracy_ARC-Easy": 0.4074074074074074,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2962962962962963,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.4918032786885246,
"dharma_eval_accuracy_openbookqa": 0.25925925925925924,
"dharma_eval_accuracy_truthful_qa": 0.5555555555555556,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.4176649556159973,
"epoch": 1.47,
"step": 525
},
{
"epoch": 1.48,
"learning_rate": 0.0002955730426063365,
"loss": 0.3617,
"step": 526
},
{
"epoch": 1.48,
"learning_rate": 0.00029554958093903466,
"loss": 0.3467,
"step": 527
},
{
"epoch": 1.48,
"learning_rate": 0.00029552605820183235,
"loss": 0.3097,
"step": 528
},
{
"epoch": 1.48,
"learning_rate": 0.00029550247440459916,
"loss": 0.5096,
"step": 529
},
{
"epoch": 1.49,
"learning_rate": 0.0002954788295572305,
"loss": 0.3795,
"step": 530
},
{
"epoch": 1.49,
"learning_rate": 0.00029545512366964735,
"loss": 0.5871,
"step": 531
},
{
"epoch": 1.49,
"learning_rate": 0.00029543135675179626,
"loss": 0.57,
"step": 532
},
{
"epoch": 1.5,
"learning_rate": 0.00029540752881364925,
"loss": 0.5311,
"step": 533
},
{
"epoch": 1.5,
"learning_rate": 0.00029538363986520425,
"loss": 0.6116,
"step": 534
},
{
"epoch": 1.5,
"learning_rate": 0.00029535968991648454,
"loss": 0.4658,
"step": 535
},
{
"epoch": 1.5,
"learning_rate": 0.00029533567897753905,
"loss": 0.4921,
"step": 536
},
{
"epoch": 1.51,
"learning_rate": 0.0002953116070584424,
"loss": 0.4214,
"step": 537
},
{
"epoch": 1.51,
"learning_rate": 0.00029528747416929463,
"loss": 0.5963,
"step": 538
},
{
"epoch": 1.51,
"learning_rate": 0.00029526328032022155,
"loss": 0.6592,
"step": 539
},
{
"epoch": 1.52,
"learning_rate": 0.00029523902552137433,
"loss": 0.5055,
"step": 540
},
{
"epoch": 1.52,
"learning_rate": 0.00029521470978292994,
"loss": 0.4382,
"step": 541
},
{
"epoch": 1.52,
"learning_rate": 0.00029519033311509077,
"loss": 0.3389,
"step": 542
},
{
"epoch": 1.52,
"learning_rate": 0.0002951658955280848,
"loss": 0.3762,
"step": 543
},
{
"epoch": 1.53,
"learning_rate": 0.0002951413970321657,
"loss": 0.4591,
"step": 544
},
{
"epoch": 1.53,
"learning_rate": 0.0002951168376376124,
"loss": 0.4736,
"step": 545
},
{
"epoch": 1.53,
"learning_rate": 0.0002950922173547296,
"loss": 0.526,
"step": 546
},
{
"epoch": 1.54,
"learning_rate": 0.00029506753619384766,
"loss": 0.5705,
"step": 547
},
{
"epoch": 1.54,
"learning_rate": 0.00029504279416532223,
"loss": 0.5082,
"step": 548
},
{
"epoch": 1.54,
"learning_rate": 0.00029501799127953465,
"loss": 0.539,
"step": 549
},
{
"epoch": 1.54,
"learning_rate": 0.0002949931275468917,
"loss": 0.2924,
"step": 550
},
{
"epoch": 1.54,
"eval_loss": 0.5201791524887085,
"eval_runtime": 26.3162,
"eval_samples_per_second": 7.6,
"eval_steps_per_second": 1.9,
"step": 550
},
{
"dharma_eval_accuracy": 0.40152557237549985,
"dharma_eval_accuracy_ARC-Challenge": 0.2777777777777778,
"dharma_eval_accuracy_ARC-Easy": 0.37037037037037035,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2777777777777778,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5245901639344263,
"dharma_eval_accuracy_openbookqa": 0.18518518518518517,
"dharma_eval_accuracy_truthful_qa": 0.5370370370370371,
"dharma_eval_accuracy_winogrande": 0.5,
"dharma_loss": 2.79382320022583,
"epoch": 1.54,
"step": 550
},
{
"epoch": 1.55,
"learning_rate": 0.00029496820297782574,
"loss": 0.5506,
"step": 551
},
{
"epoch": 1.55,
"learning_rate": 0.00029494321758279465,
"loss": 0.4246,
"step": 552
},
{
"epoch": 1.55,
"learning_rate": 0.0002949181713722819,
"loss": 0.5631,
"step": 553
},
{
"epoch": 1.56,
"learning_rate": 0.0002948930643567964,
"loss": 0.5545,
"step": 554
},
{
"epoch": 1.56,
"learning_rate": 0.00029486789654687253,
"loss": 0.5853,
"step": 555
},
{
"epoch": 1.56,
"learning_rate": 0.00029484266795307037,
"loss": 0.3963,
"step": 556
},
{
"epoch": 1.56,
"learning_rate": 0.0002948173785859752,
"loss": 0.4131,
"step": 557
},
{
"epoch": 1.57,
"learning_rate": 0.000294792028456198,
"loss": 0.4353,
"step": 558
},
{
"epoch": 1.57,
"learning_rate": 0.0002947666175743753,
"loss": 0.5085,
"step": 559
},
{
"epoch": 1.57,
"learning_rate": 0.00029474114595116896,
"loss": 0.4826,
"step": 560
},
{
"epoch": 1.57,
"learning_rate": 0.00029471561359726645,
"loss": 0.4227,
"step": 561
},
{
"epoch": 1.58,
"learning_rate": 0.0002946900205233807,
"loss": 0.4017,
"step": 562
},
{
"epoch": 1.58,
"learning_rate": 0.00029466436674024997,
"loss": 0.4241,
"step": 563
},
{
"epoch": 1.58,
"learning_rate": 0.0002946386522586382,
"loss": 0.5668,
"step": 564
},
{
"epoch": 1.59,
"learning_rate": 0.00029461287708933473,
"loss": 0.2967,
"step": 565
},
{
"epoch": 1.59,
"learning_rate": 0.00029458704124315425,
"loss": 0.5195,
"step": 566
},
{
"epoch": 1.59,
"learning_rate": 0.0002945611447309371,
"loss": 0.4797,
"step": 567
},
{
"epoch": 1.59,
"learning_rate": 0.00029453518756354885,
"loss": 0.51,
"step": 568
},
{
"epoch": 1.6,
"learning_rate": 0.0002945091697518808,
"loss": 0.4373,
"step": 569
},
{
"epoch": 1.6,
"learning_rate": 0.0002944830913068494,
"loss": 0.6092,
"step": 570
},
{
"epoch": 1.6,
"learning_rate": 0.0002944569522393968,
"loss": 0.3854,
"step": 571
},
{
"epoch": 1.61,
"learning_rate": 0.00029443075256049036,
"loss": 0.6177,
"step": 572
},
{
"epoch": 1.61,
"learning_rate": 0.0002944044922811231,
"loss": 0.3344,
"step": 573
},
{
"epoch": 1.61,
"learning_rate": 0.0002943781714123132,
"loss": 0.4778,
"step": 574
},
{
"epoch": 1.61,
"learning_rate": 0.00029435178996510455,
"loss": 0.5104,
"step": 575
},
{
"epoch": 1.61,
"eval_loss": 0.5130020976066589,
"eval_runtime": 26.3402,
"eval_samples_per_second": 7.593,
"eval_steps_per_second": 1.898,
"step": 575
},
{
"dharma_eval_accuracy": 0.41822259726924604,
"dharma_eval_accuracy_ARC-Challenge": 0.2962962962962963,
"dharma_eval_accuracy_ARC-Easy": 0.35185185185185186,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2777777777777778,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.25925925925925924,
"dharma_eval_accuracy_truthful_qa": 0.6111111111111112,
"dharma_eval_accuracy_winogrande": 0.5185185185185185,
"dharma_loss": 2.5953469643592832,
"epoch": 1.61,
"step": 575
},
{
"epoch": 1.62,
"learning_rate": 0.0002943253479505662,
"loss": 0.4253,
"step": 576
},
{
"epoch": 1.62,
"learning_rate": 0.0002942988453797928,
"loss": 0.4838,
"step": 577
},
{
"epoch": 1.62,
"learning_rate": 0.00029427228226390424,
"loss": 0.5504,
"step": 578
},
{
"epoch": 1.63,
"learning_rate": 0.00029424565861404606,
"loss": 0.566,
"step": 579
},
{
"epoch": 1.63,
"learning_rate": 0.00029421897444138897,
"loss": 0.3383,
"step": 580
},
{
"epoch": 1.63,
"learning_rate": 0.00029419222975712915,
"loss": 0.4251,
"step": 581
},
{
"epoch": 1.63,
"learning_rate": 0.00029416542457248816,
"loss": 0.5096,
"step": 582
},
{
"epoch": 1.64,
"learning_rate": 0.000294138558898713,
"loss": 0.5763,
"step": 583
},
{
"epoch": 1.64,
"learning_rate": 0.000294111632747076,
"loss": 0.4761,
"step": 584
},
{
"epoch": 1.64,
"learning_rate": 0.0002940846461288748,
"loss": 0.4975,
"step": 585
},
{
"epoch": 1.64,
"learning_rate": 0.00029405759905543256,
"loss": 0.408,
"step": 586
},
{
"epoch": 1.65,
"learning_rate": 0.00029403049153809774,
"loss": 0.6146,
"step": 587
},
{
"epoch": 1.65,
"learning_rate": 0.0002940033235882441,
"loss": 0.4863,
"step": 588
},
{
"epoch": 1.65,
"learning_rate": 0.0002939760952172708,
"loss": 0.597,
"step": 589
},
{
"epoch": 1.66,
"learning_rate": 0.0002939488064366024,
"loss": 0.4794,
"step": 590
},
{
"epoch": 1.66,
"learning_rate": 0.00029392145725768874,
"loss": 0.5193,
"step": 591
},
{
"epoch": 1.66,
"learning_rate": 0.0002938940476920051,
"loss": 0.4921,
"step": 592
},
{
"epoch": 1.66,
"learning_rate": 0.0002938665777510519,
"loss": 0.709,
"step": 593
},
{
"epoch": 1.67,
"learning_rate": 0.0002938390474463551,
"loss": 0.4127,
"step": 594
},
{
"epoch": 1.67,
"learning_rate": 0.00029381145678946584,
"loss": 0.523,
"step": 595
},
{
"epoch": 1.67,
"learning_rate": 0.00029378380579196076,
"loss": 0.4203,
"step": 596
},
{
"epoch": 1.68,
"learning_rate": 0.00029375609446544165,
"loss": 0.6356,
"step": 597
},
{
"epoch": 1.68,
"learning_rate": 0.0002937283228215356,
"loss": 0.5171,
"step": 598
},
{
"epoch": 1.68,
"learning_rate": 0.00029370049087189514,
"loss": 0.6632,
"step": 599
},
{
"epoch": 1.68,
"learning_rate": 0.00029367259862819804,
"loss": 0.5575,
"step": 600
},
{
"epoch": 1.68,
"eval_loss": 0.5110138058662415,
"eval_runtime": 26.3409,
"eval_samples_per_second": 7.593,
"eval_steps_per_second": 1.898,
"step": 600
},
{
"dharma_eval_accuracy": 0.42028021043797037,
"dharma_eval_accuracy_ARC-Challenge": 0.24074074074074073,
"dharma_eval_accuracy_ARC-Easy": 0.37037037037037035,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2777777777777778,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.3148148148148148,
"dharma_eval_accuracy_truthful_qa": 0.5925925925925926,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.4995884108543396,
"epoch": 1.68,
"step": 600
},
{
"epoch": 1.69,
"learning_rate": 0.00029364464610214734,
"loss": 0.5925,
"step": 601
},
{
"epoch": 1.69,
"learning_rate": 0.00029361663330547145,
"loss": 0.4611,
"step": 602
},
{
"epoch": 1.69,
"learning_rate": 0.000293588560249924,
"loss": 0.4945,
"step": 603
},
{
"epoch": 1.7,
"learning_rate": 0.00029356042694728384,
"loss": 0.5093,
"step": 604
},
{
"epoch": 1.7,
"learning_rate": 0.0002935322334093553,
"loss": 0.6404,
"step": 605
},
{
"epoch": 1.7,
"learning_rate": 0.0002935039796479678,
"loss": 0.5455,
"step": 606
},
{
"epoch": 1.7,
"learning_rate": 0.00029347566567497615,
"loss": 0.5144,
"step": 607
},
{
"epoch": 1.71,
"learning_rate": 0.0002934472915022603,
"loss": 0.5734,
"step": 608
},
{
"epoch": 1.71,
"learning_rate": 0.00029341885714172553,
"loss": 0.3246,
"step": 609
},
{
"epoch": 1.71,
"learning_rate": 0.0002933903626053024,
"loss": 0.4849,
"step": 610
},
{
"epoch": 1.72,
"learning_rate": 0.0002933618079049467,
"loss": 0.6909,
"step": 611
},
{
"epoch": 1.72,
"learning_rate": 0.0002933331930526394,
"loss": 0.4852,
"step": 612
},
{
"epoch": 1.72,
"learning_rate": 0.0002933045180603868,
"loss": 0.4287,
"step": 613
},
{
"epoch": 1.72,
"learning_rate": 0.00029327578294022043,
"loss": 0.4693,
"step": 614
},
{
"epoch": 1.73,
"learning_rate": 0.00029324698770419685,
"loss": 0.3903,
"step": 615
},
{
"epoch": 1.73,
"learning_rate": 0.00029321813236439823,
"loss": 0.4647,
"step": 616
},
{
"epoch": 1.73,
"learning_rate": 0.0002931892169329316,
"loss": 0.4867,
"step": 617
},
{
"epoch": 1.73,
"learning_rate": 0.00029316024142192937,
"loss": 0.7908,
"step": 618
},
{
"epoch": 1.74,
"learning_rate": 0.00029313120584354915,
"loss": 0.3817,
"step": 619
},
{
"epoch": 1.74,
"learning_rate": 0.0002931021102099737,
"loss": 0.4225,
"step": 620
},
{
"epoch": 1.74,
"learning_rate": 0.000293072954533411,
"loss": 0.4409,
"step": 621
},
{
"epoch": 1.75,
"learning_rate": 0.0002930437388260942,
"loss": 0.4802,
"step": 622
},
{
"epoch": 1.75,
"learning_rate": 0.0002930144631002817,
"loss": 0.4246,
"step": 623
},
{
"epoch": 1.75,
"learning_rate": 0.0002929851273682571,
"loss": 0.3303,
"step": 624
},
{
"epoch": 1.75,
"learning_rate": 0.0002929557316423291,
"loss": 0.5181,
"step": 625
},
{
"epoch": 1.75,
"eval_loss": 0.5158440470695496,
"eval_runtime": 26.3848,
"eval_samples_per_second": 7.58,
"eval_steps_per_second": 1.895,
"step": 625
},
{
"dharma_eval_accuracy": 0.40793453142562464,
"dharma_eval_accuracy_ARC-Challenge": 0.25925925925925924,
"dharma_eval_accuracy_ARC-Easy": 0.3333333333333333,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2222222222222222,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.2777777777777778,
"dharma_eval_accuracy_truthful_qa": 0.5925925925925926,
"dharma_eval_accuracy_winogrande": 0.5370370370370371,
"dharma_loss": 2.3892587933540343,
"epoch": 1.75,
"step": 625
},
{
"epoch": 1.76,
"learning_rate": 0.00029292627593483156,
"loss": 0.3671,
"step": 626
},
{
"epoch": 1.76,
"learning_rate": 0.0002928967602581236,
"loss": 0.3816,
"step": 627
},
{
"epoch": 1.76,
"learning_rate": 0.0002928671846245894,
"loss": 0.4803,
"step": 628
},
{
"epoch": 1.77,
"learning_rate": 0.00029283754904663845,
"loss": 0.6406,
"step": 629
},
{
"epoch": 1.77,
"learning_rate": 0.00029280785353670514,
"loss": 0.5698,
"step": 630
},
{
"epoch": 1.77,
"learning_rate": 0.0002927780981072492,
"loss": 0.6154,
"step": 631
},
{
"epoch": 1.77,
"learning_rate": 0.0002927482827707555,
"loss": 0.4089,
"step": 632
},
{
"epoch": 1.78,
"learning_rate": 0.000292718407539734,
"loss": 0.5746,
"step": 633
},
{
"epoch": 1.78,
"learning_rate": 0.0002926884724267198,
"loss": 0.3483,
"step": 634
},
{
"epoch": 1.78,
"learning_rate": 0.00029265847744427303,
"loss": 0.4937,
"step": 635
},
{
"epoch": 1.79,
"learning_rate": 0.00029262842260497913,
"loss": 0.3538,
"step": 636
},
{
"epoch": 1.79,
"learning_rate": 0.00029259830792144846,
"loss": 0.4694,
"step": 637
},
{
"epoch": 1.79,
"learning_rate": 0.00029256813340631664,
"loss": 0.5227,
"step": 638
},
{
"epoch": 1.79,
"learning_rate": 0.0002925378990722444,
"loss": 0.6286,
"step": 639
},
{
"epoch": 1.8,
"learning_rate": 0.00029250760493191734,
"loss": 0.439,
"step": 640
},
{
"epoch": 1.8,
"learning_rate": 0.0002924772509980465,
"loss": 0.4865,
"step": 641
},
{
"epoch": 1.8,
"learning_rate": 0.00029244683728336766,
"loss": 0.491,
"step": 642
},
{
"epoch": 1.8,
"learning_rate": 0.00029241636380064194,
"loss": 0.5232,
"step": 643
},
{
"epoch": 1.81,
"learning_rate": 0.0002923858305626555,
"loss": 0.6289,
"step": 644
},
{
"epoch": 1.81,
"learning_rate": 0.0002923552375822194,
"loss": 0.4458,
"step": 645
},
{
"epoch": 1.81,
"learning_rate": 0.00029232458487216996,
"loss": 0.4707,
"step": 646
},
{
"epoch": 1.82,
"learning_rate": 0.00029229387244536845,
"loss": 0.4265,
"step": 647
},
{
"epoch": 1.82,
"learning_rate": 0.0002922631003147013,
"loss": 0.3771,
"step": 648
},
{
"epoch": 1.82,
"learning_rate": 0.00029223226849307984,
"loss": 0.5994,
"step": 649
},
{
"epoch": 1.82,
"learning_rate": 0.00029220137699344055,
"loss": 0.4436,
"step": 650
},
{
"epoch": 1.82,
"eval_loss": 0.5179089903831482,
"eval_runtime": 26.3811,
"eval_samples_per_second": 7.581,
"eval_steps_per_second": 1.895,
"step": 650
},
{
"dharma_eval_accuracy": 0.44685480763403645,
"dharma_eval_accuracy_ARC-Challenge": 0.2777777777777778,
"dharma_eval_accuracy_ARC-Easy": 0.35185185185185186,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.2037037037037037,
"dharma_eval_accuracy_agieval": 0.2542372881355932,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.37037037037037035,
"dharma_eval_accuracy_truthful_qa": 0.8518518518518519,
"dharma_eval_accuracy_winogrande": 0.5,
"dharma_loss": 2.3754624218940736,
"epoch": 1.82,
"step": 650
},
{
"epoch": 1.83,
"learning_rate": 0.00029217042582874495,
"loss": 0.5742,
"step": 651
},
{
"epoch": 1.83,
"learning_rate": 0.0002921394150119796,
"loss": 0.4352,
"step": 652
},
{
"epoch": 1.83,
"learning_rate": 0.00029210834455615603,
"loss": 0.4036,
"step": 653
},
{
"epoch": 1.84,
"learning_rate": 0.00029207721447431083,
"loss": 0.4226,
"step": 654
},
{
"epoch": 1.84,
"learning_rate": 0.00029204602477950556,
"loss": 0.4018,
"step": 655
},
{
"epoch": 1.84,
"learning_rate": 0.0002920147754848269,
"loss": 0.4774,
"step": 656
},
{
"epoch": 1.84,
"learning_rate": 0.00029198346660338646,
"loss": 0.5114,
"step": 657
},
{
"epoch": 1.85,
"learning_rate": 0.0002919520981483209,
"loss": 0.4841,
"step": 658
},
{
"epoch": 1.85,
"learning_rate": 0.00029192067013279174,
"loss": 0.7345,
"step": 659
},
{
"epoch": 1.85,
"learning_rate": 0.00029188918256998564,
"loss": 0.5718,
"step": 660
},
{
"epoch": 1.86,
"learning_rate": 0.00029185763547311426,
"loss": 0.4517,
"step": 661
},
{
"epoch": 1.86,
"learning_rate": 0.0002918260288554141,
"loss": 0.4681,
"step": 662
},
{
"epoch": 1.86,
"learning_rate": 0.0002917943627301467,
"loss": 0.464,
"step": 663
},
{
"epoch": 1.86,
"learning_rate": 0.0002917626371105987,
"loss": 0.592,
"step": 664
},
{
"epoch": 1.87,
"learning_rate": 0.0002917308520100814,
"loss": 0.6453,
"step": 665
},
{
"epoch": 1.87,
"learning_rate": 0.0002916990074419314,
"loss": 0.4827,
"step": 666
},
{
"epoch": 1.87,
"learning_rate": 0.00029166710341950995,
"loss": 0.4945,
"step": 667
},
{
"epoch": 1.88,
"learning_rate": 0.00029163513995620347,
"loss": 0.47,
"step": 668
},
{
"epoch": 1.88,
"learning_rate": 0.00029160311706542325,
"loss": 0.4576,
"step": 669
},
{
"epoch": 1.88,
"learning_rate": 0.00029157103476060544,
"loss": 0.4582,
"step": 670
},
{
"epoch": 1.88,
"learning_rate": 0.0002915388930552112,
"loss": 0.5732,
"step": 671
},
{
"epoch": 1.89,
"learning_rate": 0.0002915066919627266,
"loss": 0.5289,
"step": 672
},
{
"epoch": 1.89,
"learning_rate": 0.0002914744314966627,
"loss": 0.5789,
"step": 673
},
{
"epoch": 1.89,
"learning_rate": 0.00029144211167055523,
"loss": 0.6537,
"step": 674
},
{
"epoch": 1.89,
"learning_rate": 0.0002914097324979651,
"loss": 0.3829,
"step": 675
},
{
"epoch": 1.89,
"eval_loss": 0.514592707157135,
"eval_runtime": 26.3555,
"eval_samples_per_second": 7.589,
"eval_steps_per_second": 1.897,
"step": 675
},
{
"dharma_eval_accuracy": 0.43674111578776453,
"dharma_eval_accuracy_ARC-Challenge": 0.2777777777777778,
"dharma_eval_accuracy_ARC-Easy": 0.35185185185185186,
"dharma_eval_accuracy_BoolQ": 0.7037037037037037,
"dharma_eval_accuracy_MMLU": 0.24074074074074073,
"dharma_eval_accuracy_agieval": 0.23728813559322035,
"dharma_eval_accuracy_bigbench": 0.5081967213114754,
"dharma_eval_accuracy_openbookqa": 0.2962962962962963,
"dharma_eval_accuracy_truthful_qa": 0.8148148148148148,
"dharma_eval_accuracy_winogrande": 0.5,
"dharma_loss": 2.5323671989440917,
"epoch": 1.89,
"step": 675
},
{
"epoch": 1.89,
"step": 675,
"total_flos": 2.1460864134131712e+17,
"train_loss": 0.5478784461374636,
"train_runtime": 6290.4308,
"train_samples_per_second": 12.718,
"train_steps_per_second": 0.795
}
],
"max_steps": 5000,
"num_train_epochs": 15,
"total_flos": 2.1460864134131712e+17,
"trial_name": null,
"trial_params": null
}