Zhangchen Xu committed on

Model save
Files changed:
- README.md +88 -0
- all_results.json +9 -0
- generation_config.json +9 -0
- train_results.json +9 -0
- trainer_state.json +0 -0
    	
README.md ADDED
@@ -0,0 +1,88 @@
---
library_name: transformers
license: llama3.1
base_model: Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K
tags:
- trl
- dpo
- generated_from_trainer
model-index:
- name: Llama-3.1-8B-Magpie-SFT-GMix-550K-DPO-02Mix
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# Llama-3.1-8B-Magpie-SFT-GMix-550K-DPO-02Mix

This model is a fine-tuned version of [Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K](https://huggingface.co/Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3867
- Rewards/chosen: -5.1502
- Rewards/rejected: -6.8793
- Rewards/accuracies: 0.8080
- Rewards/margins: 1.7291
- Logps/rejected: -1153.0979
- Logps/chosen: -988.9237
- Logits/rejected: -0.6120
- Logits/chosen: -0.6722
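For a quick smoke test, here is a minimal inference sketch, assuming the model is published under the repo id `Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K-DPO-02Mix` and reusing the sampling defaults shipped in `generation_config.json` below:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id, inferred from the model name in this card.
model_id = "Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K-DPO-02Mix"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Write a haiku about preference optimization."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Sampling settings mirror the shipped generation_config.json (temperature 0.6, top_p 0.9).
output = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.6, top_p=0.9)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```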

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-07
- train_batch_size: 2
- eval_batch_size: 4
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 16
- total_train_batch_size: 128
- total_eval_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1
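For reference, a sketch of how these settings could map onto `trl`'s `DPOConfig`/`DPOTrainer`. The actual training script, preference dataset, and DPO beta are not recorded in this commit, so the tiny dataset below is a hypothetical stand-in with the expected columns:

```python
# Hedged reconstruction of a DPO run matching the hyperparameters above;
# dataset contents and DPO beta are placeholders, not the real training setup.
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)

train_dataset = Dataset.from_dict({
    "prompt":   ["What is DPO?"],
    "chosen":   ["Direct Preference Optimization, an RL-free preference-tuning method."],
    "rejected": ["No idea."],
})

args = DPOConfig(
    output_dir="Llama-3.1-8B-Magpie-SFT-GMix-550K-DPO-02Mix",
    learning_rate=2e-7,
    per_device_train_batch_size=2,   # x 4 GPUs x 16 accumulation steps = 128 effective
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=16,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
)

# trl releases of this era take `tokenizer=`; newer ones renamed it to `processing_class`.
trainer = DPOTrainer(model=model, args=args, train_dataset=train_dataset, tokenizer=tokenizer)
trainer.train()
```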
### Training results

| Training Loss | Epoch  | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
|:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
| 0.686         | 0.0653 | 100  | 0.6856          | -0.0491        | -0.0616          | 0.6480             | 0.0125          | -471.3315      | -478.8181    | -0.7034         | -0.7427       |
| 0.6218        | 0.1306 | 200  | 0.6277          | -0.6128        | -0.7720          | 0.6960             | 0.1591          | -542.3653      | -535.1920    | -0.7771         | -0.8125       |
| 0.5705        | 0.1959 | 300  | 0.5545          | -2.4738        | -3.0052          | 0.7270             | 0.5314          | -765.6894      | -721.2881    | -0.7894         | -0.8230       |
| 0.4606        | 0.2612 | 400  | 0.5081          | -2.6780        | -3.3782          | 0.7560             | 0.7002          | -802.9893      | -741.7116    | -0.6813         | -0.7247       |
| 0.4314        | 0.3266 | 500  | 0.4787          | -3.6697        | -4.6026          | 0.7630             | 0.9329          | -925.4283      | -840.8740    | -0.6189         | -0.6691       |
| 0.449         | 0.3919 | 600  | 0.4533          | -3.7414        | -4.8019          | 0.7820             | 1.0604          | -945.3563      | -848.0514    | -0.6157         | -0.6681       |
| 0.4538        | 0.4572 | 700  | 0.4350          | -4.3858        | -5.6549          | 0.7890             | 1.2690          | -1030.6561     | -912.4920    | -0.5789         | -0.6331       |
| 0.35          | 0.5225 | 800  | 0.4186          | -4.7129        | -6.1662          | 0.8010             | 1.4533          | -1081.7843     | -945.1964    | -0.5778         | -0.6347       |
| 0.4153        | 0.5878 | 900  | 0.4108          | -4.9836        | -6.5320          | 0.7970             | 1.5484          | -1118.3677     | -972.2631    | -0.5895         | -0.6474       |
| 0.3935        | 0.6531 | 1000 | 0.3999          | -4.4303        | -5.9370          | 0.8110             | 1.5067          | -1058.8646     | -916.9379    | -0.6016         | -0.6598       |
| 0.3205        | 0.7184 | 1100 | 0.3950          | -5.1884        | -6.8827          | 0.8010             | 1.6943          | -1153.4371     | -992.7452    | -0.5846         | -0.6452       |
| 0.3612        | 0.7837 | 1200 | 0.3901          | -5.0426        | -6.7179          | 0.8040             | 1.6753          | -1136.9619     | -978.1701    | -0.6046         | -0.6637       |
| 0.3058        | 0.8490 | 1300 | 0.3877          | -5.1224        | -6.8428          | 0.8040             | 1.7204          | -1149.4465     | -986.1475    | -0.6087         | -0.6690       |
| 0.3467        | 0.9144 | 1400 | 0.3871          | -5.2335        | -6.9809          | 0.8090             | 1.7474          | -1163.2629     | -997.2610    | -0.6071         | -0.6672       |
| 0.3197        | 0.9797 | 1500 | 0.3867          | -5.1502        | -6.8793          | 0.8080             | 1.7291          | -1153.0979     | -988.9237    | -0.6120         | -0.6722       |
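As a sanity check on the reward columns, the DPO reward margin is simply the chosen reward minus the rejected reward, which the final evaluation row reproduces:

$$\text{Rewards/margins} = -5.1502 - (-6.8793) = 1.7291$$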

### Framework versions

- Transformers 4.44.2
- Pytorch 2.4.1+cu121
- Datasets 3.0.0
- Tokenizers 0.19.1
    	
all_results.json ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 0.9999183606825047,
    "total_flos": 0.0,
    "train_loss": 0.46421666473520107,
    "train_runtime": 86694.5826,
    "train_samples": 195977,
    "train_samples_per_second": 2.261,
    "train_steps_per_second": 0.018
}
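The throughput fields are internally consistent, as a quick check confirms:

```python
# Consistency check on the stats above: samples/sec = samples / runtime.
runtime_s = 86694.5826
samples = 195977
print(round(samples / runtime_s, 3))  # 2.261 -> matches train_samples_per_second
print(round(runtime_s / 3600, 1))     # 24.1  -> roughly a day of training on 4 GPUs
```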
    	
generation_config.json ADDED
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": 128001,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.44.2"
}
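These defaults are picked up automatically by `transformers` at generation time; a minimal check, again assuming the repo id used earlier:

```python
from transformers import GenerationConfig

# Loads the generation_config.json shown above from the (assumed) hub repo.
gen_cfg = GenerationConfig.from_pretrained(
    "Magpie-Align/Llama-3.1-8B-Magpie-SFT-GMix-550K-DPO-02Mix"
)
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p)  # True 0.6 0.9
```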
    	
train_results.json ADDED
@@ -0,0 +1,9 @@
{
    "epoch": 0.9999183606825047,
    "total_flos": 0.0,
    "train_loss": 0.46421666473520107,
    "train_runtime": 86694.5826,
    "train_samples": 195977,
    "train_samples_per_second": 2.261,
    "train_steps_per_second": 0.018
}
    	
trainer_state.json ADDED

The diff for this file is too large to render. See raw diff.