Upload cfg.yaml
cfg.yaml ADDED
```yaml
architecture:
    backbone_dtype: float32
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.2
dataset:
    add_eos_token_to_answer: false
    add_eos_token_to_prompt: false
    add_eos_token_to_system: false
    add_prompt_answer_tokens: false
    answer_column: score
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    num_classes: 6
    parent_id_column: None
    personalize: false
    prompt_column:
    - full_text
    system_column: None
    text_answer_separator: ''
    text_prompt_start: ''
    text_system_start: ''
    train_dataframe: /root/h2o-llmstudio/data/user/essay_train/essay_train.csv
    validation_dataframe: /root/h2o-llmstudio/data/user/essay_train/essay_train.csv
    validation_size: 0.1
    validation_strategy: automatic
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    huggingface_branch: main
    mixed_precision: true
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: -1
    trust_remote_code: true
    use_deepspeed: false
experiment_name: masked-mamba.1
llm_backbone: h2oai/h2ogpt-4096-llama2-7b
logging:
    logger: Neptune
    neptune_project: samvelkoch/essay
output_directory: /root/h2o-llmstudio/output/user/masked-mamba.1/
prediction:
    batch_size_inference: 0
    metric: Accuracy
problem_type: text_causal_classification_modeling
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 10240
    padding_quantile: 1.0
    tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
training:
    batch_size: 2
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers:
    - classification_head
    drop_last_batch: true
    epochs: 1
    evaluate_before_training: false
    evaluation_epochs: 1.0
    freeze_layers: []
    grad_accumulation: 1
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 16
    lora_dropout: 0.05
    lora_r: 4
    lora_target_modules: ''
    lora_unfreeze_layers: []
    loss_function: CrossEntropyLoss
    optimizer: AdamW
    save_checkpoint: last
    schedule: Cosine
    train_validation_data: false
    use_dora: false
    use_flash_attention_2: true
    warmup_epochs: 0.0
    weight_decay: 1.0e-05
```
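For anyone reusing this export outside the LLM Studio UI, the file parses with stock PyYAML; the one subtlety is that `tokenizer_kwargs` is stored as a JSON string rather than a nested mapping, so it needs a second parse. A minimal sketch, assuming `cfg.yaml` sits in the working directory:

```python
import json

import yaml  # pip install pyyaml

# Load the exported experiment config.
with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["llm_backbone"])            # h2oai/h2ogpt-4096-llama2-7b
print(cfg["problem_type"])            # text_causal_classification_modeling
print(cfg["dataset"]["num_classes"])  # 6

# tokenizer_kwargs is a JSON string inside the YAML, so parse it separately.
tok_kwargs = json.loads(cfg["tokenizer"]["tokenizer_kwargs"])
print(tok_kwargs)  # {'use_fast': True, 'add_prefix_space': False}
```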
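In the `augmentation` block, `token_mask_probability: 0.2` means roughly 20% of input tokens are randomly masked during training. The sketch below illustrates the general technique with a hypothetical `mask_tokens` helper; LLM Studio's internal implementation may differ in which positions are eligible and which token id is substituted:

```python
import torch

def mask_tokens(input_ids: torch.Tensor, mask_token_id: int,
                token_mask_probability: float = 0.2) -> torch.Tensor:
    # Draw a Bernoulli mask and replace the selected positions
    # with the mask token id.
    mask = torch.rand(input_ids.shape, device=input_ids.device) < token_mask_probability
    return torch.where(mask,
                       torch.full_like(input_ids, mask_token_id),
                       input_ids)

ids = torch.tensor([[3923, 374, 279, 4320, 30]])  # arbitrary token ids
print(mask_tokens(ids, mask_token_id=0))
```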
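With `lora: true`, `lora_r: 4`, `lora_alpha: 16`, and `lora_dropout: 0.05`, training touches only low-rank adapters on top of the frozen backbone. Below is a rough stand-alone equivalent using the `peft` library, not LLM Studio's own code path: LLM Studio attaches its own classification head to a causal backbone, so `AutoModelForSequenceClassification` and the explicit `target_modules` list (the config leaves `lora_target_modules` empty, deferring to defaults) are assumptions for illustration:

```python
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForSequenceClassification

lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=4,                # lora_r
    lora_alpha=16,      # lora_alpha
    lora_dropout=0.05,  # lora_dropout
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # assumed defaults
)

model = AutoModelForSequenceClassification.from_pretrained(
    "h2oai/h2ogpt-4096-llama2-7b",
    num_labels=6,  # dataset.num_classes
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the rank-4 adapters are trainable
```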
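The `training` block pairs a base `learning_rate` of 1.0e-04 with a `differential_learning_rate` of 1.0e-05 for the layers listed under `differential_learning_rate_layers` (here, `classification_head`). In plain PyTorch this corresponds to AdamW parameter groups; `TinyClassifier` below is a hypothetical stand-in for the real model, used only to show the grouping:

```python
import torch
import torch.nn as nn

# Hypothetical stand-in for the real backbone + classification head.
class TinyClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(32, 32)
        self.classification_head = nn.Linear(32, 6)

model = TinyClassifier()

diff_layers = ["classification_head"]  # differential_learning_rate_layers
head_params = [p for n, p in model.named_parameters()
               if any(layer in n for layer in diff_layers)]
base_params = [p for n, p in model.named_parameters()
               if not any(layer in n for layer in diff_layers)]

optimizer = torch.optim.AdamW(
    [
        {"params": base_params, "lr": 1.0e-04},  # learning_rate
        {"params": head_params, "lr": 1.0e-05},  # differential_learning_rate
    ],
    weight_decay=1.0e-05,
)
```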