architecture:
    backbone_dtype: float32
    force_embedding_gradients: true
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    add_eos_token_to_system: true
    answer_column: Assistentin
    chatbot_author: tmp-networks.de
    chatbot_name: EvaGPT-German
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    parent_id_column: parent_id
    personalize: true
    prompt_column:
    - Benutzer
    - Kontext
    system_column: system
    text_answer_separator: <|Assistentin|>
    text_prompt_start: <|Benutzer|>
    text_system_start: <|System|>
    train_dataframe: /media/mtsmash/gpt/h2o-llmstudio/data/user/dataset.13-sep/dataset.csv
    validation_dataframe: /media/mtsmash/gpt/h2o-llmstudio/data/user/dataset.13-sep/dataset.csv
    validation_size: 0.01
    validation_strategy: automatic
environment:
    compile_model: false
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    - '1'
    - '2'
    - '3'
    huggingface_branch: main
    mixed_precision: true
    number_of_workers: 96
    seed: -1
    trust_remote_code: true
    use_deepspeed: false
experiment_name: EvaGPT-German-446M-Mini.v0.1.1
llm_backbone: /media/mtsmash/gpt/h2o-llmstudio/output/user/EvaGPT-German-446M-Mini.v0.1/
logging:
    logger: None
    neptune_project: ''
output_directory: /media/mtsmash/gpt/h2o-llmstudio/output/user/EvaGPT-German-446M-Mini.v0.1.1/
prediction:
    batch_size_inference: 0
    do_sample: true
    max_length_inference: 1312
    metric: Perplexity
    metric_gpt_model: gpt-3.5-turbo-0301
    metric_gpt_template: general
    min_length_inference: 2
    num_beams: 3
    num_history: 4
    repetition_penalty: 1.1
    stop_tokens: ''
    temperature: 0.7
    top_k: 40
    top_p: 1.0
problem_type: text_causal_language_modeling
tokenizer:
    add_prefix_space: false
    add_prompt_answer_tokens: true
    max_length: 4096
    max_length_answer: 3072
    max_length_prompt: 1024
    padding_quantile: 1.0
    use_fast: true
training:
    batch_size: 1
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 4
    evaluate_before_training: false
    evaluation_epochs: 1.0
    grad_accumulation: 8
    gradient_clip: 1.0
    learning_rate: 0.0002
    lora: true
    lora_alpha: 46
    lora_dropout: 0.05
    lora_r: 26
    lora_target_modules: ''
    loss_function: TokenAveragedCrossEntropy
    optimizer: AdamW
    save_best_checkpoint: false
    schedule: Cosine
    train_validation_data: true
    use_flash_attention_2: false
    warmup_epochs: 0.0
    weight_decay: 0.01
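
# A rough sketch of how the chat-template tokens above combine per training
# sample (assuming H2O LLM Studio's usual concatenation order; EOS tokens are
# appended after each part per the add_eos_token_to_* flags):
#   <|System|>{system}<|Benutzer|>{prompt}<|Assistentin|>{answer}
#
# Effective optimizer batch size, assuming standard data-parallel training
# across the four devices listed under environment.gpus:
#   batch_size (1) x grad_accumulation (8) x 4 GPUs = 32 samples per step
#
# The tokenizer budget splits cleanly between prompt and answer:
#   max_length (4096) = max_length_prompt (1024) + max_length_answer (3072)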