top.booster: none
top.checkpoint_path: null
top.finetuning_type: full
top.model_name: Qwen2-0.5B
top.quantization_bit: none
top.rope_scaling: none
top.template: default
top.visual_inputs: false
train.additional_target: ''
train.badam_mode: layer
train.badam_switch_interval: 50
train.badam_switch_mode: ascending
train.badam_update_ratio: 0.05
train.batch_size: 1
train.compute_type: fp32
train.create_new_adapter: false
train.cutoff_len: 6100
train.dataset:
- longzu
train.dataset_dir: C:\AI\LLaMA-Factory\data
train.ds_offload: false
train.ds_stage: none
train.freeze_extra_modules: ''
train.freeze_trainable_layers: 2
train.freeze_trainable_modules: all
train.galore_rank: 16
train.galore_scale: 0.25
train.galore_target: all
train.galore_update_interval: 200
train.gradient_accumulation_steps: 8
train.learning_rate: 5e-5
train.logging_steps: 5
train.lora_alpha: 16
train.lora_dropout: 0
train.lora_rank: 8
train.lora_target: ''
train.loraplus_lr_ratio: 0
train.lr_scheduler_type: cosine
train.max_grad_norm: '1.0'
train.max_samples: '100000'
train.neftune_alpha: 0
train.num_train_epochs: '35'
train.optim: adamw_torch
train.packing: false
train.ppo_score_norm: false
train.ppo_whiten_rewards: false
train.pref_beta: 0.1
train.pref_ftx: 0
train.pref_loss: sigmoid
train.report_to: false
train.resize_vocab: false
train.reward_model: null
train.save_steps: 100
train.shift_attn: false
train.training_stage: Pre-Training
train.upcast_layernorm: false
train.use_badam: false
train.use_dora: false
train.use_galore: false
train.use_llama_pro: false
train.use_rslora: false
train.val_size: 0
train.warmup_steps: 0
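For reference, the same run can be reproduced outside the web UI with a `llamafactory-cli train` YAML file. The sketch below is a minimal, hedged mapping of the active settings above onto the CLI config format; the Hub model ID, output path, and file name are illustrative assumptions, not values taken from this dump.

```yaml
### model
model_name_or_path: Qwen/Qwen2-0.5B  # assumption: Hub ID corresponding to top.model_name

### method
stage: pt                  # train.training_stage: Pre-Training
do_train: true
finetuning_type: full      # top.finetuning_type

### dataset
dataset: longzu
dataset_dir: C:\AI\LLaMA-Factory\data
cutoff_len: 6100
max_samples: 100000

### output
output_dir: saves/qwen2-0.5b/full/pt  # assumption: any writable path works
logging_steps: 5
save_steps: 100

### train
per_device_train_batch_size: 1    # train.batch_size
gradient_accumulation_steps: 8    # effective batch size: 1 x 8 = 8 samples per device per update
learning_rate: 5.0e-5
num_train_epochs: 35.0
lr_scheduler_type: cosine
max_grad_norm: 1.0
warmup_steps: 0
# train.compute_type is fp32, so neither fp16 nor bf16 is set here
```

Launch it with `llamafactory-cli train <config>.yaml`. The LoRA, GaLore, BAdam, and preference-loss keys from the dump are omitted because `finetuning_type: full` with their `use_*` switches set to false leaves them inactive.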