# deepseek-math.7b.ins.meta_math_cot.math55k.n5.critic_correct.dpo.H100.w4.v3.0.s42 / training_config.yaml
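# Hydra-style (OmegaConf) training configuration dumped for this run; ${...} values are
# interpolations resolved against other keys in this file at load time.

# DeepSpeed engine settings. The scheduler's total_num_steps and warmup_num_steps are null
# here and are presumably filled in at runtime once the dataloader length is known.
# ZeRO stage 1 is configured with optimizer state offloaded to CPU; the stage3_* keys only
# take effect under ZeRO stage 3 and are inert at stage 1.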
ds_cfg:
  train_micro_batch_size_per_gpu: ${per_gpu_train_batch_size}
  gradient_accumulation_steps: ${gradient_accumulation_steps}
  scheduler:
    type: WarmupDecayLR
    params:
      total_num_steps: null
      warmup_max_lr: ${learning_rate}
      warmup_num_steps: null
      warmup_type: linear
  optimizer:
    type: AdamW
    params:
      lr: ${learning_rate}
      betas:
      - 0.9
      - 0.95
      eps: 1.0e-06
      weight_decay: ${weight_decay}
  bf16:
    enabled: true
  zero_optimization:
    stage: 1
    offload_optimizer:
      device: cpu
      pin_memory: true
    stage3_param_persistence_threshold: 100000.0
    stage3_max_live_parameters: 100000000.0
    stage3_prefetch_bucket_size: 100000000.0
    memory_efficient_linear: false
  steps_per_print: 25
  gradient_clipping: 1.0
  prescale_gradients: false
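# Checkpoint and data paths. The SFT checkpoint below serves as both the policy
# initialization and the tokenizer source (via ${model_name_or_path}); train_file holds
# the DPO preference pairs, with the sampling settings used to build them encoded in the
# file name.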
sft_model_dir: experiments/deepseek-math.ins.7b.assess.sft.H100.w2.v3.0/checkpoint-200
train_file: ${sft_model_dir}/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.dpo.json
dev_file: null
test_file: null
torch_dtype:
  _target_: general_util.training_utils.return_torch_dtype
  dtype: bfloat16
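# Tokenizer and device placement. Left padding is the usual choice for batched
# decoder-only models; return_single_device_map presumably places the full model on the
# local GPU, one replica per data-parallel rank.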
tokenizer_init:
  _target_: general_util.tokenization_utils.init_tokenizer
  tokenizer_path: ${model_name_or_path}
  padding_side: left
device_map:
  _target_: models.utils.return_single_device_map
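# Policy model and frozen reference model for DPO with beta = 0.5, presumably the
# standard DPO objective (Rafailov et al., 2023):
#   L = -log sigmoid( beta * [ (log pi(y_w|x) - log pi_ref(y_w|x))
#                            - (log pi(y_l|x) - log pi_ref(y_l|x)) ] )
# with y_w / y_l the chosen / rejected corrections constructed by read_tensor below.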
model:
  _target_: models.llama.LlamaForCausalLMDPO.from_pretrained_with_ref_model
  beta: 0.5
  gradient_checkpointing: true
  attn_implementation: flash_attention_2
  torch_dtype: ${torch_dtype}
  device_map: ${device_map}
  ref_model:
    _target_: models.llama.LlamaForCausalLMDPO.from_pretrained
    pretrained_model_name_or_path: ${model_name_or_path}
    torch_dtype: ${torch_dtype}
    attn_implementation: flash_attention_2
    device_map: ${device_map}
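# Dataset construction. dpo_pair_aligner pairs a preferred correction (chosen_corr) with
# a dispreferred one (reject_corr) for the same example; each template replays the
# original query and model response, then appends the correction after the "<WRONG STEP>"
# marker, so chosen and reject differ only in the correction text. {instruction} is
# filled with \boxed{} below, and <|end▁of▁sentence|> is the DeepSeek eos token.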
read_tensor:
  _target_: data.logic_combine.MultiMappingDataset
  aligner:
    _target_: data.input_aligner.concat_aligner
    aligners:
    - _target_: data.input_aligner.dpo_pair_aligner
      pos_field: chosen_corr
      neg_field: reject_corr
  template:
    chosen: 'User: {query}

      Please reason step by step, and put your final answer within {instruction}.

      Assistant: {response}

      <WRONG STEP>{pos}<|end▁of▁sentence|>'
    reject: 'User: {query}

      Please reason step by step, and put your final answer within {instruction}.

      Assistant: {response}

      <WRONG STEP>{neg}<|end▁of▁sentence|>'
    prompt: 'User: {query}

      Please reason step by step, and put your final answer within {instruction}.

      Assistant: {response}

      <WRONG STEP>'
  instruction: \boxed{}
  kv_mapping:
    chosen: chosen
    reject: reject
    id: index
    prompt: prompt
dist_load_data_barrier: false
extended_vocab: null
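# Batching. DPOCollator presumably tokenizes the chosen / reject / prompt fields produced
# by kv_mapping above and truncates them to max_seq_length tokens; num_workers and
# prefetch_factor are standard DataLoader settings.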
collator:
  _target_: data.dpo.DPOCollator
  tokenizer: ${tokenizer_init}
  max_seq_length: 1024
num_workers: 8
prefetch_factor: 2
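# Optimization schedule. Effective batch size is 4 (per GPU) x 4 (gradient accumulation)
# x 4 (world size) = 64 preference pairs per optimizer step, trained for one epoch at
# lr 1e-6 with 6% linear warmup. The top-level adam_betas / adam_epsilon / fp16 fields
# look like trainer defaults; the values that actually apply are presumably those in
# ds_cfg (AdamW with betas 0.9 / 0.95, bf16).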
model_name_or_path: ${sft_model_dir}
pretrain: null
dp_size: 4
tp_size: 1
pp_size: 1
exp_name: deepseek-math.7b.ins.meta_math_cot.math55k.n5.critic_correct.dpo.H100.w4.v3.0.s${seed}
exp_notes: null
output_dir: experiments/${exp_name}
do_train: true
evaluate_during_training: false
do_eval: false
eval_sub_path: checkpoint-100
per_gpu_train_batch_size: 4
per_gpu_eval_batch_size: 4
learning_rate: 1.0e-06
gradient_accumulation_steps: 4
weight_decay: 0.1
adam_epsilon: 1.0e-06
adam_betas: (0.9, 0.98)
total_dataset_len: -1
max_grad_norm: 1.0
num_train_epochs: 1
max_steps: 0
warmup_proportion: 0.06
warmup_steps: 0
optimizer: null
use_nvlamb: null
bit_training: null
logging_steps: 5
save_ds_state: false
save_steps: 100
save_best: false
eval_steps: 400
ddp_eval: true
no_cuda: false
seed: 42
local_rank: 0
fp16: true
fp16_opt_level: O1
fp16_bfloat16: true
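# Evaluation and logging. No dev/test evaluation is run (do_eval: false, dev_file: null);
# checkpoints are written every 100 steps, and chosen/rejected rewards are logged to
# Weights & Biases through WandbWriter.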
prediction_cfg:
  metric: loss
  measure: -1
  best_checkpoint: null
  best_result: null
eval_forward_fn:
  _target_: general_util.evaluator.DefaultForwardFn
post_process:
  _target_: post_processors.dpo.DPOEvalPostProcessor
summary_helper:
  _target_: general_util.tensorboard_helper.WandbWriter
  batch_index_or_keys: null
  outputs_index_or_keys:
    train/chosen_reward: chosen_reward
    train/rejected_reward: rejected_reward
n_gpu: 1
device: cuda:0
train_batch_size: null
eval_batch_size: null
world_size: 4
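# A minimal sketch, assuming the surrounding trainer turns the _target_ nodes above into
# objects with hydra.utils.instantiate (the call order and positional arguments here are
# assumptions, not taken from this file):
#
#   import hydra
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("training_config.yaml")
#   tokenizer = hydra.utils.instantiate(cfg.tokenizer_init)
#   model = hydra.utils.instantiate(cfg.model, cfg.model_name_or_path)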