{
  "dataset.debug": false,
  "dataset.eval_path": "output/datasets/eval_ds_Qwen3-4B-Instruct-2507",
  "dataset.git_diff": "",
  "dataset.git_sha1": "unknown",
  "dataset.manual_sample_ids": [],
  "dataset.max_read_items": null,
  "dataset.output_dir": "output",
  "dataset.path": "./output/datasets/ds_Qwen3-4B-Instruct-2507",
  "dataset.read_eagle_format": false,
  "dataset.run_name": "temp_run",
  "dataset.seed": 0,
  "dataset_generation.batch_size": 1,
  "dataset_generation.debug": false,
  "dataset_generation.debug_target": null,
  "dataset_generation.ds_prefix": "ds_",
  "dataset_generation.git_diff": "",
  "dataset_generation.git_sha1": "unknown",
  "dataset_generation.max_length": 2048,
  "dataset_generation.output_dir": "output",
  "dataset_generation.run_name": "temp_run",
  "dataset_generation.save_every": 1000,
  "dataset_generation.seed": 0,
  "dataset_generation.sharegpt_path": "Aeala/ShareGPT_Vicuna_unfiltered",
  "device_names": [
    "NVIDIA H100 80GB HBM3"
  ],
  "inference.debug": false,
  "inference.dynamic_draft_all_top_k": 59,
  "inference.dynamic_draft_max_depth": 6,
  "inference.dynamic_draft_top_k": 10,
  "inference.git_diff": "",
  "inference.git_sha1": "unknown",
  "inference.max_new_tokens": 2048,
  "inference.output_dir": "output",
  "inference.run_name": "temp_run",
  "inference.seed": 0,
  "inference.timer": false,
  "modeling.chat_template": "models/speculative_qwen3/chat_template.jinja2",
  "modeling.debug": false,
  "modeling.draft_config_custom_modify": {
    "num_experts": "256"
  },
  "modeling.draft_config_modify": {
    "moe_intermediate_size": "base.intermediate_size // draft_config.num_experts_per_tok",
    "num_key_value_heads": "base.num_key_value_heads",
    "rope_theta": "base.rope_theta"
  },
  "modeling.dtype": "torch.float32",
  "modeling.free_base_layers": "num_hidden_layers",
  "modeling.git_diff": "",
  "modeling.git_sha1": "unknown",
  "modeling.init_base_model": [
    "SpeculativeQwen3ForCausalLM",
    "Qwen/Qwen3-4B-Instruct-2507"
  ],
  "modeling.init_draft_config": [
    "Qwen3MoeDrafter",
    "Qwen/Qwen3-30B-A3B-Instruct-2507"
  ],
  "modeling.init_speculative_algorithm": [
    "EagleV2",
    "dict(draft_layers=1)"
  ],
  "modeling.max_memory": null,
  "modeling.model_path": null,
  "modeling.output_dir": "output",
  "modeling.run_name": "temp_run",
  "modeling.seed": 0,
  "modeling.stand_alone_draft_model_key_adapt": null,
  "modeling.stand_alone_draft_model_key_modify": {
    "yuhuili": [
      [
        "^fc\\.",
        "eagle_fc."
      ],
      [
        "embed_tokens.weight",
        null
      ]
    ]
  },
  "modeling.stand_alone_draft_model_path": null,
  "modeling.tokenizer_add_tokens": "dict(unk_token=tokenizer.eos_token)",
  "modeling.tokenizer_init": "",
  "modeling.tokenizer_path": "Qwen/Qwen3-4B-Instruct-2507",
  "training.adam_beta1": 0.9,
  "training.adam_beta2": 0.95,
  "training.average_tokens_across_devices": false,
  "training.bf16": true,
  "training.dataloader_drop_last": true,
  "training.ddp_backend": "gloo",
  "training.ddp_find_unused_parameters": false,
  "training.debug": false,
  "training.deepspeed": null,
  "training.eval_steps": 100,
  "training.eval_strategy": "steps",
  "training.filter_out_shorts": false,
  "training.git_diff": "",
  "training.git_sha1": "2b0b25bb88db8e3ef194615116d36ca22bae15e1",
  "training.gradient_accumulation_steps": 8,
  "training.learning_rate": 3e-05,
  "training.logging_first_step": true,
  "training.logging_steps": 25,
  "training.lr_scheduler_type": "constant_with_warmup",
  "training.max_grad_norm": 0.5,
  "training.max_length": 2048,
  "training.max_steps": -1,
  "training.model_init_ckpt": null,
  "training.num_train_epochs": 10,
  "training.optim": "adamw_torch_fused",
  "training.output_dir": "output/eager-waterfall-110",
  "training.overwrite_output_dir": true,
  "training.per_device_eval_batch_size": 1,
  "training.per_device_train_batch_size": 1,
  "training.project": "eagle4",
  "training.report_to": "wandb",
  "training.resume_from_checkpoint": false,
  "training.resume_wandb_runid": null,
  "training.run_name": "eager-waterfall-110",
  "training.save_steps": 500,
  "training.save_strategy": "steps",
  "training.save_total_limit": 2,
  "training.seed": 0,
  "training.sequential_loading": false,
  "training.tf32": false,
  "training.use_default_num_items_getter": true,
  "training.use_eagle_pipeline": false,
  "training.warmup_steps": 2000,
  "training.world_size": 1
}
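
Two parts of this dump are easy to misread: the keys are flattened into `section.key` form, and some values (`modeling.draft_config_modify`, the `yuhuili` rules in `modeling.stand_alone_draft_model_key_modify`) are strings that get interpreted later rather than literal settings. The sketch below is not the project's actual loader; it only illustrates, under stated assumptions, how such a dump could be regrouped per component, how expressions like `base.intermediate_size // draft_config.num_experts_per_tok` could be resolved against config objects, and how `[pattern, replacement]` rules with a `null` replacement could rename or drop checkpoint keys. `unflatten`, `resolve_expressions`, `remap_state_dict_keys`, and all numeric values are hypothetical stand-ins.

```python
import re
from types import SimpleNamespace


def unflatten(flat: dict) -> dict:
    """Group 'section.key' entries into per-section dicts; keep dot-free keys at the top level."""
    nested: dict = {}
    for dotted_key, value in flat.items():
        if "." in dotted_key:
            section, key = dotted_key.split(".", 1)
            nested.setdefault(section, {})[key] = value
        else:
            nested[dotted_key] = value
    return nested


def resolve_expressions(modify: dict, base, draft_config) -> dict:
    """Evaluate expression strings against base/draft config objects.

    Assumes the expressions come from a trusted, repository-controlled config file.
    """
    scope = {"base": base, "draft_config": draft_config}
    return {key: eval(expr, {}, scope) for key, expr in modify.items()}


def remap_state_dict_keys(state_dict: dict, rules) -> dict:
    """Apply [pattern, replacement] rules to checkpoint keys.

    A None/null replacement drops the matching key; otherwise the pattern is rewritten.
    This is one plausible reading of the 'yuhuili' rules above, not the project's code.
    """
    out = {}
    for key, tensor in state_dict.items():
        drop = False
        for pattern, replacement in rules:
            if replacement is None:
                if re.search(pattern, key):
                    drop = True
                    break
            else:
                key = re.sub(pattern, replacement, key)
        if not drop:
            out[key] = tensor
    return out


if __name__ == "__main__":
    # A few representative entries copied from the dump above.
    flat = {
        "modeling.draft_config_modify": {
            "moe_intermediate_size": "base.intermediate_size // draft_config.num_experts_per_tok",
            "num_key_value_heads": "base.num_key_value_heads",
            "rope_theta": "base.rope_theta",
        },
        "training.learning_rate": 3e-05,
        "device_names": ["NVIDIA H100 80GB HBM3"],
    }
    cfg = unflatten(flat)
    print(sorted(cfg))  # ['device_names', 'modeling', 'training']

    # Toy numbers standing in for the real base and draft Hugging Face configs.
    base = SimpleNamespace(intermediate_size=9728, num_key_value_heads=8, rope_theta=5000000)
    draft = SimpleNamespace(num_experts_per_tok=8)
    print(resolve_expressions(cfg["modeling"]["draft_config_modify"], base, draft))
    # {'moe_intermediate_size': 1216, 'num_key_value_heads': 8, 'rope_theta': 5000000}

    # The 'yuhuili' rules: rename 'fc.*' to 'eagle_fc.*', drop the embedding weight.
    rules = [["^fc\\.", "eagle_fc."], ["embed_tokens.weight", None]]
    dummy_state_dict = {"fc.weight": 0, "embed_tokens.weight": 1, "layers.0.q_proj.weight": 2}
    print(remap_state_dict_keys(dummy_state_dict, rules))
    # {'eagle_fc.weight': 0, 'layers.0.q_proj.weight': 2}
```

The point of the sketch is only that the dump mixes literal hyperparameters (e.g. `training.learning_rate`) with deferred expressions that are resolved against the base model (Qwen/Qwen3-4B-Instruct-2507) and the MoE draft config (Qwen/Qwen3-30B-A3B-Instruct-2507) at model-construction time.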