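# InstructVLA training configuration (stage1), resuming from the checkpoint saved at step 180000 (epoch 3)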
action_dim: 7
data_root_dir: /mnt/petrelfs/yangshuai1/rep/InstructVLA_official/cache
debug: false
disable_instruction: false
fix_system1: false
future_action_window_size: 15
hf_token: .hf_token
image_aug: true
is_resume: true
load_all_data_for_training: true
num_of_meta_query: 64
past_action_window_size: 0
pretrained_checkpoint: /mnt/petrelfs/yangshuai1/rep/InstructVLA_official/outputs/code_reimp/sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_reimplement_transformer_4_50_single_node_bs128_2--image_augstage1/checkpoints/step-180000-epoch-03-loss=0.1214.pt
repeated_diffusion_steps: 4
resume_epoch: 3
resume_step: 180000
run_id: sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_reimplement_transformer_4_50_single_node_bs128_2--image_augstage1
run_id_note: null
run_root_dir: outputs/code_reimp
save_interval: 20000
seed: 42
stage: stage1
trackers:
- jsonl
- wandb
use_mm: false
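# Nested config for the VLA model and its optimization settings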
vla:
  action_tokenizer: extra_action_tokenizer
  base_vlm: ckpt/Eagle2-2B
  data_mix: bridge_rt_1
  enable_gradient_checkpointing: true
  enable_mixed_precision_training: true
  epochs: 100
  expected_world_size: 8
  freeze_llm_backbone: false
  freeze_vision_backbone: false
  global_batch_size: 128
  learning_rate: 5.0e-05
  lr_scheduler_type: constant
  max_grad_norm: 1.0
  max_steps: null
  per_device_batch_size: 16
  reduce_in_full_precision: true
  shuffle_buffer_size: 250000
  train_strategy: fsdp-full-shard
  type: prism-qwen25-dinosiglip-224px+0_5b
  unfreeze_last_llm_layer: false
  vla_id: prism-qwen25-dinosiglip-224px+0_5b
  warmup_ratio: 0.0
  weight_decay: 0.0
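# Weights & Biases logging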
wandb_entity: shuaiyang2003
wandb_project: dual_sys_code_clean
with_pointing: false