{
  "adapter_path": "models/lora/mistral_lora_telegram_20251111_114741",
  "batch_size": 2,
  "config": null,
  "data": "data/phase2/mlx_datasets/telegram",
  "fine_tune_type": "lora",
  "grad_accumulation_steps": 1,
  "grad_checkpoint": false,
  "iters": 600,
  "learning_rate": 1e-05,
  "lora_parameters": {
    "rank": 8,
    "dropout": 0.0,
    "scale": 20.0
  },
  "lr_schedule": null,
  "mask_prompt": false,
  "max_seq_length": 2048,
  "model": "models/mistral-7b-instruct-v0.3-mlx",
  "num_layers": 16,
  "optimizer": "adam",
  "optimizer_config": {
    "adam": {},
    "adamw": {},
    "muon": {},
    "sgd": {},
    "adafactor": {}
  },
  "project_name": null,
  "report_to": null,
  "resume_adapter_file": null,
  "save_every": 100,
  "seed": 42,
  "steps_per_eval": 100,
  "steps_per_report": 10,
  "test": true,
  "test_batches": 50,
  "train": true,
  "val_batches": 25
}
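For reference, a minimal sketch of how a config like this maps onto an `mlx_lm.lora` training run. This assumes the `mlx-lm` package is installed and that the dump above is saved as `adapter_config.json` (a hypothetical filename for this example); the CLI flags shown mirror the JSON keys with underscores swapped for hyphens, and only a representative subset of the keys is forwarded.

```python
# Sketch: rebuild an mlx_lm.lora command line from the JSON config above.
# Assumptions: mlx-lm is installed, and the config was saved as
# "adapter_config.json" (hypothetical name chosen for this example).
import json
import subprocess
import sys

with open("adapter_config.json") as f:
    cfg = json.load(f)

cmd = [
    sys.executable, "-m", "mlx_lm.lora",
    "--model", cfg["model"],
    "--data", cfg["data"],
    "--fine-tune-type", cfg["fine_tune_type"],
    "--batch-size", str(cfg["batch_size"]),
    "--iters", str(cfg["iters"]),
    "--learning-rate", str(cfg["learning_rate"]),
    "--num-layers", str(cfg["num_layers"]),
    "--adapter-path", cfg["adapter_path"],
    "--seed", str(cfg["seed"]),
]
if cfg.get("train"):
    cmd.append("--train")  # run the training loop
if cfg.get("test"):
    cmd.append("--test")   # evaluate on the test split after training

subprocess.run(cmd, check=True)
```

Keys that have no direct CLI flag here (e.g. the nested `lora_parameters`) are typically supplied instead via a YAML file passed to `--config`; the `null` entries (`config`, `lr_schedule`, `project_name`, `report_to`, `resume_adapter_file`) are defaults and need no flags at all.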