{
  "model_type": "qwen2vl",
  "quantization": "8-bit",
  "architectures": [
    "Qwen2VLForConditionalGeneration"
  ],
  "torch_dtype": "bfloat16",
  "precision": "8-bit",
  "base_model": "Qwen/Qwen2.5-VL-3B-Instruct"
}
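
The metadata above describes an 8-bit quantization of `Qwen/Qwen2.5-VL-3B-Instruct`, with `bfloat16` as the dtype for non-quantized modules. Below is a minimal loading sketch, assuming `transformers`, `accelerate`, and `bitsandbytes` are installed; note that while the config lists the `Qwen2VLForConditionalGeneration` architecture, recent `transformers` releases expose the Qwen2.5-VL checkpoints through `Qwen2_5_VLForConditionalGeneration`, so the exact class for this repo may differ.

```python
# Sketch only: load the base checkpoint named in the config with 8-bit weights.
import torch
from transformers import (
    AutoProcessor,
    BitsAndBytesConfig,
    Qwen2_5_VLForConditionalGeneration,  # class used by Qwen2.5-VL checkpoints
)

model_id = "Qwen/Qwen2.5-VL-3B-Instruct"  # "base_model" from the config above

# 8-bit weight quantization, mirroring the "quantization"/"precision" fields.
quant_config = BitsAndBytesConfig(load_in_8bit=True)

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_id,
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" for non-quantized layers
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)
```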