{ "quantization": "int8", "method": "torchao", "original_model": "black-forest-labs/FLUX.1-schnell", "conversion_date": "2025-08-09T09:57:45.366707", "requires_bitsandbytes": false, "device": "cuda", "serialization_format": "pytorch", "notes": "TorchAO quantized model saved in PyTorch format for storage compatibility" }