# Measure and compare VRAM with and without MXFP4 dequantize
import gc
import torch
from transformers import AutoModelForCausalLM, Mxfp4Config
MODEL_ID = "openai/gpt-oss-20b"
DEVICE = "cuda:0"
def get_used_gb():
    # torch.cuda.mem_get_info() returns (free, total) in bytes for the current device.
    free, total = torch.cuda.mem_get_info()
    return (total - free) / (1024**3), total / (1024**3)

def clear_memory():
    # Drop throwaway globals, then let Python and the CUDA caching allocator release memory.
    del_vars = [k for k in list(globals().keys()) if k.startswith("_tmp_")]
    for k in del_vars:
        globals().pop(k, None)
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.synchronize()

assert torch.cuda.is_available(), "CUDA is not available."
# --- Dequantized (heavier) ---
clear_memory()
before_deq_used, total_gb = get_used_gb()
qconf = Mxfp4Config(dequantize=True)
model_deq = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map=DEVICE,
    quantization_config=qconf,
).eval()
after_deq_used, _ = get_used_gb()
# --- Quantized (lighter) ---
del model_deq
clear_memory()
before_q_used, _ = get_used_gb()
model_q = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map=DEVICE,
    # No quantization_config here: the MXFP4 checkpoint is loaded in its quantized form.
).eval()
after_q_used, _ = get_used_gb()
print(f"[dequantized] used before: {before_deq_used:.2f} GB, after: {after_deq_used:.2f} GB / total {total_gb:.2f} GB")
print(f"[quantized ] used before: {before_q_used:.2f} GB, after: {after_q_used:.2f} GB / total {total_gb:.2f} GB")
# Make these available for plotting
mx_results = {
    "total_gb": total_gb,
    "after_dequantized_gb": after_deq_used,
    "after_quantized_gb": after_q_used,
}
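
# A minimal plotting sketch (hypothetical, not part of the measurement run above):
# assumes matplotlib is installed and simply visualizes the mx_results dict.
import matplotlib.pyplot as plt

_labels = ["MXFP4 dequantized", "MXFP4 quantized"]
_values = [mx_results["after_dequantized_gb"], mx_results["after_quantized_gb"]]
fig, ax = plt.subplots(figsize=(5, 4))
ax.bar(_labels, _values)
ax.axhline(mx_results["total_gb"], linestyle="--", color="gray", label="GPU total VRAM")
ax.set_ylabel("VRAM used (GB)")
ax.set_title("gpt-oss-20b VRAM footprint")
ax.legend()
plt.tight_layout()
plt.show()
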
# Outputs:
# [dequantized] used before: 0.41 GB, after: 43.18 GB / total 79.25 GB
# [quantized ] used before: 0.49 GB, after: 13.37 GB / total 79.25 GB
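#
# Net model footprint (after - before): 43.18 - 0.41 = ~42.8 GB dequantized vs
# 13.37 - 0.49 = ~12.9 GB quantized, i.e. keeping MXFP4 uses roughly 3.3x less VRAM.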