import os

# Only expose GPU 3 to this process; set before torch is imported.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import argparse
import time
import datasets
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
MODEL_ID = "Qwen/Qwen3-4B-Instruct-2507"
if __name__ == "__main__":
    # Parse args
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-blocks", "-n", type=int, default=None)
    parser.add_argument("--max-batch-tokens", "-b", type=int, default=None)
    parser.add_argument(
        "--attn", type=str, default="paged_attention|kernels-community/flash-attn", help="Attention implementation"
    )
    parser.add_argument("--samples", type=int, default=500)
    args = parser.parse_args()

    # Prepare model
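    # The chosen attention implementation is passed straight to from_pretrained;
    # paged attention is the backend that continuous batching is built around.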
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        attn_implementation=args.attn,
        dtype=torch.bfloat16,
    )
    model = model.cuda().eval()

    # Prepare tokenizer and dataset
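    # Each GSM8K question is tokenized on its own; generate_batch receives a plain list
    # of input-id lists, so no padding or attention masks are built here.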
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, padding_side="left")
    dataset = datasets.load_dataset("openai/gsm8k", "socratic", split="test")
    dataset = dataset.select(range(args.samples))
    tokenized_datasets = dataset.map(lambda x: tokenizer(x["question"]), batched=True)
    simple_batch_inputs = [item["input_ids"] for item in tokenized_datasets]

    # Prepare generation config
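    # num_blocks and max_batch_tokens are continuous-batching knobs: roughly, the number
    # of paged KV-cache blocks to allocate and the cap on tokens scheduled per forward
    # pass; when left as None the implementation falls back to its own defaults.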
    generation_config = GenerationConfig(
        max_new_tokens=512,
        use_cuda_graph=False,  # Not supported for simple version
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
        do_sample=False,
        num_blocks=args.num_blocks,
        max_batch_tokens=args.max_batch_tokens,
    )

    # Warmup iterations
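    # A short warmup on at most 5 prompts absorbs one-time costs (kernel loading, cache
    # allocation) so they do not skew the timed run below.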
    _ = model.generate_batch(
        inputs=simple_batch_inputs[: min(5, args.samples)],
        generation_config=generation_config,
        slice_inputs=True,
    )

    # Actual batch generation
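    # generate_batch runs continuous batching: requests are scheduled dynamically, so a
    # finished sequence frees its slot for a pending one instead of holding up the batch.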
print("--- Running CB Generation Example ---")
start_time = time.time()
batch_outputs = model.generate_batch(
inputs=simple_batch_inputs,
generation_config=generation_config,
slice_inputs=True,
)
end_time = time.time()
print("Done with batch generation.")

    # Decode outputs
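    # batch_outputs maps request ids to results; decoding is only used to sanity-check
    # the generated ids, and the first generated token is left out of the token count.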
    token_count = 0
    for request in batch_outputs:
        input_text = tokenizer.decode(batch_outputs[request].prompt_ids, skip_special_tokens=True)
        # Try to decode the output
        try:
            output_text = tokenizer.decode(batch_outputs[request].generated_tokens, skip_special_tokens=True)
            token_count += len(batch_outputs[request].generated_tokens[1:])
        except Exception as e:
            print(f"Decoding failed for request {request}: {e}")
            continue

    # Compute and print stats
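    # Throughput counts generated tokens only (prompts excluded), and the warmup run is
    # not part of the timing.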
    gen_time = end_time - start_time
    tok_per_sec = token_count / gen_time
    print("-" * 20)
    print("--- Finished CB Generation Example ---\n")
    print(f"CB generation took: {gen_time:.2f} seconds for {token_count} tokens. {tok_per_sec:.2f}tok/s")

# python load.py -n 512 -b 32 --samples 100
# CB generation took: 101.44 seconds for 49197 tokens. 484.97tok/s