docs: Updated the `Transformers` example to use intended temp=0.15
PR #17 by casinca — opened. File changed: README.md
```diff
@@ -487,6 +487,8 @@ input_ids = tokenized["input_ids"].to(device="cuda")
 output = model.generate(
     input_ids,
     max_new_tokens=200,
+    do_sample=True,
+    temperature=0.15,
 )[0]

 decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
```