docs: Updated the `Transformers` example to use intended temp=0.15 (#23)
opened by casinca

File changed: README.md
|
@@ -502,6 +502,8 @@ input_ids = tokenized["input_ids"].to(device="cuda")

Before (lines 502–507):

```python
output = model.generate(
    input_ids,
    max_new_tokens=200,
)[0]

decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
```
|
|
|
After (lines 502–509, added lines marked `+`):

```diff
 output = model.generate(
     input_ids,
     max_new_tokens=200,
+    do_sample=True,
+    temperature=0.15,
 )[0]

 decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
```