OnAnOrange committed
Commit 712318a · verified · 1 Parent(s): 96ddee5

Update README.md

Files changed (1)
  1. README.md +1 -5
README.md CHANGED
@@ -28,10 +28,6 @@ pip install torch transformers accelerate
  ```
 
  ## Quick Start
-
- > [!NOTE]
- > We recommend setting `enable_thinking=False` when using the model to ensure stable behavior and reproducible results.
-
  ```python
  import math
  import copy
@@ -235,7 +231,7 @@ prompts = [
  ],
  ]
 
- encoded = [tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=True, enable_thinking=False) for m in prompts]
+ encoded = [tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=True) for m in prompts]
  prompt_lens = [len(e) for e in encoded]
  max_len = max(prompt_lens)
  pad_id = tokenizer.pad_token_id
 
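For context, here is a minimal sketch of how the updated `apply_chat_template` call sits in the Quick Start batching code after this change. The checkpoint id, prompt contents, and the padding lines beyond what the diff shows are illustrative assumptions, not part of the README.

```python
from transformers import AutoTokenizer

# Hypothetical checkpoint id; substitute the actual model repo.
tokenizer = AutoTokenizer.from_pretrained("your-org/your-model")

# Illustrative chat-format prompts; the README's real prompt list differs.
prompts = [
    [{"role": "user", "content": "Hello, who are you?"}],
    [{"role": "user", "content": "Write a haiku about the sea."}],
]

# After this commit, enable_thinking is no longer passed to apply_chat_template.
encoded = [
    tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=True)
    for m in prompts
]

# Left-pad each prompt to the longest one so the batch can be stacked (assumed
# continuation of the README's prompt_lens / max_len / pad_id variables).
prompt_lens = [len(e) for e in encoded]
max_len = max(prompt_lens)
pad_id = tokenizer.pad_token_id
input_ids = [[pad_id] * (max_len - len(e)) + e for e in encoded]
attention_mask = [[0] * (max_len - len(e)) + [1] * len(e) for e in encoded]
```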