Spaces:
Running
Commit
·
79357a0
1
Parent(s):
63ee5d3
Do not print out reasoning
Browse files
app.py
CHANGED
@@ -105,9 +105,14 @@ if st.button("Generate"):
 105      with torch.no_grad():
 106          outputs = model.generate( **inputs,
 107              # max_new_tokens=100,
 108 -            max_new_tokens=
 109 -
 110 -
 111
 112      # Back to still
 113      # gif_html.markdown(

@@ -123,4 +128,5 @@ if st.button("Generate"):
 123
 124      result = tokenizer.decode(outputs[0], skip_special_tokens=True)
 125      st.markdown("### ✨ Output:")
 126 -
|
|
|
|
 105      with torch.no_grad():
 106          outputs = model.generate( **inputs,
 107              # max_new_tokens=100,
 108 +            max_new_tokens=256,
 109 +            do_sample=False,
 110 +            temperature=1.0,
 111 +            top_p=0.95,
 112 +            top_k=50,
 113 +            num_return_sequences=1,
 114 +            eos_token_id=tokenizer.eos_token_id
 115 +        )
 116
 117      # Back to still
 118      # gif_html.markdown(

 128
 129      result = tokenizer.decode(outputs[0], skip_special_tokens=True)
 130      st.markdown("### ✨ Output:")
 131 +    final_thought = result.split("<think>\n")[1:]
 132 +    st.write(final_thought)
|