Spaces: Running on Zero
fixes
app.py CHANGED
@@ -15,8 +15,7 @@ LEAN4_DEFAULT_HEADER = (
 title = "# ππ»ββοΈWelcome to πTonic's πππ¨π»βπ¬Moonshot Math"

 description = """
-
-Trained with Reinforcement Learning, 72B parameters, and a 32K token context window.\
+**πππ¨π»βπ¬AI-MO/Kimina-Prover-Distill-8B is a theorem proving model developed by Project Numina and Kimi teams, focusing on competition style problem solving capabilities in Lean 4. It is a distillation of AI-MO/Kimina-Prover-72B, a model trained via large scale reinforcement learning. It achieves 77.86% accuracy with Pass@32 on MiniF2F-test.\
 - [Kimina-Prover-Preview GitHub](https://github.com/MoonshotAI/Kimina-Prover-Preview)\
 - [Hugging Face: AI-MO/Kimina-Prover-72B](https://huggingface.co/AI-MO/Kimina-Prover-72B)\
 - [Kimina Prover blog](https://huggingface.co/blog/AI-MO/kimina-prover)\

@@ -195,22 +194,22 @@ def main():
         gr.Markdown(joinus)
         with gr.Row():
             with gr.Column():
-                user_input = gr.Textbox(label="Your message or formal statement", lines=4)
-                informal = gr.Textbox(value=additional_info_prompt, label="Optional informal prefix")
-                max_tokens = gr.Slider(minimum=150, maximum=4096, value=2500, label="Max Tokens")
+                user_input = gr.Textbox(label="π¨π»βπ»Your message or formal statement", lines=4)
+                informal = gr.Textbox(value=additional_info_prompt, label="ππ»ββοΈOptional informal prefix")
+                max_tokens = gr.Slider(minimum=150, maximum=4096, value=2500, label="πͺMax Tokens")
                 submit = gr.Button("Send")
-                gr.Examples(
-                    examples=examples,
-                    inputs=[user_input, informal, max_tokens],
-                    label="Example Problems"
-                )
             with gr.Column():
-                chat = gr.Chatbot(label="
+                chat = gr.Chatbot(label="πππ¨π»βπ¬Kimina Prover 8B")
                 with gr.Accordion("Complete Output", open=False):
                     json_out = gr.JSON(label="Full Output")
                     code_out = gr.Code(label="Extracted Lean4 Code", language="python")
         state = gr.State([])
         submit.click(chat_handler, [user_input, informal, max_tokens, state], [chat, json_out, code_out, state])
+        gr.Examples(
+            examples=examples,
+            inputs=[user_input, informal, max_tokens],
+            label="π€¦π»ββοΈExample Problems"
+        )
         gr.Markdown(citation)
         demo.launch()
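For reference, below is a minimal, self-contained sketch of the Blocks layout that results from this change, assuming the standard Gradio Blocks API; the chat_handler stub, the example data, and the plain-text labels (emoji omitted) are illustrative placeholders, not the app's real implementations.

import gradio as gr

# Illustrative stand-ins for objects defined elsewhere in app.py.
additional_info_prompt = ""
examples = [["Prove that a + 0 = a for natural numbers a.", "", 2500]]

def chat_handler(user_input, informal, max_tokens, state):
    # Placeholder: the real app queries the Kimina Prover model here.
    state = state + [(user_input, "(model reply)")]
    full_output = {"prompt": user_input, "informal_prefix": informal}
    lean_code = "-- extracted Lean 4 code would appear here"
    return state, full_output, lean_code, state

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Your message or formal statement", lines=4)
            informal = gr.Textbox(value=additional_info_prompt, label="Optional informal prefix")
            max_tokens = gr.Slider(minimum=150, maximum=4096, value=2500, label="Max Tokens")
            submit = gr.Button("Send")
        with gr.Column():
            chat = gr.Chatbot(label="Kimina Prover 8B")  # tuple-style chat history
            with gr.Accordion("Complete Output", open=False):
                json_out = gr.JSON(label="Full Output")
                code_out = gr.Code(label="Extracted Lean4 Code", language="python")
    state = gr.State([])
    submit.click(chat_handler, [user_input, informal, max_tokens, state],
                 [chat, json_out, code_out, state])
    # Examples are registered at the end of the layout, mirroring the new ordering in the diff.
    gr.Examples(examples=examples, inputs=[user_input, informal, max_tokens],
                label="Example Problems")

if __name__ == "__main__":
    demo.launch()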