# llm-math-solver / app.py
# -*- coding: utf-8 -*-
import gradio as gr
import os
import re
import json
import subprocess
from openai import OpenAI
from retry import retry
from random import choices
from datetime import datetime
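
# Route the OpenAI client to a self-hosted, OpenAI-compatible endpoint;
# the API key value is only a placeholder.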
os.environ["OPENAI_BASE_URL"] = "http://117.50.185.39:50080/v1"
os.environ["OPENAI_API_KEY"] = "0"
client = OpenAI()
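
# Chinese lead-in phrases placed before the captured code output,
# e.g. "Running the code above, the output is: ".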
execution_desc = ["运行以上代码,输出会是: ",
                  "现在将上面的代码复制到Python环境中运行,运行结果为:",
                  "执行上述Python代码,运行结果将是:",
                  "上面的Python代码执行结果为:",
                  "运行上述代码,我们可以得到题目要求的答案。输出结果将是:"]

@retry(exceptions=Exception, tries=3, delay=2)
def question_answer(query):
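    # Append every query/answer record to a local JSON-lines log.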
    g = open("collect.json", "a", encoding="utf-8")
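    # System prompt (in Chinese): act as an expert math solver, explain the
    # reasoning in detail, add Python code where helpful, wrap numeric answers
    # in \boxed{} and end with a conclusion starting with "因此" ("therefore").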
    messages = [{"role": "system", "content": "你是一个数学解题大师,请解决以下数学题,务必详细说明解题思路,并在必要时提供Python代码来支持你的推理。答案中的数值应使用\\boxed{}包围,最后的答案以“因此”开头并直接给出结论,不要添加任何多余的内容。"}]
    messages.append({"role": "user", "content": f"题目:{query}"})
    result = client.chat.completions.create(messages=messages,
                                            model="gpt-3.5-turbo",
                                            temperature=0.2,
                                            stream=True)
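    # Accumulate the streamed completion into a single reply string.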
    reply_message = ""
    for chunk in result:
        if hasattr(chunk, "choices") and chunk.choices[0].delta.content:
            reply_message += chunk.choices[0].delta.content
    # find python code and execute the code
    if '```python' in reply_message:
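        # Keep only the text up to the last complete ``` fence, dropping any trailing partial output.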
        reply_message = '```'.join(reply_message.split('```')[:-1]).replace('```python', '\n```python') + '```'
        messages.append({"role": "assistant", "content": reply_message})
        python_code_string = re.findall(r'```python\n(.*?)\n```', reply_message, re.S)[0]
        python_file_path = 'temp.py'
        with open(python_file_path, 'w') as f:
            f.write(python_code_string)
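        # Run the generated script in a subprocess with a 10-second timeout.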
        python_code_run = subprocess.run(['python3', python_file_path], stdout=subprocess.PIPE, timeout=10)
        if python_code_run.returncode:
            print("生成的Python代码无法运行!")  # the generated Python code failed to run
            raise RuntimeError("生成的Python代码无法运行!")
        python_code_execution = python_code_run.stdout.decode('utf-8')
        os.remove(python_file_path)
if "``````" in python_code_execution:
raise ValueError("执行Python代码结果为空!")
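        # Pick a random lead-in phrase, attach the captured output, and feed it back to the model.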
        code_reply_str = choices(execution_desc, k=1)[0]
        code_reply = f"\n{code_reply_str}```{python_code_execution.strip()}```\n"
        reply_message += code_reply
        # yield reply_message
        messages.append({"role": "user", "content": code_reply})
        result = client.chat.completions.create(messages=messages,
                                                model="gpt-3.5-turbo",
                                                temperature=0.2,
                                                stream=True)
        for chunk in result:
            if hasattr(chunk, "choices") and chunk.choices[0].delta.content:
                reply_message += chunk.choices[0].delta.content
            # yield reply_message
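    # Print for debugging and append the full exchange to collect.json.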
    print(reply_message)
    g.write(json.dumps({"query": query,
                        "answer": reply_message,
                        "time": datetime.now().strftime('%Y-%m-%d %H:%M:%S %f')
                        }, ensure_ascii=False) + "\n")
    g.close()
    return reply_message
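
# Gradio UI: a textbox for the problem statement ("题目") and a Markdown pane for the worked solution.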
demo = gr.Interface(
    fn=question_answer,
    inputs=gr.Textbox(lines=3, placeholder="题目", label="数学题目"),
    outputs=gr.Markdown(),
)
demo.launch()