"""Gradio app: prompt an OpenAI-compatible chat API for matplotlib code, run it, show the figure."""
import io
import json
import re

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import requests
from PIL import Image


def get_image_data(fig):
    """Render a matplotlib figure to an in-memory PIL image.

    Args:
        fig: A matplotlib Figure instance.

    Returns:
        A PIL.Image.Image holding the figure rendered as PNG.
    """
    buf = io.BytesIO()
    fig.savefig(buf, format="PNG")
    buf.seek(0)
    img = Image.open(buf)
    # Force-decode now so the image no longer depends on the buffer, then
    # close the figure — matplotlib keeps every open figure alive, which
    # leaks memory across requests otherwise.
    img.load()
    plt.close(fig)
    return img


def execute_code(code):
    """Execute generated Python code and return an image of the figure it builds.

    SECURITY WARNING: this exec()s arbitrary LLM-generated code with no
    sandboxing whatsoever. Only run this app in a trusted, isolated
    environment (container/VM) — a malicious or confused model response
    can do anything the server process can.

    Args:
        code: Python source expected to assign a matplotlib Figure to ``fig``.

    Returns:
        A PIL image of the generated figure.

    Raises:
        ValueError: if the executed code does not define ``fig``.
    """
    namespace = {}
    exec(code, namespace)  # deliberate — see security warning above
    fig = namespace.get("fig")
    # Explicit identity check: object truthiness is not a reliable signal here.
    if fig is None:
        raise ValueError("The code did not generate a matplotlib figure named 'fig'")
    return get_image_data(fig)


def _extract_code(text):
    """Pull the first code block out of an LLM response.

    Prefers a triple-backtick fenced block (optionally tagged with a
    language), falls back to an inline single-backtick span, and finally
    returns the text unchanged when no fence is found.
    """
    triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
    if triple_match:
        return triple_match.group(1).strip()
    single_match = re.search(r'`(.+?)`', text, re.DOTALL)
    if single_match:
        return single_match.group(1).strip()
    return text


def gpt_inference(base_url, model, openai_key, prompt):
    """Ask a chat-completions API for plotting code, execute it, return the image.

    Args:
        base_url: Root of an OpenAI-compatible API (e.g. ``https://api.openai.com``).
        model: Chat model identifier.
        openai_key: Bearer token for the API.
        prompt: Natural-language description of the desired plot.

    Returns:
        A PIL image of the generated matplotlib figure.

    Raises:
        gr.Error: if the API request fails (surfaced in the Gradio UI).
        ValueError: if the generated code does not produce a ``fig``.
    """
    newprompt = f'Write Python code that does the following: \n\n{prompt}\n\nNote, the code is going to be executed in a Jupyter Python kernel. The code should create a matplotlib figure and assign it to a variable named "fig". The "fig" variable will be used for further processing.\n\nLast instruction, and this is the most important, just return code. No other outputs, as your full response will directly be executed in the kernel.'
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": newprompt}],
        "temperature": 0.7,
    }
    headers = {"Authorization": f"Bearer {openai_key}"}
    response = requests.post(
        f"{base_url}/v1/chat/completions",
        headers=headers,
        json=payload,  # requests serializes and sets Content-Type: application/json
        timeout=120,   # don't hang the UI forever on a stalled API
    )
    if response.status_code != 200:
        # BUG FIX: the original returned a (str, int) tuple, which the Image
        # output component cannot render. Raise so Gradio shows the error.
        raise gr.Error(f"API request failed ({response.status_code}): {response.text}")
    code = _extract_code(response.json()["choices"][0]["message"]["content"])
    return execute_code(code)


iface = gr.Interface(
    fn=gpt_inference,
    # Labels live on the components; gr.Interface has no `input_labels`
    # parameter (the original kwarg raised TypeError at startup). Top-level
    # component classes replace the removed gr.inputs/gr.outputs namespaces.
    inputs=[
        gr.Textbox(label="Base URL"),
        # BUG FIX: real OpenAI model IDs are hyphenated; the originals
        # ("gpt3.5-turbo", "gpt4") are rejected by the API.
        gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], label="Model"),
        gr.Textbox(label="OpenAI Key"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(type="pil", label="Generated Figure"),
)

if __name__ == "__main__":
    # Guarded so importing this module (e.g. for testing) doesn't start a server.
    iface.launch()