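"""HuggingChat Tool Executor.

A small Gradio Space that forwards a user message and system prompt to the
HuggingFaceH4/zephyr-7b-beta model through huggingface_hub's InferenceClient,
optionally attaching tool (function) definitions supplied as a JSON array.
"""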
import concurrent.futures
import json
import tempfile
import traceback

import gradio as gr
from huggingface_hub import InferenceClient
class HuggingChatExecutor:
    def __init__(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        # Single worker so chat calls run one at a time and can be timed out.
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        self.client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
    def _execute_chat(self, message, system_prompt, functions=None):
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": message},
        ]
        try:
            if functions:
                # Parse the functions string as a JSON array of function specs
                # and wrap each one in the OpenAI-style tool format that
                # chat_completion's `tools` parameter expects.
                functions_list = json.loads(functions)
                tools = [{"type": "function", "function": f} for f in functions_list]
                response = self.client.chat_completion(
                    messages,
                    tools=tools,
                    stream=False,
                )
            else:
                response = self.client.chat_completion(
                    messages,
                    stream=False,
                )
            # Format the output; default=str handles the message object
            # returned by chat_completion, which is not JSON-serializable.
            output = {
                "message": message,
                "system_prompt": system_prompt,
                "functions": functions if functions else "No functions provided",
                "response": response.choices[0].message,
            }
            return json.dumps(output, indent=2, default=str)
        except Exception as e:
            error_trace = traceback.format_exc()
            return f"Error executing chat:\n{str(e)}\n\nTraceback:\n{error_trace}"
    def execute(self, message, system_prompt, functions):
        future = self.executor.submit(self._execute_chat, message, system_prompt, functions)
        try:
            # Give the remote inference call up to 30 seconds to complete.
            return future.result(timeout=30)
        except concurrent.futures.TimeoutError:
            return "Error: Timed out waiting for chat response"
        except Exception as e:
            return f"Error: {str(e)}"
    def __del__(self):
        self.executor.shutdown(wait=False)
        # Let TemporaryDirectory remove its own directory rather than calling
        # shutil.rmtree on it, which would make its own finalizer fail.
        self.temp_dir.cleanup()
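# Illustrative direct usage (assumes network access to the Inference API and,
# depending on the deployment, an HF token in the environment):
#
#   executor = HuggingChatExecutor()
#   print(executor.execute(
#       "What's the weather in San Francisco, CA?",
#       "You are a helpful AI assistant.",
#       "",  # no tool definitions
#   ))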
def wrapper_execute(message, system_prompt, functions):
    # A fresh executor per request keeps each call's client and thread pool
    # isolated, at the cost of re-creating them on every click.
    executor = HuggingChatExecutor()
    return executor.execute(message, system_prompt, functions)
def create_interface():
    with gr.Blocks() as demo:
        gr.Markdown("# HuggingChat Tool Executor")
        gr.Markdown("Execute chat completions with function calling capabilities")

        gr.Markdown("### Message")
        message_input = gr.Textbox(
            label="User Message",
            placeholder="Enter your message here...",
            lines=3,
        )

        gr.Markdown("### System Prompt")
        system_input = gr.Textbox(
            label="System Prompt",
            value="You are a helpful AI assistant.",
            lines=2,
        )

        gr.Markdown("### Functions")
        functions_input = gr.Textbox(
            label="Functions (JSON array)",
            placeholder='''[
    {
        "name": "get_weather",
        "description": "Get the weather in a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA"
                }
            },
            "required": ["location"]
        }
    }
]''',
            lines=10,
        )

        gr.Markdown("### Output")
        output_text = gr.Textbox(label="Response", lines=20)

        run_button = gr.Button("Run")
        run_button.click(
            wrapper_execute,
            inputs=[message_input, system_input, functions_input],
            outputs=output_text,
        )
    return demo
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
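    # When running locally, a temporary public URL can be requested with
    # demo.launch(share=True); inside a Space, the default launch() suffices.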