# app.py — Hugging Face Space (Gradio) front-end for an OpenAI File-Search
# assistant that answers questions about the activity reports of the
# Luxembourg Ministry of Culture (1987–2022).
from openai import AsyncAssistantEventHandler
from openai import AsyncOpenAI
import gradio as gr
import asyncio
import os
# API credentials and assistant configuration, all read from environment
# variables (set as secrets in the Space settings).
client = AsyncOpenAI(
api_key=os.getenv("OPENAI_API_KEY")
)
# Pre-configured OpenAI assistant (File-Search) to run the queries against.
assistantID = os.getenv("OPENAI_ASSISTANT_ID")
# Shared secret required for live searches (example mode needs no password).
mypassword = os.getenv("SPACE_PASSWORD")
# UI text (French): page title, two-column header, and footer credit.
mytitle = "<h1 align='center'>Quelles étaient les initiatives culturelles dans le Grand-Duché de Luxembourg dans le passé?</h1>"
mydescription="""
<h3 align='center'>Explorez les rapports d'activités du Ministère de la Culture luxembourgeois de 1987 à 2022.</h3>
<table width=100%>
<tr>
<th width=50% bgcolor="Moccasin">Posez vos questions en français ou dans une autre langue :</th>
<th bgcolor="Khaki">Réponse de l'Assistant File-Search OpenAI :</th>
</tr>
</table>
"""
myarticle ="""
<h3>Contexte :</h3>
<p>Cet espace de démonstration sur HuggingFace, qui utilise l'API du <a href="https://platform.openai.com/docs/models">modèle OpenAI</a> gpt-4o-mini-2024-07-18, a été réalisé par <a href="https://github.com/mbarnig">Marco Barnig</a>.</p>
"""
# Gradio input widgets: mode selector, password box, free-text query, and
# canned example questions.  NOTE: gradio_chat_interface derives the cached
# answer's filename from characters [-6:-2] of the selected example string,
# so the tail of each example (e.g. " 2003 ?") must stay stable.
myinput = [
    gr.Radio(["Recherche", "Exemples"], label = "Les exemples fonctionnent sans mot de passe.", value="Exemples"),
    gr.Textbox(lines=1, label="Entrez le mot de passe secret !", scale=1),
    gr.Textbox(lines=3, label="Que voulez-vous savoir ?", scale=7),
    gr.Radio(["Quelles initiatives ont été prises en 2003 ?",
              "Qui est le musicien Tristano ?",
              "Wat fir grouss Evenementer hunn an de Jore stattfonnt, wou Lëtzebuerg d'Kulturhaaptstad war ?",
              # grammar fix: "qui on eu lieu" -> "qui ont eu lieu"
              # (only a mid-string change, so the [-6:-2] tail is unchanged)
              "Enumérez les événements clés qui ont eu lieu de 2013 à 2017."],
             value="Wat fir grouss Evenementer hunn an de Jore stattfonnt, wou Lëtzebuerg d'Kulturhaaptstad war ?",
             label="Exemples")
]
class EventHandler(AsyncAssistantEventHandler):
    """Collect streamed assistant events into a single growing text buffer.

    The consumer (generate_response) reads ``response_text`` after every
    streamed event and yields it, so the UI sees the answer build up.
    """

    def __init__(self) -> None:
        super().__init__()
        # Everything the assistant has produced so far, as one string.
        self.response_text = ""

    async def on_text_created(self, text) -> None:
        self.response_text += str(text)

    async def on_text_delta(self, delta, snapshot):
        # Each delta carries the newly generated fragment.
        self.response_text += str(delta.value)

    async def on_text_done(self, text):
        pass

    async def on_tool_call_created(self, tool_call):
        self.response_text += f"\n[Tool Call]: {str(tool_call.type)}\n"

    async def on_tool_call_delta(self, delta, snapshot):
        # Announce a tool call the first time we see this snapshot id.
        if snapshot.id != getattr(self, "current_tool_call", None):
            self.current_tool_call = snapshot.id
            self.response_text += f"\n[Tool Call Delta]: {str(delta.type)}\n"
        # Only code-interpreter deltas carry extra payload worth echoing.
        if delta.type != 'code_interpreter':
            return
        interpreter = delta.code_interpreter
        if interpreter.input:
            self.response_text += str(interpreter.input)
        if interpreter.outputs:
            self.response_text += "\n\n[Output]:\n"
            self.response_text += "".join(
                f"\n{str(item.logs)}"
                for item in interpreter.outputs
                if item.type == "logs"
            )

    async def on_tool_call_done(self, text):
        pass
# Module-level session state shared by all requests: the configured assistant
# id, plus the OpenAI thread id (None until initialize_thread() creates one
# lazily on the first real search).
session_data = {"assistant_id": assistantID, "thread_id": None}
async def initialize_thread():
    """Create a fresh OpenAI conversation thread and remember its id.

    The id is stored in the module-level ``session_data`` dict so that
    subsequent messages and runs attach to the same conversation.
    """
    new_thread = await client.beta.threads.create()
    session_data["thread_id"] = new_thread.id
async def generate_response(user_input):
    """Stream the assistant's answer to *user_input*.

    Posts the question onto the current thread, starts a streamed run, and
    yields the accumulated response text after every streamed event so the
    caller can render incremental updates.
    """
    # Attach the user's question to the existing conversation thread.
    await client.beta.threads.messages.create(
        thread_id=session_data["thread_id"],
        role="user",
        content=user_input,
    )
    # Run the assistant and surface events through our accumulating handler.
    handler = EventHandler()
    async with client.beta.threads.runs.stream(
        thread_id=session_data["thread_id"],
        assistant_id=session_data["assistant_id"],
        instructions="Please assist the user with their query.",
        event_handler=handler,
    ) as run_stream:
        async for _event in run_stream:
            # Small pause so the UI update feels like streaming.
            await asyncio.sleep(0.1)
            yield handler.response_text
# Gradio interface function (async generator).
async def gradio_chat_interface(mode, password, user_input, example):
    """Yield either a canned example answer or a live assistant response.

    Args:
        mode: "Exemples" serves a pre-rendered markdown file from examples/;
            anything else ("Recherche") queries the OpenAI assistant and is
            password-protected.
        password: secret compared against the SPACE_PASSWORD env variable.
        user_input: the user's free-text question.
        example: the selected example question; characters [-6:-2] of the
            string name the markdown file holding the canned answer.

    Yields:
        Markdown strings — one-shot for examples/errors, incremental for a
        live search.
    """
    if mode == "Exemples":
        filename = example[-6:-2] + ".md"
        # Context manager guarantees the handle is closed (the original
        # leaked it); the canned answers are French/Luxembourgish text, so
        # decode explicitly as UTF-8 instead of relying on the locale.
        with open("examples/" + filename, "r", encoding="utf-8") as file:
            yield file.read()
    elif password == "":
        yield "Veuillez entrer le mot de passe pour faire des recherches !"
    elif password != mypassword:
        # typo fixed in the user-facing message: "entre" -> "entrer"
        yield "Veuillez entrer le mot de passe correct pour faire des recherches !"
    elif user_input == "":
        yield "Le champ de recherche est vide ! Veuillez entrer votre question."
    else:
        # NOTE: the original called asyncio.get_running_loop() with a
        # new_event_loop() fallback here, but inside an async function a
        # loop is always running, so that branch was dead code — removed.
        # Lazily create the conversation thread on the first real query.
        if session_data["thread_id"] is None:
            await initialize_thread()
        # Relay the assistant's incremental answers to the UI.
        async for response in generate_response(user_input):
            yield response
# Set up the Gradio interface: inputs defined in myinput, markdown output
# (supports the streamed incremental answers), flagging disabled, and no
# live re-run on every keystroke.
interface = gr.Interface(
    fn=gradio_chat_interface,
    inputs=myinput,
    outputs="markdown",
    title=mytitle,
    description=mydescription,
    article=myarticle,
    live=False,
    flagging_mode="never"
)
# Launch the Gradio app.  (A stray trailing "|" scrape artifact on this line
# was a syntax error and has been removed.)
interface.launch()