Update app.py
app.py
CHANGED
@@ -97,6 +97,14 @@ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffus
 ###############################################
 os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
 
+################################################
+#OpenAI access: create the client and the assistant once.
+################################################
+#create these centrally, exactly once!
+client = OpenAI()
+assistant = client.beta.assistants.create(name="File Analysator",instructions=template, model="gpt-4-1106-preview",)
+thread = client.beta.threads.create()
+
 
 #################################################
 #################################################
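This hunk hoists the OpenAI client, the assistant, and the thread to module scope, so they are created once at startup and reused by every handler instead of being rebuilt per request. A minimal standalone sketch of that pattern; the instruction text assigned to template is a placeholder for the Space's own template, and OPENAI_API_KEY is assumed to be set in the environment:

import os
from openai import OpenAI

# Placeholder for the Space's own instruction template (assumption).
template = "You are an assistant that analyzes uploaded files."

# Created once at import time; all request handlers reuse these objects.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
assistant = client.beta.assistants.create(
    name="File Analysator",
    instructions=template,
    model="gpt-4-1106-preview",
)
thread = client.beta.threads.create()

Creating the assistant once also avoids registering a new assistant object on the OpenAI account for every uploaded file.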
@@ -135,8 +143,6 @@ def add_text2(chatbot, prompt):
 #after the upload, the extra window with the image inside should be shown
 def file_anzeigen(file):
     ext = analyze_file(file)
-    print("ext...............................")
-    print(ext)
     if (ext == "png" or ext == "PNG" or ext == "jpg" or ext == "jpeg" or ext == "JPG" or ext == "JPEG"):
         return gr.Image(width=47, visible=True, interactive = False, height=47, min_width=47, show_label=False, show_share_button=False, show_download_button=False, scale = 0.5), file, file
     else:
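This hunk only drops two debug prints; the long chained extension comparison stays. If one wanted to tighten it later, a case-insensitive membership test covers the same spellings; ist_bild is a hypothetical helper, not part of this commit:

def ist_bild(ext: str) -> bool:
    # Case-insensitive check instead of listing png/PNG/jpg/JPG/jpeg/JPEG one by one.
    return ext.lower() in {"png", "jpg", "jpeg"}

assert ist_bild("PNG") and ist_bild("jpeg") and not ist_bild("pdf")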
@@ -202,11 +208,8 @@ def process_image(image_path, prompt):
 
 ##################################################
 #OpenAI assistant for analyzing uploaded files
-def create_assistant(prompt, file):
-    #
-    client = OpenAI()
-    assistant = client.beta.assistants.create(name="File Analysator",instructions=template, model="gpt-4-1106-preview",)
-    thread = client.beta.threads.create()
+def create_assistant(prompt, file):
+    #add the new file to the assistant
     file_neu = client.files.create(file=open(file,"rb",),purpose="assistants",)
     # Update Assistant
     assistant = client.beta.assistants.update(assistant.id,tools=[{"type": "code_interpreter"}, {"type": "retrieval"}],file_ids=[file_neu.id],)
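With the client, assistant, and thread now global, create_assistant only has to upload the new file and attach it to the existing assistant. A sketch of that remaining per-call work, wrapped in a hypothetical helper that also closes the file handle (the commit itself keeps the bare open(...)); the API calls mirror the beta Assistants calls visible in the hunk:

def datei_an_assistant_haengen(client, assistant_id, pfad):
    # Upload the file and attach it to the already-created assistant.
    with open(pfad, "rb") as f:
        file_neu = client.files.create(file=f, purpose="assistants")
    return client.beta.assistants.update(
        assistant_id,
        tools=[{"type": "code_interpreter"}, {"type": "retrieval"}],
        file_ids=[file_neu.id],
    )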
@@ -266,7 +269,6 @@ def generate_bild(prompt, chatbot, model_option_zeichnen='HuggingFace', temperat
         chatbot[-1][1]= "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
     else:
         print("Bild Erzeugung DallE..............................")
-        client = OpenAI()
         #'url' would also work as the response format; n = number of images generated
         response = client.images.generate(model="dall-e-3",prompt=prompt,size="1024x1024",quality="standard",n=1, response_format='b64_json')
         #chatbot[-1][1]= "<img src='data:image/png;base64,{0}'/>".format(base64.b64encode(image_64).decode('utf-8'))
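The DALL-E branch likewise reuses the shared client. With response_format='b64_json' the image comes back base64-encoded, so it can be embedded straight into the chat HTML as the surrounding lines do, or decoded to a file. A standalone sketch, assuming OPENAI_API_KEY is set; the prompt and output filename are arbitrary:

import base64
from openai import OpenAI

client = OpenAI()
response = client.images.generate(
    model="dall-e-3",
    prompt="a watercolor of a lighthouse",  # any prompt
    size="1024x1024",
    quality="standard",
    n=1,
    response_format="b64_json",
)
b64 = response.data[0].b64_json
# Either embed as <img src='data:image/png;base64,...'> like the chatbot, or save:
with open("bild.png", "wb") as f:
    f.write(base64.b64decode(b64))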
@@ -294,6 +296,9 @@ def generate_text_zu_bild(file, prompt, k, rag_option, chatbot):
         #prompt = generate_prompt_with_history_openai(neu_text_mit_chunks, history)
         #as a plain prompt:
         prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, chatbot)
+        splittet = True
+    else:
+        splittet = False
 
     headers, payload = process_image(file, prompt_neu)
     response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
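The new splittet flag records whether the incoming text was split into chunks (the if branch that precedes these lines) or passed through whole (else). A reduced sketch of that bookkeeping; the helper name and the length threshold are hypothetical, only the flag idea comes from the commit:

def split_wenn_noetig(text: str, max_len: int = 4000):
    # Split long input into chunks and remember whether splitting happened.
    if len(text) > max_len:
        chunks = [text[i:i + max_len] for i in range(0, len(text), max_len)]
        return chunks, True          # splittet = True
    return [text], False             # splittet = False

chunks, splittet = split_wenn_noetig("x" * 9000)
print(len(chunks), splittet)  # -> 3 True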
@@ -323,6 +328,9 @@ def generate_text_zu_doc(file, prompt, k, rag_option, chatbot):
         #prompt = generate_prompt_with_history_openai(neu_text_mit_chunks, history)
         #as a plain prompt:
         prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, chatbot)
+        splittet = True
+    else:
+        splittet = False
 
     result = create_assistant(prompt_neu, file)
     return result
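generate_text_zu_doc gets the same flag and then hands the built prompt and the file to create_assistant, which works against the shared assistant and thread. The diff does not show how the answer is retrieved; presumably a run is started and polled roughly like the following sketch, which is an assumption rather than code from app.py:

import time

def run_und_warte(client, thread_id, assistant_id, timeout=120):
    # Assumption: start a run on the shared thread and poll until it finishes,
    # then return the assistant's latest message as the result text.
    run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
    start = time.time()
    while run.status not in ("completed", "failed", "cancelled", "expired"):
        if time.time() - start > timeout:
            raise TimeoutError("assistant run did not finish in time")
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
    messages = client.beta.threads.messages.list(thread_id=thread_id)
    return messages.data[0].content[0].text.value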