Update app.py
app.py
CHANGED
@@ -444,7 +444,7 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
         #only works via the special OpenAI interface...
         headers, payload = process_image(file, prompt)
         response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
-        result = response.json()
+        result = response.json()['content']
         history = history + [((file,), None),(prompt, result)]
 
         chatbot[-1][1] = result
@@ -483,13 +483,8 @@ def generate_text (prompt, file, chatbot, history, rag_option, model_option, ope
             raise gr.Error("Prompt ist erforderlich.")
 
         #append the prompt to the history and turn it into a single text
-
-
-        else:
-            prompt_neu = process_image(file, prompt)
-            history_text_und_prompt = prompt_neu #generate_prompt_with_history_openai(prompt_neu, history)
-            print("historyundtextundfile.................")
-            print(history_text_und_prompt)
+        history_text_und_prompt = generate_prompt_with_history_openai(prompt, history)
+
         #format history for HuggingFace models
         #history_text_und_prompt = generate_prompt_with_history_hf(prompt, history)
         #format history for openAi