TA committed
Commit 5bef454
1 Parent(s): ce7d6d7

Update app.py

Files changed (1):
  app.py +6 -7
app.py CHANGED

@@ -1,8 +1,9 @@
 import gradio as gr
 import os
 import requests
+import json  # Importing json module
 
-SYSTEM_PROMPT = "As an LLM, your job is to generate detailed prompts that start with generate the image, for image generation models based on user input. Be descriptive and specific, but also make sure your prompts are clear and concise."
+SYSTEM_PROMPT = "As an LLM, your job is to generate detailed prompts that start with generate the image, for image generation models based on user input. Be descriptive and specific, but also make sure your prompts are clear and concise."
 TITLE = "Image Prompter"
 EXAMPLE_INPUT = "A Man Riding A Horse in Space"
 zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
@@ -13,11 +14,11 @@ def build_input_prompt(message, chatbot, system_prompt):
     """
     Constructs the input prompt string from the chatbot interactions and the current message.
     """
-    input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
+    input_prompt = "\n" + system_prompt + "</s>\n\n"
     for interaction in chatbot:
-        input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"
+        input_prompt = input_prompt + str(interaction[0]) + "</s>\n\n" + str(interaction[1]) + "\n</s>\n\n"
 
-    input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
+    input_prompt = input_prompt + str(message) + "</s>\n"
     return input_prompt
 
 
@@ -57,8 +58,6 @@ def predict_beta(message, chatbot=[], system_prompt=""):
 
 def test_preview_chatbot(message, history):
     response = predict_beta(message, history, SYSTEM_PROMPT)
-    text_start = response.rfind("<|assistant|>", ) + len("<|assistant|>")
-    response = response[text_start:]
     return response
 
 
@@ -71,4 +70,4 @@ chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_messa
 textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)
 
 demo = gr.ChatInterface(test_preview_chatbot, chatbot=chatbot_preview, textbox=textbox_preview)
-demo.launch()
+demo.launch()
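
Reading the hunks together: the commit drops the zephyr-style role markers (<|system|>, <|user|>, <|assistant|>) from build_input_prompt and, consistently, removes the two lines in test_preview_chatbot that sliced the model output after the last <|assistant|> marker; import json is added, although none of the visible hunks use it. Below is a minimal sketch, not part of the commit, that puts the pre- and post-commit prompt builders side by side so the change in the generated prompt string is easy to see; the _old/_new function names are illustrative only.

def build_input_prompt_old(message, chatbot, system_prompt):
    # Pre-commit builder: each turn is wrapped in zephyr chat-template markers.
    input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
    for user_turn, assistant_turn in chatbot:
        input_prompt += str(user_turn) + "</s>\n<|assistant|>\n" + str(assistant_turn) + "\n</s>\n<|user|>\n"
    return input_prompt + str(message) + "</s>\n<|assistant|>"


def build_input_prompt_new(message, chatbot, system_prompt):
    # Post-commit builder: same turn structure, but without the role markers.
    input_prompt = "\n" + system_prompt + "</s>\n\n"
    for user_turn, assistant_turn in chatbot:
        input_prompt += str(user_turn) + "</s>\n\n" + str(assistant_turn) + "\n</s>\n\n"
    return input_prompt + str(message) + "</s>\n"


if __name__ == "__main__":
    system = "As an LLM, your job is to generate detailed prompts ..."
    message = "A Man Riding A Horse in Space"
    history = []  # no prior (user, assistant) turns
    print(repr(build_input_prompt_old(message, history, system)))
    print(repr(build_input_prompt_new(message, history, system)))

The old builder ends the prompt with "</s>\n<|assistant|>", which is why test_preview_chatbot previously took only the text after the last <|assistant|> in the response; with the markers gone, the new builder ends with a bare "</s>\n" and test_preview_chatbot returns the response unchanged.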