TA committed
Commit f1e4e34
1 Parent(s): 534795e

Update app.py

Files changed (1)
  1. app.py +8 -11
app.py CHANGED
@@ -9,7 +9,6 @@ import gradio as gr
 import os
 import requests
 
-# Include the HTML code for displaying the image
 html_temp = """
 <div style="position: absolute; top: 0; right: 0;">
 <img src='https://huggingface.co/spaces/NerdN/open-gpt-Image-Prompter/blob/main/_45a03b4d-ea0f-4b81-873d-ff6b10461d52.jpg' alt='Your Image' style='width:100px;height:100px;'>
@@ -25,13 +24,14 @@ def build_input_prompt(message, chatbot, system_prompt):
     """
     Constructs the input prompt string from the chatbot interactions and the current message.
     """
-    input_prompt = "\n" + system_prompt + "</s>\n\n"
+    input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
     for interaction in chatbot:
-        input_prompt = input_prompt + str(interaction[0]) + "</s>\n\n" + str(interaction[1]) + "\n</s>\n\n"
+        input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"
 
-    input_prompt = input_prompt + str(message) + "</s>\n"
+    input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
     return input_prompt
 
+
 def post_request_beta(payload):
     """
     Sends a POST request to the predefined Zephyr-7b-Beta URL and returns the JSON response.
@@ -40,6 +40,7 @@ def post_request_beta(payload):
     response.raise_for_status()  # Will raise an HTTPError if the HTTP request returned an unsuccessful status code
     return response.json()
 
+
 def predict_beta(message, chatbot=[], system_prompt=""):
     input_prompt = build_input_prompt(message, chatbot, system_prompt)
     data = {
@@ -67,12 +68,10 @@ def predict_beta(message, chatbot=[], system_prompt=""):
 
 def test_preview_chatbot(message, history):
     response = predict_beta(message, history, SYSTEM_PROMPT)
-    text_start = response.rfind("", ) + len("")
+    text_start = response.rfind("<|assistant|>", ) + len("<|assistant|>")
     response = response[text_start:]
+    return response
 
-    # Include the image HTML code in the response
-    response_with_image = f"{html_temp}\n{response}"
-    return response_with_image
 
 welcome_preview_message = f"""
 Expand your imagination and broaden your horizons with LLM. Welcome to **{TITLE}**!:\nThis is a chatbot that can generate detailed prompts for image generation models based on simple and short user input.\nSay something like:
@@ -80,10 +79,8 @@ Expand your imagination and broaden your horizons with LLM. Welcome to **{TITLE}
 "{EXAMPLE_INPUT}"
 """
 
-# Use the modified test_preview_chatbot function
 chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_message)])
 textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)
 
-# Use the modified test_preview_chatbot function
 demo = gr.ChatInterface(test_preview_chatbot, chatbot=chatbot_preview, textbox=textbox_preview)
-demo.launch(share=True)
+demo.launch(share=True)
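
For context, the rewritten build_input_prompt emits a Zephyr-7B-beta style chat template, with <|system|>, <|user|>, and <|assistant|> turn markers separated by </s>. The following is a minimal standalone sketch of the format the updated function produces; the system prompt and conversation strings are hypothetical examples, not values from the repo.

    # Standalone sketch of the prompt format that the updated build_input_prompt produces.
    # The system prompt and conversation strings below are hypothetical examples.

    def build_input_prompt(message, chatbot, system_prompt):
        # Zephyr-style template: <|system|> / <|user|> / <|assistant|> turns separated by </s>
        input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
        for user_turn, assistant_turn in chatbot:
            input_prompt += str(user_turn) + "</s>\n<|assistant|>\n" + str(assistant_turn) + "\n</s>\n<|user|>\n"
        input_prompt += str(message) + "</s>\n<|assistant|>"
        return input_prompt

    history = [("a red fox in the snow", "A photorealistic red fox standing in fresh snow, golden hour lighting")]
    print(build_input_prompt("a castle in the clouds", history, "You turn short ideas into detailed image prompts."))

    # Expected output:
    # <|system|>
    # You turn short ideas into detailed image prompts.</s>
    # <|user|>
    # a red fox in the snow</s>
    # <|assistant|>
    # A photorealistic red fox standing in fresh snow, golden hour lighting
    # </s>
    # <|user|>
    # a castle in the clouds</s>
    # <|assistant|>

The matching change in test_preview_chatbot keeps only the text after the last <|assistant|> marker in the returned string, so the HTML image header is no longer prepended to the model's reply.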