Spaces:
Runtime error
Runtime error
TA
committed on
Commit
·
fb4f3f2
1
Parent(s):
ef43705
Update app.py
Browse files
app.py
CHANGED
@@ -63,8 +63,74 @@ html_temp = """
|
|
63 |
</html>
|
64 |
""".format(TITLE, enticing_image_path, EXAMPLE_INPUT)
|
65 |
|
66 |
-
|
67 |
|
68 |
-
|
|
|
69 |
|
70 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
</html>
|
64 |
""".format(TITLE, enticing_image_path, EXAMPLE_INPUT)
|
65 |
|
66 |
+
# Hugging Face Inference API endpoint for the Zephyr-7b-Beta chat model.
zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"

# API token read from the environment; may be None when unset — the header
# below is still built (value would be "Bearer None"). TODO confirm intended.
HF_TOKEN = os.getenv("HF_TOKEN")
# Authorization header attached to every inference request.
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
|
70 |
|
71 |
+
def build_input_prompt(message, chatbot, system_prompt):
    """
    Construct the Zephyr chat-template prompt from prior interactions
    and the current user message.

    Args:
        message: Current user message (stringified into the prompt).
        chatbot: Iterable of (user, assistant) pairs from earlier turns.
        system_prompt: System instruction placed at the top of the prompt.

    Returns:
        The full prompt string, ending with an open "<|assistant|>" tag so
        the model continues as the assistant.
    """
    # Collect pieces and join once instead of repeated `+=` concatenation,
    # which is quadratic for long chat histories.
    parts = ["<|system|>\n" + system_prompt + "</s>\n<|user|>\n"]
    for interaction in chatbot:
        # Index (not unpack) to preserve the original's tolerance of
        # sequences longer than two elements.
        parts.append(
            str(interaction[0]) + "</s>\n<|assistant|>\n"
            + str(interaction[1]) + "\n</s>\n<|user|>\n"
        )
    parts.append(str(message) + "</s>\n<|assistant|>")
    return "".join(parts)
|
81 |
+
|
82 |
+
|
83 |
+
def post_request_beta(payload):
    """
    POST `payload` to the Zephyr-7b-Beta inference endpoint and return
    the parsed JSON response.

    Raises:
        requests.HTTPError: when the endpoint answers with a 4xx/5xx status.
    """
    resp = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
    # Surface HTTP failures immediately rather than parsing an error body.
    resp.raise_for_status()
    return resp.json()
|
90 |
+
|
91 |
+
|
92 |
+
def predict_beta(message, chatbot=None, system_prompt=""):
    """
    Generate a chat reply for `message` via the Zephyr-7b-Beta endpoint.

    Args:
        message: Current user message.
        chatbot: Optional list of (user, assistant) history pairs.
        system_prompt: System instruction for the model.

    Returns:
        The model's generated text (includes the echoed prompt; callers
        strip it — see test_preview_chatbot).

    Raises:
        gr.Error: on an API error payload, an unexpected response shape,
            an HTTP failure, or an undecodable JSON body.
    """
    # Fix: the original used a mutable default argument (`chatbot=[]`);
    # a None sentinel preserves the same call-site behavior safely.
    if chatbot is None:
        chatbot = []
    input_prompt = build_input_prompt(message, chatbot, system_prompt)
    data = {
        "inputs": input_prompt
    }

    try:
        response_data = post_request_beta(data)
        # The inference API returns a list of result objects; only the
        # first is used.
        json_obj = response_data[0]

        if 'generated_text' in json_obj and len(json_obj['generated_text']) > 0:
            bot_message = json_obj['generated_text']
            return bot_message
        elif 'error' in json_obj:
            raise gr.Error(json_obj['error'] + ' Please refresh and try again with smaller input prompt')
        else:
            warning_msg = f"Unexpected response: {json_obj}"
            raise gr.Error(warning_msg)
    except requests.HTTPError as e:
        error_msg = f"Request failed with status code {e.response.status_code}"
        raise gr.Error(error_msg)
    except json.JSONDecodeError as e:
        error_msg = f"Failed to decode response as JSON: {str(e)}"
        raise gr.Error(error_msg)
|
116 |
+
|
117 |
+
def test_preview_chatbot(message, history):
    """
    Run one chat turn through predict_beta and return only the text after
    the last "<|assistant|>" tag (the newly generated reply).
    """
    raw = predict_beta(message, history, SYSTEM_PROMPT)
    marker = "<|assistant|>"
    # rfind returns -1 when the marker is absent; -1 + len(marker) then
    # slices from a fixed offset, matching the original behavior.
    return raw[raw.rfind(marker) + len(marker):]
|
122 |
+
|
123 |
+
|
124 |
+
# Greeting shown as the chatbot's opening message; TITLE and EXAMPLE_INPUT
# are module-level constants defined earlier in the file.
welcome_preview_message = f"""
Expand your imagination and broaden your horizons with LLM. Welcome to **{TITLE}**!:\nThis is a chatbot that can generate detailed prompts for image generation models based on simple and short user input.\nSay something like:

"{EXAMPLE_INPUT}"
"""

# Pre-populate the chat pane with the welcome message and the textbox with
# the example input so users can submit immediately.
chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_message)])
textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)

demo = gr.ChatInterface(test_preview_chatbot, chatbot=chatbot_preview, textbox=textbox_preview)

# NOTE(review): share=True publishes a temporary public gradio.live URL —
# confirm this exposure is intended for this Space.
demo.launch(share=True)
|