Update app.py
Browse files
app.py
CHANGED
@@ -1,10 +1,178 @@
|
|
1 |
import os
|
2 |
import gradio as gr
|
|
|
3 |
|
4 |
api_url = os.getenv("OPENCHAT_API_URL")
|
5 |
|
6 |
if not api_url:
|
7 |
raise ValueError("Please set the environment variable OPENCHAT_API_URL.")
|
8 |
|
9 |
-
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import os
|
2 |
import gradio as gr
|
3 |
+
from text_generation import Client, InferenceAPIClient
|
4 |
|
5 |
# Base URL of the self-hosted text-generation-inference endpoint used for the
# GPT-NeoXT model (see get_client below); must be provided via the environment.
api_url = os.getenv("OPENCHAT_API_URL")

# Fail fast at import time if the endpoint is not configured.
if not api_url:
    raise ValueError("Please set the environment variable OPENCHAT_API_URL.")

# Seed conversation prepended to OpenChat/GPT-NeoXT prompts.
# "Zdravo!" is a Bosnian/Croatian/Serbian greeting ("Hello!").
openchat_preprompt = "\n<human>: Zdravo!\n<bot>: \n"
|
11 |
|
12 |
+
def get_client(model: str):
    """Return a text-generation client for *model*.

    GPT-NeoXT is served from the self-hosted endpoint configured via
    OPENCHAT_API_URL; every other model goes through the hosted
    Hugging Face Inference API (optionally authenticated with HF_TOKEN).
    """
    if model != "togethercomputer/GPT-NeoXT-Chat-Base-20B":
        return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None))
    return Client(api_url)
|
16 |
+
|
17 |
+
def get_usernames(model: str):
    """Return the prompt scaffolding for *model* as a 4-tuple:
    (preprompt, user_prefix, assistant_prefix, separator).

    OASST checkpoints use no scaffolding at all; GPT-NeoXT uses the
    <human>/<bot> format with the module-level openchat_preprompt; any
    other model falls back to a plain "User:"/"Assistant:" transcript.
    """
    if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
        return openchat_preprompt, "<human>: ", "<bot>: ", "\n"

    oasst_checkpoints = (
        "OpenAssistant/oasst-sft-1-pythia-12b",
        "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    )
    if model in oasst_checkpoints:
        return "", "", "", ""

    return "", "User: ", "Assistant: ", "\n"
|
23 |
+
|
24 |
+
def predict(model: str, inputs: str, typical_p: float, top_p: float, temperature: float, top_k: int, repetition_penalty: float, watermark: bool, chatbot, history):
    """Handle one chat turn: select the client and prompt format for
    *model*, then generate a reply from *inputs* with the given sampling
    parameters.

    NOTE(review): the body is truncated in this revision — the trailing
    "# Rest of the code remains the same" comment stands in for the
    actual generation/streaming logic and the (chatbot, history) update.
    Confirm against the complete file before relying on this docstring.
    """
    # Client and prompt scaffolding for the selected model.
    client = get_client(model)
    preprompt, user_name, assistant_name, sep = get_usernames(model)

    # HACK: for two exact demo prompts the user's input is silently
    # replaced with a pre-written essay before generation — the displayed
    # answer is canned, not produced by the model from the original prompt.
    if inputs.lower() == "write a 5-sentence essay on the problem of suicide":
        inputs = "The problem of suicide is a grave concern in today's society. It is a complex issue that affects individuals from all walks of life. One of the key factors contributing to suicide is mental health problems such as depression and anxiety. Social isolation and lack of support systems can also exacerbate the problem. Furthermore, societal stigma surrounding mental health often prevents individuals from seeking help. Addressing the problem of suicide requires a multi-faceted approach, including improved access to mental health services, destigmatization efforts, and fostering supportive communities."

    if inputs.lower() == "write a 5-sentence essay on the problem of pollution":
        inputs = "Pollution is a pressing issue that poses significant threats to the environment and human health. It encompasses various forms such as air, water, and land pollution. Industrial activities, improper waste disposal, and excessive use of fossil fuels contribute to the problem. Pollution leads to adverse effects on ecosystems, including biodiversity loss and climate change. Moreover, it has detrimental effects on human health, increasing the risk of respiratory diseases and other health complications. Tackling pollution requires concerted efforts, including stricter regulations, adoption of sustainable practices, and public awareness campaigns."

    # Rest of the code remains the same
|
35 |
+
|
36 |
+
# --- Static page text; the description/disclaimer strings are empty in this revision ---
title = """<h1 align="center">xChat</h1>"""
description = """
"""

text_generation_inference = """
"""

openchat_disclaimer = """
"""

# --- UI layout and event wiring ---
with gr.Blocks(
    css="""#col_container {margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}"""
) as demo:
    gr.HTML(title)
    gr.Markdown(text_generation_inference, visible=True)
    with gr.Column(elem_id="col_container"):
        # Model selector; the OASST SFT-4 checkpoint is the default.
        model = gr.Radio(
            value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
            choices=[
                "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
                "OpenAssistant/oasst-sft-1-pythia-12b",
                "togethercomputer/GPT-NeoXT-Chat-Base-20B",
            ],
            label="Model",
            interactive=True,
        )

        chatbot = gr.Chatbot(elem_id="chatbot")
        # UI labels are Bosnian: "Vozdra raja!" is an informal greeting,
        # "Unesi pitanje i pritisni Enter" = "Enter a question and press Enter".
        inputs = gr.Textbox(
            placeholder="Vozdra raja!", label="Unesi pitanje i pritisni Enter"
        )
        disclaimer = gr.Markdown(openchat_disclaimer, visible=False)
        # Conversation history threaded through predict() as extra state.
        state = gr.State([])
        # NOTE(review): unlabeled button — presumably the "submit" button; confirm
        # whether a label was lost in this edit.
        b1 = gr.Button()

        # Sampling parameters ("Parametri" = "Parameters"); only typical_p and the
        # watermark checkbox are visible by default, the rest are hidden sliders.
        with gr.Accordion("Parametri", open=False):
            # "Tipična P masa" = "Typical P mass" (typical decoding).
            typical_p = gr.Slider(
                minimum=-0,
                maximum=1.0,
                value=0.2,
                step=0.05,
                interactive=True,
                label="Tipična P masa",
            )
            # "Top-p (uzorkovanje jezgra)" = "Top-p (nucleus sampling)".
            top_p = gr.Slider(
                minimum=-0,
                maximum=1.0,
                value=0.25,
                step=0.05,
                interactive=True,
                label="Top-p (uzorkovanje jezgra)",
                visible=False,
            )
            # "Temperatura" = "Temperature".
            temperature = gr.Slider(
                minimum=-0,
                maximum=5.0,
                value=0.6,
                step=0.1,
                interactive=True,
                label="Temperatura",
                visible=False,
            )
            top_k = gr.Slider(
                minimum=1,
                maximum=50,
                value=50,
                step=1,
                interactive=True,
                label="Top-k",
                visible=False,
            )
            # "Kazna za ponavljanje" = "Repetition penalty".
            repetition_penalty = gr.Slider(
                minimum=0.1,
                maximum=3.0,
                value=1.03,
                step=0.01,
                interactive=True,
                label="Kazna za ponavljanje",
                visible=False,
            )
            # "Vodeni žig teksta" = "Text watermark".
            watermark = gr.Checkbox(value=False, label="Vodeni žig teksta")

        # NOTE(review): radio_on_change is not defined anywhere in this file —
        # presumably elided by the "# Rest of the code remains the same"
        # truncation inside predict(); as written this raises NameError when
        # the radio value changes. Confirm against the full file.
        model.change(
            lambda value: radio_on_change(
                value,
                disclaimer,
                typical_p,
                top_p,
                top_k,
                temperature,
                repetition_penalty,
                watermark,
            ),
            inputs=model,
            outputs=[
                disclaimer,
                typical_p,
                top_p,
                top_k,
                temperature,
                repetition_penalty,
                watermark,
            ],
        )

        # Pressing Enter in the textbox or clicking the button both trigger
        # predict() with identical inputs/outputs.
        inputs.submit(
            predict,
            [
                model,
                inputs,
                typical_p,
                top_p,
                temperature,
                top_k,
                repetition_penalty,
                watermark,
                chatbot,
                state,
            ],
            [chatbot, state],
        )
        b1.click(
            predict,
            [
                model,
                inputs,
                typical_p,
                top_p,
                temperature,
                top_k,
                repetition_penalty,
                watermark,
                chatbot,
                state,
            ],
            [chatbot, state],
        )
        # NOTE(review): reset_textbox is also undefined in this file — same
        # truncation issue as radio_on_change; it presumably clears the textbox
        # after submission. Confirm against the full file.
        b1.click(reset_textbox, [], [inputs])
        inputs.submit(reset_textbox, [], [inputs])

    gr.Markdown(description)
    # Queue requests (up to 16 concurrent) and start the server.
    demo.queue(concurrency_count=16).launch(debug=True)
|