Update app.py
app.py (CHANGED)
@@ -6,17 +6,6 @@ For more information on `huggingface_hub` Inference API support, please check th
 """
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-# Predefined list of interview questions
-interview_questions = [
-    "Can you tell me about yourself?",
-    "Why are you interested in this position?",
-    "What are your strengths and weaknesses?",
-    "Can you describe a challenging work situation and how you handled it?",
-    "Where do you see yourself in five years?",
-]
-
-# Keep track of the current question index
-question_index = 0
 
 def respond(
     message,
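The removed block kept interview state in module-level variables. In a Gradio Space, module globals are shared by every visitor to the app, so a single `question_index` cannot track separate conversations. A hypothetical per-session alternative (not part of this commit) would derive the index from the `history` that ChatInterface already passes in:

interview_questions = [
    "Can you tell me about yourself?",
    "Why are you interested in this position?",
]

def next_question(history):
    # One (user, assistant) pair per answered question, so the length of
    # the per-session history doubles as the question index.
    asked = len(history)
    if asked < len(interview_questions):
        return interview_questions[asked]
    return "Thank you for answering all the questions. Do you have any questions for me?"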
@@ -26,8 +15,6 @@ def respond(
     temperature,
     top_p,
 ):
-    global question_index
-
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -36,11 +23,10 @@ def respond(
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    # Add the user's latest message
     messages.append({"role": "user", "content": message})
 
-    # Generate the assistant's response
     response = ""
+
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
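The surviving loop is the stock huggingface_hub streaming pattern: each chunk carries a token fragment in `choices[0].delta.content`, and yielding the growing `response` lets ChatInterface render it incrementally. A minimal standalone sketch of the same pattern (the `stream=True` argument sits on lines this diff elides, so it is assumed here, and the prompt is illustrative):

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

response = ""
for chunk in client.chat_completion(
    [{"role": "user", "content": "Hello!"}],
    max_tokens=64,
    stream=True,
):
    token = chunk.choices[0].delta.content
    if token:  # the final chunk's delta content can be None
        response += token
        print(token, end="", flush=True)

Note the guard: the app's own unguarded `response += token` can raise a TypeError if a chunk arrives with `content` set to None.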
@@ -49,20 +35,10 @@
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
+
         response += token
         yield response
 
-    # Prepare the next question if there are more questions left
-    if question_index < len(interview_questions):
-        next_question = interview_questions[question_index]
-        question_index += 1
-    else:
-        next_question = "Thank you for answering all the questions. Do you have any questions for me?"
-
-    # Append the response to the history and add the next question
-    history.append((message, response))
-    yield response + "\n\n" + next_question
-
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
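The removed tail also had a subtle bug worth noting: the streaming loop reuses the name `message` (`for message in client.chat_completion(...)`), so by the time `history.append((message, response))` ran, `message` held the last stream chunk rather than the user's text. A self-contained illustration of the shadowing (hypothetical, not from the repo):

message = "user input"
for message in ["chunk-1", "chunk-2"]:
    pass
print(message)  # prints "chunk-2": the loop variable replaced the original

Capturing the user text in a separate variable before the loop would have avoided this, had the feature been kept.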
@@ -82,5 +58,6 @@ demo = gr.ChatInterface(
     ],
 )
 
+
 if __name__ == "__main__":
     demo.launch()
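After this commit, app.py is essentially back to the stock Gradio ChatInterface template that Spaces generates. A condensed sketch of the resulting file; the system-message default and slider definitions are assumed from that stock template, since the diff elides them:

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the chat transcript in the OpenAI-style messages format.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # Stream the reply, yielding the partial text so the UI updates live.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()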