Update app.py
app.py
CHANGED
@@ -7,8 +7,10 @@ tokenizer = AutoTokenizer.from_pretrained("MohamedTalaat91/gpt2-tokenizer")
 
 # Updated generate function
 def generate(messages, state):
-
+    # The last message content is a dictionary with "role" and "content"
+    input_text = messages[-1] # Extracting the content of the last message
 
+    # Tokenize the input
     inputs = tokenizer(input_text, return_tensors="pt")
 
     # Generate text based on the input
@@ -21,9 +23,14 @@ def generate(messages, state):
         temperature=0.7 # Controls randomness in sampling
     )
 
+    # Decode the generated text
     generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
 
-
+    # Prepare the response in the correct format
+    bot_message = {"role": "bot", "content": generated_text}
+    messages.append(bot_message) # Add the bot's message to the conversation
+
+    return messages, state # Return the updated messages and state
 
 
 # Launch Gradio interface with the updated function
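For readers who want to run the result, here is a minimal sketch of what app.py could look like after this commit. Only the lines shown in the two hunks above come from the diff; the imports, the model checkpoint ("gpt2" is a placeholder), the generation arguments other than temperature, and the Gradio wiring at the bottom are assumptions. Two lines are also adjusted so the sketch executes cleanly: the text is read from the last message's "content" field before tokenizing (the commit passes the whole messages[-1] dict to the tokenizer), and the reply role is "assistant" rather than "bot", since Gradio's messages-format Chatbot only accepts "user" and "assistant" roles.

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("MohamedTalaat91/gpt2-tokenizer")
model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder checkpoint, not visible in the diff


# Updated generate function
def generate(messages, state):
    # The last message is a dictionary with "role" and "content";
    # the tokenizer needs plain text, so read the "content" field.
    input_text = messages[-1]["content"]

    # Tokenize the input
    inputs = tokenizer(input_text, return_tensors="pt")

    # Generate text based on the input (only temperature comes from the diff;
    # the other arguments are assumed)
    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.7,  # Controls randomness in sampling
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated text
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

    # Prepare the response in the correct format
    bot_message = {"role": "assistant", "content": generated_text}
    messages.append(bot_message)  # Add the bot's message to the conversation

    return messages, state  # Return the updated messages and state


# Launch Gradio interface with the updated function
# (wiring assumed; the diff only shows the comment above)
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    state = gr.State(None)
    box = gr.Textbox(placeholder="Type a message and press Enter")

    def add_user_message(text, history, st):
        # Append the user's turn, clear the textbox, pass state through
        return "", history + [{"role": "user", "content": text}], st

    box.submit(
        add_user_message, [box, chatbot, state], [box, chatbot, state]
    ).then(generate, [chatbot, state], [chatbot, state])

demo.launch()

In this sketch, state is threaded through unchanged; it is kept only because the generate signature in the commit accepts and returns it alongside the message list.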