Spaces:
Sleeping
Sleeping
skylersterling
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -19,6 +19,7 @@ def generate_text(prompt, temperature, top_p):
|
|
19 |
input_tokens = input_tokens.to('cpu')
|
20 |
|
21 |
generated_text = prompt # Start with the initial prompt
|
|
|
22 |
|
23 |
for _ in range(80): # Adjust the range to control the number of tokens generated
|
24 |
with torch.no_grad():
|
@@ -39,7 +40,7 @@ def generate_text(prompt, temperature, top_p):
|
|
39 |
generated_text += decoded_token # Append the new token to the generated text
|
40 |
if decoded_token == "#": # Stop if the end of sequence token is generated
|
41 |
break
|
42 |
-
yield generated_text # Yield the
|
43 |
|
44 |
# Create a Gradio interface with a text input, sliders for temperature and top_p, and a text output
|
45 |
interface = gr.Interface(
|
@@ -53,4 +54,4 @@ interface = gr.Interface(
|
|
53 |
live=False
|
54 |
)
|
55 |
|
56 |
-
interface.launch()
|
|
|
19 |
input_tokens = input_tokens.to('cpu')
|
20 |
|
21 |
generated_text = prompt # Start with the initial prompt
|
22 |
+
prompt_length = len(generated_text) # Capture the length of the initial prompt
|
23 |
|
24 |
for _ in range(80): # Adjust the range to control the number of tokens generated
|
25 |
with torch.no_grad():
|
|
|
40 |
generated_text += decoded_token # Append the new token to the generated text
|
41 |
if decoded_token == "#": # Stop if the end of sequence token is generated
|
42 |
break
|
43 |
+
yield generated_text[prompt_length:] # Yield the generated text excluding the initial prompt
|
44 |
|
45 |
# Create a Gradio interface with a text input, sliders for temperature and top_p, and a text output
|
46 |
interface = gr.Interface(
|
|
|
54 |
live=False
|
55 |
)
|
56 |
|
57 |
+
interface.launch()
|