Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -95,6 +95,12 @@ llama_model = AutoModelForCausalLM.from_pretrained(
 )
 llama_model.eval()
 
+# --- IMPORTANT: Set Pad Token ---
+# Llama3 does *not* have a default pad token. We *must* set one.
+# Using the EOS token as the PAD token is a common and recommended practice.
+if llama_tokenizer.pad_token is None:
+    llama_tokenizer.pad_token = llama_tokenizer.eos_token
+
 
 @spaces.GPU(duration=90)
 def llama_generate(
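For context on the change above, here is a minimal sketch of why the pad token matters once prompts are batched. It assumes the standard transformers tokenizer API; the checkpoint name and the local tokenizer variable are placeholders for illustration, not necessarily the names this Space's app.py uses.

from transformers import AutoTokenizer

# Placeholder checkpoint -- substitute whatever Llama-3 model this Space loads.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

# Llama-3 tokenizers ship without a pad token; reuse EOS as PAD,
# mirroring the change in this commit.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Without the assignment above, padding=True raises a ValueError
# because the tokenizer has no pad token to pad with.
batch = tokenizer(
    ["Short prompt.", "A noticeably longer prompt that forces padding."],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)    # (2, max_len): both rows padded to one length
print(batch["attention_mask"][0])  # zeros mark the padded positions

The same idea typically carries over to generation: passing pad_token_id=tokenizer.pad_token_id to model.generate(...) tells batched decoding which positions are padding.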