Update app.py
app.py CHANGED
```diff
@@ -74,7 +74,7 @@ def model_prediction(model, text, device):
 
 # --- Llama 3.2 3B Model Setup ---
 LLAMA_MAX_MAX_NEW_TOKENS = 512
-LLAMA_DEFAULT_MAX_NEW_TOKENS =
+LLAMA_DEFAULT_MAX_NEW_TOKENS = 250
 LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
 llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
```
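
This hunk completes the assignment that the previous commit left dangling: `LLAMA_DEFAULT_MAX_NEW_TOKENS =` with no right-hand side is a SyntaxError, so the Space could not start at all. The loading code that consumes these constants is outside the hunk; below is a minimal sketch of how `llama_model_id` and `llama_device` are typically wired up with `transformers` (the `AutoTokenizer`/`AutoModelForCausalLM` calls are an assumption, not code from app.py):

```python
# Sketch only: app.py's actual loading code is outside this hunk, and
# meta-llama/Llama-3.2-1B-Instruct is a gated repo (needs an HF token).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"

llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
llama_model = AutoModelForCausalLM.from_pretrained(
    llama_model_id,
    torch_dtype=torch.bfloat16,  # half precision keeps the 1B model light on GPU
).to(llama_device)
llama_model.eval()  # inference only, no dropout
```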
```diff
@@ -135,7 +135,7 @@ Given the following issue description:
 ---
 {issue_text}
 ---
-
+Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise. Do not repeat the prompt or include any preamble in your response - just provide the explanation directly.
 """
     try:
         explanation = llama_generate(prompt)
```
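
Here the instruction line of the prompt template, previously empty, is filled in: the model is now explicitly asked to justify the predicted **{quality_name}** label instead of receiving a bare issue description. `llama_generate` itself is defined elsewhere in app.py; the body below is a plausible sketch of how such a helper would apply the token limits from the first hunk, not the Space's actual implementation:

```python
# Assumed shape of llama_generate; the real body lives elsewhere in app.py.
def llama_generate(prompt: str, max_new_tokens: int = LLAMA_DEFAULT_MAX_NEW_TOKENS) -> str:
    inputs = llama_tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=LLAMA_MAX_INPUT_TOKEN_LENGTH,  # cap the prompt length
    ).to(llama_device)
    with torch.no_grad():
        output_ids = llama_model.generate(
            **inputs,
            max_new_tokens=min(max_new_tokens, LLAMA_MAX_MAX_NEW_TOKENS),
            do_sample=False,  # deterministic explanations
        )
    # Decode only the continuation, dropping the echoed prompt tokens.
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return llama_tokenizer.decode(new_tokens, skip_special_tokens=True)
```

With the default now set to 250 rather than left unset, generated explanations stay well under the hard cap of 512 new tokens.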
```diff
@@ -279,4 +279,4 @@ interface = gr.Interface(
     examples=example_texts,
     css=css # Apply the CSS
 )
-interface.launch(
+interface.launch( )
```
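
The previous commit also left `interface.launch(` unclosed, a second SyntaxError; this hunk closes the call. (The stray space inside the parentheses is harmless, though `interface.launch()` would be the conventional spelling.) No arguments are needed on Spaces, but Gradio's `launch()` does accept options if they were ever wanted here; for illustration only, not part of this commit:

```python
# Illustration only: the commit itself calls launch() with no arguments,
# which is all a Hugging Face Space needs.
interface.launch(
    show_error=True,  # surface Python tracebacks in the UI instead of a generic error
)
```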