frankai98 committed
Commit e5a7e55 · verified · 1 Parent(s): 5bfd693

Update app.py

Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -274,12 +274,10 @@ def main():
 
     def process_with_gemma(prompt):
        try:
-           tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-1b-it")
            pipe = pipeline(
                "text-generation",
-               model="unsloth/gemma-3-1b-it",
+               model="unsloth/Llama-3.2-1B-Instruct",
                device="cuda" if torch.cuda.is_available() else -1,
-               tokenizer=tokenizer,
                torch_dtype=torch.bfloat16,
            )
            result = pipe(prompt, max_new_tokens=256, return_full_text=False)
@@ -287,7 +285,7 @@ def main():
        except Exception as e:
            return None, str(e)
 
-   status_text.markdown("**📝 Generating report with Gemma...**")
+   status_text.markdown("**📝 Generating report with Llama...**")
    progress_bar.progress(80)
 
    raw_result, error = process_with_gemma(prompt)
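
For context, here is a minimal sketch of how the updated helper reads after this commit. It assumes torch and the transformers pipeline are already imported at the top of app.py, and that status_text and progress_bar are Streamlit placeholders created earlier in main(); the success-path return statement falls outside the diff hunks, so the tuple returned there is inferred from the (None, str(e)) shape of the error branch.

import torch
from transformers import pipeline

def process_with_gemma(prompt):
    try:
        # Build the generation pipeline; with only a model id given,
        # pipeline() also loads the matching tokenizer automatically.
        pipe = pipeline(
            "text-generation",
            model="unsloth/Llama-3.2-1B-Instruct",
            device="cuda" if torch.cuda.is_available() else -1,
            torch_dtype=torch.bfloat16,
        )
        result = pipe(prompt, max_new_tokens=256, return_full_text=False)
        # Assumption: the success path (not shown in the hunks) returns the
        # generated text plus a None error, mirroring the failure branch.
        return result[0]["generated_text"], None
    except Exception as e:
        return None, str(e)

Dropping the explicit AutoTokenizer works because pipeline() resolves the tokenizer from the same model id when none is passed, which is why the commit removes the tokenizer lines without adding replacements.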