Dhahlan2000 committed on
Commit
6e3bcaf
·
verified ·
1 Parent(s): 12e3736

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -5
app.py CHANGED
@@ -44,14 +44,28 @@ def transliterate_to_sinhala(text):
44
  return transliterate.process('Velthuis', 'Sinhala', text)
45
 
46
  # Load conversation model
47
- conv_model_name = "Qwen/Qwen2-0.5B-Instruct" # Use GPT-2 instead of the gated model
48
- tokenizer = AutoTokenizer.from_pretrained(conv_model_name)
49
  model = AutoModelForCausalLM.from_pretrained(conv_model_name).to(device)
50
 
51
  def conversation_predict(text):
52
- input_ids = tokenizer(text, return_tensors="pt").to(device)
53
- outputs = model.generate(**input_ids)
54
- return tokenizer.decode(outputs[0])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  def ai_predicted(user_input):
57
  if user_input.lower() == 'exit':
 
44
  return transliterate.process('Velthuis', 'Sinhala', text)
45
 
46
  # Load conversation model
47
+ conv_model_name = "microsoft/Phi-3-mini-4k-instruct" # Use Phi-3 mini instead of the gated model
48
+ tokenizer = AutoTokenizer.from_pretrained(conv_model_name, trust_remote_code=True)
49
  model = AutoModelForCausalLM.from_pretrained(conv_model_name).to(device)
50
 
51
  def conversation_predict(text):
52
+ pipe = pipeline(
53
+ "text-generation",
54
+ model=model,
55
+ tokenizer=tokenizer,
56
+ )
57
+ generation_args = {
58
+ "max_new_tokens": 500,
59
+ "return_full_text": False,
60
+ "temperature": 0.0,
61
+ "do_sample": False,
62
+ }
63
+
64
+ output = pipe(text, **generation_args)
65
+ return output[0]['generated_text']
66
+ # input_ids = tokenizer(text, return_tensors="pt").to(device)
67
+ # outputs = model.generate(**input_ids)
68
+ # return tokenizer.decode(outputs[0])
69
 
70
  def ai_predicted(user_input):
71
  if user_input.lower() == 'exit':