kazimsayed committed on
Commit
3a2c5b6
·
1 Parent(s): 475781c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -1,11 +1,14 @@
1
  import random
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
 
4
 
5
  tokenizer = AutoTokenizer.from_pretrained("docto/Docto-Bot")
6
  model = AutoModelForCausalLM.from_pretrained("docto/Docto-Bot")
7
  special_token = '<|endoftext|>'
8
 
 
 
9
  def get_reply(userinput):
10
  prompt_text = f'Question: {userinput}\nAnswer:'
11
  encoded_prompt = tokenizer.encode(prompt_text,
@@ -28,7 +31,8 @@ def get_reply(userinput):
28
  try:
29
  result = tokenizer.decode(random.choice(output_sequences))
30
  result = result[result.index("Answer: "):result.index(special_token)]
31
- return(result[8:])
 
32
  except:
33
  return "Sorry! I don\'t Know"
34
 
 
1
  import random
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
+ from gramformer import Gramformer
5
 
6
  tokenizer = AutoTokenizer.from_pretrained("docto/Docto-Bot")
7
  model = AutoModelForCausalLM.from_pretrained("docto/Docto-Bot")
8
  special_token = '<|endoftext|>'
9
 
10
+ gf = Gramformer(models = 1, use_gpu=False) # 1=corrector, 2=detector
11
+
12
  def get_reply(userinput):
13
  prompt_text = f'Question: {userinput}\nAnswer:'
14
  encoded_prompt = tokenizer.encode(prompt_text,
 
31
  try:
32
  result = tokenizer.decode(random.choice(output_sequences))
33
  result = result[result.index("Answer: "):result.index(special_token)]
34
+ corrected_sentence = gf.correct(result[8:], max_candidates=1)
35
+ return corrected_sentence
36
  except:
37
  return "Sorry! I don\'t Know"
38