Dhahlan2000 committed on
Commit
a691c88
·
verified ·
1 Parent(s): 8163759

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
4
  from aksharamukha import transliterate
5
  import torch
@@ -13,7 +12,7 @@ eng_trans_tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled
13
  translator = pipeline('translation', model=trans_model, tokenizer=eng_trans_tokenizer, src_lang="eng_Latn", tgt_lang='sin_Sinh', max_length=400, device=device)
14
 
15
  sin_trans_model = AutoModelForSeq2SeqLM.from_pretrained("thilina/mt5-sinhalese-english").to(device)
16
- si_trans_tokenizer = AutoTokenizer.from_pretrained("thilina/mt5-sinhalese-english")
17
 
18
  singlish_pipe = pipeline("text2text-generation", model="Dhahlan2000/Simple_Translation-model-for-GPT-v14")
19
 
@@ -45,7 +44,7 @@ def transliterate_to_sinhala(text):
45
  return transliterate.process('Velthuis', 'Sinhala', text)
46
 
47
  # Load conversation model
48
- conv_model_name = "google/gemma-7b"
49
  tokenizer = AutoTokenizer.from_pretrained(conv_model_name)
50
  model = AutoModelForCausalLM.from_pretrained(conv_model_name).to(device)
51
 
 
1
  import gradio as gr
 
2
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
3
  from aksharamukha import transliterate
4
  import torch
 
12
  translator = pipeline('translation', model=trans_model, tokenizer=eng_trans_tokenizer, src_lang="eng_Latn", tgt_lang='sin_Sinh', max_length=400, device=device)
13
 
14
  sin_trans_model = AutoModelForSeq2SeqLM.from_pretrained("thilina/mt5-sinhalese-english").to(device)
15
+ si_trans_tokenizer = AutoTokenizer.from_pretrained("thilina/mt5-sinhalese-english", use_fast=False) # Use slow tokenizer
16
 
17
  singlish_pipe = pipeline("text2text-generation", model="Dhahlan2000/Simple_Translation-model-for-GPT-v14")
18
 
 
44
  return transliterate.process('Velthuis', 'Sinhala', text)
45
 
46
  # Load conversation model
47
+ conv_model_name = "gpt2" # Use GPT-2 instead of the gated model
48
  tokenizer = AutoTokenizer.from_pretrained(conv_model_name)
49
  model = AutoModelForCausalLM.from_pretrained(conv_model_name).to(device)
50