soufyane committed on
Commit
4c1bec7
1 Parent(s): bc17a11

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -23
app.py CHANGED
@@ -2,26 +2,21 @@ import gradio as gr
2
  from gtts import gTTS
3
  from io import BytesIO
4
  import IPython.display as ipd
 
5
 
6
- codellama_model = gr.Interface.load("models/meta-llama/CodeLlama-7b-Python-hf")
7
- deepseek_model = gr.Interface.load("models/deepseek-ai/deepseek-coder-1.3b-instruct")
 
 
8
 
9
- def process_text_codellama(input_text):
10
- return codellama_model.predict(input_text)
 
 
 
11
 
12
- def process_speech_codellama(audio):
13
- response = codellama_model.predict(audio)
14
- tts = gTTS(text=response, lang='en')
15
- fp = BytesIO()
16
- tts.write_to_fp(fp)
17
- fp.seek(0)
18
- return ipd.Audio(fp.read(), autoplay=True)
19
-
20
- def process_text_deepseek(input_text):
21
- return deepseek_model.predict(input_text)
22
-
23
- def process_speech_deepseek(audio):
24
- response = deepseek_model.predict(audio)
25
  tts = gTTS(text=response, lang='en')
26
  fp = BytesIO()
27
  tts.write_to_fp(fp)
@@ -29,14 +24,13 @@ def process_speech_deepseek(audio):
29
  return ipd.Audio(fp.read(), autoplay=True)
30
 
31
  def main(input_text):
32
- if input_text[1]:
33
- return process_text_deepseek(input_text[0]), process_speech_deepseek(input_text[0])
34
- else:
35
- return process_text_codellama(input_text[0]), process_speech_codellama(input_text[0])
36
 
37
  gr.Interface(
38
  fn=main,
39
- inputs=["text", "checkbox"], # ["input text", "enable voice input"]
40
  outputs=["text", "audio"],
 
 
41
  live=True
42
- ).launch()
 
2
  from gtts import gTTS
3
  from io import BytesIO
4
  import IPython.display as ipd
5
+ from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
6
 
7
# Load the fine-tuned Gemma model and its tokenizer once at module import.
# NOTE(review): Gemma is a decoder-only (causal) architecture, so
# AutoModelForSeq2SeqLM.from_pretrained raises "Unrecognized configuration
# class" for Gemma checkpoints — load it with AutoModelForCausalLM instead.
# TODO confirm the actual architecture of soufyane/gemma_data_science.
model_name = "soufyane/gemma_data_science"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
 
12
def process_text_gemma(input_text):
    """Generate a model response for *input_text* and return it as a string."""
    # Tokenize once and keep the attention mask the tokenizer produced:
    # calling generate() with input_ids alone discards it, which triggers a
    # transformers warning and can corrupt output when padding is present.
    encoded = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
    # Without max_new_tokens, generate() falls back to the default
    # max_length of 20 total tokens, truncating almost every answer.
    output_ids = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_new_tokens=256,
    )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
17
 
18
def process_speech_gemma(audio):
    """Run the model on *audio* and return the spoken response.

    NOTE(review): *audio* is passed straight to process_text_gemma, i.e. it
    is treated as text — there is no speech-to-text step here. Confirm the
    caller really supplies a transcript, not raw audio data.
    """
    response = process_text_gemma(audio)
    tts = gTTS(text=response, lang='en')
    fp = BytesIO()
    tts.write_to_fp(fp)
    # Rewind before reading: write_to_fp leaves the cursor at end-of-stream,
    # so without seek(0) fp.read() returns b"" and the audio is silent.
    fp.seek(0)
    # NOTE(review): ipd.Audio is an IPython display object; gradio's "audio"
    # output expects a filepath, (sample_rate, ndarray), or bytes — verify
    # this renders in the gradio UI.
    return ipd.Audio(fp.read(), autoplay=True)
25
 
26
def main(input_text):
    """Gradio entry point: return (text reply, spoken reply) for one prompt."""
    # With a single "text" input gradio passes the raw string, so indexing
    # input_text[0] (a leftover from the removed [text, checkbox] input pair)
    # would hand only the FIRST CHARACTER of the prompt to the model.
    return process_text_gemma(input_text), process_speech_gemma(input_text)
 
 
 
28
 
29
# Build the web UI — one textbox in, (text, audio) out — re-running the
# model live as the user types, then start serving it.
demo = gr.Interface(
    fn=main,
    inputs=["text"],
    outputs=["text", "audio"],
    title="Gemma Data Science Model",
    description="This is a text-to-text model for data science tasks.",
    live=True,
)
demo.launch()