# Gradio demo: text + synthesized-speech interface for the
# soufyane/gemma_data_science Hugging Face model.
import gradio as gr
from gtts import gTTS
from io import BytesIO
import IPython.display as ipd
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load your Hugging Face model and tokenizer
model_name = "soufyane/gemma_data_science"
# NOTE(review): Gemma checkpoints are decoder-only (causal LM) models;
# AutoModelForSeq2SeqLM may fail to load this repo — confirm, and switch
# to AutoModelForCausalLM (adjusting the decode step) if so.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
def process_text_gemma(input_text):
    """Generate a model response for *input_text*.

    Args:
        input_text: The user's prompt as a plain string.

    Returns:
        The decoded model output with special tokens stripped.
    """
    # Keep the full encoding so generate() also receives the attention
    # mask: tokenizing with padding=True but passing only input_ids lets
    # padded positions influence generation (and triggers a warning).
    encoded = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
    output_ids = model.generate(**encoded)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
def process_speech_gemma(audio):
    """Synthesize the model's answer to *audio* as spoken English.

    Note: despite its name, *audio* is the raw input TEXT (the caller
    passes the same string it gives to process_text_gemma); the name is
    kept for interface compatibility.

    Returns:
        Path to an MP3 file containing the spoken response — a value
        Gradio's "audio" output component can render (the previous
        IPython.display.Audio return value cannot be displayed by Gradio).
    """
    response = process_text_gemma(audio)
    tts = gTTS(text=response, lang='en')
    # Write the synthesized speech to a temp file and hand Gradio the
    # filepath; delete=False so the file survives past this function for
    # the UI to stream it.
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
        tts.write_to_fp(fp)
        return fp.name
def main(input_text):
    """Gradio callback: return the text answer and its spoken rendition.

    Args:
        input_text: The string from the single "text" input component.

    Returns:
        A (text_response, audio) pair matching outputs=["text", "audio"].
    """
    # The "text" component delivers a plain string, not a list —
    # input_text[0] would pass only the FIRST CHARACTER to the model.
    return process_text_gemma(input_text), process_speech_gemma(input_text)
# Build and launch the web UI: one text box in, a text answer plus its
# spoken audio out. live=True re-runs the callback as the user types.
demo = gr.Interface(
    fn=main,
    inputs=["text"],
    outputs=["text", "audio"],
    title="Gemma Data Science Model",
    description="This is a text-to-text model for data science tasks.",
    live=True,
)
demo.launch()