import os

# The Keras backend must be selected before keras_nlp (and Keras) are imported,
# otherwise the environment variable has no effect.
os.environ["KERAS_BACKEND"] = "jax"
# Allow JAX to pre-allocate up to 100% of the accelerator memory.
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1.00"

import warnings

warnings.filterwarnings('ignore')

import gradio as gr

from keras_nlp.models import GemmaCausalLM

model_name = "soufyane/gemma_data_science" |
|
model = AutoModelForSeq2SeqLM.from_pretrained(model_name) |
|
tokenizer = AutoTokenizer.from_pretrained(model_name) |
|
|
|
model = keras_nlp.models.CausalLM.from_preset(f"hf://soufyane/gemma_data_science") |
|
|
|
def process_text_gemma(input_text):
    # Wrap the user text in a "question:" prompt and generate up to 256 tokens.
    response = model.generate(f"question: {input_text}", max_length=256)
    return response

def main(input_text):
    # Gradio passes the textbox value to fn as a single string.
    return process_text_gemma(input_text)

gr.Interface(
    fn=main,
    inputs=["text"],
    outputs=["text"],
    title="Gemma Data Science Model",
    description="This is a text-to-text model for data science tasks.",
    live=True,
).launch()
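
# Note: launch() blocks while the app is being served. To exercise the model
# without the web UI, the helper can be called directly (hypothetical prompt):
#   print(process_text_gemma("How do I handle missing values in a pandas DataFrame?"))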