File size: 952 Bytes
22711eb
 
 
 
 
 
 
 
 
 
23e1044
4c1bec7
 
 
 
23e1044
22711eb
 
4c1bec7
22711eb
4c1bec7
23e1044
 
 
22711eb
23e1044
 
 
4c1bec7
22711eb
4c1bec7
 
23e1044
4c1bec7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import os
import warnings

warnings.filterwarnings('ignore')

# Set the environment BEFORE importing keras_nlp: the Keras backend is
# selected at import time, so exporting it after the import has no effect.
os.environ["KERAS_BACKEND"] = "jax"  # Or "torch" or "tensorflow".
# Let the XLA client pre-allocate the full device memory pool.
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1.00"

import keras_nlp
from keras_nlp.models import GemmaCausalLM
import gradio as gr  # was missing: `gr` is used below to build the UI

# Hugging Face model id for the fine-tuned Gemma checkpoint.
model_name = "soufyane/gemma_data_science"

# Load the model straight from the Hugging Face Hub via keras_nlp.
# NOTE(review): the original also called AutoModelForSeq2SeqLM/AutoTokenizer
# here, but those names were never imported (NameError) and the resulting
# objects were immediately overwritten by this load — removed.
model = keras_nlp.models.CausalLM.from_preset(f"hf://{model_name}")

def process_text_gemma(input_text):
    """Ask the Gemma model a question and return its generated answer.

    The user text is wrapped in the ``question: ...`` prompt format the
    fine-tuned checkpoint expects; generation is capped at 256 tokens.
    """
    prompt = f"question: {input_text}"
    answer = model.generate(prompt, max_length=256)
    return answer


def main(input_text):
    """Gradio callback: run the model on the first submitted text field."""
    first_field = input_text[0]
    return process_text_gemma(first_field)

# Build the web UI and start serving it.
# live=True re-runs the callback on every input change (no submit button).
demo = gr.Interface(
    fn=main,
    inputs=["text"],
    outputs=["text"],
    title="Gemma Data Science Model",
    description="This is a text-to-text model for data science tasks.",
    live=True,
)
demo.launch()