import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from huggingface_hub import login
import os
# log in to the Hugging Face Hub with an access token from the environment
api_key = os.getenv("ACCESS_TOKEN")
login(token=api_key)
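# Note: google/gemma-2-2b-it is a gated model on the Hub, so this login must
# succeed before the weights below can be downloaded. On a Hugging Face Space,
# ACCESS_TOKEN would typically be stored as a repository secret.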
# set up the model
model_id = "google/gemma-2-2b-it"
dtype = torch.bfloat16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_id)
# load the weights in bfloat16 to roughly halve memory use
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype)
model.to(device)
model.eval()
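# Note: Streamlit reruns this whole script on every widget interaction, so the
# loading above repeats on each rerun. A minimal sketch of a cached loader,
# assuming Streamlit >= 1.18 (which provides st.cache_resource):
#
# @st.cache_resource
# def load_model():
#     tok = AutoTokenizer.from_pretrained(model_id)
#     mdl = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype).to(device)
#     mdl.eval()
#     return tok, mdl
#
# tokenizer, model = load_model()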
def poet(text):
    prompt = ("Write exactly 25 lines of Shakespeare-style verse, no fewer and no more, "
              "based on this prompt: " + text)
    chat = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(device)
    outputs = model.generate(input_ids=inputs, max_new_tokens=350)
    # decode only the newly generated tokens, dropping the echoed prompt and special tokens
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
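# As written, generate() requests no sampling parameters, so decoding is
# effectively greedy and can turn repetitive over 25 lines of verse. A hedged
# alternative for the call above, with illustrative (untuned) values:
#
# outputs = model.generate(input_ids=inputs, max_new_tokens=350,
#                          do_sample=True, temperature=0.9, top_p=0.95)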
st.title("Shakespeare AI")
st.write("A space made to let people generate Shakespeare-like text!")
# get the user's prompt
prompt = st.text_input("Enter your prompt:")
# generate and display the poem, but only once a prompt has been entered
if prompt:
    shakespeare = poet(prompt)
    st.write(shakespeare)