import os

import streamlit as st
import torch
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM

# Log in to the Hugging Face Hub; Gemma is a gated model, so a token is required
api_key = os.getenv("ACCESS_TOKEN")
if not api_key:
    st.error("Please set the ACCESS_TOKEN environment variable to a Hugging Face token.")
    st.stop()
login(token=api_key)

# Set up the model; cache it so Streamlit doesn't reload the weights on every rerun
model_id = "google/gemma-2-2b-it"
dtype = torch.bfloat16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # Load the weights in bfloat16 to reduce memory use
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype)
    model.to(device)
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()

def poet(text):
    instruction = (
        "Write exactly 25 lines of Shakespearean verse, no fewer and no more, "
        "based on this prompt: " + text
    )
    chat = [{"role": "user", "content": instruction}]
    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(device)
    outputs = model.generate(input_ids=inputs, max_new_tokens=350)
    # Decode only the newly generated tokens so the echoed prompt is not shown
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    
st.title("Shakespeare Ai")
st.write("A space made to allow people to create shakespeare like text!")

# get prompt
prompt = st.text_input("Enter your prompt: ")

# analyze prompt
shakespeare = poet(prompt)

# write content
st.write(shakespeare)
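
# A minimal way to run this app locally, assuming the file is saved as app.py
# (the filename is an assumption; it is not stated in the source):
#
#   ACCESS_TOKEN=<your_hf_token> streamlit run app.py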