import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the base Phi-2 tokenizer and the Phi-2 model fine-tuned on song lyrics
tokenizer = AutoTokenizer.from_pretrained(
    "microsoft/phi-2",
    trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "arieridwans/phi_2-finetuned-lyrics",
    device_map="auto",
    trust_remote_code=True
)

# Streamlit UI
st.title("Eleanor Rigby")

# User input prompt
user_prompt = st.text_area(
    "Enter your prompt that can be song lyrics:",
    """Yesterday, I saw you in my dream"""
)
# Generate output based on user input
if st.button("Generate Output"):
    instruct_prompt = "Instruct:You are a song writer and your main reference is The Beatles. Write a song lyrics by completing these words:"
    output_prompt = "Output:"
    # Build the Instruct/Output prompt and tokenize it
    inputs = tokenizer(
        """ {0}{1}\n{2} """.format(instruct_prompt, user_prompt, output_prompt),
        return_tensors="pt",
        return_attention_mask=False,
        padding=True,
        truncation=True
    )
    # Generate up to 1024 tokens, penalizing repeated phrases
    result = model.generate(**inputs, repetition_penalty=1.2, max_length=1024)
    output = tokenizer.batch_decode(result, skip_special_tokens=True)[0]
    st.text("Generated Result:")
    st.write(output)
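
Because Streamlit re-runs the entire script on every interaction, the tokenizer and model above are reloaded each time the button is clicked. A minimal sketch of one way to avoid that, caching the load with Streamlit's st.cache_resource (the load_model helper name is just illustrative; the model IDs match the script above):

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource  # run the heavy load once per server process, not on every rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        "arieridwans/phi_2-finetuned-lyrics",
        device_map="auto",
        trust_remote_code=True
    )
    return tokenizer, model

tokenizer, model = load_model()

With that change the rest of the script is unchanged. Locally, the app can be started with streamlit run app.py once streamlit, transformers, torch, and accelerate are installed (accelerate is required for device_map="auto").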