# Hugging Face Space: "Python Code Helper" — Streamlit app serving CodeLlama-7b-Python.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Causal-LM checkpoint used for Python code generation.
model_name = "codellama/CodeLlama-7b-Python-hf"


@st.cache_resource(show_spinner="Loading model…")
def _load_model_and_tokenizer():
    """Load the CodeLlama model and tokenizer once per process.

    Without caching, Streamlit re-executes the whole script on every widget
    interaction, which would re-download/re-instantiate the 7B model each
    rerun. ``st.cache_resource`` keeps a single shared instance alive.
    """
    mdl = AutoModelForCausalLM.from_pretrained(model_name)
    tok = AutoTokenizer.from_pretrained(model_name)
    return mdl, tok


model, tokenizer = _load_model_and_tokenizer()
# Prefer GPU when available; fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Page title. (Commented-out remnants of an earlier translation app —
# language selectors and a translate_text() helper — removed as dead code.)
st.markdown("### Python Code Helper")
# Prompt form: collect a prompt and generate a completion on submit.
with st.form(key="myForm"):
    prompt = st.text_area("Enter your Prompt")
    submit = st.form_submit_button("Submit", type='primary')
    if submit and prompt:
        with st.spinner("Generating Response"):
            # BUG FIX: AutoModelForCausalLM has no .invoke() (that is a
            # LangChain API) — the original crashed with AttributeError.
            # Correct flow: tokenize -> generate -> decode.
            inputs = tokenizer(prompt, return_tensors="pt").to(device)
            with torch.no_grad():  # inference only; skip autograd bookkeeping
                output_ids = model.generate(**inputs, max_new_tokens=256)
            # Drop the echoed prompt tokens so only new text is shown.
            new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
            response = tokenizer.decode(new_tokens, skip_special_tokens=True)
            st.write(response)