# llamaai/app.py
import streamlit as st
from langchain_ollama import OllamaLLM  # OllamaLLM replaces the old langchain_community Ollama class
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
# Title for the Streamlit app
st.title("LLaMA 3.1 8B Instruct Model with Streamlit (Using LangChain & Ollama)")
# Load the Ollama model using LangChain
@st.cache_resource
def load_ollama_model():
    return OllamaLLM(model="llama3.1")  # Other model tags can be substituted if needed

llama_model = load_ollama_model()
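# Note: OllamaLLM talks to a locally running Ollama server (http://localhost:11434
# by default), so the llama3.1 model should already be pulled, e.g. `ollama pull llama3.1`.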
# Create a LangChain LLMChain object
prompt_template = PromptTemplate(
    input_variables=["prompt"],
    template="{prompt}"
)
llm_chain = LLMChain(
    llm=llama_model,
    prompt=prompt_template
)
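# A minimal alternative sketch, assuming LangChain >= 0.2 (where LLMChain is deprecated):
# the same chain can be expressed with the runnable pipe syntax instead.
#   llm_chain = prompt_template | llama_model
#   response = llm_chain.invoke({"prompt": user_input})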
# Input text from the user
user_input = st.text_area("Enter your prompt:", "")
# Generate response using the model
if st.button("Generate"):
    if user_input:
        # Generate the response from the LLMChain using LangChain and Ollama
        response = llm_chain.run({"prompt": user_input})
        st.text_area("Model Response:", response, height=200)
    else:
        st.warning("Please enter a prompt.")