import streamlit as st

from langchain_ollama.llms import OllamaLLM
from langchain_core.prompts import PromptTemplate

st.title("LLaMA 3.1 8B Instruct Model with Streamlit (Using LangChain & Ollama)")

# Cache the model client so Streamlit creates it once, not on every rerun.
@st.cache_resource
def load_ollama_model():
    # Assumes a local Ollama server is running and the model has been pulled
    # (e.g. with `ollama pull llama3.1`).
    return OllamaLLM(model="llama3.1")

llama_model = load_ollama_model()

# A pass-through template: the user's text becomes the entire prompt.
prompt_template = PromptTemplate(
    input_variables=["prompt"],
    template="{prompt}",
)

# LLMChain is deprecated in recent LangChain releases; composing the prompt
# and model with the `|` operator (LCEL) is the current equivalent.
llm_chain = prompt_template | llama_model

user_input = st.text_area("Enter your prompt:", "")

if st.button("Generate"):
    if user_input:
        # For LLM-type models, invoke() returns the completion as a plain string.
        response = llm_chain.invoke({"prompt": user_input})
        st.text_area("Model Response:", response, height=200)
    else:
        st.warning("Please enter a prompt.")
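
# A minimal way to try the app, assuming the script is saved as app.py
# (the filename is an assumption, not part of the original):
#   streamlit run app.py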