# vgl_o1 / app.py
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

# (Optional) If you're using Anthropic:
# from langchain.chat_models import ChatAnthropic

# Placeholder functions for other LLMs (DeepSeek, Gemini, Ollama, etc.).
# Implement or import your own logic here.
def get_deepseek_llm(api_key: str):
    """
    TODO: Implement your DeepSeek integration.
    """
    # return your DeepSeek LLM client
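    #
    # A minimal sketch, not a verified implementation (assumptions: DeepSeek
    # exposes an OpenAI-compatible chat endpoint, and the base URL and model
    # name below are correct for your account):
    #
    #     return ChatOpenAI(
    #         temperature=0.7,
    #         openai_api_key=api_key,
    #         openai_api_base="https://api.deepseek.com/v1",
    #         model_name="deepseek-chat",
    #     )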
    pass

def get_gemini_llm(api_key: str):
    """
    TODO: Implement your Gemini integration.
    """
    # return your Gemini LLM client
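    #
    # A minimal sketch, not a verified implementation (assumption: the
    # separate langchain-google-genai package is installed; the model name
    # below is illustrative):
    #
    #     from langchain_google_genai import ChatGoogleGenerativeAI
    #     return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=api_key)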
    pass

def get_ollama_llm():
    """
    TODO: Implement your local Ollama integration.
    Possibly specify a port, endpoint, etc.
    """
    # return your Ollama LLM client
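    #
    # A minimal sketch, not a verified implementation (assumptions: your
    # langchain version ships ChatOllama, an Ollama server is running on its
    # default port 11434, and the model below has been pulled):
    #
    #     from langchain.chat_models import ChatOllama
    #     return ChatOllama(base_url="http://localhost:11434", model="llama2")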
    pass

def get_claude_llm(api_key: str):
    """
    Example for Anthropic's Claude.
    """
    # If you installed anthropic (pip install anthropic):
    # from langchain.chat_models import ChatAnthropic
    # llm = ChatAnthropic(anthropic_api_key=api_key)
    # return llm
    pass

def load_llm(selected_model: str, api_key: str):
    """
    Returns the LLM object depending on user selection.
    """
    if selected_model == "OpenAI":
        # Use the OpenAI chat model. By default this uses GPT-3.5; pass
        # model_name="gpt-4" if you have access.
        llm = ChatOpenAI(temperature=0.7, openai_api_key=api_key)
    elif selected_model == "Claude":
        # llm = get_claude_llm(api_key)  # Uncomment once implemented
        llm = None  # Placeholder
        st.warning("Claude is not implemented. Implement the get_claude_llm function.")
    elif selected_model == "Gemini":
        # llm = get_gemini_llm(api_key)  # Uncomment once implemented
        llm = None
        st.warning("Gemini is not implemented. Implement the get_gemini_llm function.")
    elif selected_model == "DeepSeek":
        # llm = get_deepseek_llm(api_key)  # Uncomment once implemented
        llm = None
        st.warning("DeepSeek is not implemented. Implement the get_deepseek_llm function.")
    elif selected_model == "Ollama (local)":
        # llm = get_ollama_llm()  # Uncomment once implemented
        llm = None
        st.warning("Ollama is not implemented. Implement the get_ollama_llm function.")
    else:
        llm = None
    return llm

def initialize_session_state():
    """
    Initialize the session state for storing conversation history.
    """
    if "messages" not in st.session_state:
        st.session_state["messages"] = []

def main():
    st.title("Multi-LLM Chat App")

    # Sidebar for model selection and API key
    st.sidebar.header("Configuration")
    selected_model = st.sidebar.selectbox(
        "Select an LLM",
        ["OpenAI", "Claude", "Gemini", "DeepSeek", "Ollama (local)"]
    )
    api_key = st.sidebar.text_input("API Key (if needed)", type="password")
    st.sidebar.write("---")
    if st.sidebar.button("Clear Chat"):
        st.session_state["messages"] = []

    # Initialize conversation in session state
    initialize_session_state()

    # Load the chosen LLM
    llm = load_llm(selected_model, api_key)

    # Display the existing conversation
    for msg in st.session_state["messages"]:
        if msg["role"] == "user":
            st.markdown(f"**You:** {msg['content']}")
        else:
            st.markdown(f"**LLM:** {msg['content']}")

    # User input
    user_input = st.text_input("Type your message here...", "")

    # On submit
    if st.button("Send"):
        if user_input.strip() == "":
            st.warning("Please enter a message before sending.")
        else:
            # Add the user message to the conversation history
            st.session_state["messages"].append({"role": "user", "content": user_input})
            if llm is None:
                st.error("LLM is not configured or implemented for this choice.")
            else:
                # Convert the stored history into LangChain message objects
                lc_messages = []
                for msg in st.session_state["messages"]:
                    if msg["role"] == "user":
                        lc_messages.append(HumanMessage(content=msg["content"]))
                    else:
                        lc_messages.append(AIMessage(content=msg["content"]))
                # Call the LLM; invoking a LangChain chat model on a list of
                # messages returns an AIMessage
                response = llm(lc_messages)
                # Add the LLM response to the conversation. Note it is rendered
                # on the next Streamlit rerun (i.e. the next widget interaction),
                # since the display loop above has already run.
                st.session_state["messages"].append({"role": "assistant", "content": response.content})

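# To run this app locally (assumption: the streamlit, langchain, and openai
# packages are installed):
#
#     streamlit run app.py
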
if __name__ == "__main__":
    main()