Upload 2 files
- app.py +94 -0
- requirements.txt +35 -0
app.py
ADDED
@@ -0,0 +1,94 @@
import streamlit as st
from langchain_groq import ChatGroq
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities import WikipediaAPIWrapper
from langchain.agents.agent_types import AgentType
from langchain.agents import Tool, initialize_agent
from langchain.callbacks import StreamlitCallbackHandler

# Set up Streamlit page configuration
st.set_page_config(page_title="General Knowledge Assistant", page_icon="🧭")
st.title("General Knowledge Assistant")

# API key input for Groq
groq_api_key = st.sidebar.text_input(label="Groq API Key", type="password")

if not groq_api_key:
    st.info("Please add your Groq API key to continue")
    st.stop()

# Initialize the LLM (Groq API - llama-3.1-70b)
llm = ChatGroq(model="llama-3.1-70b-versatile", groq_api_key=groq_api_key)

# Initialize the Wikipedia tool for information retrieval
wikipedia_wrapper = WikipediaAPIWrapper()
wikipedia_tool = Tool(
    name="Wikipedia",
    func=wikipedia_wrapper.run,
    description="A tool for searching Wikipedia to find information on various topics, including general knowledge."
)

# Prompt template for general knowledge questions
prompt = """
You are a knowledgeable assistant. Your task is to answer the user's questions accurately, using your general knowledge.
If the answer is not readily available in your knowledge base, search Wikipedia for relevant information.
Your information should be accurate and up to date. Whenever you are asked to write an essay, also give the essay a title.
Question: {question}
Answer:
"""

# Initialize the prompt template
prompt_template = PromptTemplate(
    input_variables=["question"],
    template=prompt
)

# Combine the LLM and prompt into a chain for answering general knowledge questions
chain = LLMChain(llm=llm, prompt=prompt_template)

# Reasoning tool for logic-based or factual questions
reasoning_tool = Tool(
    name="Reasoning tool",
    func=chain.run,
    description="A tool for answering general knowledge questions using logical reasoning and factual information. Try to use the latest information."
)

# Initialize the agent with the tools and LLM
assistant_agent = initialize_agent(
    tools=[wikipedia_tool, reasoning_tool],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=False,
    handle_parsing_errors=True
)

# Initialize session state for message history if it doesn't exist
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi, I'm your general knowledge assistant. Feel free to ask me any question!"}
    ]

# Display the conversation history
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# Get the user's question
question = st.text_area("Enter your question:", "Please enter your general knowledge question here")

# Handle the button click to process the question
if st.button("Find my answer"):
    if question:
        with st.spinner("Generating response..."):
            st.session_state.messages.append({"role": "user", "content": question})
            st.chat_message("user").write(question)

            # Stream the agent's intermediate reasoning steps into the Streamlit UI
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = assistant_agent.run(st.session_state.messages, callbacks=[st_cb])
            st.session_state.messages.append({"role": "assistant", "content": response})
            st.write("### Response:")
            st.success(response)
    else:
        st.warning("Please enter a question")
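Not part of the commit itself: a minimal sketch for exercising the same agent wiring from a plain Python session, without the Streamlit UI. It assumes GROQ_API_KEY is exported in the environment (a hypothetical setup choice); the model name, tool setup, and agent type are copied from app.py above.

import os
from langchain_groq import ChatGroq
from langchain_community.utilities import WikipediaAPIWrapper
from langchain.agents import Tool, initialize_agent
from langchain.agents.agent_types import AgentType

# Assumes GROQ_API_KEY is set in the environment (not part of the commit)
llm = ChatGroq(model="llama-3.1-70b-versatile", groq_api_key=os.environ["GROQ_API_KEY"])

# Same Wikipedia tool as in app.py, used here as the agent's only tool
wikipedia_tool = Tool(
    name="Wikipedia",
    func=WikipediaAPIWrapper().run,
    description="Look up factual information on Wikipedia."
)

# Same agent type as app.py; verbose=True prints the ReAct steps to the terminal
agent = initialize_agent(
    tools=[wikipedia_tool],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True
)

print(agent.run("In which year did the Apollo 11 mission land on the Moon?"))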
requirements.txt
ADDED
@@ -0,0 +1,35 @@
langchain
python-dotenv
ipykernel
langchain-community
pypdf
bs4
arxiv
pymupdf
wikipedia
langchain-text-splitters
langchain-openai
chromadb
sentence_transformers
langchain_huggingface
faiss-cpu
langchain_chroma
duckdb
pandas
openai
langchain-groq
duckduckgo_search==5.3.1b1
pymupdf
arxiv
wikipedia
mysql-connector-python
SQLAlchemy
validators==0.28.1
youtube_transcript_api
unstructured
pytube
numexpr
huggingface_hub
Sympy
PyPDF2
streamlit
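To try the Space locally, the usual Streamlit workflow applies (commands assumed, not part of the commit): install the dependencies with pip install -r requirements.txt, start the app with streamlit run app.py, then paste a Groq API key into the sidebar before asking a question.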