SPBhai committed on
Commit
f5844b6
·
1 Parent(s): 8ff692d

Added bg_app.py and requirements.txt

Browse files
Files changed (2) hide show
  1. bg_app.py +103 -0
  2. requirements.txt +15 -1
bg_app.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #bg_app.py
2
+
3
+ import gradio as gr
4
+ from langchain.schema import HumanMessage, AIMessage
5
+ from langchain.chains import create_retrieval_chain, create_history_aware_retriever
6
+ from langchain.chains.combine_documents import create_stuff_documents_chain
7
+ from langchain.vectorstores import Chroma
8
+ from langchain.llms import HuggingFacePipeline
9
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
10
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, pipeline
11
+
12
# Load the pre-existing vector store persisted at ./bg_data_english.
# NOTE(review): no embedding_function is passed here, so query-time embedding
# falls back to whatever default the installed Chroma/LangChain versions
# provide — this must match the embeddings used when the store was built.
# Confirm against the indexing script.
vector_store = Chroma(persist_directory="./bg_data_english")

# Retriever: up to 5 chunks per query, filtered to similarity scores > 0.2.
similarity_retriever = vector_store.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={"k": 5, "score_threshold": 0.2},
)
17
+
18
# Load the LLM: Gemma-2 9B instruction-tuned, quantized to 8-bit so it fits
# in GPU memory on modest hardware.
model_id = "google/gemma-2-9b-it"
bnb_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_id)
llm_model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
)

# Wrap the model in an HF text-generation pipeline. return_full_text=False
# keeps the prompt out of the generated output; max_new_tokens caps answer
# length.
text_generation_pipeline = pipeline(
    task="text-generation",
    model=llm_model,
    tokenizer=tokenizer,
    return_full_text=False,
    max_new_tokens=350,
)

# LangChain-compatible handle used by the retrieval chains below.
llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
33
+
34
# Query reformulation: condense the chat history plus the latest user question
# into a single standalone question, so the retriever can search without
# conversational context.
rephrase_system_prompt = """Given a chat history and the latest user question
which might reference context in the chat history, formulate a standalone question
which can be understood without the chat history. Do NOT answer the question,
just reformulate it if needed and otherwise return it as is."""

rephrase_messages = [
    ("system", rephrase_system_prompt),
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
]
rephrase_prompt = ChatPromptTemplate.from_messages(rephrase_messages)

# Retriever that first rewrites the query with the LLM, then runs the
# similarity search.
history_aware_retriever = create_history_aware_retriever(
    llm, similarity_retriever, rephrase_prompt
)
49
+
50
# Answering step: persona plus grounding instructions for the final response.
# {context} is filled with the retrieved documents by the stuff-documents
# chain; {input} is the (reformulated) user question.
qa_system_prompt = """You are a saintly guide inspired by the teachings of the Bhagavad Gita, offering wisdom and moral guidance. Answer questions in a friendly and compassionate tone, drawing insights from the scripture to help users with their life challenges.
Use the provided context to craft your response and remain faithful to the philosophy of the Bhagavad Gita.
If you don't know the answer, humbly admit it or request the user to clarify or provide more details.
Limit your response to 5 lines unless the user explicitly asks for more explanation.

Question:
{input}

Context:
{context}

Answer:
"""

qa_messages = [
    ("system", qa_system_prompt),
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
]
qa_prompt = ChatPromptTemplate.from_messages(qa_messages)

# Stuff the retrieved documents into {context}, then wire
# retrieval -> answering into one RAG chain.
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
qa_rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
75
+
76
# Conversation state.
# NOTE(review): this module-level history is shared by every Gradio
# user/session and grows without bound — acceptable for a single-user demo,
# but a multi-user deployment needs per-session state (e.g. gr.State).
chat_history = []

# Role prefixes the model sometimes echoes at the start of its answer.
_ANSWER_PREFIXES = ("Saintly Guide:", "AI:")


def chat(question):
    """Answer `question` via the RAG chain and update the shared chat history.

    Args:
        question: The user's latest message.

    Returns:
        The model's answer with any leading role prefix
        ("Saintly Guide:" / "AI:") stripped.
    """
    global chat_history
    response = qa_rag_chain.invoke({"input": question, "chat_history": chat_history})
    answer = response["answer"].strip()
    # Strip at most one echoed role prefix (replaces the duplicated if/elif).
    for prefix in _ANSWER_PREFIXES:
        if answer.startswith(prefix):
            answer = answer[len(prefix):].strip()
            break
    # Fix: store the cleaned answer. The original stored the raw
    # response["answer"], which kept the junk prefix in the context and
    # encouraged the model to keep emitting it on later turns.
    chat_history.extend([HumanMessage(content=question), AIMessage(content=answer)])
    return answer
89
+
90
# Gradio front-end wired to chat(): one text box in, one text box out.
question_box = gr.Textbox(label="Ask your question", placeholder="What's troubling you?")
answer_box = gr.Textbox(label="Answer")

interface = gr.Interface(
    fn=chat,
    inputs=question_box,
    outputs=answer_box,
    title="Bhagavad Gita Chatbot",
    description="Ask questions inspired by the teachings of the Bhagavad Gita and receive saintly guidance.",
)

# Launch only when run as a script (not when imported, e.g. by a server).
if __name__ == "__main__":
    interface.launch()
102
+
103
+
requirements.txt CHANGED
@@ -1 +1,15 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ gradio
4
+ transformers
5
+ langchain>=0.0.227
6
+ langchainhub
7
+ langchain-community
8
+ sentence-transformers
9
+ torch
10
+ bitsandbytes
11
+ accelerate
12
+ chromadb
13
+ huggingface-hub
14
+ pydantic
15
+ typing-extensions