halimbahae committed on
Commit
1335f41
·
verified ·
1 Parent(s): e84dc32

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -7
app.py CHANGED
@@ -8,8 +8,21 @@ from langchain.document_loaders import PyPDFLoader, UnstructuredFileLoader, CSVL
8
  from langchain.chains import RetrievalQA
9
  from langchain.prompts import PromptTemplate
10
 
11
- # Initialize the Zephyr client
12
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  # Function to load documents based on file type
15
  def load_documents(file_path):
@@ -93,19 +106,36 @@ def handle_query(message, history, system_message, max_tokens, temperature, top_
93
  return respond(message, history, system_message, max_tokens, temperature, top_p, retriever)
94
 
95
  # Gradio app setup
96
- demo = gr.ChatInterface(
97
- fn=handle_query,
98
- additional_inputs=[
99
- gr.File(label="Upload File", type="file"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  gr.Textbox(value="You are a knowledgeable assistant.", label="System Message"),
 
101
  gr.Slider(1, 2048, step=1, value=512, label="Max Tokens"),
102
  gr.Slider(0.1, 4.0, step=0.1, value=0.7, label="Temperature"),
103
  gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-p"),
104
  ],
105
  outputs="text",
106
  title="RAG with Zephyr-7B",
107
- description="A Retrieval-Augmented Generation chatbot powered by Zephyr-7B and Chroma vector database.",
108
  )
109
 
 
 
110
  if __name__ == "__main__":
111
  demo.launch()
 
8
  from langchain.chains import RetrievalQA
9
  from langchain.prompts import PromptTemplate
10
 
11
+ # # Initialize the Zephyr client
12
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
13
+
14
+ from huggingface_hub import InferenceClient
15
+
16
+ # Access the Hugging Face token from environment variables
17
+ HF_API_TOKEN = os.getenv("HF_API_TOKEN")
18
+
19
+ if not HF_API_TOKEN:
20
+ raise ValueError("Hugging Face API token is not set in environment variables.")
21
+
22
+ # Initialize the client with the token
23
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=HF_API_TOKEN)
24
+
25
+
26
 
27
  # Function to load documents based on file type
28
  def load_documents(file_path):
 
106
  return respond(message, history, system_message, max_tokens, temperature, top_p, retriever)
107
 
108
  # Gradio app setup
109
+ # demo = gr.ChatInterface(
110
+ # fn=handle_query,
111
+ # additional_inputs=[
112
+ # gr.File(label="Upload File", type="file"),
113
+ # gr.Textbox(value="You are a knowledgeable assistant.", label="System Message"),
114
+ # gr.Slider(1, 2048, step=1, value=512, label="Max Tokens"),
115
+ # gr.Slider(0.1, 4.0, step=0.1, value=0.7, label="Temperature"),
116
+ # gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-p"),
117
+ # ],
118
+ # outputs="text",
119
+ # title="RAG with Zephyr-7B",
120
+ # description="A Retrieval-Augmented Generation chatbot powered by Zephyr-7B and Chroma vector database.",
121
+ # )
122
+
123
+ demo = gr.Interface(
124
+ fn=handle_uploaded_file, # Handle uploaded files
125
+ inputs=[
126
+ gr.File(label="Upload Document"),
127
  gr.Textbox(value="You are a knowledgeable assistant.", label="System Message"),
128
+ gr.Textbox(label="Enter Your Query", placeholder="Ask a question..."),
129
  gr.Slider(1, 2048, step=1, value=512, label="Max Tokens"),
130
  gr.Slider(0.1, 4.0, step=0.1, value=0.7, label="Temperature"),
131
  gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top-p"),
132
  ],
133
  outputs="text",
134
  title="RAG with Zephyr-7B",
135
+ description="Upload documents and ask questions using RAG.",
136
  )
137
 
138
+
139
+
140
  if __name__ == "__main__":
141
  demo.launch()