Moha782 committed on
Commit 733597b · verified · 1 Parent(s): cc03211

Update app.py

Files changed (1)
  1. app.py +3 -9
app.py CHANGED
@@ -1,18 +1,14 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from transformers import pipeline, RagRetriever, RagTokenizer
+from transformers import pipeline
 
 """
 For more information on huggingface_hub Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-# Load the RAG tokenizer and retriever
-tokenizer = RagTokenizer.from_pretrained("deepset/roberta-base-squad2")
-retriever = RagRetriever.from_pretrained("deepset/roberta-base-squad2", index_name="apexcustoms", passages="apexcustoms.pdf", trust_remote_code=True)
-
 # Load the question-answering pipeline
-qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2", tokenizer=tokenizer)
+qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
 
 def respond(
     message,
@@ -40,9 +36,7 @@ def respond(
         stream=True,
         temperature=temperature,
         top_p=top_p,
-        rag_retriever=retriever,  # Pass the RAG retriever
-        rag_tokenizer=tokenizer,  # Pass the RAG tokenizer
-        rag_pipeline=qa_pipeline,  # Pass the question-answering pipeline
+        qa_pipeline=qa_pipeline,  # Pass the question-answering pipeline
     ):
         token = message.choices[0].delta.content
 
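For reference, a minimal sketch of how a transformers question-answering pipeline like the one kept in this commit is typically invoked; the question and context strings below are illustrative assumptions, not taken from the app:

from transformers import pipeline

# Load an extractive QA pipeline backed by a SQuAD2-fine-tuned model.
qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")

# The pipeline extracts an answer span from the supplied context passage.
result = qa_pipeline(
    question="What does the shop specialize in?",  # hypothetical question
    context="Apex Customs specializes in custom car modifications.",  # hypothetical context
)
print(result["answer"], result["score"])  # answer span and its confidence score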