PabloVD committed
Commit
127f3c4
1 Parent(s): fdb4410
Files changed (3):
  1. app.py +1 -1
  2. requirements.txt +5 -0
  3. worker.py +9 -11
app.py CHANGED
@@ -10,7 +10,7 @@ url = 'https://camels.readthedocs.io/_/downloads/en/latest/pdf/'
 r = requests.get(url, stream=True)
 document_path = Path('metadata.pdf')
 document_path.write_bytes(r.content)
-
+# document_path="2022GS.pdf"
 worker.process_document(document_path)
 
 def handle_prompt(message, history):
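
The only change here is a commented-out local-file override, presumably kept for offline testing. For context, a sketch of the surrounding app.py flow (the imports are assumed from the rest of the file, which is not shown in this hunk):

# Sketch of the app.py flow around this hunk; imports are assumed, not shown in the diff.
from pathlib import Path
import requests
import worker

url = 'https://camels.readthedocs.io/_/downloads/en/latest/pdf/'
r = requests.get(url, stream=True)        # download the CAMELS documentation PDF
document_path = Path('metadata.pdf')
document_path.write_bytes(r.content)
# document_path = Path('2022GS.pdf')      # optional local PDF for testing, as hinted by the new comment
worker.process_document(document_path)    # hand the PDF to the worker for processing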
requirements.txt CHANGED
@@ -1,4 +1,9 @@
+pdf2image
+pypdf
+tiktoken
 langchain
 langchain-community
 langchain-huggingface
 chromadb
+InstructorEmbedding
+huggingface_hub==0.25.2
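
A plausible reading of the new entries (not stated in the commit): pypdf backs PyPDFLoader, InstructorEmbedding backs HuggingFaceInstructEmbeddings, tiktoken supports token-aware text splitting, pdf2image handles PDF rendering, and huggingface_hub is pinned to 0.25.2, presumably to keep the embedding stack compatible. A hypothetical smoke test (not part of the repo) to confirm the new dependencies resolve before redeploying the Space:

# Hypothetical check, not part of the repo: verify the new requirements import cleanly.
import importlib

for module in ("pdf2image", "pypdf", "tiktoken", "InstructorEmbedding", "huggingface_hub"):
    importlib.import_module(module)
    print(f"{module}: OK")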
worker.py CHANGED
@@ -5,20 +5,20 @@ from langchain_community.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import Chroma
 from langchain_huggingface import HuggingFaceEndpoint
-from sentence_transformers import SentenceTransformer # Use SentenceTransformer module to use Hugging face Model
-import pip
+# import pip
 
-def install(package):
-    if hasattr(pip, 'main'):
-        pip.main(['install', package])
-    else:
-        pip._internal.main(['install', package])
+# def install(package):
+#     if hasattr(pip, 'main'):
+#         pip.main(['install', package])
+#     else:
+#         pip._internal.main(['install', package])
 
-# Temporal fix for incompatibility between langchain_huggingface and sentence-transformers<2.6
+# # Temporal fix for incompatibility between langchain_huggingface and sentence-transformers<2.6
 # install("sentence-transformers==2.2.2")
 
 # Check for GPU availability and set the appropriate device for computation.
 DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
+# DEVICE = "cpu"
 
 # Global variables
 conversation_retrieval_chain = None
@@ -49,11 +49,9 @@ def init_llm():
     #Initialize embeddings using a pre-trained model to represent the text data.
     embedddings_model = "sentence-transformers/multi-qa-distilbert-cos-v1"
     # embedddings_model = "sentence-transformers/all-MiniLM-L6-v2"
-
-    emb_model = SentenceTransformer(embedddings_model)
 
     embeddings = HuggingFaceInstructEmbeddings(
-        model_name=emb_model,
+        model_name=embedddings_model,
         model_kwargs={"device": DEVICE}
     )
 
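
The substantive fix is in the second hunk: HuggingFaceInstructEmbeddings takes the model identifier as a string, so the commit drops the intermediate SentenceTransformer object and passes the name directly (the runtime pip-install workaround in the first hunk is now just commented out). A minimal sketch of the corrected usage, with the import path assumed since it is not visible in the diff:

# Minimal sketch of the corrected embeddings setup; the model name and device come
# from the diff, while the import path is an assumption (not shown in the hunk).
from langchain_community.embeddings import HuggingFaceInstructEmbeddings

embeddings = HuggingFaceInstructEmbeddings(
    model_name="sentence-transformers/multi-qa-distilbert-cos-v1",  # a string, not a SentenceTransformer instance
    model_kwargs={"device": "cpu"},  # "cuda:0" when a GPU is available
)
vector = embeddings.embed_query("What is the CAMELS suite?")
print(len(vector))  # embedding dimensionality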