rishisim committed · verified
Commit dd0bd1a · 1 Parent(s): 9adf0de

Update app.py

Files changed (1):
  app.py +17 -18
app.py CHANGED
@@ -1,43 +1,43 @@
 import gradio as gr
 # from langchain.llms import GooglePalm
 from langchain_google_genai import GoogleGenerativeAI
+from langchain.document_loaders.csv_loader import CSVLoader
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain.vectorstores import FAISS
+from langchain.prompts import PromptTemplate
+from langchain.chains import RetrievalQA
+import warnings
 
-from transformers import AutoModelForCausalLM, AutoTokenizer
 
-llm = "mistralai/Mixtral-8x22B-Instruct-v0.1"
+# from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import pipeline
 
-api_key = "AIzaSyCdM_aAIsW_nPbjarOF83mbX1_z1cVX2_M"
+pipe = pipeline("text-generation", model = "meta-llama/Meta-Llama-3-70B-Instruct")
+
+# llm = "mistralai/Mixtral-8x22B-Instruct-v0.1"
+
+# api_key = "AIzaSyCdM_aAIsW_nPbjarOF83mbX1_z1cVX2_M"
 
 # llm = GoogleGenerativeAI(model="models/text-bison-001", google_api_key=api_key)
 # llm = GooglePalm(google_api_key = api_key, temperature=0.7)
 
-from langchain.document_loaders.csv_loader import CSVLoader
-
+# LOADING CSV FILE
 loader = CSVLoader(file_path='aiotsmartlabs_faq.csv', source_column = 'prompt')
 data = loader.load()
 
-from langchain_huggingface import HuggingFaceEmbeddings
-from langchain.vectorstores import FAISS
-import warnings
-
-# Suppress specific warnings if they are not critical
+# SUPPRESSING WARNINGS
 warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
 warnings.filterwarnings("ignore", category=FutureWarning, message="`resume_download` is deprecated")
 
-# Define the embedding model
-# Using a smaller model for demonstration purposes; adjust according to your needs
-model_name = "BAAI/bge-m3"
 
-# Initialize HuggingFace embeddings
+# EMBEDDING MODEL
+model_name = "BAAI/bge-m3"
 instructor_embeddings = HuggingFaceEmbeddings(model_name=model_name)
 
 # Create FAISS vector store from documents
 vectordb = FAISS.from_documents(documents=data, embedding=instructor_embeddings)
-
 retriever = vectordb.as_retriever()
 
-from langchain.prompts import PromptTemplate
-
 prompt_template = """Given the following context and a question, generate an answer based on the context only.
 
 In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
@@ -52,7 +52,6 @@ PROMPT = PromptTemplate(
     template = prompt_template, input_variables = ["context", "question"]
 )
 
-from langchain.chains import RetrievalQA
 
 chain = RetrievalQA.from_chain_type(llm = llm,
                                     chain_type="stuff",
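
Note: after this change, llm is never defined in app.py. The old llm = "mistralai/Mixtral-8x22B-Instruct-v0.1" and api_key assignments are now commented out, yet RetrievalQA.from_chain_type(llm = llm, ...) still references llm, and the new pipe object is created but never used. A minimal sketch of one way to bridge the two, assuming the HuggingFacePipeline wrapper from the langchain_huggingface package (not part of this commit):

# Hypothetical fix, not in this commit: wrap the transformers pipeline
# as a LangChain LLM so the llm passed to RetrievalQA is defined.
from langchain_huggingface import HuggingFacePipeline

llm = HuggingFacePipeline(pipeline=pipe)  # pipe is the pipeline built above

chain = RetrievalQA.from_chain_type(llm = llm,
                                    chain_type = "stuff",
                                    retriever = retriever)

Alternatively, HuggingFacePipeline.from_model_id(model_id="meta-llama/Meta-Llama-3-70B-Instruct", task="text-generation") builds the pipeline internally; either way, the chain expects the LangChain wrapper rather than the raw transformers pipeline object.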