muhammadshaheryar commited on
Commit
0046795
·
verified ·
1 Parent(s): c7ee2c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -49
app.py CHANGED
@@ -1,49 +1,46 @@
# NOTE(review): the previous `!pip install ...` lines were IPython/Colab shell
# magics — they are a SyntaxError in a plain .py file. Dependencies belong in
# requirements.txt (transformers, langchain, langchain-community,
# sentence-transformers, faiss-cpu, streamlit).

from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS
from transformers import pipeline


# Paste your data here (placeholder corpus — replace before deploying).
data = """
Enter your text data here. For example:
"""


# Split data into chunks for embedding
def chunk_text(text, chunk_size=500):
    """Split *text* into chunks of at most chunk_size whitespace-separated words.

    Returns a list of strings; empty/whitespace-only input yields [].
    """
    words = text.split()
    return [" ".join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]


# Prepare the text chunks
text_chunks = chunk_text(data)

# Generate embeddings and index the data in an in-memory FAISS store
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = FAISS.from_texts(text_chunks, embeddings)

# Load a simple extractive QA model. The duplicate
# `from transformers import pipeline` was removed — it is already imported above.
qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")


# Define a function to perform QA
def answer_question(question):
    """Answer *question* using the top-3 chunks retrieved from the vector store."""
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
    relevant_docs = retriever.get_relevant_documents(question)
    context = " ".join(doc.page_content for doc in relevant_docs)
    answer = qa_pipeline(question=question, context=context)
    return answer["answer"]


# Ask a question (CLI driver; NOTE(review): `input()` will not work inside a
# Streamlit app — confirm how this module is actually launched).
print("Paste the text and ask your question.")
question = input("Your question: ")
answer = answer_question(question)
print("Answer:", answer)
 
1
+
2
+
3
+ # Continue with the rest of the code
4
+ from langchain.chains import RetrievalQA
5
+ from langchain.document_loaders import TextLoader
6
+ from langchain.embeddings import SentenceTransformerEmbeddings
7
+ from langchain.vectorstores import FAISS
8
+ from transformers import pipeline
9
+
10
+
11
+
12
# Paste your data here.
# NOTE(review): this is a placeholder corpus — replace with real text before use.
data = """
Enter your text data here. For example:
"""
16
+
17
# Split data into chunks for embedding
def chunk_text(text, chunk_size=500):
    """Split *text* into whitespace-delimited chunks of at most *chunk_size* words.

    Returns a list of strings; empty or whitespace-only input yields [].
    """
    words = text.split()
    return [
        " ".join(words[start:start + chunk_size])
        for start in range(0, len(words), chunk_size)
    ]
22
+
23
# Prepare the text chunks for indexing.
text_chunks = chunk_text(data)

# Generate embeddings and build an in-memory FAISS index over the chunks.
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = FAISS.from_texts(text_chunks, embeddings)
29
+
30
# Load a simple extractive QA model (Hugging Face pipeline).
# The redundant `from transformers import pipeline` was removed — `pipeline`
# is already imported at the top of the file.
qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
33
+
34
# Define a function to perform QA
def answer_question(question):
    """Answer *question* from the top-3 chunks retrieved out of the vector store."""
    # Retrieve the most relevant chunks, then let the extractive model pick a span.
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
    docs = retriever.get_relevant_documents(question)
    context = " ".join(doc.page_content for doc in docs)
    result = qa_pipeline(question=question, context=context)
    return result["answer"]
41
+
42
# Ask a question — simple CLI driver.
def main():
    """Prompt for a question on stdin and print the extracted answer."""
    print("Paste the text and ask your question.")
    question = input("Your question: ")
    answer = answer_question(question)
    print("Answer:", answer)


# Guard the entry point so importing this module does not block on stdin.
# NOTE(review): `input()` does not work under Streamlit — confirm launch mode.
if __name__ == "__main__":
    main()