# Retrieval-augmented question answering over pasted text using LangChain,
# FAISS, and a Hugging Face extractive QA model.
from langchain.embeddings import SentenceTransformerEmbeddings  # langchain_community.embeddings in newer LangChain versions
from langchain.vectorstores import FAISS  # langchain_community.vectorstores in newer LangChain versions
from transformers import pipeline

# Paste your data here
data = """
Enter your text data here. For example:
"""

# Split the data into fixed-size word chunks for embedding
def chunk_text(text, chunk_size=500):
    words = text.split()
    chunks = [" ".join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]
    return chunks

# Prepare the text chunks
text_chunks = chunk_text(data)

# Generate embeddings and index the chunks in a FAISS vector store
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = FAISS.from_texts(text_chunks, embeddings)

# Load a lightweight extractive QA model from Hugging Face
qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")

# Retrieve the most relevant chunks and extract an answer from them
def answer_question(question):
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
    relevant_docs = retriever.get_relevant_documents(question)
    context = " ".join([doc.page_content for doc in relevant_docs])
    answer = qa_pipeline(question=question, context=context)
    return answer["answer"]

# Ask a question
print("Paste the text and ask your question.")
question = input("Your question: ")
answer = answer_question(question)
print("Answer:", answer)
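
# Optional follow-up: persist the FAISS index so the embedding step does not
# have to be repeated on every run. This is a minimal sketch, not part of the
# original script: save_local and load_local are standard LangChain FAISS
# methods, but the "faiss_index" directory name is an arbitrary choice for
# this example, and recent LangChain releases additionally require
# allow_dangerous_deserialization=True when loading, since the index is
# stored with pickle.
vectorstore.save_local("faiss_index")
# On a later run, skip re-embedding and load the saved index instead:
# vectorstore = FAISS.load_local("faiss_index", embeddings)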