|
|
|
|
|
|
|
|
|
!huggingface-cli login |
|
|
|
!mkdir pdfs |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from langchain.document_loaders import PyPDFDirectoryLoader |
|
|
|
loader=PyPDFDirectoryLoader("pdfs") |
|
|
|
data=loader.load() |
|
|
|
|
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
|
|
text_chunks=RecursiveCharacterTextSplitter(chunk_size=300,chunk_overlap=20) |
|
|
|
chunks=text_chunks.split_documents(data) |
|
|
|
len(chunks) |
|
|
|
|
|
from langchain.embeddings import HuggingFaceEmbeddings |
|
|
|
|
|
|
|
|
|
|
|
embeddings=HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2") |
|
|
|
|
|
|
|
|
|
from langchain.vectorstores import FAISS |
|
|
|
vectordabase=FAISS.from_documents(chunks,embeddings) |
|
|
|
question="what is generative ai?" |
|
|
|
vectordabase.similarity_search(question) |
|
|
|
model="google/flan-t5-large" |
|
|
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM,pipeline |
|
from langchain import HuggingFacePipeline |
|
|
|
tokenizer=AutoTokenizer.from_pretrained(model) |
|
model1=AutoModelForSeq2SeqLM.from_pretrained(model) |
|
|
|
pipe=pipeline("text2text-generation",model=model1,tokenizer=tokenizer) |
|
|
|
llm=HuggingFacePipeline(pipeline=pipe, model_kwargs={"temperature": 0, "max_length": 512}) |
|
|
|
|
|
from langchain.prompts import PromptTemplate |
|
|
|
template = """use the context to provide a concise answer and if you don't know just say don't now. |
|
{context} |
|
Question: {question} |
|
Helpful Answer:""" |
|
QA_CHAIN_PROMPT = PromptTemplate.from_template(template) |
|
|
|
from langchain.chains import RetrievalQA |
|
qa_chain = RetrievalQA.from_chain_type( |
|
llm, retriever=vectordabase.as_retriever(), chain_type_kwargs={"prompt": QA_CHAIN_PROMPT} |
|
) |
|
|
|
qa_chain.run("what is generative ai?") |
|
|
|
question="more detail about generative ai?" |
|
result = qa_chain({"query": question}) |
|
print(result["result"]) |
|
|
|
def fetch(question,history): |
|
result=qa_chain({"query":question}) |
|
return result["result"] |
|
|
|
|
|
|
|
|
|
import gradio as gr |
|
|
|
gr.ChatInterface( |
|
fetch, |
|
chatbot=gr.Chatbot(height=300), |
|
textbox=gr.Textbox(placeholder="You can ask a question", container=False, scale=7), |
|
title="A Generative AI chatbot", |
|
description="You can ask questions related to Generative AI", |
|
theme="soft", |
|
examples=["what is Generative AI?", "what are the examples?", "what is LLM?"], |
|
cache_examples=True, |
|
retry_btn=None, |
|
undo_btn="Delete Previous", |
|
clear_btn="Clear", |
|
).launch(share=True) |
|
|
|
|