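"""ChatPDF: a minimal local RAG pipeline over a single PDF.

Loads a PDF with PyMuPDF, splits it into overlapping chunks, embeds the
chunks into an in-memory Chroma vector store with FastEmbed, and answers
questions with a locally served Ollama chat model (qwen:1.8b).
"""
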
from langchain_community.vectorstores import Chroma
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import FastEmbedEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain_community.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.runnable import RunnablePassthrough
from langchain.prompts import PromptTemplate
from langchain_community.vectorstores.utils import filter_complex_metadata


class ChatPDF:
    def __init__(self):
        self.vector_store = None
        self.retriever = None
        self.chain = None

        # Local chat model served by Ollama; keep_alive=-1 keeps the model
        # loaded in memory between calls.
        self.model = ChatOllama(
            model="qwen:1.8b",
            keep_alive=-1,
            temperature=0,
            num_predict=512,  # Ollama's response-length cap (its max_tokens equivalent)
            repeat_penalty=1.3,
        )

        # Split the PDF into overlapping chunks sized for retrieval.
        self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=2048, chunk_overlap=128)
        self.prompt = PromptTemplate.from_template(
            """
            <|im_start|>system
            You are an assistant for question-answering tasks. Use the following pieces of
            retrieved context to answer the question. If you don't know the answer, just say
            that you don't know. Use at most 512 characters and keep the answer concise.
            <|im_end|>
            <|im_start|>user
            Question: {question}
            Context: {context}
            <|im_end|>
            <|im_start|>assistant
            """
        )

    def ingest(self, pdf_file_path: str):
        # Load the PDF, split it into chunks, and strip metadata types that
        # Chroma cannot store.
        docs = PyMuPDFLoader(file_path=pdf_file_path).load()
        chunks = self.text_splitter.split_documents(docs)
        chunks = filter_complex_metadata(chunks)

        # Keep a reference to the store on the instance so clear() can release it.
        self.vector_store = Chroma.from_documents(documents=chunks, embedding=FastEmbedEmbeddings())
        self.retriever = self.vector_store.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={
                "k": 4,  # return up to the 4 best-matching chunks...
                "score_threshold": 0.5,  # ...that clear this similarity score
            },
        )

        # Retrieval-augmented chain: fetch context for the question, fill the
        # prompt, call the model, and parse the reply into a plain string.
        self.chain = (
            {"context": self.retriever, "question": RunnablePassthrough()}
            | self.prompt
            | self.model
            | StrOutputParser()
        )

    def ask(self, query: str):
        if not self.chain:
            return "Please add a PDF document first."

        return self.chain.invoke(query)

    def clear(self):
        # Drop the ingested document so a fresh PDF can be loaded.
        self.vector_store = None
        self.retriever = None
        self.chain = None
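

if __name__ == "__main__":
    # Minimal usage sketch. Assumes an Ollama server is running locally with
    # the qwen:1.8b model pulled; "sample.pdf" and the question below are
    # placeholder values.
    assistant = ChatPDF()
    assistant.ingest("sample.pdf")
    print(assistant.ask("What is this document about?"))
    assistant.clear()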