mitulagr2 commited on
Commit
aa92e24
·
1 Parent(s): 833a9a6

Update to LLM

Browse files
Files changed (1) hide show
  1. app/rag.py +4 -4
app/rag.py CHANGED
@@ -1,5 +1,5 @@
1
  from langchain_community.vectorstores import Chroma
2
- from langchain_community.chat_models import ChatOllama
3
  from langchain_community.embeddings import FastEmbedEmbeddings
4
  from langchain.schema.output_parser import StrOutputParser
5
  from langchain_community.document_loaders import PyMuPDFLoader
@@ -15,7 +15,7 @@ class ChatPDF:
15
  chain = None
16
 
17
  def __init__(self):
18
- self.model = ChatOllama(
19
  model="qwen:1.8b",
20
  keep_alive=-1,
21
  temperature=0,
@@ -30,8 +30,8 @@ class ChatPDF:
30
  """
31
  <|im_start|> You are an assistant for question-answering tasks. Use the following pieces of retrieved context to
32
  answer the question. If you don't know the answer, just say that you don't know. Use 512 characters
33
- maximum and keep the answer concise. <|im_end|>
34
- <|im_start|> Question: {question}
35
  Context: {context}
36
  Answer: <|im_end|>
37
  """
 
1
  from langchain_community.vectorstores import Chroma
2
+ from langchain_community.llms import Ollama
3
  from langchain_community.embeddings import FastEmbedEmbeddings
4
  from langchain.schema.output_parser import StrOutputParser
5
  from langchain_community.document_loaders import PyMuPDFLoader
 
15
  chain = None
16
 
17
  def __init__(self):
18
+ self.model = Ollama(
19
  model="qwen:1.8b",
20
  keep_alive=-1,
21
  temperature=0,
 
30
  """
31
  <|im_start|> You are an assistant for question-answering tasks. Use the following pieces of retrieved context to
32
  answer the question. If you don't know the answer, just say that you don't know. Use 512 characters
33
+ maximum and keep the answer concise.
34
+ Question: {question}
35
  Context: {context}
36
  Answer: <|im_end|>
37
  """