Mr-Cool committed on
Commit
9230fc6
1 Parent(s): 7940f63

Update functions.py

Files changed (1): functions.py +5 -3
functions.py CHANGED
@@ -1,6 +1,6 @@
 from langchain_community.document_loaders import PyMuPDFLoader
 from langchain_text_splitters import RecursiveCharacterTextSplitter
-from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI
+from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI, OpenAIEmbeddings, ChatOpenAI
 from operator import itemgetter
 from langchain_core.runnables import RunnablePassthrough
 from langchain_qdrant import QdrantVectorStore
@@ -43,7 +43,8 @@ def setup_vector_db():
 
     nist_chunks = text_splitter.split_documents(nist_doc)
 
-    embeddings_small = AzureOpenAIEmbeddings(azure_deployment="text-embedding-3-small")
+    # embeddings_small = AzureOpenAIEmbeddings(azure_deployment="text-embedding-3-small")
+    embeddings_small = OpenAIEmbeddings(model="text-embedding-3-small")
 
     qdrant_client = QdrantClient(":memory:") # set Qdrant DB and its location (in-memory)
 
@@ -68,7 +69,8 @@ def setup_vector_db():
 
 # define a global variable to store the retriever object
 retriever = setup_vector_db()
-qa_gpt4_llm = AzureChatOpenAI(azure_deployment="gpt-4o-mini", temperature=0) # GPT-4o-mini model
+# qa_gpt4_llm = AzureChatOpenAI(azure_deployment="gpt-4o-mini", temperature=0) # GPT-4o-mini model
+qa_gpt4_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0) # GPT-4o-mini model
 
 # define a template for the RAG model
 rag_template = """
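
For context, below is a minimal sketch of how the switched-in OpenAIEmbeddings and ChatOpenAI classes typically plug into a Qdrant-backed RAG pipeline like the one in functions.py. Only the class names, the model names, and QdrantClient(":memory:") come from the diff above; the collection name, vector size, prompt text, and chain wiring are illustrative assumptions, not part of this commit.

# Minimal sketch (not from this commit): wiring OpenAIEmbeddings / ChatOpenAI into
# an in-memory Qdrant retriever and a small RAG chain. Assumes OPENAI_API_KEY is set;
# the collection name "nist_rag" and the prompt are hypothetical placeholders.
from operator import itemgetter

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams

# Unlike AzureOpenAIEmbeddings/AzureChatOpenAI (which need an Azure endpoint, API version,
# and deployment name), these classes only read OPENAI_API_KEY from the environment.
embeddings_small = OpenAIEmbeddings(model="text-embedding-3-small")
qa_gpt4_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

# In-memory Qdrant collection sized for text-embedding-3-small (1536-dimensional vectors).
qdrant_client = QdrantClient(":memory:")
qdrant_client.create_collection(
    collection_name="nist_rag",  # hypothetical name for illustration
    vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
)

vector_store = QdrantVectorStore(
    client=qdrant_client,
    collection_name="nist_rag",
    embedding=embeddings_small,
)
retriever = vector_store.as_retriever()

# Tiny RAG chain: fetch context for the question, format the prompt, call the chat model.
rag_prompt = ChatPromptTemplate.from_template(
    "Answer using only the context below.\n\nContext:\n{context}\n\nQuestion: {question}"
)
rag_chain = (
    {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
    | rag_prompt
    | qa_gpt4_llm
)
# answer = rag_chain.invoke({"question": "What does the NIST document cover?"})

Because OpenAIEmbeddings and ChatOpenAI implement the same LangChain interfaces as their Azure counterparts, the rest of setup_vector_db() and the RAG chain can stay unchanged; only the credentials and the model/deployment naming differ.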