ali121300 committed on
Commit
e2268d4
1 Parent(s): 56ca575

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -51,19 +51,19 @@ def get_vectorstore(text_chunks : list) -> FAISS:
51
  return vectorstore
52
 
53
 
54
- def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
55
- # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
56
- llm = HuggingFaceHub(
57
- #repo_id="mistralai/Mistral-7B-Instruct-v0.2",
58
- #repo_id="cognitivecomputations/Llama-3-70B-Gradient-1048k-adapter",
59
- #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
60
- repo_id="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
61
- model_kwargs={"temperature": 0.1, "max_length": 2048},
 
62
  )
63
-
64
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
65
  conversation_chain = ConversationalRetrievalChain.from_llm(
66
- llm=llm, retriever=vectorstore.as_retriever(),memory=memory
67
  )
68
  return conversation_chain
69
 
 
51
  return vectorstore
52
 
53
 
54
def get_conversation_chain(vectorstore: FAISS) -> ConversationalRetrievalChain:
    """Build a conversational retrieval chain backed by a local LM Studio server.

    Args:
        vectorstore: FAISS index whose retriever supplies document context.

    Returns:
        A ConversationalRetrievalChain wired to the local chat model with
        buffered chat history.
    """
    # BUG FIX: the previous code assigned the *result* of a one-off
    # client.chat.completions.create(...) call to `llm`. That returns a
    # ChatCompletion response object, not a model, so
    # ConversationalRetrievalChain.from_llm() would fail at runtime. It also
    # eagerly fired a network request with demo prompts leaked from the
    # LM Studio quickstart ("Always answer in rhymes.").
    #
    # LangChain needs a chat-model wrapper. ChatOpenAI speaks the
    # OpenAI-compatible API that LM Studio serves at localhost:1234.
    # NOTE(review): assumes `ChatOpenAI` is imported at the top of app.py
    # (it appears in this file's earlier commented-out code) — confirm.
    llm = ChatOpenAI(
        base_url="http://localhost:1234/v1",
        api_key="lm-studio",  # LM Studio ignores the key, but the client requires one
        model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
        temperature=0.5,
    )

    # Keep full chat history as messages so the chain can condense follow-ups.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm, retriever=vectorstore.as_retriever(), memory=memory
    )
    return conversation_chain
69