Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -51,19 +51,19 @@ def get_vectorstore(text_chunks : list) -> FAISS:
|
|
51 |
return vectorstore
|
52 |
|
53 |
|
54 |
-
def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
|
55 |
-
|
56 |
-
llm =
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
|
|
62 |
)
|
63 |
-
|
64 |
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
|
65 |
conversation_chain = ConversationalRetrievalChain.from_llm(
|
66 |
-
llm=llm, retriever=vectorstore.as_retriever(),memory=memory
|
67 |
)
|
68 |
return conversation_chain
|
69 |
|
|
|
51 |
return vectorstore
|
52 |
|
53 |
|
54 |
+
def get_conversation_chain(vectorstore: FAISS) -> ConversationalRetrievalChain:
    """Build a conversational retrieval chain over the given FAISS store.

    Args:
        vectorstore: FAISS index whose retriever supplies context documents
            for each question.

    Returns:
        A ConversationalRetrievalChain backed by a local LM Studio model,
        with buffered chat history so follow-up questions keep context.
    """
    # BUG FIX: the previous code called client.chat.completions.create(...)
    # with a hard-coded "Introduce yourself" / "Always answer in rhymes"
    # prompt (leftover from the LM Studio quick-start example) and bound the
    # resulting ChatCompletion *response* object to `llm`. from_llm() needs
    # a LangChain chat-model it can invoke per question, not one finished
    # response — so the chain could never actually call the model.
    # Build a proper chat model pointed at the same LM Studio endpoint.
    from langchain_openai import ChatOpenAI  # local import; move to top-level imports if preferred

    llm = ChatOpenAI(
        base_url="http://localhost:1234/v1",  # LM Studio's OpenAI-compatible local server
        api_key="lm-studio",  # LM Studio ignores the key, but the SDK requires a non-empty one
        model="lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF",
        temperature=0.5,
    )

    # return_messages=True keeps history as message objects, which chat
    # models expect (rather than one flattened string).
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm, retriever=vectorstore.as_retriever(), memory=memory
    )
    return conversation_chain
|
69 |
|