Update app.py
Browse files
app.py
CHANGED
@@ -28,9 +28,6 @@ import langchain
|
|
28 |
import os
|
29 |
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
# 嵌入模型
|
35 |
#embeddings = OpenAIEmbeddings()
|
36 |
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")
|
@@ -38,27 +35,17 @@ embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")
|
|
38 |
# 加载数据
|
39 |
#docsearch = FAISS.from_texts(texts, embeddings)
|
40 |
docsearch = FAISS.load_local("./faiss_index", embeddings)
|
41 |
-
|
42 |
-
#chain = load_qa_chain(OpenAI(temperature=0,model_name="gpt-3.5-turbo",prompt=chat_prompt), chain_type="stuff",verbose=True)
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
template="您是回答ANSYS软件使用查询的得力助手,所有回复必需用中文"
|
47 |
-
|
48 |
chain = load_qa_chain(OpenAI(temperature=0,model_name="gpt-3.5-turbo"), chain_type="stuff",verbose=True)
|
49 |
-
|
50 |
-
|
51 |
def predict(message, history):
|
52 |
history_langchain_format = []
|
53 |
-
for
|
54 |
-
history_langchain_format.append(SystemMessage(content=system))
|
55 |
history_langchain_format.append(HumanMessage(content=human))
|
56 |
history_langchain_format.append(AIMessage(content=ai))
|
57 |
history_langchain_format.append(HumanMessage(content=message))
|
58 |
docs = docsearch.similarity_search(message)
|
59 |
-
response = chain.run(input_documents=docs, question=message+template)
|
60 |
-
|
61 |
-
|
62 |
partial_message = ""
|
63 |
for chunk in response:
|
64 |
if len(chunk[0]) != 0:
|
|
|
# --- Module-level model / retrieval setup ---

import os

# OpenAI credential, taken from the environment (used implicitly by the
# OpenAI LLM below, which reads OPENAI_API_KEY).
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Embedding model (Chinese comment in the original: "嵌入模型").
# An OpenAI alternative was previously considered:
# embeddings = OpenAIEmbeddings()
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")

# Load the pre-built vector store (original comment: "加载数据").
# Building from raw texts was the earlier approach:
# docsearch = FAISS.from_texts(texts, embeddings)
# NOTE(review): recent LangChain releases require
# allow_dangerous_deserialization=True for load_local — confirm the pinned
# langchain version before upgrading.
docsearch = FAISS.load_local("./faiss_index", embeddings)

# Instruction appended to every user question. The Chinese text means:
# "You are a helpful assistant answering ANSYS software usage queries;
#  all replies must be in Chinese."
template = "您是回答ANSYS软件使用查询的得力助手,所有回复必需用中文"

# "stuff"-type QA chain: retrieved documents are stuffed directly into the
# prompt of the LLM.
# NOTE(review): "gpt-3.5-turbo" is a chat model; the completion-style
# OpenAI wrapper may warn and proxy to the chat API — verify against the
# installed langchain version.
chain = load_qa_chain(
    OpenAI(temperature=0, model_name="gpt-3.5-turbo"),
    chain_type="stuff",
    verbose=True,
)
40 |
def predict(message, history):
|
41 |
history_langchain_format = []
|
42 |
+
for human, ai in history:
|
|
|
43 |
history_langchain_format.append(HumanMessage(content=human))
|
44 |
history_langchain_format.append(AIMessage(content=ai))
|
45 |
history_langchain_format.append(HumanMessage(content=message))
|
46 |
docs = docsearch.similarity_search(message)
|
47 |
+
response = chain.run(input_documents=docs, question=message + template)
|
48 |
+
|
|
|
49 |
partial_message = ""
|
50 |
for chunk in response:
|
51 |
if len(chunk[0]) != 0:
|