fengtc committed
Commit dd38e98 · 1 Parent(s): eb3b23d

Update app.py

Files changed (1)
  1. app.py +27 -10
app.py CHANGED
@@ -6,11 +6,23 @@ from langchain.chains.question_answering import load_qa_chain
 from langchain.llms import OpenAI
 from gradio import gradio as gr
 from langchain.chat_models import ChatOpenAI
-#from langchain.memory import ConversationBufferMemor
-from langchain.schema import AIMessage, HumanMessage
+
 from langchain import PromptTemplate, LLMChain
 from langchain.llms import TextGen
 from langchain.cache import InMemoryCache
+
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+from langchain.schema import (
+    AIMessage,
+    HumanMessage,
+    SystemMessage
+)
+
 import time
 import langchain
 import os
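The prompt-template classes imported in this hunk are only exercised by commented-out lines in the second hunk below, one of which is missing a closing parenthesis. A minimal sketch, not part of this commit, of the construction those comments appear to be heading toward, reusing the same system template (roughly: "You are a capable assistant for answering all ANSYS software usage questions; if a question is out of scope, reply that it is not covered by this knowledge base and ask the user to rephrase"); the sample question passed to format_messages is a placeholder:

from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

# Same system template as the commented-out line in the next hunk, keeping the
# {input_language}/{output_language} placeholders it already contains.
template = "您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答您提的问题不在本知识库内,请重新提问. {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)

human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)  # closing parenthesis added

# Combine into a chat prompt and render concrete messages.
chat_prompt = ChatPromptTemplate.from_messages(
    [system_message_prompt, human_message_prompt]
)
messages = chat_prompt.format_messages(
    input_language="English",
    output_language="Chinese",
    text="How do I open an ANSYS Workbench project?",  # placeholder question
)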
@@ -23,19 +35,24 @@ embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")
 # 加载数据
 #docsearch = FAISS.from_texts(texts, embeddings)
 docsearch = FAISS.load_local("./faiss_index", embeddings)
+chain = load_qa_chain(OpenAI(temperature=0,model_name="gpt-3.5-turbo", verbose=True), chain_type="stuff",verbose=True)
 
 
-chain = load_qa_chain(OpenAI(temperature=0,model_name="gpt-3.5-turbo", verbose=True), chain_type="stuff",verbose=True)
 
-prompt = "您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答“您提的问题不在本知识库内,请重新提问”,所有问题必需用中文回答"
+#template="您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答您提的问题不在本知识库内,请重新提问. {input_language} to {output_language}."
+#system_message_prompt = SystemMessagePromptTemplate.from_template(template)
+#human_template="{text}"
+#human_message_prompt = HumanMessagePromptTemplate.from_template(human_template
+
+prompt = "您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答您提的问题不在本知识库内,请重新提问,所有问题必需用中文回答"
 
 def predict(message, history):
-    history_openai_format = []
-    for human, assistant in history:
-        history_openai_format.append({"role": "system", "content": prompt })
-        history_openai_format.append({"role": "user", "content": human })
-        history_openai_format.append({"role": "assistant", "content":assistant})
-    history_openai_format.append({"role": "user", "content": message})
+    history_langchain_format = []
+    for human, ai in history:
+        history_langchain_format.append(SystemMessage(content=prompt))
+        history_langchain_format.append(HumanMessage(content=human))
+        history_langchain_format.append(AIMessage(content=ai))
+    history_langchain_format.append(HumanMessage(content=message))
     docs = docsearch.similarity_search(message)
     response = chain.run(input_documents=docs, question=message)
 
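The rewritten predict() builds the chat history as LangChain message objects instead of OpenAI-style role dictionaries, attaching the Chinese system prompt (roughly: "You are a capable assistant for all ANSYS software usage questions; if a question is out of scope, reply that it is not in this knowledge base and ask the user to rephrase; all answers must be in Chinese") as a SystemMessage on every turn. The diff does not show how predict is served; a self-contained sketch, assuming the usual gr.ChatInterface wiring, of the signature contract it follows: history arrives as [user, assistant] pairs and the function returns the reply string.

import gradio as gr

def predict(message, history):
    # Placeholder body: app.py's version runs the FAISS retrieval and the
    # load_qa_chain "stuff" chain here and returns its answer string.
    return f"Echo ({len(history)} previous turns): {message}"

gr.ChatInterface(fn=predict).launch()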
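The context line docsearch = FAISS.load_local("./faiss_index", embeddings) (under the comment 加载数据, "load data") assumes a prebuilt index on disk. A sketch, not part of this commit and using placeholder texts, of how such an index can be built once with the same BAAI/bge-large-en embedding model and persisted for load_local:

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")

# Placeholder corpus; in practice this would be the ANSYS documentation chunks.
texts = [
    "ANSYS documentation passage 1 ...",
    "ANSYS documentation passage 2 ...",
]

docsearch = FAISS.from_texts(texts, embeddings)  # embed and index the texts
docsearch.save_local("./faiss_index")            # consumed later by FAISS.load_local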