File size: 2,972 Bytes
197f59d dd38e98 197f59d f0b0422 dd38e98 eb3b23d 84e13f4 197f59d 417067d 197f59d 417067d 2f94971 dd38e98 197f59d c559a2e dd38e98 c559a2e 197f59d dd38e98 c559a2e dd38e98 715a900 f046416 197f59d f0b0422 417067d f0b0422 417067d eb3b23d c77d9c3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 |
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from gradio import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.llms import TextGen
from langchain.cache import InMemoryCache
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
import time
import langchain
import os
# OpenAI API key for the QA chain's LLM; None if the env var is unset.
OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
# Embedding model (local HuggingFace model instead of the OpenAI API)
#embeddings = OpenAIEmbeddings()
embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en")
# Load the pre-built FAISS vector index from disk (must have been created
# with the same embedding model as above)
#docsearch = FAISS.from_texts(texts, embeddings)
docsearch = FAISS.load_local("./faiss_index", embeddings)
# QA chain: "stuff" all retrieved documents into a single prompt.
# NOTE(review): OpenAI(...) is the completion-style wrapper while
# gpt-3.5-turbo is a chat model — ChatOpenAI may be intended; confirm.
chain = load_qa_chain(OpenAI(temperature=0,model_name="gpt-3.5-turbo", verbose=True), chain_type="stuff",verbose=True)
# System prompt (Chinese): "You are a helpful assistant answering all ANSYS
# software usage queries; if the question is out of scope, reply that it is
# not in this knowledge base and ask the user to rephrase."
# NOTE(review): the {input_language}/{output_language} placeholders look like
# leftovers from a translation example, and these prompt objects are never
# used by predict() below — confirm whether they should be wired in.
template="您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答您提的问题不在本知识库内,请重新提问. {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
# Unused alternative system prompt, kept for reference (Chinese text intact):
#prompt = "您是回答所有ANSYS软件使用查询的得力助手,如果所问的内容不在范围内,请回答您提的问题不在本知识库内,请重新提问,所有问题必需用中文回答"
def predict(message, history):
    """Answer *message* from the FAISS knowledge base, streaming the reply.

    Parameters
    ----------
    message : str
        The user's current question.
    history : list[tuple[str, str]]
        Prior (human, ai) turns supplied by ``gr.ChatInterface``. Currently
        unused: the QA chain is stateless per question. (The original code
        built a LangChain message list from it but never passed it anywhere,
        and it referenced an undefined name ``system`` — a NameError on any
        call with non-empty history — so that dead code is removed.)

    Yields
    ------
    str
        Progressively longer prefixes of the answer, one character at a
        time, to simulate token streaming in the Gradio UI.
    """
    # Retrieve the documents most similar to the question, then run the
    # "stuff" QA chain over them.
    docs = docsearch.similarity_search(message)
    response = chain.run(input_documents=docs, question=message)
    # Stream the answer character by character. (The original guard
    # ``len(chunk[0]) != 0`` was always true for a 1-char string chunk.)
    partial_message = ""
    for ch in response:
        time.sleep(0.1)
        partial_message += ch
        yield partial_message
# Cache identical LLM calls in memory for the lifetime of this process.
langchain.llm_cache = InMemoryCache()

# Build the Gradio chat UI around predict() and launch the web server.
question_box = gr.Textbox(placeholder="请输入您的问题", container=False, scale=7)
demo = gr.ChatInterface(
    predict,
    textbox=question_box,
    title="欢迎使用ANSYS软件AI机器人",
    examples=["你是谁?", "请介绍一下Fluent 软件的用户界面说明", "create-bounding-box", "ANSYS Fluent Architecture"],
    description="🦊请避免输入有违公序良俗的问题,模型可能无法回答不合适的问题🐇",
)
demo.queue().launch()