Keldos committed
Commit · 6e7c873
1 Parent(s): e930b75
Revert "添加PaLM支持(未完成)" (Revert "Add PaLM support (unfinished)")

This reverts commits 8043b80a0a50c2c084ea1e70b6677fbe0845bcf9 and 8029644127147928e0e43fcb45fd2c7cd8056fef.
- modules/config.py +0 -3
- modules/models/PaLM.py +0 -11
- modules/models/base_model.py +14 -3
- modules/models/models.py +0 -3
- modules/presets.py +0 -1
- requirements.txt +0 -1
modules/config.py
CHANGED
@@ -77,9 +77,6 @@ my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
 xmchat_api_key = config.get("xmchat_api_key", "")
 os.environ["XMCHAT_API_KEY"] = xmchat_api_key
 
-google_palm_api_key = config.get("google_palm_api_key", "")
-os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
-
 render_latex = config.get("render_latex", True)
 
 if render_latex:
modules/models/PaLM.py
DELETED
@@ -1,11 +0,0 @@
-from .base_model import BaseLLMModel, CallbackToIterator, ChuanhuCallbackHandler
-from langchain.chat_models import ChatGooglePalm
-import os
-
-class PaLM_Client(BaseLLMModel):
-    def __init__(self, model_name, user="") -> None:
-        super().__init__(model_name, user)
-        self.llm = ChatGooglePalm(google_api_key=os.environ["GOOGLE_PALM_API_KEY"])
-
-    def get_answer_at_once(self):
-        self.llm.generate(self.history)
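Note on the deleted client: PaLM_Client.get_answer_at_once() passed the raw history list straight to self.llm.generate() and returned nothing, which is presumably why the original commit was labelled unfinished. A minimal standalone sketch of how LangChain's ChatGooglePalm could be called instead is shown below; the helper name and the history-to-message conversion are assumptions for illustration, not code from this repository.

# Hypothetical sketch, not part of the reverted code.
import os

from langchain.chat_models import ChatGooglePalm
from langchain.schema import AIMessage, HumanMessage


def palm_answer_at_once(history):
    """history: a list of {"role": ..., "content": ...} dicts (assumed format)."""
    llm = ChatGooglePalm(google_api_key=os.environ["GOOGLE_PALM_API_KEY"])
    # Convert plain role/content dicts into LangChain chat messages.
    messages = [
        HumanMessage(content=m["content"]) if m["role"] == "user"
        else AIMessage(content=m["content"])
        for m in history
    ]
    # Chat models are callable on a message list and return an AIMessage.
    return llm(messages).content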
modules/models/base_model.py
CHANGED
@@ -108,7 +108,6 @@ class ModelType(Enum):
     MOSS = 5
     YuanAI = 6
     ChuanhuAgent = 7
-    PaLM = 8
 
     @classmethod
     def get_type(cls, model_name: str):
@@ -130,8 +129,6 @@ class ModelType(Enum):
             model_type = ModelType.YuanAI
         elif "川虎助理" in model_name_lower:
             model_type = ModelType.ChuanhuAgent
-        elif "palm" in model_name_lower:
-            model_type = ModelType.PaLM
         else:
             model_type = ModelType.Unknown
         return model_type
@@ -263,6 +260,20 @@ class BaseLLMModel:
         if files:
             index = construct_index(self.api_key, file_src=files)
             status = i18n("索引构建完成")
+            # Summarize the document
+            logging.info(i18n("生成内容总结中……"))
+            os.environ["OPENAI_API_KEY"] = self.api_key
+            from langchain.chains.summarize import load_summarize_chain
+            from langchain.prompts import PromptTemplate
+            from langchain.chat_models import ChatOpenAI
+            from langchain.callbacks import StdOutCallbackHandler
+            prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
+            PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
+            llm = ChatOpenAI()
+            chain = load_summarize_chain(llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
+            summary = chain({"input_documents": list(index.docstore.__dict__["_dict"].values())}, return_only_outputs=True)["output_text"]
+            print(i18n("总结") + f": {summary}")
+            chatbot.append([i18n("上传了")+len(files)+"个文件", summary])
         return gr.Files.update(), chatbot, status
 
     def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
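The block restored above (inside BaseLLMModel's file-upload handling) wires LangChain's map_reduce summarize chain over the documents held in the index built by construct_index. A self-contained sketch of the same pattern follows; the sample documents and the fixed "English" wording are placeholders for illustration, and the calls assume the LangChain 0.0.x-era API that the diff itself uses.

# Standalone illustration of the map_reduce summarization pattern restored above.
# Requires OPENAI_API_KEY in the environment; the document contents are made up.
from langchain.chains.summarize import load_summarize_chain
from langchain.chat_models import ChatOpenAI
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate

prompt_template = (
    "Write a concise summary of the following:\n\n{text}\n\n"
    "CONCISE SUMMARY IN English:"
)
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])

docs = [
    Document(page_content="First chunk of an uploaded file ..."),
    Document(page_content="Second chunk of an uploaded file ..."),
]

chain = load_summarize_chain(
    ChatOpenAI(),
    chain_type="map_reduce",         # summarize each chunk, then combine the partial summaries
    return_intermediate_steps=True,  # also keep the per-chunk summaries
    map_prompt=PROMPT,
    combine_prompt=PROMPT,
)
result = chain({"input_documents": docs}, return_only_outputs=True)
print(result["output_text"])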
modules/models/models.py
CHANGED
@@ -606,9 +606,6 @@ def get_model(
         elif model_type == ModelType.ChuanhuAgent:
             from .ChuanhuAgent import ChuanhuAgent_Client
             model = ChuanhuAgent_Client(model_name, access_key, user_name=user_name)
-        elif model_type == ModelType.PaLM:
-            from .PaLM import PaLM_Client
-            model = PaLM_Client(model_name, user_name=user_name)
         elif model_type == ModelType.Unknown:
             raise ValueError(f"未知模型: {model_name}")
         logging.info(msg)
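For orientation, the reverted feature touched three coordinated places: a display name in ONLINE_MODELS (presets.py), an enum member plus name-based detection in ModelType (base_model.py), and a factory branch in get_model (models.py). The condensed sketch below shows how those pieces route a model name to a client; the simplified bodies are illustrative only and do not reproduce the repository's full logic.

# Condensed, hypothetical sketch of the name -> type -> client routing the revert unwinds.
from enum import Enum


class ModelType(Enum):
    Unknown = -1
    ChuanhuAgent = 7
    PaLM = 8  # enum member removed by this revert

    @classmethod
    def get_type(cls, model_name: str):
        name = model_name.lower()
        if "川虎助理" in name:
            return cls.ChuanhuAgent
        if "palm" in name:  # detection branch removed by this revert
            return cls.PaLM
        return cls.Unknown


def get_model(model_name: str):
    model_type = ModelType.get_type(model_name)
    if model_type == ModelType.PaLM:
        # In the reverted code this imported .PaLM and built PaLM_Client here.
        raise NotImplementedError("PaLM support was reverted as unfinished")
    if model_type == ModelType.Unknown:
        raise ValueError(f"未知模型: {model_name}")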
modules/presets.py
CHANGED
@@ -68,7 +68,6 @@ ONLINE_MODELS = [
     "川虎助理",
     "川虎助理 Pro",
     "xmchat",
-    "Google PaLM",
     "yuanai-1.0-base_10B",
     "yuanai-1.0-translate",
     "yuanai-1.0-dialog",
requirements.txt
CHANGED
@@ -24,4 +24,3 @@ wikipedia
 google.generativeai
 openai
 unstructured
-google-api-python-client