import os
import shutil

import gradio as gr

from clc.langchain_application import LangChainApplication

# Select the GPU to use; change this to match your own setup!!!
os.environ["CUDA_VISIBLE_DEVICES"] = '0'


class LangChainCFG:
    llm_model_name = 'THUDM/chatglm-6b-int4-qe'  # local model path or huggingface remote repo
    embedding_model_name = 'GanymedeNil/text2vec-large-chinese'  # retrieval model path or huggingface remote repo
    vector_store_path = './cache'
    docs_path = './docs'


config = LangChainCFG()
application = LangChainApplication(config)


def get_file_list():
    """Return the names of all files currently in the docs directory."""
    if not os.path.exists("docs"):
        return []
    return [f for f in os.listdir("docs")]


file_list = get_file_list()


def upload_file(file):
    """Move an uploaded file into docs/ and index it in the vector store."""
    if not os.path.exists("docs"):
        os.mkdir("docs")
    filename = os.path.basename(file.name)
    shutil.move(file.name, "docs/" + filename)
    # Insert the newly uploaded file at the head of file_list
    file_list.insert(0, filename)
    application.source_service.add_document("docs/" + filename)
    return gr.Dropdown.update(choices=file_list, value=filename)


def clear_session():
    return '', None


def predict(input, large_language_model, embedding_model, history=None):
    # print(large_language_model, embedding_model)
    print(input)
    if history is None:
        history = []
    resp = application.get_knowledge_based_answer(
        query=input,
        history_len=1,
        temperature=0.1,
        top_p=0.9,
        chat_history=history
    )
    history.append((input, resp['result']))
    # Concatenate the top-2 retrieved source documents for display
    search_text = ''
    for idx, source in enumerate(resp['source_documents'][:2]):
        sep = f'----------[Search result {idx}:]---------------\n'
        search_text += f'{sep}\n{source.page_content}\n\n'
    print(search_text)
    return '', history, history, search_text


block = gr.Blocks()

with block as demo:
    gr.Markdown("""