# NOTE(review): the following non-Python residue (HuggingFace Space page header,
# commit hashes, and a line-number gutter) was accidentally captured into this
# source file by a web scrape; commented out so the file parses.
# @title set API key
import os
import getpass
from pprint import pprint
import warnings
warnings.filterwarnings("ignore")
from IPython import get_ipython

# Load the Upstage API key from the environment appropriate to where we run.
# SECURITY: never hard-code the key in source — read it from Colab Secrets
# or a local .env / interactive prompt instead.
if "google.colab" in str(get_ipython()):
    # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets
    from google.colab import userdata
    os.environ["UPSTAGE_API_KEY"] = userdata.get("UPSTAGE_API_KEY")
else:
    # Running locally. Please set the UPSTAGE_API_KEY in the .env file
    from dotenv import load_dotenv
    load_dotenv()
    if "UPSTAGE_API_KEY" not in os.environ:
        # Fall back to an interactive prompt so the key never lands on disk.
        os.environ["UPSTAGE_API_KEY"] = getpass.getpass("Enter your Upstage API key: ")
from pydantic import BaseModel
from langchain_upstage import ChatUpstage, UpstageEmbeddings
from langchain_chroma import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders import JSONLoader
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_upstage import ChatUpstage
from langchain import hub
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import pandas as pd
import gradio as gr
from langchain_core.messages import AIMessage, HumanMessage
# History-aware RAG prompt for answering Korean National Assembly questions.
# The system message pins the model to answer in Korean, strictly from the
# retrieved {context}, and to decline when the context is insufficient.
# Fixed: stray mojibake characters ("ย") that were being sent verbatim to the
# model inside the bullet lines have been removed.
# NOTE(review): the Korean suffix on the "Question:" line appears
# mojibake-corrupted from a bad encoding round-trip — verify the original
# UTF-8 text ("answer concretely and chronologically") against the author.
rag_with_history_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """
You are an intelligent assistant helping the members of the Korean National Assembly with questions related to law and policy. As you will respond to the Korean National Assembly, you must answer politely. Read the given questions carefully and give the answer in Korean ONLY using the following pieces of the context.
Do not try to make up an answer:
 - If the answer to the question cannot be determined from the context alone, say "I cannot determine the answer to that."
 - If the context is empty, just say "I do not know the answer to that."
Answer the question chronologically by issue.
Context: {context}
Question: {input} ๊ตฌ์ฒด์ ์ผ๋ก ์๊ฐ์์ผ๋ก ๋ต๋ณํด์ค.
Answer:
""",
        ),
        # Prior conversation turns are spliced in here on every call.
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)
# Answer-generation chain: prompt -> Upstage Solar chat model -> plain string.
llm = ChatUpstage()
chain = rag_with_history_prompt | llm | StrOutputParser()
# Persisted Chroma vector store, embedded with Upstage's solar embedding model.
DB_PATH = './chroma_db/chroma_db'
db = Chroma(persist_directory=DB_PATH, embedding_function=UpstageEmbeddings(model="solar-embedding-1-large"))
# Retrieve the top-3 most similar documents per query.
retriever = db.as_retriever(search_kwargs={"k": 3})
# Module-level history list; note the Gradio callback below receives its own
# `history` state, so this global is shadowed there and effectively unused.
history = []
def chatbot_response(input_text, history):
    """Answer one user turn and append it to the running chat history.

    Retrieves the top documents for `input_text`, runs the RAG chain with the
    prior turns as context, and returns the updated history twice (once for
    the Chatbot display, once for its state input).
    """
    context_docs = retriever.invoke(input_text)
    answer = chain.invoke(
        {
            "history": history,
            "context": context_docs,
            "input": input_text,
        }
    )
    # Mutate the history in place so Gradio's state stays the same object.
    history.append((input_text, answer))
    return history, history
# Gradio UI: chat window + question textbox + submit button wired to the
# RAG callback. Fixed: the Textbox label string literal was split across two
# source lines by the extraction (a syntax error); it is rejoined here.
# NOTE(review): the Korean UI labels appear mojibake-corrupted from a bad
# encoding round-trip — verify the intended UTF-8 text with the author.
with gr.Blocks() as demo:
    gr.Markdown("## ๊ตญํ ํ์๋ก ๊ธฐ๋ฐ ์์ ํ๋ ์ง์ ๋ฐ ๋๊ตญ๋ฏผ ์๊ถ๋ฆฌ ๋ณด์ฅ ์ฑ๋ด")
    chatbot = gr.Chatbot(label="์ฑ๋ด")
    txt = gr.Textbox(label="์ง๋ฌธ์ ์๋ ฅํ์ธ์")
    submit_btn = gr.Button("์ง๋ฌธํ๊ธฐ")
    # Clicking submit feeds (textbox, chat state) in and the updated history
    # out to both Chatbot outputs, matching chatbot_response's return.
    submit_btn.click(chatbot_response, inputs=[txt, chatbot], outputs=[chatbot, chatbot])

# Launch the app with a public share link.
demo.launch(share=True)