# @title set API key
import os
import getpass
from pprint import pprint
import warnings

warnings.filterwarnings("ignore")

if "UPSTAGE_API_KEY" not in os.environ:    
    os.environ["UPSTAGE_API_KEY"] = getpass.getpass("Enter your Upstage API key: ")


from pydantic import BaseModel
from langchain_upstage import ChatUpstage, UpstageEmbeddings

from langchain_chroma import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders import JSONLoader
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.messages import AIMessage, HumanMessage
from langchain import hub

import pandas as pd
import gradio as gr

rag_with_history_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """
You are an intelligent assistant helping the members of the Korean National Assembly with questions related to law and policy. Because you are responding to the Korean National Assembly, you must answer politely. Read the given question carefully and give the answer in Korean ONLY, using the following pieces of the context.

Do not try to make up an answer:
 - If the answer to the question cannot be determined from the context alone, say "I cannot determine the answer to that."
 - If the context is empty, just say "I do not know the answer to that."

Answer the question chronologically by issue.

Context: {context}

Question: {input} Please answer specifically, in chronological order.

Answer:
            """,
        ),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

# Solar chat model and the RAG chain: prompt -> LLM -> plain-text output.
llm = ChatUpstage()
chain = rag_with_history_prompt | llm | StrOutputParser()


# Load the persisted Chroma vector store and expose it as a top-3 retriever.
DB_PATH = './chroma_db/chroma_db'
db = Chroma(persist_directory=DB_PATH, embedding_function=UpstageEmbeddings(model="solar-embedding-1-large"))
retriever = db.as_retriever(search_kwargs={"k": 3})
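
# The vector store above is assumed to have been built offline from the meeting-minute
# documents. A minimal sketch of such a build step is given below for reference only;
# the CSV path, encoding, and chunk sizes are illustrative assumptions, not the original pipeline.
def build_vector_store(csv_path="./data/assembly_minutes.csv"):
    # Load the raw minutes, split them into overlapping chunks, then embed and persist them.
    docs = CSVLoader(file_path=csv_path, encoding="utf-8").load()
    splits = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
    Chroma.from_documents(
        splits,
        UpstageEmbeddings(model="solar-embedding-1-large"),
        persist_directory=DB_PATH,
    )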


def chatbot_response(input_text, history):
    # Gradio passes the Chatbot value in as `history`; it is None on the first turn.
    history = history or []

    # MessagesPlaceholder expects chat messages, so rebuild them from the (user, assistant) tuples.
    past_messages = []
    for user_msg, assistant_msg in history:
        past_messages.extend([HumanMessage(content=user_msg), AIMessage(content=assistant_msg)])

    # Retrieve the most relevant chunks and generate an answer grounded in them.
    result_docs = retriever.invoke(input_text)
    response = chain.invoke({"history": past_messages, "context": result_docs, "input": input_text})

    history.append((input_text, response))
    return history
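
# Quick sanity check without the UI (illustrative; assumes the vector store is populated):
# print(chatbot_response("Summarize the main bills discussed in the last session.", [])[-1][1])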

with gr.Blocks() as demo:
    gr.Markdown(
        "## Chatbot for supporting legislative activity and the public's right to know, "
        "based on National Assembly meeting minutes"
    )

    chatbot = gr.Chatbot(label="Chatbot")
    txt = gr.Textbox(label="Enter your question")
    submit_btn = gr.Button("Ask")

    submit_btn.click(chatbot_response, inputs=[txt, chatbot], outputs=chatbot)

# Launch the app; share=True exposes a temporary public link.
demo.launch(share=True)