PepperSim committed on
Commit
891bdd7
·
verified ·
1 Parent(s): 74d43d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -59
app.py CHANGED
@@ -1,64 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
  ),
59
- ],
 
 
60
  )
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
- if __name__ == "__main__":
64
- demo.launch()
 
1
# @title set API key
#
# Resolve the Upstage API key into UPSTAGE_API_KEY, in priority order:
#   1. Colab Secrets (when running in Google Colab)
#   2. a local .env file (when running locally)
#   3. an interactive getpass prompt (last resort, never echoed)
#
# SECURITY: the key must never be hard-coded in source — a literal key in a
# committed file is leaked the moment the repo is shared, and must be revoked.
import os
import getpass
from pprint import pprint
import warnings

warnings.filterwarnings("ignore")

from IPython import get_ipython

if "google.colab" in str(get_ipython()):
    # Running in Google Colab: read the key from the Colab Secrets store.
    from google.colab import userdata
    os.environ["UPSTAGE_API_KEY"] = userdata.get("UPSTAGE_API_KEY")
else:
    # Running locally: load UPSTAGE_API_KEY from a .env file if present.
    from dotenv import load_dotenv

    load_dotenv()

if "UPSTAGE_API_KEY" not in os.environ:
    # Neither Colab Secrets nor .env provided the key — ask interactively.
    os.environ["UPSTAGE_API_KEY"] = getpass.getpass("Enter your Upstage API key: ")
23
+
24
+
25
+ from pydantic import BaseModel
26
+ from langchain_upstage import ChatUpstage, UpstageEmbeddings
27
+
28
+ from langchain_chroma import Chroma
29
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
30
+ from langchain_community.document_loaders.csv_loader import CSVLoader
31
+ from langchain_community.document_loaders import JSONLoader
32
+ from langchain_core.prompts import PromptTemplate
33
+ from langchain_core.output_parsers import StrOutputParser
34
+ from langchain_upstage import ChatUpstage
35
+ from langchain import hub
36
+
37
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
38
+
39
+ import pandas as pd
40
+
41
+
42
+
43
  import gradio as gr
44
+ from langchain_core.messages import AIMessage, HumanMessage
45
+
46
# RAG-with-history prompt: a system instruction that injects the retrieved
# {context} and the user {input}, followed by the prior conversation turns
# and the current human message.
# NOTE(review): the "ย" bullets and the Thai-looking runs below appear to be
# mojibake of a non-breaking space and of Korean text ("answer concretely, in
# chronological order") — confirm the intended bytes against the deployed file.
rag_with_history_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """
You are an intelligent assistant helping the members of the Korean National Assembly with questions related to law and policy. As you will respond to the Korean National Assembly, you must answer politely. Read the given questions carefully and give the answer in Korean ONLY using the following pieces of the context.

Do not try to make up an answer:
ย - If the answer to the question cannot be determined from the context alone, say "I cannot determine the answer to that."
ย - If the context is empty, just say "I do not know the answer to that."

Answer the question chronologically by issue.

Context: {context}

Question: {input} ๊ตฌ์ฒด์ ์œผ๋กœ ์‹œ๊ฐ„์ˆœ์œผ๋กœ ๋‹ต๋ณ€ํ•ด์ค˜.

Answer:
""",
        ),
        # Prior (user, assistant) turns are spliced in here at invoke time.
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

# LCEL pipeline: prompt -> Upstage Solar chat model -> plain string.
llm = ChatUpstage()
chain = rag_with_history_prompt | llm | StrOutputParser()


# Reopen the persisted Chroma vector store (built elsewhere) with the same
# embedding model used at index time, and expose a top-3 similarity retriever.
DB_PATH = './chroma_db/chroma_db'
db = Chroma(persist_directory=DB_PATH, embedding_function=UpstageEmbeddings(model="solar-embedding-1-large"))
retriever = db.as_retriever(search_kwargs={"k": 3})
78
+
79
+
80
# Module-level history list (the Gradio Chatbot state is what is actually
# passed into the handler; this stays for compatibility).
history = []

def chatbot_response(input_text, history):
    """Answer one user question with RAG and append the turn to history.

    Returns the updated history twice: once for the Chatbot display and
    once for the Chatbot state output.
    """
    # Fetch the top-k documents relevant to the question.
    docs = retriever.invoke(input_text)
    # Generate an answer conditioned on the chat history and retrieved context.
    answer = chain.invoke({"history": history, "context": docs, "input": input_text})
    history.append((input_text, answer))
    return history, history
89
+
90
# Minimal Gradio UI: a markdown title, a chat window, a question textbox,
# and a submit button wired to chatbot_response.
# NOTE(review): the label/heading strings look like mojibake of Korean UI
# text — confirm the intended bytes against the deployed file.
with gr.Blocks() as demo:
    gr.Markdown("## ๊ตญํšŒ ํšŒ์˜๋ก ๊ธฐ๋ฐ˜ ์˜์ •ํ™œ๋™ ์ง€์› ๋ฐ ๋Œ€๊ตญ๋ฏผ ์•Œ๊ถŒ๋ฆฌ ๋ณด์žฅ ์ฑ—๋ด‡")

    chatbot = gr.Chatbot(label="์ฑ—๋ด‡")
    txt = gr.Textbox(label="์งˆ๋ฌธ์„ ์ž…๋ ฅํ•˜์„ธ์š”")
    submit_btn = gr.Button("์งˆ๋ฌธํ•˜๊ธฐ")

    # The Chatbot component doubles as the history state: it is both an
    # input to the handler and both of its outputs.
    submit_btn.click(chatbot_response, inputs=[txt, chatbot], outputs=[chatbot, chatbot])

# Run the app (share=True also creates a temporary public Gradio link).
demo.launch(share=True)
101
+
102
+
103