rayl-aoit committed on
Commit
b97a3c0
·
verified Β·
1 Parent(s): 28f8b34

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -0
app.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from hugchat import hugchat
3
+ from hugchat.login import Login
4
+
5
+ from huggingface_hub import InferenceClient
6
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
7
+
8
+
9
+ # App title
10
+ st.set_page_config(page_title="πŸ€—πŸ’¬ HugChat")
11
+
12
+ # Hugging Face Credentials
13
+ with st.sidebar:
14
+ st.title('πŸ€—πŸ’¬ HugChat')
15
+ if ('EMAIL' in st.secrets) and ('PASS' in st.secrets):
16
+ st.success('HuggingFace Login credentials already provided!', icon='βœ…')
17
+ hf_email = st.secrets['EMAIL']
18
+ hf_pass = st.secrets['PASS']
19
+ else:
20
+ hf_email = st.text_input('Enter E-mail:', type='password')
21
+ hf_pass = st.text_input('Enter password:', type='password')
22
+ if not (hf_email and hf_pass):
23
+ st.warning('Please enter your credentials!', icon='⚠️')
24
+ else:
25
+ st.success('Proceed to entering your prompt message!', icon='πŸ‘‰')
26
+ st.markdown('πŸ“– Learn how to build this app in this [blog](https://blog.streamlit.io/how-to-build-an-llm-powered-chatbot-with-streamlit/)!')
27
+
28
+ # Store LLM generated responses
29
+ if "messages" not in st.session_state.keys():
30
+ st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
31
+
32
+ # Display chat messages
33
+ for message in st.session_state.messages:
34
+ with st.chat_message(message["role"]):
35
+ st.write(message["content"])
36
+
37
+ # Function for generating LLM response
38
+ def generate_response(messages, email, passwd):
39
+ for message in client.chat_completion(
40
+ messages,
41
+ max_tokens=500,
42
+ stream=True,
43
+ temperature=0.7,
44
+ top_p=0.9,
45
+ ):
46
+ token = message.choices[0].delta.content
47
+
48
+ response += token
49
+ yield response
50
+
51
+ # def generate_response(prompt_input, email, passwd):
52
+ # # Hugging Face Login
53
+ # cookie_path_dir = "./cookies/"
54
+ # sign = Login(email, passwd)
55
+ # cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)
56
+ # # Create ChatBot
57
+ # chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
58
+ # return chatbot.chat(prompt_input)
59
+
60
+ # # Function for generating LLM response based on "HuggingFaceH4/zephyr-7b-beta"
61
+ # def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p,):
62
+ # messages = [{"role": "system", "content": system_message}]
63
+
64
+ # for val in history:
65
+ # if val[0]:
66
+ # messages.append({"role": "user", "content": val[0]})
67
+ # if val[1]:
68
+ # messages.append({"role": "assistant", "content": val[1]})
69
+
70
+ # messages.append({"role": "user", "content": message})
71
+
72
+ # response = ""
73
+
74
+ # for message in client.chat_completion(
75
+ # messages,
76
+ # max_tokens=max_tokens,
77
+ # stream=True,
78
+ # temperature=temperature,
79
+ # top_p=top_p,
80
+ # ):
81
+ # token = message.choices[0].delta.content
82
+
83
+ # response += token
84
+ # yield response
85
+
86
+ # User-provided prompt
87
+ if prompt := st.chat_input(disabled=not (hf_email and hf_pass)):
88
+ st.session_state.messages.append({"role": "user", "content": prompt})
89
+ with st.chat_message("user"):
90
+ st.write(prompt)
91
+
92
+ # Generate a new response if last message is not from assistant
93
+ if st.session_state.messages[-1]["role"] != "assistant":
94
+ with st.chat_message("assistant"):
95
+ with st.spinner("Thinking..."):
96
+ response = generate_response(prompt, hf_email, hf_pass)
97
+ st.write(response)
98
+ message = {"role": "assistant", "content": response}
99
+ st.session_state.messages.append(message)