Spaces:
Runtime error
Runtime error
Commit
•
7a252d3
0
Parent(s):
Duplicate from Kaludi/OpenAI-Chatbot_App
Browse files
Co-authored-by: bkaludi <[email protected]>
- .gitattributes +34 -0
- README.md +14 -0
- app.py +66 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: OpenAI Chatbot App
|
3 |
+
emoji: 🤖
|
4 |
+
colorFrom: green
|
5 |
+
colorTo: purple
|
6 |
+
sdk: streamlit
|
7 |
+
sdk_version: 1.17.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: apache-2.0
|
11 |
+
duplicated_from: Kaludi/OpenAI-Chatbot_App
|
12 |
+
---
|
13 |
+
|
14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
import requests
import json

# --- Page header --------------------------------------------------------
st.title("OpenAI Chatbot Interface")
st.write("Interact with OpenAI's GPT-3 models in real-time using your OpenAI API. Choose from a selection of their best models, set the temperature and max tokens, and start a conversation. Delete the conversation at any time to start fresh.")

# Chat history lives in session state so it survives Streamlit reruns.
if "history" not in st.session_state:
    st.session_state.history = []

# --- Sidebar: request configuration -------------------------------------
st.sidebar.markdown("## Configuration")
KEY = st.sidebar.text_input("Enter Your OpenAI API Key", placeholder="API Key", value="")
models = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text-ada-001']
model = st.sidebar.selectbox("Select a model", models, index=0)

temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7)
max_tokens = st.sidebar.slider("Max Tokens", 0, 4000, 1786)

# Clearing the history starts a fresh conversation on the next rerun.
if st.sidebar.button("Delete Conversation"):
    st.session_state.history = []

# --- Sidebar: model reference notes --------------------------------------
st.sidebar.markdown("## GPT-3")
st.sidebar.markdown("OpenAI's GPT-3 models can understand and generate natural language. They offer four main models with different levels of power suitable for different tasks. Davinci is the most capable model, and Ada is the fastest.")
for model_note in (
    "text-davinci-003 | 4,000 max tokens",
    "text-curie-001 | 2,048 max tokens",
    "text-babbage-001 | 2,048 max tokens",
    "text-ada-001 | 2,048 max tokens",
):
    st.sidebar.markdown(model_note)
28 |
+
def generate_answer(prompt):
    """Send *prompt* (prefixed with prior bot replies) to the OpenAI
    completions endpoint and append both turns to the chat history.

    Reads the module-level widget values: ``KEY`` (API key), ``model``,
    ``temperature`` and ``max_tokens``; appends to
    ``st.session_state.history``. Reports problems through Streamlit
    warnings/errors instead of raising.
    """
    API_KEY = KEY
    # Bail out early: without a key there is no point building the request.
    if not API_KEY:
        st.warning("Please input your API key")
        return
    API_URL = "https://api.openai.com/v1/completions"
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + API_KEY
    }
    # Context sent to the model is only the previous *bot* replies joined
    # by newlines (user turns are excluded — preserving the original
    # behavior of this app).
    previous_messages = [chat['message'] for chat in st.session_state.history if not chat['is_user']]
    previous_messages_text = '\n'.join(previous_messages)
    full_prompt = previous_messages_text + '\n' + prompt if previous_messages_text else prompt
    data = {
        "model": model,
        "prompt": full_prompt,
        "temperature": temperature,
        "max_tokens": max_tokens
    }
    try:
        # A timeout keeps a stalled connection from hanging the Streamlit
        # script run forever; .json() raises ValueError on a non-JSON body.
        response = requests.post(API_URL, headers=headers, data=json.dumps(data), timeout=60)
        result = response.json()
    except (requests.RequestException, ValueError) as exc:
        st.error(f"Request to the OpenAI API failed: {exc}")
        return
    if 'choices' in result:
        message_bot = result['choices'][0]['text'].strip()
        st.session_state.history.append({"message": prompt, "is_user": True})
        st.session_state.history.append({"message": message_bot, "is_user": False})
    else:
        st.error("An error occurred while processing the API response. If using a model other than text-davinci-003, then lower the Max Tokens.")
|
55 |
+
|
56 |
+
# --- Main chat UI --------------------------------------------------------
prompt = st.text_input("Prompt", placeholder="Prompt Here", value="")
if st.button("Submit"):
    generate_answer(prompt)

# Render the conversation: user turns float right, bot turns float left.
with st.spinner("Waiting for the response from the bot..."):
    for chat in st.session_state.history:
        if chat['is_user']:
            avatar = "https://i.ibb.co/zVSbGvb/585e4beacb11b227491c3399.png"
            side = "right"
        else:
            avatar = "https://i.ibb.co/LZFvDND/5841c0bda6515b1e0ad75a9e-1.png"
            side = "left"
        st.markdown(f"<img src='{avatar}' width='50' height='50' style='float:{side};'>", unsafe_allow_html=True)
        st.markdown(f"<div style='float:{side}; padding:10px; background-color: #2E2E2E; border-radius:10px; margin:10px;'>{chat['message']}</div>", unsafe_allow_html=True)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
fastai==2.7.4
huggingface_hub[fastai]
fastcore>=1.3.27
streamlit==1.17.0
requests
|